Datasets:
import argparse
import csv
import json
import os
import sys
import time

import requests
import requests_ratelimiter
import tqdm


def slurp(outfile):
    # Sourcegraph query: every .json file whose content opens with a
    # top-level "$schema" key pointing at json-schema.org.
    query = (
        'count:all file:\.json$ content:\'{\n "$schema": "https://json-schema.org/\''
    )
    # Rate-limit outgoing requests to two per second.
    session = requests.Session()
    adapter = requests_ratelimiter.LimiterAdapter(per_second=2)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    matches = 0
    with session.get(
        "https://sourcegraph.com/.api/search/stream",
        params={"q": query},
        headers={
            "Accept": "text/event-stream",
            "Authorization": "token " + os.environ["SRC_ACCESS_TOKEN"],
        },
        stream=True,
    ) as resp, open(outfile, "w") as f:
        pbar = tqdm.tqdm()
        writer = csv.writer(f)
        writer.writerow(
            ["repository", "repoStars", "repoLastFetched", "commit", "path"]
        )
        # Parse the server-sent event stream: an "event:" line names the
        # event, and the following "data:" line carries its JSON payload.
        event = None
        for line in resp.iter_lines():
            if not line:
                continue
            time.sleep(0.1)
            line = line.decode("utf-8").strip()
            if line.startswith("event:"):
                event = line.split(":", maxsplit=1)[1].strip()
                if event != "matches":
                    sys.stderr.write(event + "\n")
            elif line.startswith("data:"):
                data = line.split(":", maxsplit=1)[1].strip()
                if event == "filters":
                    # We don't need to record filtering information
                    continue
                if event == "matches":
                    # Write one CSV row per match in this batch.
                    record = [
                        (
                            m["repository"],
                            m.get("repoStars", ""),
                            m.get("repoLastFetched", ""),
                            m["commit"],
                            m["path"],
                        )
                        for m in json.loads(data)
                    ]
                    writer.writerows(record)
                    matches += len(record)
                    pbar.update(len(record))
                elif event == "progress":
                    sys.stderr.write(data + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--outfile", required=True)
    args = parser.parse_args()
    slurp(args.outfile)
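The script writes one CSV row per matched file. As a minimal sketch of consuming that output (the script name slurp.py and the output file schemas.csv are hypothetical; the collector would be run first with a Sourcegraph token in SRC_ACCESS_TOKEN, e.g. SRC_ACCESS_TOKEN=... python slurp.py --outfile schemas.csv), the rows can be read back with csv.DictReader:

# Minimal sketch, assuming the collector above has already produced
# "schemas.csv" (hypothetical filename).
import csv

with open("schemas.csv", newline="") as f:
    rows = list(csv.DictReader(f))

# Each row carries the columns written by the collector:
# repository, repoStars, repoLastFetched, commit, path.
print(len(rows), "matches")
print(rows[0]["repository"], rows[0]["path"])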