Datasets:
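The commit histories for the dataset are collected with the script below. For each repository listed in repos.csv, it queries the GitHub REST API for the commits that touched a given file path and writes one JSON record per repository to standard output.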
import csv
import json
import os
import sys

import requests
import requests_ratelimiter
import tqdm


def get_commits(session, repo, path):
    """Fetch the commit history of a single file from the GitHub API."""
    query = {"path": path}
    headers = {
        "Accept": "application/vnd.github+json",
        "Authorization": "Bearer " + os.environ["GITHUB_TOKEN"],
        "X-GitHub-Api-Version": "2022-11-28",
    }
    try:
        r = session.get(
            "https://api.github.com/repos/" + repo + "/commits",
            params=query,
            headers=headers,
            timeout=10,
        )
    except (
        requests.exceptions.ConnectionError,
        requests.exceptions.ReadTimeout,
    ):
        # Skip on request error
        return None
    else:
        # Extract the hash and date of each commit; only the first page
        # of results (30 commits by default) is fetched
        obj = r.json()
        if isinstance(obj, list):
            commits = []
            for c in obj:
                try:
                    commits.append(
                        {"sha": c["sha"], "date": c["commit"]["committer"]["date"]}
                    )
                except KeyError:
                    pass
            return commits
        else:
            return None


def main():
    # Initialize a new session, rate-limited to 2 requests per second
    session = requests.Session()
    adapter = requests_ratelimiter.LimiterAdapter(per_second=2)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    with open("repos.csv", "r") as csvfile:
        # Count the number of rows (for the progress bar) and reset
        reader = csv.DictReader(csvfile)
        rows = sum(1 for row in reader)
        csvfile.seek(0)
        reader = csv.DictReader(csvfile)
        for row in tqdm.tqdm(reader, total=rows):
            # Remove github.com/ from the beginning and fetch commits
            repo = row["repository"].split("/", maxsplit=1)[1]
            commits = get_commits(session, repo, row["path"])
            # Write the collected commits as one JSON object per line
            if commits:
                obj = {
                    "repository": repo,
                    "path": row["path"],
                    "repoStars": row["repoStars"],
                    "repoLastFetched": row["repoLastFetched"],
                    "commits": list(commits),
                }
                json.dump(obj, sys.stdout)
                sys.stdout.write("\n")


if __name__ == "__main__":
    main()
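For reference, a minimal sketch of how the script is driven, assuming it is saved as get_commits.py (the filename, token, and CSV contents are illustrative). repos.csv must provide the repository, path, repoStars, and repoLastFetched columns read above:

    # repos.csv (illustrative row)
    repository,path,repoStars,repoLastFetched
    github.com/example/project,data/model.py,42,2024-01-15T00:00:00Z

    $ GITHUB_TOKEN=ghp_xxxx python get_commits.py > commits.jsonl

Each output line is then a JSON object of the form (sha and date elided):

    {"repository": "example/project", "path": "data/model.py", "repoStars": "42", "repoLastFetched": "2024-01-15T00:00:00Z", "commits": [{"sha": "...", "date": "..."}]}

Note that repoStars and repoLastFetched remain strings, since csv.DictReader yields all fields as text.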