Datasets:

Languages:
English
ArXiv:
License:
mattdeitke committed on
Commit
e3778f4
1 Parent(s): 495fbae

refactor github api

Browse files
Files changed (1) hide show
  1. objaverse_xl/github.py +52 -18
objaverse_xl/github.py CHANGED
@@ -131,7 +131,7 @@ class GitHubDownloader(ObjaverseSource):
131
  handle_missing_object: Optional[Callable],
132
  handle_new_object: Optional[Callable],
133
  commit_hash: Optional[str],
134
- ) -> List[Dict[str, str]]:
135
  """Process a single repo.
136
 
137
  Args:
@@ -144,12 +144,13 @@ class GitHubDownloader(ObjaverseSource):
144
  {and the rest of the args are the same as download_objects}
145
 
146
  Returns:
147
- List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
148
- and "sha256" for each downloaded object.
149
  """
150
  # NOTE: assuming that the user has already checked that the repo doesn't exist,
151
  org, repo = repo_id.split("/")
152
 
 
153
  with tempfile.TemporaryDirectory() as temp_dir:
154
  # clone the repo to a temp directory
155
  target_directory = os.path.join(temp_dir, repo)
@@ -165,7 +166,7 @@ class GitHubDownloader(ObjaverseSource):
165
  sha256=sha256,
166
  metadata=dict(github_organization=org, github_repo=repo),
167
  )
168
- return []
169
 
170
  # use the commit hash if specified
171
  repo_commit_hash = self._get_commit_hash_from_local_git_dir(
@@ -225,6 +226,7 @@ class GitHubDownloader(ObjaverseSource):
225
 
226
  # handle the object under different conditions
227
  if github_url in expected_objects:
 
228
  if expected_objects[github_url] == file_hash:
229
  if handle_found_object is not None:
230
  handle_found_object(
@@ -265,8 +267,12 @@ class GitHubDownloader(ObjaverseSource):
265
  # remove the .git directory
266
  shutil.rmtree(os.path.join(target_directory, ".git"))
267
 
268
- if save_repo_format is not None:
269
- logger.debug(f"Saving as {save_repo_format}")
 
 
 
 
270
  # save the repo to a zip file
271
  if save_repo_format == "zip":
272
  shutil.make_archive(target_directory, "zip", target_directory)
@@ -295,10 +301,20 @@ class GitHubDownloader(ObjaverseSource):
295
  os.path.join(temp_dir, f"{repo}.{save_repo_format}"),
296
  os.path.join(dirname, f"{repo}.{save_repo_format}"),
297
  )
 
 
 
 
 
298
  else:
299
  # move the repo to the correct location (with put)
300
  fs.put(target_directory, dirname, recursive=True)
301
 
 
 
 
 
 
302
  # get each object that was missing from the expected objects
303
  if handle_missing_object is not None:
304
  obtained_urls = {x["fileIdentifier"] for x in file_hashes}
@@ -310,7 +326,7 @@ class GitHubDownloader(ObjaverseSource):
310
  metadata=dict(github_organization=org, github_repo=repo),
311
  )
312
 
313
- return file_hashes
314
 
315
  def _list_files(self, root_dir: str) -> List[str]:
316
  return [
@@ -344,7 +360,7 @@ class GitHubDownloader(ObjaverseSource):
344
  commit_hash = result.stdout.strip().decode("utf-8")
345
  return commit_hash
346
 
347
- def _parallel_process_repo(self, args) -> List[Dict[str, str]]:
348
  """Helper function to process a repo in parallel.
349
 
350
  Note: This function is used to parallelize the processing of repos. It is not
@@ -354,8 +370,8 @@ class GitHubDownloader(ObjaverseSource):
354
  args (Tuple): Tuple of arguments to pass to _process_repo.
355
 
356
  Returns:
357
- List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
358
- and "sha256" for each downloaded object.
359
  """
360
 
361
  (
@@ -391,7 +407,7 @@ class GitHubDownloader(ObjaverseSource):
391
  def download_objects(
392
  self,
393
  objects: pd.DataFrame,
394
- download_dir: str = "~/.objaverse",
395
  processes: Optional[int] = None,
396
  handle_found_object: Optional[Callable] = None,
397
  handle_modified_object: Optional[Callable] = None,
@@ -400,15 +416,17 @@ class GitHubDownloader(ObjaverseSource):
400
  save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]] = None,
401
  handle_new_object: Optional[Callable] = None,
402
  **kwargs,
403
- ) -> List[Dict[str, str]]:
404
  """Download the specified GitHub objects.
405
 
406
  Args:
407
  objects (pd.DataFrame): GitHub objects to download. Must have columns for
408
  the object "fileIdentifier" and "sha256". Use the `get_annotations`
409
  function to get the metadata.
410
- download_dir (str, optional): Directory to download the GitHub objects to.
411
- Supports all file systems supported by fsspec. Defaults to
 
 
412
  "~/.objaverse".
413
  processes (Optional[int], optional): Number of processes to use for
414
  downloading. If None, will use the number of CPUs on the machine.
@@ -467,12 +485,23 @@ class GitHubDownloader(ObjaverseSource):
467
  GitHub organization and repo names.
468
  Return is not used. Defaults to None.
469
 
 
 
 
 
470
  Returns:
471
- List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
472
- and "sha256" for each downloaded object.
473
  """
474
  if processes is None:
475
  processes = multiprocessing.cpu_count()
 
 
 
 
 
 
 
476
 
477
  base_download_dir = os.path.join(download_dir, "github")
478
  fs, path = fsspec.core.url_to_fs(base_download_dir)
@@ -558,7 +587,12 @@ class GitHubDownloader(ObjaverseSource):
558
  tqdm(
559
  pool.imap_unordered(self._parallel_process_repo, all_args),
560
  total=len(all_args),
 
561
  )
562
  )
563
- out_list = [item for sublist in out for item in sublist]
564
- return out_list
 
 
 
 
 
131
  handle_missing_object: Optional[Callable],
132
  handle_new_object: Optional[Callable],
133
  commit_hash: Optional[str],
134
+ ) -> Dict[str, Optional[str]]:
135
  """Process a single repo.
136
 
137
  Args:
 
144
  {and the rest of the args are the same as download_objects}
145
 
146
  Returns:
147
+ Dict[str, Optional[str]]: A dictionary that maps from the "fileIdentifier" to the
148
+ path of the downloaded object.
149
  """
150
  # NOTE: assuming that the user has already checked that the repo doesn't exist,
151
  org, repo = repo_id.split("/")
152
 
153
+ out = {}
154
  with tempfile.TemporaryDirectory() as temp_dir:
155
  # clone the repo to a temp directory
156
  target_directory = os.path.join(temp_dir, repo)
 
166
  sha256=sha256,
167
  metadata=dict(github_organization=org, github_repo=repo),
168
  )
169
+ return {}
170
 
171
  # use the commit hash if specified
172
  repo_commit_hash = self._get_commit_hash_from_local_git_dir(
 
226
 
227
  # handle the object under different conditions
228
  if github_url in expected_objects:
229
+ out[github_url] = file[len(target_directory) + 1 :]
230
  if expected_objects[github_url] == file_hash:
231
  if handle_found_object is not None:
232
  handle_found_object(
 
267
  # remove the .git directory
268
  shutil.rmtree(os.path.join(target_directory, ".git"))
269
 
270
+ if save_repo_format is None:
271
+ # remove the relative path, since it's not downloaded
272
+ for file_identifier in out:
273
+ out[file_identifier] = None
274
+ else:
275
+ logger.debug(f"Saving {org}/{repo} as {save_repo_format}")
276
  # save the repo to a zip file
277
  if save_repo_format == "zip":
278
  shutil.make_archive(target_directory, "zip", target_directory)
 
301
  os.path.join(temp_dir, f"{repo}.{save_repo_format}"),
302
  os.path.join(dirname, f"{repo}.{save_repo_format}"),
303
  )
304
+
305
+ for file_identifier in out:
306
+ out[file_identifier] = os.path.join(
307
+ dirname, f"{repo}.{save_repo_format}", out[file_identifier]
308
+ )
309
  else:
310
  # move the repo to the correct location (with put)
311
  fs.put(target_directory, dirname, recursive=True)
312
 
313
+ for file_identifier in out:
314
+ out[file_identifier] = os.path.join(
315
+ dirname, repo, out[file_identifier]
316
+ )
317
+
318
  # get each object that was missing from the expected objects
319
  if handle_missing_object is not None:
320
  obtained_urls = {x["fileIdentifier"] for x in file_hashes}
 
326
  metadata=dict(github_organization=org, github_repo=repo),
327
  )
328
 
329
+ return out
330
 
331
  def _list_files(self, root_dir: str) -> List[str]:
332
  return [
 
360
  commit_hash = result.stdout.strip().decode("utf-8")
361
  return commit_hash
362
 
363
+ def _parallel_process_repo(self, args) -> Dict[str, Optional[str]]:
364
  """Helper function to process a repo in parallel.
365
 
366
  Note: This function is used to parallelize the processing of repos. It is not
 
370
  args (Tuple): Tuple of arguments to pass to _process_repo.
371
 
372
  Returns:
373
+ Dict[str, Optional[str]]: A dictionary that maps from the "fileIdentifier"
374
+ to the path of the downloaded object.
375
  """
376
 
377
  (
 
407
  def download_objects(
408
  self,
409
  objects: pd.DataFrame,
410
+ download_dir: Optional[str] = "~/.objaverse",
411
  processes: Optional[int] = None,
412
  handle_found_object: Optional[Callable] = None,
413
  handle_modified_object: Optional[Callable] = None,
 
416
  save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]] = None,
417
  handle_new_object: Optional[Callable] = None,
418
  **kwargs,
419
+ ) -> Dict[str, Optional[str]]:
420
  """Download the specified GitHub objects.
421
 
422
  Args:
423
  objects (pd.DataFrame): GitHub objects to download. Must have columns for
424
  the object "fileIdentifier" and "sha256". Use the `get_annotations`
425
  function to get the metadata.
426
+ download_dir (Optional[str], optional): Directory to download the GitHub
427
+ objects to. Supports all file systems supported by fsspec. If None, the
428
+ repository will not be saved (note that save_repo_format must also be
429
+ None in this case, otherwise a ValueError is raised). Defaults to
430
  "~/.objaverse".
431
  processes (Optional[int], optional): Number of processes to use for
432
  downloading. If None, will use the number of CPUs on the machine.
 
485
  GitHub organization and repo names.
486
  Return is not used. Defaults to None.
487
 
488
+ Raises:
489
+ ValueError: If download_dir is None and save_repo_format is not None.
490
+ Otherwise, we don't know where to save the repo!
491
+
492
  Returns:
493
+ Dict[str, Optional[str]]: A dictionary that maps from the "fileIdentifier" to the
494
+ path of the downloaded object.
495
  """
496
  if processes is None:
497
  processes = multiprocessing.cpu_count()
498
+ if download_dir is None:
499
+ if save_repo_format is not None:
500
+ raise ValueError(
501
+ f"If {save_repo_format=} is not None, {download_dir=} must be specified."
502
+ )
503
+ # path doesn't matter if we're not saving the repo
504
+ download_dir = "~/.objaverse"
505
 
506
  base_download_dir = os.path.join(download_dir, "github")
507
  fs, path = fsspec.core.url_to_fs(base_download_dir)
 
587
  tqdm(
588
  pool.imap_unordered(self._parallel_process_repo, all_args),
589
  total=len(all_args),
590
+ desc="Downloading repositories",
591
  )
592
  )
593
+
594
+ out_dict = {}
595
+ for x in out:
596
+ out_dict.update(x)
597
+
598
+ return out_dict