diff options
Diffstat (limited to 'subcmds')
| -rw-r--r-- | subcmds/init.py | 21 | ||||
| -rw-r--r-- | subcmds/sync.py | 176 |
2 files changed, 176 insertions, 21 deletions
diff --git a/subcmds/init.py b/subcmds/init.py index 1c809ab4..eaa6da50 100644 --- a/subcmds/init.py +++ b/subcmds/init.py | |||
| @@ -115,6 +115,13 @@ to update the working directory files. | |||
| 115 | g.add_option('--depth', type='int', default=None, | 115 | g.add_option('--depth', type='int', default=None, |
| 116 | dest='depth', | 116 | dest='depth', |
| 117 | help='create a shallow clone with given depth; see git clone') | 117 | help='create a shallow clone with given depth; see git clone') |
| 118 | g.add_option('--partial-clone', action='store_true', | ||
| 119 | dest='partial_clone', | ||
| 120 | help='perform partial clone (https://git-scm.com/' | ||
| 121 | 'docs/gitrepository-layout#_code_partialclone_code)') | ||
| 122 | g.add_option('--clone-filter', action='store', default='blob:none', | ||
| 123 | dest='clone_filter', | ||
| 124 | help='filter for use with --partial-clone [default: %default]') | ||
| 118 | g.add_option('--archive', | 125 | g.add_option('--archive', |
| 119 | dest='archive', action='store_true', | 126 | dest='archive', action='store_true', |
| 120 | help='checkout an archive instead of a git repository for ' | 127 | help='checkout an archive instead of a git repository for ' |
| @@ -253,13 +260,25 @@ to update the working directory files. | |||
| 253 | 'in another location.', file=sys.stderr) | 260 | 'in another location.', file=sys.stderr) |
| 254 | sys.exit(1) | 261 | sys.exit(1) |
| 255 | 262 | ||
| 263 | if opt.partial_clone: | ||
| 264 | if opt.mirror: | ||
| 265 | print('fatal: --mirror and --partial-clone are mutually exclusive', | ||
| 266 | file=sys.stderr) | ||
| 267 | sys.exit(1) | ||
| 268 | m.config.SetString('repo.partialclone', 'true') | ||
| 269 | if opt.clone_filter: | ||
| 270 | m.config.SetString('repo.clonefilter', opt.clone_filter) | ||
| 271 | else: | ||
| 272 | opt.clone_filter = None | ||
| 273 | |||
| 256 | if opt.submodules: | 274 | if opt.submodules: |
| 257 | m.config.SetString('repo.submodules', 'true') | 275 | m.config.SetString('repo.submodules', 'true') |
| 258 | 276 | ||
| 259 | if not m.Sync_NetworkHalf(is_new=is_new, quiet=opt.quiet, | 277 | if not m.Sync_NetworkHalf(is_new=is_new, quiet=opt.quiet, |
| 260 | clone_bundle=not opt.no_clone_bundle, | 278 | clone_bundle=not opt.no_clone_bundle, |
| 261 | current_branch_only=opt.current_branch_only, | 279 | current_branch_only=opt.current_branch_only, |
| 262 | no_tags=opt.no_tags, submodules=opt.submodules): | 280 | no_tags=opt.no_tags, submodules=opt.submodules, |
| 281 | clone_filter=opt.clone_filter): | ||
| 263 | r = m.GetRemote(m.remote.name) | 282 | r = m.GetRemote(m.remote.name) |
| 264 | print('fatal: cannot obtain manifest %s' % r.url, file=sys.stderr) | 283 | print('fatal: cannot obtain manifest %s' % r.url, file=sys.stderr) |
| 265 | 284 | ||
diff --git a/subcmds/sync.py b/subcmds/sync.py index 02cd3879..b752cfbe 100644 --- a/subcmds/sync.py +++ b/subcmds/sync.py | |||
| @@ -85,6 +85,9 @@ class _FetchError(Exception): | |||
| 85 | """Internal error thrown in _FetchHelper() when we don't want stack trace.""" | 85 | """Internal error thrown in _FetchHelper() when we don't want stack trace.""" |
| 86 | pass | 86 | pass |
| 87 | 87 | ||
| 88 | class _CheckoutError(Exception): | ||
| 89 | """Internal error thrown in _CheckoutOne() when we don't want stack trace.""" | ||
| 90 | |||
| 88 | class Sync(Command, MirrorSafeCommand): | 91 | class Sync(Command, MirrorSafeCommand): |
| 89 | jobs = 1 | 92 | jobs = 1 |
| 90 | common = True | 93 | common = True |
| @@ -266,7 +269,7 @@ later is required to fix a server side protocol bug. | |||
| 266 | help=SUPPRESS_HELP) | 269 | help=SUPPRESS_HELP) |
| 267 | 270 | ||
| 268 | def _FetchProjectList(self, opt, projects, sem, *args, **kwargs): | 271 | def _FetchProjectList(self, opt, projects, sem, *args, **kwargs): |
| 269 | """Main function of the fetch threads when jobs are > 1. | 272 | """Main function of the fetch threads. |
| 270 | 273 | ||
| 271 | Delegates most of the work to _FetchHelper. | 274 | Delegates most of the work to _FetchHelper. |
| 272 | 275 | ||
| @@ -286,7 +289,8 @@ later is required to fix a server side protocol bug. | |||
| 286 | finally: | 289 | finally: |
| 287 | sem.release() | 290 | sem.release() |
| 288 | 291 | ||
| 289 | def _FetchHelper(self, opt, project, lock, fetched, pm, err_event): | 292 | def _FetchHelper(self, opt, project, lock, fetched, pm, err_event, |
| 293 | clone_filter): | ||
| 290 | """Fetch git objects for a single project. | 294 | """Fetch git objects for a single project. |
| 291 | 295 | ||
| 292 | Args: | 296 | Args: |
| @@ -300,6 +304,7 @@ later is required to fix a server side protocol bug. | |||
| 300 | lock held). | 304 | lock held). |
| 301 | err_event: We'll set this event in the case of an error (after printing | 305 | err_event: We'll set this event in the case of an error (after printing |
| 302 | out info about the error). | 306 | out info about the error). |
| 307 | clone_filter: Filter for use in a partial clone. | ||
| 303 | 308 | ||
| 304 | Returns: | 309 | Returns: |
| 305 | Whether the fetch was successful. | 310 | Whether the fetch was successful. |
| @@ -312,7 +317,6 @@ later is required to fix a server side protocol bug. | |||
| 312 | 317 | ||
| 313 | # Encapsulate everything in a try/except/finally so that: | 318 | # Encapsulate everything in a try/except/finally so that: |
| 314 | # - We always set err_event in the case of an exception. | 319 | # - We always set err_event in the case of an exception. |
| 315 | # - We always make sure we call sem.release(). | ||
| 316 | # - We always make sure we unlock the lock if we locked it. | 320 | # - We always make sure we unlock the lock if we locked it. |
| 317 | start = time.time() | 321 | start = time.time() |
| 318 | success = False | 322 | success = False |
| @@ -325,7 +329,8 @@ later is required to fix a server side protocol bug. | |||
| 325 | clone_bundle=not opt.no_clone_bundle, | 329 | clone_bundle=not opt.no_clone_bundle, |
| 326 | no_tags=opt.no_tags, archive=self.manifest.IsArchive, | 330 | no_tags=opt.no_tags, archive=self.manifest.IsArchive, |
| 327 | optimized_fetch=opt.optimized_fetch, | 331 | optimized_fetch=opt.optimized_fetch, |
| 328 | prune=opt.prune) | 332 | prune=opt.prune, |
| 333 | clone_filter=clone_filter) | ||
| 329 | self._fetch_times.Set(project, time.time() - start) | 334 | self._fetch_times.Set(project, time.time() - start) |
| 330 | 335 | ||
| 331 | # Lock around all the rest of the code, since printing, updating a set | 336 | # Lock around all the rest of the code, since printing, updating a set |
| @@ -389,7 +394,8 @@ later is required to fix a server side protocol bug. | |||
| 389 | lock=lock, | 394 | lock=lock, |
| 390 | fetched=fetched, | 395 | fetched=fetched, |
| 391 | pm=pm, | 396 | pm=pm, |
| 392 | err_event=err_event) | 397 | err_event=err_event, |
| 398 | clone_filter=self.manifest.CloneFilter) | ||
| 393 | if self.jobs > 1: | 399 | if self.jobs > 1: |
| 394 | t = _threading.Thread(target = self._FetchProjectList, | 400 | t = _threading.Thread(target = self._FetchProjectList, |
| 395 | kwargs = kwargs) | 401 | kwargs = kwargs) |
| @@ -416,6 +422,148 @@ later is required to fix a server side protocol bug. | |||
| 416 | 422 | ||
| 417 | return fetched | 423 | return fetched |
| 418 | 424 | ||
| 425 | def _CheckoutWorker(self, opt, sem, project, *args, **kwargs): | ||
| 426 | """Main function of the fetch threads. | ||
| 427 | |||
| 428 | Delegates most of the work to _CheckoutOne. | ||
| 429 | |||
| 430 | Args: | ||
| 431 | opt: Program options returned from optparse. See _Options(). | ||
| 432 | project: Project object for the project to checkout. | ||
| 433 | sem: We'll release() this semaphore when we exit so that another thread | ||
| 434 | can be started up. | ||
| 435 | *args, **kwargs: Remaining arguments to pass to _CheckoutOne. See the | ||
| 436 | _CheckoutOne docstring for details. | ||
| 437 | """ | ||
| 438 | try: | ||
| 439 | success = self._CheckoutOne(opt, project, *args, **kwargs) | ||
| 440 | if not success: | ||
| 441 | sys.exit(1) | ||
| 442 | finally: | ||
| 443 | sem.release() | ||
| 444 | |||
| 445 | def _CheckoutOne(self, opt, project, lock, pm, err_event): | ||
| 446 | """Checkout work tree for one project | ||
| 447 | |||
| 448 | Args: | ||
| 449 | opt: Program options returned from optparse. See _Options(). | ||
| 450 | project: Project object for the project to checkout. | ||
| 451 | lock: Lock for accessing objects that are shared amongst multiple | ||
| 452 | _CheckoutWorker() threads. | ||
| 453 | pm: Instance of a Progress object. We will call pm.update() (with our | ||
| 454 | lock held). | ||
| 455 | err_event: We'll set this event in the case of an error (after printing | ||
| 456 | out info about the error). | ||
| 457 | |||
| 458 | Returns: | ||
| 459 | Whether the checkout was successful. | ||
| 460 | """ | ||
| 461 | # We'll set to true once we've locked the lock. | ||
| 462 | did_lock = False | ||
| 463 | |||
| 464 | if not opt.quiet: | ||
| 465 | print('Checking out project %s' % project.name) | ||
| 466 | |||
| 467 | # Encapsulate everything in a try/except/finally so that: | ||
| 468 | # - We always set err_event in the case of an exception. | ||
| 469 | # - We always make sure we unlock the lock if we locked it. | ||
| 470 | start = time.time() | ||
| 471 | syncbuf = SyncBuffer(self.manifest.manifestProject.config, | ||
| 472 | detach_head=opt.detach_head) | ||
| 473 | success = False | ||
| 474 | try: | ||
| 475 | try: | ||
| 476 | project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync) | ||
| 477 | success = syncbuf.Finish() | ||
| 478 | |||
| 479 | # Lock around all the rest of the code, since printing, updating a set | ||
| 480 | # and Progress.update() are not thread safe. | ||
| 481 | lock.acquire() | ||
| 482 | did_lock = True | ||
| 483 | |||
| 484 | if not success: | ||
| 485 | err_event.set() | ||
| 486 | print('error: Cannot checkout %s' % (project.name), | ||
| 487 | file=sys.stderr) | ||
| 488 | raise _CheckoutError() | ||
| 489 | |||
| 490 | pm.update() | ||
| 491 | except _CheckoutError: | ||
| 492 | pass | ||
| 493 | except Exception as e: | ||
| 494 | print('error: Cannot checkout %s: %s: %s' % | ||
| 495 | (project.name, type(e).__name__, str(e)), | ||
| 496 | file=sys.stderr) | ||
| 497 | err_event.set() | ||
| 498 | raise | ||
| 499 | finally: | ||
| 500 | if did_lock: | ||
| 501 | lock.release() | ||
| 502 | finish = time.time() | ||
| 503 | self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL, | ||
| 504 | start, finish, success) | ||
| 505 | |||
| 506 | return success | ||
| 507 | |||
| 508 | def _Checkout(self, all_projects, opt): | ||
| 509 | """Checkout projects listed in all_projects | ||
| 510 | |||
| 511 | Args: | ||
| 512 | all_projects: List of all projects that should be checked out. | ||
| 513 | opt: Program options returned from optparse. See _Options(). | ||
| 514 | """ | ||
| 515 | |||
| 516 | # Perform checkouts in multiple threads when we are using partial clone. | ||
| 517 | # Without partial clone, all needed git objects are already downloaded, | ||
| 518 | # in this situation it's better to use only one process because the checkout | ||
| 519 | # would be mostly disk I/O; with partial clone, the objects are only | ||
| 520 | # downloaded when demanded (at checkout time), which is similar to the | ||
| 521 | # Sync_NetworkHalf case and parallelism would be helpful. | ||
| 522 | if self.manifest.CloneFilter: | ||
| 523 | syncjobs = self.jobs | ||
| 524 | else: | ||
| 525 | syncjobs = 1 | ||
| 526 | |||
| 527 | lock = _threading.Lock() | ||
| 528 | pm = Progress('Syncing work tree', len(all_projects)) | ||
| 529 | |||
| 530 | threads = set() | ||
| 531 | sem = _threading.Semaphore(syncjobs) | ||
| 532 | err_event = _threading.Event() | ||
| 533 | |||
| 534 | for project in all_projects: | ||
| 535 | # Check for any errors before running any more tasks. | ||
| 536 | # ...we'll let existing threads finish, though. | ||
| 537 | if err_event.isSet() and not opt.force_broken: | ||
| 538 | break | ||
| 539 | |||
| 540 | sem.acquire() | ||
| 541 | if project.worktree: | ||
| 542 | kwargs = dict(opt=opt, | ||
| 543 | sem=sem, | ||
| 544 | project=project, | ||
| 545 | lock=lock, | ||
| 546 | pm=pm, | ||
| 547 | err_event=err_event) | ||
| 548 | if syncjobs > 1: | ||
| 549 | t = _threading.Thread(target=self._CheckoutWorker, | ||
| 550 | kwargs=kwargs) | ||
| 551 | # Ensure that Ctrl-C will not freeze the repo process. | ||
| 552 | t.daemon = True | ||
| 553 | threads.add(t) | ||
| 554 | t.start() | ||
| 555 | else: | ||
| 556 | self._CheckoutWorker(**kwargs) | ||
| 557 | |||
| 558 | for t in threads: | ||
| 559 | t.join() | ||
| 560 | |||
| 561 | pm.end() | ||
| 562 | # If we saw an error, exit with code 1 so that other scripts can check. | ||
| 563 | if err_event.isSet(): | ||
| 564 | print('\nerror: Exited sync due to checkout errors', file=sys.stderr) | ||
| 565 | sys.exit(1) | ||
| 566 | |||
| 419 | def _GCProjects(self, projects): | 567 | def _GCProjects(self, projects): |
| 420 | gc_gitdirs = {} | 568 | gc_gitdirs = {} |
| 421 | for project in projects: | 569 | for project in projects: |
| @@ -746,7 +894,8 @@ later is required to fix a server side protocol bug. | |||
| 746 | current_branch_only=opt.current_branch_only, | 894 | current_branch_only=opt.current_branch_only, |
| 747 | no_tags=opt.no_tags, | 895 | no_tags=opt.no_tags, |
| 748 | optimized_fetch=opt.optimized_fetch, | 896 | optimized_fetch=opt.optimized_fetch, |
| 749 | submodules=self.manifest.HasSubmodules) | 897 | submodules=self.manifest.HasSubmodules, |
| 898 | clone_filter=self.manifest.CloneFilter) | ||
| 750 | finish = time.time() | 899 | finish = time.time() |
| 751 | self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK, | 900 | self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK, |
| 752 | start, finish, success) | 901 | start, finish, success) |
| @@ -846,20 +995,7 @@ later is required to fix a server side protocol bug. | |||
| 846 | if self.UpdateProjectList(opt): | 995 | if self.UpdateProjectList(opt): |
| 847 | sys.exit(1) | 996 | sys.exit(1) |
| 848 | 997 | ||
| 849 | syncbuf = SyncBuffer(mp.config, | 998 | self._Checkout(all_projects, opt) |
| 850 | detach_head = opt.detach_head) | ||
| 851 | pm = Progress('Syncing work tree', len(all_projects)) | ||
| 852 | for project in all_projects: | ||
| 853 | pm.update() | ||
| 854 | if project.worktree: | ||
| 855 | start = time.time() | ||
| 856 | project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync) | ||
| 857 | self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL, | ||
| 858 | start, time.time(), syncbuf.Recently()) | ||
| 859 | pm.end() | ||
| 860 | print(file=sys.stderr) | ||
| 861 | if not syncbuf.Finish(): | ||
| 862 | sys.exit(1) | ||
| 863 | 999 | ||
| 864 | # If there's a notice that's supposed to print at the end of the sync, print | 1000 | # If there's a notice that's supposed to print at the end of the sync, print |
| 865 | # it now... | 1001 | # it now... |
