James Ennis pushed to branch jennis/migrate_pull_push_commands at BuildStream / buildstream
Commits:

- 891fcb0e by Tristan Van Berkom at 2019-01-07T16:47:01Z
- 5de42d43 by Tristan Van Berkom at 2019-01-07T18:00:37Z
- 059035b9 by Tristan Van Berkom at 2019-01-07T18:02:00Z
- b83d1b1f by Tristan Van Berkom at 2019-01-07T18:02:00Z
- 16a8816f by Tristan Van Berkom at 2019-01-07T18:02:00Z
- c2fc2a5e by Tristan Van Berkom at 2019-01-07T18:02:00Z
- 3e3984ad by Tristan Van Berkom at 2019-01-07T18:50:23Z
- 512c726e by Tristan Van Berkom at 2019-01-08T03:38:11Z
- 01171988 by Tristan Van Berkom at 2019-01-08T04:20:14Z
- 6c1d06d6 by Phil Dawson at 2019-01-08T10:24:32Z
- 914ecb72 by Jürg Billeter at 2019-01-08T10:54:02Z
- babe3023 by James Ennis at 2019-01-09T10:02:33Z
- 3065df5b by Richard Maw at 2019-01-09T10:02:33Z
- d141fae5 by James Ennis at 2019-01-09T10:41:50Z
27 changed files:
- buildstream/_frontend/app.py
- buildstream/_frontend/cli.py
- buildstream/_scheduler/__init__.py
- buildstream/_scheduler/jobs/__init__.py
- buildstream/_scheduler/jobs/cachesizejob.py
- buildstream/_scheduler/jobs/cleanupjob.py
- buildstream/_scheduler/jobs/elementjob.py
- buildstream/_scheduler/jobs/job.py
- buildstream/_scheduler/queues/buildqueue.py
- buildstream/_scheduler/queues/fetchqueue.py
- buildstream/_scheduler/queues/pullqueue.py
- buildstream/_scheduler/queues/queue.py
- buildstream/_scheduler/queues/trackqueue.py
- buildstream/_scheduler/scheduler.py
- buildstream/element.py
- buildstream/sandbox/sandbox.py
- buildstream/utils.py
- tests/artifactcache/config.py
- tests/artifactcache/junctions.py
- tests/completions/completions.py
- tests/frontend/help.py
- tests/frontend/pull.py
- tests/frontend/push.py
- tests/frontend/workspace.py
- tests/integration/build-tree.py
- tests/integration/pullbuildtrees.py
- tests/sandboxes/remote-exec-config.py
Changes:
buildstream/_frontend/app.py:

@@ -38,7 +38,7 @@ from .._message import Message, MessageType, unconditional_messages
 from .._stream import Stream
 from .._versions import BST_FORMAT_VERSION
 from .. import _yaml
-from .._scheduler import ElementJob
+from .._scheduler import ElementJob, JobStatus
 
 # Import frontend assets
 from . import Profile, LogLine, Status
@@ -515,13 +515,13 @@ class App():
         self._status.add_job(job)
         self._maybe_render_status()
 
-    def _job_completed(self, job, success):
+    def _job_completed(self, job, status):
         self._status.remove_job(job)
         self._maybe_render_status()
 
         # Dont attempt to handle a failure if the user has already opted to
         # terminate
-        if not success and not self.stream.terminated:
+        if status == JobStatus.FAIL and not self.stream.terminated:
 
             if isinstance(job, ElementJob):
                 element = job.element
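Note: _job_completed() now receives a JobStatus value instead of a boolean, so the frontend can tell a genuine failure apart from a skipped job. A rough, self-contained sketch of the new callback contract (the JobStatus values mirror those added in job.py further down; the print is purely illustrative):

    class JobStatus():
        OK = 0        # Job succeeded
        FAIL = 1      # A temporary BstError was raised
        SKIPPED = 3   # A SkipJob was raised

    def job_completed(job_name, status):
        # Only genuine failures need interactive handling; previously a
        # skipped job reported success=True and was indistinguishable here.
        if status == JobStatus.FAIL:
            print("job failed:", job_name)

    job_completed("build/hello.bst", JobStatus.SKIPPED)   # prints nothing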
buildstream/_frontend/cli.py:

@@ -355,78 +355,6 @@ def build(app, elements, all_, track_, track_save, track_all, track_except, trac
                          build_all=all_)
 
 
-##################################################################
-#                          Pull Command                          #
-##################################################################
-@cli.command(short_help="Pull a built artifact")
-@click.option('--deps', '-d', default='none',
-              type=click.Choice(['none', 'all']),
-              help='The dependency artifacts to pull (default: none)')
-@click.option('--remote', '-r',
-              help="The URL of the remote cache (defaults to the first configured cache)")
-@click.argument('elements', nargs=-1,
-                type=click.Path(readable=False))
-@click.pass_obj
-def pull(app, elements, deps, remote):
-    """Pull a built artifact from the configured remote artifact cache.
-
-    By default the artifact will be pulled one of the configured caches
-    if possible, following the usual priority order. If the `--remote` flag
-    is given, only the specified cache will be queried.
-
-    Specify `--deps` to control which artifacts to pull:
-
-    \b
-        none:  No dependencies, just the element itself
-        all:   All dependencies
-    """
-
-    with app.initialized(session_name="Pull"):
-        if not elements:
-            guessed_target = app.context.guess_element()
-            if guessed_target:
-                elements = (guessed_target,)
-
-        app.stream.pull(elements, selection=deps, remote=remote)
-
-
-##################################################################
-#                          Push Command                          #
-##################################################################
-@cli.command(short_help="Push a built artifact")
-@click.option('--deps', '-d', default='none',
-              type=click.Choice(['none', 'all']),
-              help='The dependencies to push (default: none)')
-@click.option('--remote', '-r', default=None,
-              help="The URL of the remote cache (defaults to the first configured cache)")
-@click.argument('elements', nargs=-1,
-                type=click.Path(readable=False))
-@click.pass_obj
-def push(app, elements, deps, remote):
-    """Push a built artifact to a remote artifact cache.
-
-    The default destination is the highest priority configured cache. You can
-    override this by passing a different cache URL with the `--remote` flag.
-
-    If bst has been configured to include build trees on artifact pulls,
-    an attempt will be made to pull any required build trees to avoid the
-    skipping of partial artifacts being pushed.
-
-    Specify `--deps` to control which artifacts to push:
-
-    \b
-        none:  No dependencies, just the element itself
-        all:   All dependencies
-    """
-    with app.initialized(session_name="Push"):
-        if not elements:
-            guessed_target = app.context.guess_element()
-            if guessed_target:
-                elements = (guessed_target,)
-
-        app.stream.push(elements, selection=deps, remote=remote)
-
-
 ##################################################################
 #                           Show Command                         #
 ##################################################################
@@ -973,36 +901,48 @@ def workspace_list(app):
 #############################################################
 #                     Artifact Commands                     #
 #############################################################
-def _classify_artifacts(names, cas, project_directory):
-    element_targets = []
-    artifact_refs = []
-    element_globs = []
-    artifact_globs = []
-
+def _classify_element_targets(names, project_directory):
+    globs = []
+    targets = []
+    unmatched = []
     for name in names:
         if name.endswith('.bst'):
             if any(c in "*?[" for c in name):
-                element_globs.append(name)
+                globs.append(name)
             else:
-                element_targets.append(name)
+                targets.append(name)
         else:
-            if any(c in "*?[" for c in name):
-                artifact_globs.append(name)
-            else:
-                artifact_refs.append(name)
+            unmatched.append(name)
 
-    if element_globs:
+    if globs:
         for dirpath, _, filenames in os.walk(project_directory):
             for filename in filenames:
-                element_path = os.path.join(dirpath, filename).lstrip(project_directory).lstrip('/')
-                if any(fnmatch(element_path, glob) for glob in element_globs):
-                    element_targets.append(element_path)
+                element_path = os.path.relpath(os.path.join(dirpath, filename), start=project_directory)
+                if any(fnmatch(element_path, glob) for glob in globs):
+                    targets.append(element_path)
+    return targets, unmatched
+
+
+def _classify_artifact_refs(names, cas):
+    globs = []
+    refs = []
+    for name in names:
+        if any(c in "*?[" for c in name):
+            globs.append(name)
+        else:
+            refs.append(name)
+    if globs:
+        refs.extend(ref for ref in cas.list_refs()
+                    if any(fnmatch(ref, glob) for glob in globs))
+    return refs
+
 
-    if artifact_globs:
-        artifact_refs.extend(ref for ref in cas.list_refs()
-                             if any(fnmatch(ref, glob) for glob in artifact_globs))
+def _classify_artifacts(names, cas, project_directory):
+    targets, unmatched = _classify_element_targets(names, project_directory)
+    refs = _classify_artifact_refs(unmatched, cas)
+
+    return targets, refs
 
-    return element_targets, artifact_refs
 
 
 @cli.group(short_help="Manipulate cached artifacts")
@@ -1010,6 +950,109 @@ def artifact():
     """Manipulate cached artifacts"""
 
 
+################################################################
+#                    Artifact Pull Command                     #
+################################################################
+@artifact.command(name="pull", short_help="Pull a built artifact")
+@click.option('--deps', '-d', default='none',
+              type=click.Choice(['none', 'all']),
+              help='The dependency artifacts to pull (default: none)')
+@click.option('--remote', '-r',
+              help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('artifacts', type=click.Path(), nargs=-1)
+@click.pass_obj
+def artifact_pull(app, artifacts, deps, remote):
+    """Pull a built artifact from the configured remote artifact cache.
+
+    By default the artifact will be pulled one of the configured caches
+    if possible, following the usual priority order. If the `--remote` flag
+    is given, only the specified cache will be queried.
+
+    Specify `--deps` to control which artifacts to pull:
+
+    \b
+        none:  No dependencies, just the element itself
+        all:   All dependencies
+    """
+
+    with app.initialized(session_name="Pull"):
+        cache = app.context.artifactcache
+
+        elements, artifacts = _classify_artifacts(artifacts, cache.cas,
+                                                  app.project.directory)
+
+        # Guess the element if we're in a workspace
+        if not elements and not artifacts:
+            guessed_target = app.context.guess_element()
+            if guessed_target:
+                elements = (guessed_target,)
+
+        if artifacts and deps is not 'none':
+            raise AppError("--deps may not be used with artifact refs")  # NOTE: I *think* we're good for multiple artifacts and --deps.
+
+
+        if elements:
+            app.stream.pull(elements, selection=deps, remote=remote)
+        # FIXME: We can only obtain project/user config through the stream API,
+        # which we need to determine the remote in order for pull to pull from.
+        # We can't just go straight to artifactcache here. Thus Stream.Pull()
+        # will fail because it expects a list of element names (.bst).
+        if artifacts:
+            app.stream.pull(artifacts, selection='none', remote=remote)
+
+
+##################################################################
+#                     Artifact Push Command                     #
+##################################################################
+@artifact.command(name="push", short_help="Push a built artifact")
+@click.option('--deps', '-d', default='none',
+              type=click.Choice(['none', 'all']),
+              help='The dependencies to push (default: none)')
+@click.option('--remote', '-r', default=None,
+              help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('artifacts', type=click.Path(), nargs=-1)
+@click.pass_obj
+def artifact_push(app, artifacts, deps, remote):
+    """Push a built artifact to a remote artifact cache.
+
+    The default destination is the highest priority configured cache. You can
+    override this by passing a different cache URL with the `--remote` flag.
+
+    If bst has been configured to include build trees on artifact pulls,
+    an attempt will be made to pull any required build trees to avoid the
+    skipping of partial artifacts being pushed.
+
+    Specify `--deps` to control which artifacts to push:
+
+    \b
+        none:  No dependencies, just the element itself
+        all:   All dependencies
+    """
+    with app.initialized(session_name="Push"):
+        cache = app.context.artifactcache
+
+        elements, artifacts = _classify_artifacts(artifacts, cache.cas,
+                                                  app.project.directory)
+
+        # Guess the element if we're in a workspace
+        if not elements:
+            guessed_target = app.context.guess_element()
+            if guessed_target:
+                elements = (guessed_target,)
+
+        if artifacts and deps is not 'none':
+            raise AppError("--deps may not be used with artifact refs")
+
+        if elements:
+            app.stream.push(elements, selection=deps, remote=remote)
+        # FIXME: We can only obtain project/user config through the stream API,
+        # which we need to determine the remote in order for pull to pull from.
+        # We can't just go straight to artifactcache here. Thus Stream.Pull()
+        # will fail because it expects a list of element names (.bst).
+        if artifacts:
+            app.stream.push(artifacts, selection='none', remote=remote)
+
+
 ################################################################
 #                     Artifact Log Command                     #
 ################################################################
@@ -1116,3 +1159,37 @@ def fetch(app, elements, deps, track_, except_, track_cross_junctions):
 def track(app, elements, deps, except_, cross_junctions):
     click.echo("This command is now obsolete. Use `bst source track` instead.", err=True)
     sys.exit(1)
+
+
+################################################################
+#                         Pull Command                         #
+################################################################
+@cli.command(short_help="Pull a built artifact", hidden=True)
+@click.option('--deps', '-d', default='none',
+              type=click.Choice(['none', 'all']),
+              help='The dependency artifacts to pull (default: none)')
+@click.option('--remote', '-r',
+              help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+                type=click.Path(readable=False))
+@click.pass_obj
+def pull(app, elements, deps, remote):
+    click.echo("This command is now obsolete. Use `bst artifact pull` instead.", err=True)
+    sys.exit(1)
+
+
+##################################################################
+#                          Push Command                          #
+##################################################################
+@cli.command(short_help="Push a built artifact", hidden=True)
+@click.option('--deps', '-d', default='none',
+              type=click.Choice(['none', 'all']),
+              help='The dependencies to push (default: none)')
+@click.option('--remote', '-r', default=None,
+              help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+                type=click.Path(readable=False))
+@click.pass_obj
+def push(app, elements, deps, remote):
+    click.echo("This command is now obsolete. Use `bst artifact push` instead.", err=True)
+    sys.exit(1)
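The _classify_artifacts() refactor above splits classification into two passes: names ending in .bst are element targets (globs are matched against element paths found on disk), and anything left over is treated as an artifact ref (globs are matched against the refs in the CAS). Note also that os.path.relpath() replaces the previous .lstrip(project_directory), which stripped characters rather than a path prefix. A rough standalone sketch of the classification logic, with plain lists standing in for the os.walk() scan and for cas.list_refs():

    from fnmatch import fnmatch

    def classify(names, element_paths, cached_refs):
        # element_paths: relative .bst paths under the project directory
        # cached_refs: stand-in for cas.list_refs()
        targets, refs = [], []
        for name in names:
            is_glob = any(c in "*?[" for c in name)
            if name.endswith('.bst'):
                if is_glob:
                    targets.extend(p for p in element_paths if fnmatch(p, name))
                else:
                    targets.append(name)
            elif is_glob:
                refs.extend(r for r in cached_refs if fnmatch(r, name))
            else:
                refs.append(name)
        return targets, refs

    print(classify(['core/*.bst', 'project/target/abc123'],
                   ['core/base.bst', 'apps/hello.bst'],
                   ['project/target/abc123']))
    # -> (['core/base.bst'], ['project/target/abc123'])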
buildstream/_scheduler/__init__.py:

@@ -26,4 +26,4 @@ from .queues.pushqueue import PushQueue
 from .queues.pullqueue import PullQueue
 
 from .scheduler import Scheduler, SchedStatus
-from .jobs import ElementJob
+from .jobs import ElementJob, JobStatus
buildstream/_scheduler/jobs/__init__.py:

@@ -20,3 +20,4 @@
 from .elementjob import ElementJob
 from .cachesizejob import CacheSizeJob
 from .cleanupjob import CleanupJob
+from .job import JobStatus
buildstream/_scheduler/jobs/cachesizejob.py:

@@ -16,7 +16,7 @@
 #  Author:
 #        Tristan Daniël Maat <tristan maat codethink co uk>
 #
-from .job import Job
+from .job import Job, JobStatus
 
 
 class CacheSizeJob(Job):
@@ -30,8 +30,8 @@ class CacheSizeJob(Job):
     def child_process(self):
         return self._artifacts.compute_cache_size()
 
-    def parent_complete(self, success, result):
-        if success:
+    def parent_complete(self, status, result):
+        if status == JobStatus.OK:
             self._artifacts.set_cache_size(result)
 
         if self._complete_cb:
buildstream/_scheduler/jobs/cleanupjob.py:

@@ -16,7 +16,7 @@
 #  Author:
 #        Tristan Daniël Maat <tristan maat codethink co uk>
 #
-from .job import Job
+from .job import Job, JobStatus
 
 
 class CleanupJob(Job):
@@ -29,6 +29,6 @@ class CleanupJob(Job):
     def child_process(self):
         return self._artifacts.clean()
 
-    def parent_complete(self, success, result):
-        if success:
+    def parent_complete(self, status, result):
+        if status == JobStatus.OK:
             self._artifacts.set_cache_size(result)
buildstream/_scheduler/jobs/elementjob.py:

@@ -60,7 +60,7 @@ from .job import Job
 # Args:
 #    job (Job): The job object which completed
 #    element (Element): The element passed to the Job() constructor
-#    success (bool): True if the action_cb did not raise an exception
+#    status (JobStatus): The status of whether the workload raised an exception
 #    result (object): The deserialized object returned by the `action_cb`, or None
 #       if `success` is False
 #
@@ -93,8 +93,8 @@ class ElementJob(Job):
         # Run the action
         return self._action_cb(self._element)
 
-    def parent_complete(self, success, result):
-        self._complete_cb(self, self._element, success, self._result)
+    def parent_complete(self, status, result):
+        self._complete_cb(self, self._element, status, self._result)
 
     def message(self, message_type, message, **kwargs):
         args = dict(kwargs)
buildstream/_scheduler/jobs/job.py:

@@ -28,8 +28,6 @@ import traceback
 import asyncio
 import multiprocessing
 
-import psutil
-
 # BuildStream toplevel imports
 from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob
 from ..._message import Message, MessageType, unconditional_messages
@@ -43,6 +41,22 @@ RC_PERM_FAIL = 2
 RC_SKIPPED = 3
 
 
+# JobStatus:
+#
+# The job completion status, passed back through the
+# complete callbacks.
+#
+class JobStatus():
+    # Job succeeded
+    OK = 0
+
+    # A temporary BstError was raised
+    FAIL = 1
+
+    # A SkipJob was raised
+    SKIPPED = 3
+
+
 # Used to distinguish between status messages and return values
 class Envelope():
     def __init__(self, message_type, message):
@@ -118,7 +132,6 @@ class Job():
         self._max_retries = max_retries        # Maximum number of automatic retries
         self._result = None                    # Return value of child action in the parent
         self._tries = 0                        # Try count, for retryable jobs
-        self._skipped_flag = False             # Indicate whether the job was skipped.
         self._terminated = False               # Whether this job has been explicitly terminated
 
         # If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
@@ -215,17 +228,10 @@
     # Forcefully kill the process, and any children it might have.
     #
     def kill(self):
-
         # Force kill
         self.message(MessageType.WARN,
                      "{} did not terminate gracefully, killing".format(self.action_name))
-
-        try:
-            utils._kill_process_tree(self._process.pid)
-        # This can happen if the process died of its own accord before
-        # we try to kill it
-        except psutil.NoSuchProcess:
-            return
+        utils._kill_process_tree(self._process.pid)
 
     # suspend()
     #
@@ -282,18 +288,6 @@
     def set_task_id(self, task_id):
         self._task_id = task_id
 
-    # skipped
-    #
-    # This will evaluate to True if the job was skipped
-    # during processing, or if it was forcefully terminated.
-    #
-    # Returns:
-    #    (bool): Whether the job should appear as skipped
-    #
-    @property
-    def skipped(self):
-        return self._skipped_flag or self._terminated
-
     #######################################################
     #                  Abstract Methods                   #
     #######################################################
@@ -304,10 +298,10 @@
     # pass the result to the main thread.
     #
     # Args:
-    #    success (bool): Whether the job was successful.
+    #    status (JobStatus): The job exit status
     #    result (any): The result returned by child_process().
     #
-    def parent_complete(self, success, result):
+    def parent_complete(self, status, result):
         raise ImplError("Job '{kind}' does not implement parent_complete()"
                         .format(kind=type(self).__name__))
 
@@ -571,16 +565,23 @@
         #
         self._retry_flag = returncode == RC_FAIL
 
-        # Set the flag to alert Queue that this job skipped.
-        self._skipped_flag = returncode == RC_SKIPPED
-
         if self._retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated:
             self.spawn()
             return
 
-        success = returncode in (RC_OK, RC_SKIPPED)
-        self.parent_complete(success, self._result)
-        self._scheduler.job_completed(self, success)
+        # Resolve the outward facing overall job completion status
+        #
+        if returncode == RC_OK:
+            status = JobStatus.OK
+        elif returncode == RC_SKIPPED:
+            status = JobStatus.SKIPPED
+        elif returncode in (RC_FAIL, RC_PERM_FAIL):
+            status = JobStatus.FAIL
+        else:
+            status = JobStatus.FAIL
+
+        self.parent_complete(status, self._result)
+        self._scheduler.job_completed(self, status)
 
         # Force the deletion of the queue and process objects to try and clean up FDs
         self._queue = self._process = None
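The parent process now folds the child's exit code into a single outward-facing JobStatus. A minimal sketch of that mapping, using the RC_* constants defined in this file (both retryable and permanent failures collapse to FAIL once retries are exhausted, and anything unexpected is treated as a failure too):

    RC_OK, RC_FAIL, RC_PERM_FAIL, RC_SKIPPED = 0, 1, 2, 3

    class JobStatus():
        OK = 0
        FAIL = 1
        SKIPPED = 3

    def resolve_status(returncode):
        # Mirrors the status-resolution branch added above
        if returncode == RC_OK:
            return JobStatus.OK
        if returncode == RC_SKIPPED:
            return JobStatus.SKIPPED
        return JobStatus.FAIL   # RC_FAIL, RC_PERM_FAIL, or unknown codes

    assert resolve_status(RC_SKIPPED) == JobStatus.SKIPPED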
buildstream/_scheduler/queues/buildqueue.py:

@@ -21,7 +21,7 @@
 from datetime import timedelta
 
 from . import Queue, QueueStatus
-from ..jobs import ElementJob
+from ..jobs import ElementJob, JobStatus
 from ..resources import ResourceType
 from ..._message import MessageType
 
@@ -104,7 +104,7 @@ class BuildQueue(Queue):
         if artifacts.has_quota_exceeded():
             self._scheduler.check_cache_size()
 
-    def done(self, job, element, result, success):
+    def done(self, job, element, result, status):
 
         # Inform element in main process that assembly is done
         element._assemble_done()
@@ -117,5 +117,5 @@ class BuildQueue(Queue):
         # artifact cache size for a successful build even though we know a
         # failed build also grows the artifact cache size.
         #
-        if success:
+        if status == JobStatus.OK:
             self._check_cache_size(job, element, result)
buildstream/_scheduler/queues/fetchqueue.py:

@@ -24,6 +24,7 @@ from ... import Consistency
 # Local imports
 from . import Queue, QueueStatus
 from ..resources import ResourceType
+from ..jobs import JobStatus
 
 
 # A queue which fetches element sources
@@ -66,9 +67,9 @@
 
         return QueueStatus.READY
 
-    def done(self, _, element, result, success):
+    def done(self, _, element, result, status):
 
-        if not success:
+        if status == JobStatus.FAIL:
             return
 
         element._update_state()
buildstream/_scheduler/queues/pullqueue.py:

@@ -21,6 +21,7 @@
 # Local imports
 from . import Queue, QueueStatus
 from ..resources import ResourceType
+from ..jobs import JobStatus
 from ..._exceptions import SkipJob
 
 
@@ -54,9 +55,9 @@ class PullQueue(Queue):
         else:
             return QueueStatus.SKIP
 
-    def done(self, _, element, result, success):
+    def done(self, _, element, result, status):
 
-        if not success:
+        if status == JobStatus.FAIL:
             return
 
         element._pull_done()
@@ -64,4 +65,5 @@ class PullQueue(Queue):
         # Build jobs will check the "approximate" size first. Since we
         # do not get an artifact size from pull jobs, we have to
         # actually check the cache size.
-        self._scheduler.check_cache_size()
+        if status == JobStatus.OK:
+            self._scheduler.check_cache_size()
buildstream/_scheduler/queues/queue.py:

@@ -25,7 +25,7 @@ from enum import Enum
 import traceback
 
 # Local imports
-from ..jobs import ElementJob
+from ..jobs import ElementJob, JobStatus
 from ..resources import ResourceType
 
 # BuildStream toplevel imports
@@ -133,10 +133,9 @@ class Queue():
     #    job (Job): The job which completed processing
     #    element (Element): The element which completed processing
     #    result (any): The return value of the process() implementation
-    #    success (bool): True if the process() implementation did not
-    #       raise any exception
+    #    status (JobStatus): The return status of the Job
     #
-    def done(self, job, element, result, success):
+    def done(self, job, element, result, status):
         pass
 
     #####################################################
@@ -291,7 +290,7 @@ class Queue():
     #
     # See the Job object for an explanation of the call signature
    #
-    def _job_done(self, job, element, success, result):
+    def _job_done(self, job, element, status, result):
 
         # Update values that need to be synchronized in the main task
         # before calling any queue implementation
@@ -301,7 +300,7 @@ class Queue():
         # and determine if it should be considered as processed
         # or skipped.
         try:
-            self.done(job, element, result, success)
+            self.done(job, element, result, status)
         except BstError as e:
 
             # Report error and mark as failed
@@ -332,12 +331,10 @@ class Queue():
         # All jobs get placed on the done queue for later processing.
         self._done_queue.append(job)
 
-        # A Job can be skipped whether or not it has failed,
-        # we want to only bookkeep them as processed or failed
-        # if they are not skipped.
-        if job.skipped:
+        # These lists are for bookkeeping purposes for the UI and logging.
+        if status == JobStatus.SKIPPED:
             self.skipped_elements.append(element)
-        elif success:
+        elif status == JobStatus.OK:
             self.processed_elements.append(element)
         else:
             self.failed_elements.append(element)
buildstream/_scheduler/queues/trackqueue.py:

@@ -24,6 +24,7 @@ from ...plugin import _plugin_lookup
 # Local imports
 from . import Queue, QueueStatus
 from ..resources import ResourceType
+from ..jobs import JobStatus
 
 
 # A queue which tracks sources
@@ -47,9 +48,9 @@ class TrackQueue(Queue):
 
         return QueueStatus.READY
 
-    def done(self, _, element, result, success):
+    def done(self, _, element, result, status):
 
-        if not success:
+        if status == JobStatus.FAIL:
             return
 
         # Set the new refs in the main process one by one as they complete
buildstream/_scheduler/scheduler.py:

@@ -38,6 +38,16 @@ class SchedStatus():
     TERMINATED = 1
 
 
+# Our _REDUNDANT_EXCLUSIVE_ACTIONS jobs are special ones
+# which we launch dynamically, they have the property of being
+# meaningless to queue if one is already queued, and it also
+# doesnt make sense to run them in parallel
+#
+_ACTION_NAME_CLEANUP = 'cleanup'
+_ACTION_NAME_CACHE_SIZE = 'cache_size'
+_REDUNDANT_EXCLUSIVE_ACTIONS = [_ACTION_NAME_CLEANUP, _ACTION_NAME_CACHE_SIZE]
+
+
 # Scheduler()
 #
 # The scheduler operates on a list queues, each of which is meant to accomplish
@@ -94,6 +104,15 @@ class Scheduler():
         self._suspendtime = None
         self._queue_jobs = True                  # Whether we should continue to queue jobs
 
+        # Whether our exclusive jobs, like 'cleanup' are currently already
+        # waiting or active.
+        #
+        # This is just a bit quicker than scanning the wait queue and active
+        # queue and comparing job action names.
+        #
+        self._exclusive_waiting = set()
+        self._exclusive_active = set()
+
         self._resources = Resources(context.sched_builders,
                                     context.sched_fetchers,
                                     context.sched_pushers)
@@ -211,19 +230,6 @@ class Scheduler():
             starttime = timenow
         return timenow - starttime
 
-    # schedule_jobs()
-    #
-    # Args:
-    #     jobs ([Job]): A list of jobs to schedule
-    #
-    # Schedule 'Job's for the scheduler to run. Jobs scheduled will be
-    # run as soon any other queueing jobs finish, provided sufficient
-    # resources are available for them to run
-    #
-    def schedule_jobs(self, jobs):
-        for job in jobs:
-            self.waiting_jobs.append(job)
-
     # job_completed():
     #
     # Called when a Job completes
@@ -231,12 +237,14 @@ class Scheduler():
     # Args:
     #    queue (Queue): The Queue holding a complete job
     #    job (Job): The completed Job
-    #    success (bool): Whether the Job completed with a success status
+    #    status (JobStatus): The status of the completed job
     #
-    def job_completed(self, job, success):
+    def job_completed(self, job, status):
         self._resources.clear_job_resources(job)
         self.active_jobs.remove(job)
-        self._job_complete_callback(job, success)
+        if job.action_name in _REDUNDANT_EXCLUSIVE_ACTIONS:
+            self._exclusive_active.remove(job.action_name)
+        self._job_complete_callback(job, status)
         self._schedule_queue_jobs()
         self._sched()
 
@@ -246,18 +254,13 @@ class Scheduler():
     # size is calculated, a cleanup job will be run automatically
     # if needed.
    #
-    # FIXME: This should ensure that only one cache size job
-    #        is ever pending at a given time. If a cache size
-    #        job is already running, it is correct to queue
-    #        a new one, it is incorrect to have more than one
-    #        of these jobs pending at a given time, though.
-    #
     def check_cache_size(self):
-        job = CacheSizeJob(self, 'cache_size', 'cache_size/cache_size',
+        job = CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
+                           'cache_size/cache_size',
                            resources=[ResourceType.CACHE,
                                       ResourceType.PROCESS],
                            complete_cb=self._run_cleanup)
-        self.schedule_jobs([job])
+        self._schedule_jobs([job])
 
     #######################################################
     #                  Local Private Methods              #
@@ -276,10 +279,19 @@ class Scheduler():
             if not self._resources.reserve_job_resources(job):
                 continue
 
+            # Postpone these jobs if one is already running
+            if job.action_name in _REDUNDANT_EXCLUSIVE_ACTIONS and \
+               job.action_name in self._exclusive_active:
+                continue
+
             job.spawn()
             self.waiting_jobs.remove(job)
             self.active_jobs.append(job)
 
+            if job.action_name in _REDUNDANT_EXCLUSIVE_ACTIONS:
+                self._exclusive_waiting.remove(job.action_name)
+                self._exclusive_active.add(job.action_name)
+
             if self._job_start_callback:
                 self._job_start_callback(job)
 
@@ -287,6 +299,33 @@ class Scheduler():
         if not self.active_jobs and not self.waiting_jobs:
             self.loop.stop()
 
+    # _schedule_jobs()
+    #
+    # The main entry point for jobs to be scheduled.
+    #
+    # This is called either as a result of scanning the queues
+    # in _schedule_queue_jobs(), or directly by the Scheduler
+    # to insert special jobs like cleanups.
+    #
+    # Args:
+    #     jobs ([Job]): A list of jobs to schedule
+    #
+    def _schedule_jobs(self, jobs):
+        for job in jobs:
+
+            # Special treatment of our redundant exclusive jobs
+            #
+            if job.action_name in _REDUNDANT_EXCLUSIVE_ACTIONS:
+
+                # Drop the job if one is already queued
+                if job.action_name in self._exclusive_waiting:
+                    continue
+
+                # Mark this action type as queued
+                self._exclusive_waiting.add(job.action_name)
+
+            self.waiting_jobs.append(job)
+
     # _schedule_queue_jobs()
     #
     # Ask the queues what jobs they want to schedule and schedule
@@ -331,7 +370,7 @@ class Scheduler():
         # the next queue and process them.
         process_queues = any(q.dequeue_ready() for q in self.queues)
 
-        self.schedule_jobs(ready)
+        self._schedule_jobs(ready)
         self._sched()
 
     # _run_cleanup()
@@ -353,11 +392,11 @@ class Scheduler():
         if not artifacts.has_quota_exceeded():
             return
 
-        job = CleanupJob(self, 'cleanup', 'cleanup/cleanup',
+        job = CleanupJob(self, _ACTION_NAME_CLEANUP, 'cleanup/cleanup',
                          resources=[ResourceType.CACHE,
                                     ResourceType.PROCESS],
                          exclusive_resources=[ResourceType.CACHE])
-        self.schedule_jobs([job])
+        self._schedule_jobs([job])
 
     # _suspend_jobs()
     #
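The two new sets make the redundant-exclusive rule cheap to enforce: at most one instance of each such action queued, and at most one running. A condensed sketch of the rule in isolation from the real Job machinery (plain action-name strings stand in for Job objects):

    _REDUNDANT_EXCLUSIVE_ACTIONS = {'cleanup', 'cache_size'}

    waiting, active = [], []
    exclusive_waiting, exclusive_active = set(), set()

    def schedule(action):
        # Drop the job if an identical exclusive action is already queued
        if action in _REDUNDANT_EXCLUSIVE_ACTIONS:
            if action in exclusive_waiting:
                return
            exclusive_waiting.add(action)
        waiting.append(action)

    def sched():
        for action in list(waiting):
            # Postpone an exclusive action while one of its kind runs
            if action in _REDUNDANT_EXCLUSIVE_ACTIONS and action in exclusive_active:
                continue
            waiting.remove(action)
            active.append(action)
            if action in _REDUNDANT_EXCLUSIVE_ACTIONS:
                exclusive_waiting.discard(action)
                exclusive_active.add(action)

    schedule('cache_size')
    schedule('cache_size')   # dropped: one is already waiting
    sched()
    assert active == ['cache_size'] and not waiting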
buildstream/element.py:

@@ -65,7 +65,7 @@ Miscellaneous abstract methods also exist:
 
 * :func:`Element.generate_script() <buildstream.element.Element.generate_script>`
 
-  For the purpose of ``bst source bundle``, an Element may optionally implement this.
+  For the purpose of ``bst source checkout --include-build-scripts``, an Element may optionally implement this.
 
 
 Class Reference
buildstream/sandbox/sandbox.py:

@@ -592,7 +592,7 @@ class _SandboxBatch():
         if command.label:
             context = self.sandbox._get_context()
             message = Message(self.sandbox._get_plugin_id(), MessageType.STATUS,
-                              'Running {}'.format(command.label))
+                              'Running command', detail=command.label)
             context.message(message)
 
         exitcode = self.sandbox._run(command.command, self.flags, cwd=command.cwd, env=command.env)
buildstream/utils.py:

@@ -1050,6 +1050,11 @@ def _kill_process_tree(pid):
             # Ignore this error, it can happen with
             # some setuid bwrap processes.
             pass
+        except psutil.NoSuchProcess:
+            # It is certain that this has already been sent
+            # SIGTERM, so there is a window where the process
+            # could have exited already.
+            pass
 
     # Bloody Murder
     for child in children:
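Moving the psutil.NoSuchProcess handling from Job.kill() down into _kill_process_tree() closes the race where a process exits on its own between the earlier SIGTERM and the forceful kill. The shape of the resulting handler, roughly (psutil is an existing BuildStream dependency; the real function also walks and kills child processes, which this sketch omits):

    import psutil

    def kill_tree(pid):
        try:
            proc = psutil.Process(pid)
            proc.kill()
        except psutil.AccessDenied:
            # Can happen with some setuid bwrap processes
            pass
        except psutil.NoSuchProcess:
            # The process was already sent SIGTERM, so it may well
            # have exited before we got here
            pass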
tests/artifactcache/config.py:

@@ -138,5 +138,5 @@ def test_missing_certs(cli, datafiles, config_key, config_value):
     # Use `pull` here to ensure we try to initialize the remotes, triggering the error
     #
     # This does not happen for a simple `bst show`.
-    result = cli.run(project=project, args=['pull', 'element.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', 'element.bst'])
     result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
tests/artifactcache/junctions.py:

@@ -58,7 +58,7 @@ def test_push_pull(cli, tmpdir, datafiles):
     project_set_artifacts(base_project, base_share.repo)
 
     # Now try bst push
-    result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst'])
+    result = cli.run(project=project, args=['artifact', 'push', '--deps', 'all', 'target.bst'])
     assert result.exit_code == 0
 
     # And finally assert that the artifacts are in the right shares
@@ -78,7 +78,7 @@ def test_push_pull(cli, tmpdir, datafiles):
     assert state != 'cached'
 
     # Now try bst pull
-    result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', '--deps', 'all', 'target.bst'])
     assert result.exit_code == 0
 
     # And assert that they are again in the local cache, without having built
tests/completions/completions.py:

@@ -11,8 +11,6 @@ MAIN_COMMANDS = [
     'checkout ',
     'help ',
     'init ',
-    'pull ',
-    'push ',
     'shell ',
     'show ',
     'source ',
@@ -54,6 +52,12 @@ SOURCE_COMMANDS = [
     'track ',
 ]
 
+ARTIFACT_COMMANDS = [
+    'push ',
+    'pull ',
+    'log ',
+]
+
 WORKSPACE_COMMANDS = [
     'close ',
     'list ',
@@ -117,8 +121,7 @@ def assert_completion_failed(cli, cmd, word_idx, expected, cwd=None):
 @pytest.mark.parametrize("cmd,word_idx,expected", [
     ('bst', 0, []),
     ('bst ', 1, MAIN_COMMANDS),
-    ('bst pu', 1, ['pull ', 'push ']),
-    ('bst pul', 1, ['pull ']),
+    ('bst artifact ', 2, ARTIFACT_COMMANDS),
     ('bst source ', 2, SOURCE_COMMANDS),
     ('bst w ', 1, ['workspace ']),
     ('bst workspace ', 2, WORKSPACE_COMMANDS),
@@ -272,9 +275,8 @@ def test_argument_element_invalid(datafiles, cli, project, cmd, word_idx, expect
 @pytest.mark.parametrize("cmd,word_idx,expected", [
     ('bst he', 1, ['help ']),
     ('bst help ', 2, MAIN_COMMANDS),
+    ('bst help artifact ', 3, ARTIFACT_COMMANDS),
     ('bst help in', 2, ['init ']),
-    ('bst help p', 2, ['pull ', 'push ']),
-    ('bst help p', 2, ['pull ', 'push ']),
     ('bst help source ', 3, SOURCE_COMMANDS),
     ('bst help w', 2, ['workspace ']),
     ('bst help workspace ', 3, WORKSPACE_COMMANDS),
tests/frontend/help.py:

@@ -18,10 +18,9 @@ def test_help_main(cli):
 
 
 @pytest.mark.parametrize("command", [
+    ('artifact'),
     ('build'),
     ('checkout'),
-    ('pull'),
-    ('push'),
     ('shell'),
     ('show'),
     ('source'),
tests/frontend/pull.py:

@@ -70,7 +70,7 @@ def test_push_pull_all(cli, tmpdir, datafiles):
         assert cli.get_element_state(project, element_name) != 'cached'
 
     # Now try bst pull
-    result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', '--deps', 'all', 'target.bst'])
     result.assert_success()
 
     # And assert that it's again in the local cache, without having built
@@ -111,7 +111,7 @@ def test_pull_secondary_cache(cli, tmpdir, datafiles):
     assert cli.get_element_state(project, 'target.bst') != 'cached'
 
     # Now try bst pull
-    result = cli.run(project=project, args=['pull', 'target.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', 'target.bst'])
     result.assert_success()
 
     # And assert that it's again in the local cache, without having built,
@@ -146,7 +146,7 @@ def test_push_pull_specific_remote(cli, tmpdir, datafiles):
 
     # Now try `bst push` to the good_share.
     result = cli.run(project=project, args=[
-        'push', 'target.bst', '--remote', good_share.repo
+        'artifact', 'push', 'target.bst', '--remote', good_share.repo
     ])
     result.assert_success()
 
@@ -161,7 +161,7 @@ def test_push_pull_specific_remote(cli, tmpdir, datafiles):
     artifacts = os.path.join(cli.directory, 'artifacts')
     shutil.rmtree(artifacts)
 
-    result = cli.run(project=project, args=['pull', 'target.bst', '--remote',
+    result = cli.run(project=project, args=['artifact', 'pull', 'target.bst', '--remote',
                                             good_share.repo])
     result.assert_success()
 
@@ -216,7 +216,7 @@ def test_push_pull_non_strict(cli, tmpdir, datafiles):
     assert cli.get_element_state(project, 'target.bst') == 'waiting'
 
     # Now try bst pull
-    result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', '--deps', 'all', 'target.bst'])
     result.assert_success()
 
     # And assert that the target is again in the local cache, without having built
@@ -291,7 +291,7 @@ def test_push_pull_cross_junction(cli, tmpdir, datafiles):
     assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'buildable'
 
     # Now try bst pull
-    result = cli.run(project=project, args=['pull', 'junction.bst:import-etc.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', 'junction.bst:import-etc.bst'])
     result.assert_success()
 
     # And assert that it's again in the local cache, without having built
| 82 | 82 |
with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
|
| 83 | 83 |
|
| 84 | 84 |
# Try pushing with no remotes configured. This should fail.
|
| 85 |
- result = cli.run(project=project, args=['push', 'target.bst'])
|
|
| 85 |
+ result = cli.run(project=project, args=['artifact', 'push', 'target.bst'])
|
|
| 86 | 86 |
result.assert_main_error(ErrorDomain.STREAM, None)
|
| 87 | 87 |
|
| 88 | 88 |
# Configure bst to pull but not push from a cache and run `bst push`.
|
| ... | ... | @@ -90,7 +90,7 @@ def test_push(cli, tmpdir, datafiles): |
| 90 | 90 |
cli.configure({
|
| 91 | 91 |
'artifacts': {'url': share1.repo, 'push': False},
|
| 92 | 92 |
})
|
| 93 |
- result = cli.run(project=project, args=['push', 'target.bst'])
|
|
| 93 |
+ result = cli.run(project=project, args=['artifact', 'push', 'target.bst'])
|
|
| 94 | 94 |
result.assert_main_error(ErrorDomain.STREAM, None)
|
| 95 | 95 |
|
| 96 | 96 |
# Configure bst to push to one of the caches and run `bst push`. This works.
|
| ... | ... | @@ -100,7 +100,7 @@ def test_push(cli, tmpdir, datafiles): |
| 100 | 100 |
{'url': share2.repo, 'push': True},
|
| 101 | 101 |
]
|
| 102 | 102 |
})
|
| 103 |
- result = cli.run(project=project, args=['push', 'target.bst'])
|
|
| 103 |
+ result = cli.run(project=project, args=['artifact', 'push', 'target.bst'])
|
|
| 104 | 104 |
|
| 105 | 105 |
assert_not_shared(cli, share1, project, 'target.bst')
|
| 106 | 106 |
assert_shared(cli, share2, project, 'target.bst')
|
| ... | ... | @@ -114,7 +114,7 @@ def test_push(cli, tmpdir, datafiles): |
| 114 | 114 |
{'url': share2.repo, 'push': True},
|
| 115 | 115 |
]
|
| 116 | 116 |
})
|
| 117 |
- result = cli.run(project=project, args=['push', 'target.bst'])
|
|
| 117 |
+ result = cli.run(project=project, args=['artifact', 'push', 'target.bst'])
|
|
| 118 | 118 |
|
| 119 | 119 |
assert_shared(cli, share1, project, 'target.bst')
|
| 120 | 120 |
assert_shared(cli, share2, project, 'target.bst')
|
| ... | ... | @@ -156,7 +156,7 @@ def test_push_all(cli, tmpdir, datafiles): |
| 156 | 156 |
|
| 157 | 157 |
# Now try bst push all the deps
|
| 158 | 158 |
result = cli.run(project=project, args=[
|
| 159 |
- 'push', 'target.bst',
|
|
| 159 |
+ 'artifact', 'push', 'target.bst',
|
|
| 160 | 160 |
'--deps', 'all'
|
| 161 | 161 |
])
|
| 162 | 162 |
result.assert_success()
|
| ... | ... | @@ -346,7 +346,7 @@ def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir): |
| 346 | 346 |
assert cli.get_element_state(project, 'element1.bst') != 'cached'
|
| 347 | 347 |
|
| 348 | 348 |
# Pull the element1 from the remote cache (this should update its mtime)
|
| 349 |
- result = cli.run(project=project, args=['pull', 'element1.bst', '--remote',
|
|
| 349 |
+ result = cli.run(project=project, args=['artifact', 'pull', 'element1.bst', '--remote',
|
|
| 350 | 350 |
share.repo])
|
| 351 | 351 |
result.assert_success()
|
| 352 | 352 |
|
| ... | ... | @@ -386,7 +386,7 @@ def test_push_cross_junction(cli, tmpdir, datafiles): |
| 386 | 386 |
cli.configure({
|
| 387 | 387 |
'artifacts': {'url': share.repo, 'push': True},
|
| 388 | 388 |
})
|
| 389 |
- result = cli.run(project=project, args=['push', 'junction.bst:import-etc.bst'])
|
|
| 389 |
+ result = cli.run(project=project, args=['artifact', 'push', 'junction.bst:import-etc.bst'])
|
|
| 390 | 390 |
|
| 391 | 391 |
cache_key = cli.get_element_key(project, 'junction.bst:import-etc.bst')
|
| 392 | 392 |
assert share.has_artifact('subtest', 'import-etc.bst', cache_key)
|
| ... | ... | @@ -407,7 +407,7 @@ def test_push_already_cached(caplog, cli, tmpdir, datafiles): |
| 407 | 407 |
result.assert_success()
|
| 408 | 408 |
assert "SKIPPED Push" not in result.stderr
|
| 409 | 409 |
|
| 410 |
- result = cli.run(project=project, args=['push', 'target.bst'])
|
|
| 410 |
+ result = cli.run(project=project, args=['artifact', 'push', 'target.bst'])
|
|
| 411 | 411 |
|
| 412 | 412 |
result.assert_success()
|
| 413 | 413 |
assert not result.get_pushed_elements(), "No elements should have been pushed since the cache was populated"
|
| ... | ... | @@ -1105,10 +1105,10 @@ def test_external_push_pull(cli, datafiles, tmpdir_factory, guess_element): |
| 1105 | 1105 |
'artifacts': {'url': share.repo, 'push': True}
|
| 1106 | 1106 |
})
|
| 1107 | 1107 |
|
| 1108 |
- result = cli.run(project=project, args=['-C', workspace, 'push'] + arg_elm)
|
|
| 1108 |
+ result = cli.run(project=project, args=['-C', workspace, 'artifact', 'push'] + arg_elm)
|
|
| 1109 | 1109 |
result.assert_success()
|
| 1110 | 1110 |
|
| 1111 |
- result = cli.run(project=project, args=['-C', workspace, 'pull', '--deps', 'all'] + arg_elm)
|
|
| 1111 |
+ result = cli.run(project=project, args=['-C', workspace, 'artifact', 'pull', '--deps', 'all'] + arg_elm)
|
|
| 1112 | 1112 |
result.assert_success()
|
| 1113 | 1113 |
|
| 1114 | 1114 |
|
| ... | ... | @@ -130,7 +130,8 @@ def test_buildtree_pulled(cli, tmpdir, datafiles): |
| 130 | 130 |
assert cli.get_element_state(project, element_name) != 'cached'
|
| 131 | 131 |
|
| 132 | 132 |
# Pull from cache, ensuring cli options is set to pull the buildtree
|
| 133 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'pull', '--deps', 'all', element_name])
|
|
| 133 |
+ result = cli.run(project=project,
|
|
| 134 |
+ args=['--pull-buildtrees', 'artifact', 'pull', '--deps', 'all', element_name])
|
|
| 134 | 135 |
result.assert_success()
|
| 135 | 136 |
|
| 136 | 137 |
# Check it's using the cached build tree
|
| ... | ... | @@ -164,7 +165,7 @@ def test_buildtree_options(cli, tmpdir, datafiles): |
| 164 | 165 |
assert cli.get_element_state(project, element_name) != 'cached'
|
| 165 | 166 |
|
| 166 | 167 |
# Pull from cache, but do not include buildtrees.
|
| 167 |
- result = cli.run(project=project, args=['pull', '--deps', 'all', element_name])
|
|
| 168 |
+ result = cli.run(project=project, args=['artifact', 'pull', '--deps', 'all', element_name])
|
|
| 168 | 169 |
result.assert_success()
|
| 169 | 170 |
|
| 170 | 171 |
# The above is the simplest way I know to create a local cache without any buildtrees.
|
| ... | ... | @@ -55,12 +55,12 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 55 | 55 |
# Pull artifact with default config, assert that pulling again
|
| 56 | 56 |
# doesn't create a pull job, then assert with buildtrees user
|
| 57 | 57 |
# config set creates a pull job.
|
| 58 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 58 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 59 | 59 |
assert element_name in result.get_pulled_elements()
|
| 60 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 60 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 61 | 61 |
assert element_name not in result.get_pulled_elements()
|
| 62 | 62 |
cli.configure({'cache': {'pull-buildtrees': True}})
|
| 63 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 63 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 64 | 64 |
assert element_name in result.get_pulled_elements()
|
| 65 | 65 |
default_state(cli, tmpdir, share1)
|
| 66 | 66 |
|
| ... | ... | @@ -68,13 +68,13 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 68 | 68 |
# with buildtrees cli flag set creates a pull job.
|
| 69 | 69 |
# Also assert that the buildtree is added to the artifact's
|
| 70 | 70 |
# extract dir
|
| 71 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 71 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 72 | 72 |
assert element_name in result.get_pulled_elements()
|
| 73 | 73 |
elementdigest = share1.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
| 74 | 74 |
buildtreedir = os.path.join(str(tmpdir), 'artifacts', 'extract', 'test', 'autotools-amhello',
|
| 75 | 75 |
elementdigest.hash, 'buildtree')
|
| 76 | 76 |
assert not os.path.isdir(buildtreedir)
|
| 77 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'pull', element_name])
|
|
| 77 |
+ result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
| 78 | 78 |
assert element_name in result.get_pulled_elements()
|
| 79 | 79 |
assert os.path.isdir(buildtreedir)
|
| 80 | 80 |
default_state(cli, tmpdir, share1)
|
| ... | ... | @@ -83,21 +83,21 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 83 | 83 |
# that pulling with the same user config doesn't creates a pull job,
|
| 84 | 84 |
# or when buildtrees cli flag is set.
|
| 85 | 85 |
cli.configure({'cache': {'pull-buildtrees': True}})
|
| 86 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 86 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 87 | 87 |
assert element_name in result.get_pulled_elements()
|
| 88 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 88 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 89 | 89 |
assert element_name not in result.get_pulled_elements()
|
| 90 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'pull', element_name])
|
|
| 90 |
+ result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
| 91 | 91 |
assert element_name not in result.get_pulled_elements()
|
| 92 | 92 |
default_state(cli, tmpdir, share1)
|
| 93 | 93 |
|
| 94 | 94 |
# Pull artifact with default config and buildtrees cli flag set, then assert
|
| 95 | 95 |
# that pulling with pullbuildtrees set in user config doesn't create a pull
|
| 96 | 96 |
# job.
|
| 97 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'pull', element_name])
|
|
| 97 |
+ result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
| 98 | 98 |
assert element_name in result.get_pulled_elements()
|
| 99 | 99 |
cli.configure({'cache': {'pull-buildtrees': True}})
|
| 100 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 100 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 101 | 101 |
assert element_name not in result.get_pulled_elements()
|
| 102 | 102 |
default_state(cli, tmpdir, share1)
|
| 103 | 103 |
|
| ... | ... | @@ -105,10 +105,10 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 105 | 105 |
# can't be pushed to an artifact share, then assert that a complete build element
|
| 106 | 106 |
# can be. This will attempt a partial pull from share1 and then a partial push
|
| 107 | 107 |
# to share2
|
| 108 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 108 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 109 | 109 |
assert element_name in result.get_pulled_elements()
|
| 110 | 110 |
cli.configure({'artifacts': {'url': share2.repo, 'push': True}})
|
| 111 |
- result = cli.run(project=project, args=['push', element_name])
|
|
| 111 |
+ result = cli.run(project=project, args=['artifact', 'push', element_name])
|
|
| 112 | 112 |
assert element_name not in result.get_pushed_elements()
|
| 113 | 113 |
assert not share2.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
| 114 | 114 |
|
| ... | ... | @@ -116,10 +116,10 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 116 | 116 |
# successfully pushed to the remote. This will attempt to pull the buildtree
|
| 117 | 117 |
# from share1 and then a 'complete' push to share2
|
| 118 | 118 |
cli.configure({'artifacts': {'url': share1.repo, 'push': False}})
|
| 119 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'pull', element_name])
|
|
| 119 |
+ result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
| 120 | 120 |
assert element_name in result.get_pulled_elements()
|
| 121 | 121 |
cli.configure({'artifacts': {'url': share2.repo, 'push': True}})
|
| 122 |
- result = cli.run(project=project, args=['push', element_name])
|
|
| 122 |
+ result = cli.run(project=project, args=['artifact', 'push', element_name])
|
|
| 123 | 123 |
assert element_name in result.get_pushed_elements()
|
| 124 | 124 |
assert share2.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
| 125 | 125 |
default_state(cli, tmpdir, share1)
|
| ... | ... | @@ -128,10 +128,10 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 128 | 128 |
# if pull-buildtrees is set, however as share3 is the only defined remote and is empty,
|
| 129 | 129 |
# assert that no element artifact buildtrees are pulled (no available remote buildtree) and thus the
|
| 130 | 130 |
# artifact cannot be pushed.
|
| 131 |
- result = cli.run(project=project, args=['pull', element_name])
|
|
| 131 |
+ result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
| 132 | 132 |
assert element_name in result.get_pulled_elements()
|
| 133 | 133 |
cli.configure({'artifacts': {'url': share3.repo, 'push': True}})
|
| 134 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'push', element_name])
|
|
| 134 |
+ result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'push', element_name])
|
|
| 135 | 135 |
assert "Attempting to fetch missing artifact buildtrees" in result.stderr
|
| 136 | 136 |
assert element_name not in result.get_pulled_elements()
|
| 137 | 137 |
assert not os.path.isdir(buildtreedir)
|
| ... | ... | @@ -143,7 +143,7 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache): |
| 143 | 143 |
# to the empty share3. This gives the ability to attempt push currently partial artifacts to a remote,
|
| 144 | 144 |
# without exlipictly requiring a bst pull.
|
| 145 | 145 |
cli.configure({'artifacts': [{'url': share1.repo, 'push': False}, {'url': share3.repo, 'push': True}]})
|
| 146 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'push', element_name])
|
|
| 146 |
+ result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'push', element_name])
|
|
| 147 | 147 |
assert "Attempting to fetch missing artifact buildtrees" in result.stderr
|
| 148 | 148 |
assert element_name in result.get_pulled_elements()
|
| 149 | 149 |
assert os.path.isdir(buildtreedir)
|
tests/sandboxes/remote-exec-config.py:

@@ -42,7 +42,7 @@ def test_old_and_new_configs(cli, datafiles):
     # Use `pull` here to ensure we try to initialize the remotes, triggering the error
     #
     # This does not happen for a simple `bst show`.
-    result = cli.run(project=project, args=['pull', 'element.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', 'element.bst'])
     result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one")
 
 
@@ -97,5 +97,5 @@ def test_empty_config(cli, datafiles):
     # Use `pull` here to ensure we try to initialize the remotes, triggering the error
     #
     # This does not happen for a simple `bst show`.
-    result = cli.run(project=project, args=['pull', 'element.bst'])
+    result = cli.run(project=project, args=['artifact', 'pull', 'element.bst'])
     result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one")