Valentin David pushed to branch valentindavid/local-cache-exec-leak at BuildStream / buildstream
Commits:

- 352f4ad9 by Jonathan Maw at 2019-02-12T16:44:35Z
- 62396af9 by Jonathan Maw at 2019-02-12T16:44:35Z
- f1e9cb66 by Jonathan Maw at 2019-02-12T16:44:35Z
- e51116d5 by Jonathan Maw at 2019-02-12T17:59:25Z
- 478e5c47 by Valentin David at 2019-02-12T18:06:50Z
- 6de65306 by Valentin David at 2019-02-12T19:13:43Z
- 0a414729 by Valentin David at 2019-02-13T10:01:16Z
8 changed files:
- buildstream/_cas/cascache.py
- buildstream/_cas/casremote.py
- buildstream/_cas/casserver.py
- buildstream/_loader/loader.py
- buildstream/_project.py
- tests/frontend/buildcheckout.py
- tests/frontend/pull.py
- tests/testutils/artifactshare.py
Changes:

=====================================
buildstream/_cas/cascache.py
=====================================
@@ -45,9 +45,10 @@ _BUFFER_SIZE = 65536
 #
 class CASCache():
 
-    def __init__(self, path):
+    def __init__(self, path, *, disable_exec=False):
         self.casdir = os.path.join(path, 'cas')
         self.tmpdir = os.path.join(path, 'tmp')
+        self._disable_exec = disable_exec
         os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
         os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
         os.makedirs(self.tmpdir, exist_ok=True)
@@ -342,8 +343,12 @@ class CASCache():
     # Returns:
     #     (str): The path of the object
     #
-    def objpath(self, digest):
-        return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
+    def objpath(self, digest, *, is_exec=False):
+        if is_exec and not self._disable_exec:
+            filename = '{}.exec'.format(digest.hash[2:])
+        else:
+            filename = digest.hash[2:]
+        return os.path.join(self.casdir, 'objects', digest.hash[:2], filename)
 
     # add_object():
     #
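
Note: the following standalone sketch (not part of the patch itself) illustrates the new objpath() mapping; 'ab12cd' stands in for a real digest hash. Executable blobs now get their own '.exec' object, except when the cache was created with disable_exec=True:

    import os

    def objpath(casdir, digest_hash, *, is_exec=False, disable_exec=False):
        # Executable variants live under '<hash>.exec', plain variants under '<hash>'
        if is_exec and not disable_exec:
            filename = '{}.exec'.format(digest_hash[2:])
        else:
            filename = digest_hash[2:]
        return os.path.join(casdir, 'objects', digest_hash[:2], filename)

    print(objpath('/cache/cas', 'ab12cd'))                                   # /cache/cas/objects/ab/12cd
    print(objpath('/cache/cas', 'ab12cd', is_exec=True))                     # /cache/cas/objects/ab/12cd.exec
    print(objpath('/cache/cas', 'ab12cd', is_exec=True, disable_exec=True))  # /cache/cas/objects/ab/12cd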
@@ -360,7 +365,7 @@ class CASCache():
     #
     # Either `path` or `buffer` must be passed, but not both.
     #
-    def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False):
+    def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False, is_exec=False):
         # Exactly one of the two parameters has to be specified
         assert (path is None) != (buffer is None)
 
@@ -376,10 +381,7 @@ class CASCache():
                 for chunk in iter(lambda: tmp.read(_BUFFER_SIZE), b""):
                     h.update(chunk)
             else:
-                tmp = stack.enter_context(utils._tempnamedfile(dir=self.tmpdir))
-                # Set mode bits to 0644
-                os.chmod(tmp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
-
+                tmp = stack.enter_context(self._temporary_object(is_exec=is_exec))
                 if path:
                     with open(path, 'rb') as f:
                         for chunk in iter(lambda: f.read(_BUFFER_SIZE), b""):
@@ -395,7 +397,7 @@ class CASCache():
             digest.size_bytes = os.fstat(tmp.fileno()).st_size
 
             # Place file at final location
-            objpath = self.objpath(digest)
+            objpath = self.objpath(digest, is_exec=is_exec)
             os.makedirs(os.path.dirname(objpath), exist_ok=True)
             os.link(tmp.name, objpath)
 
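
For orientation, here is a condensed standalone sketch (a hypothetical helper, not BuildStream's actual API) of the add_object() flow after this change: content is staged in a temporary file that already carries its final mode, hashed, then hard-linked into the object store:

    import hashlib
    import os
    import tempfile

    def add_object(casdir, data, *, is_exec=False):
        tmpdir = os.path.join(casdir, 'tmp')
        os.makedirs(tmpdir, exist_ok=True)
        with tempfile.NamedTemporaryFile(dir=tmpdir) as tmp:
            # Mode is set before the hardlink is created, never after
            os.chmod(tmp.name, 0o755 if is_exec else 0o644)
            tmp.write(data)
            tmp.flush()
            digest = hashlib.sha256(data).hexdigest()
            suffix = '.exec' if is_exec else ''
            objpath = os.path.join(casdir, 'objects', digest[:2], digest[2:] + suffix)
            os.makedirs(os.path.dirname(objpath), exist_ok=True)
            if not os.path.exists(objpath):
                os.link(tmp.name, objpath)   # the object inherits the temp file's mode
        return digest

    add_object('/tmp/toy-cas', b'#!/bin/sh\necho hello\n', is_exec=True)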
@@ -604,11 +606,7 @@ class CASCache():
         for filenode in directory.files:
             # regular file, create hardlink
             fullpath = os.path.join(dest, filenode.name)
-            os.link(self.objpath(filenode.digest), fullpath)
-
-            if filenode.is_executable:
-                os.chmod(fullpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
-                         stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+            os.link(self.objpath(filenode.digest, is_exec=filenode.is_executable), fullpath)
 
         for dirnode in directory.directories:
             # Don't try to checkout a dangling ref
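
The removed chmod is the heart of the leak this branch fixes: checkouts are hard links into the cache, and hard links share a single inode, so flipping the mode on the checked-out path also flipped it on the cached object. A minimal standalone demonstration (temporary paths, not BuildStream code):

    import os
    import stat
    import tempfile

    with tempfile.TemporaryDirectory() as d:
        obj = os.path.join(d, 'object')      # stands in for a cached CAS object
        dest = os.path.join(d, 'checkout')   # stands in for the checkout destination
        open(obj, 'w').close()
        os.chmod(obj, 0o644)
        os.link(obj, dest)
        os.chmod(dest, 0o755)                # what the removed code effectively did
        # The cached object's mode has changed too: the exec bit "leaked"
        assert stat.S_IMODE(os.stat(obj).st_mode) == 0o755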
@@ -700,8 +698,8 @@ class CASCache():
         elif stat.S_ISREG(mode):
             filenode = directory.files.add()
             filenode.name = name
-            self.add_object(path=full_path, digest=filenode.digest)
             filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
+            self.add_object(path=full_path, digest=filenode.digest, is_exec=filenode.is_executable)
         elif stat.S_ISLNK(mode):
             symlinknode = directory.symlinks.add()
             symlinknode.name = name
@@ -800,7 +798,7 @@ class CASCache():
 
         for filenode in directory.files:
             if update_mtime:
-                os.utime(self.objpath(filenode.digest))
+                os.utime(self.objpath(filenode.digest, is_exec=filenode.is_executable))
             reachable.add(filenode.digest.hash)
 
         for dirnode in directory.directories:
@@ -811,7 +809,7 @@ class CASCache():
         d = remote_execution_pb2.Digest()
         d.hash = directory_digest.hash
         d.size_bytes = directory_digest.size_bytes
-        yield d
+        yield False, d
 
         directory = remote_execution_pb2.Directory()
 
@@ -822,11 +820,26 @@ class CASCache():
             d = remote_execution_pb2.Digest()
             d.hash = filenode.digest.hash
             d.size_bytes = filenode.digest.size_bytes
-            yield d
+            yield filenode.is_executable, d
 
         for dirnode in directory.directories:
             yield from self._required_blobs(dirnode.digest)
 
+    # _temporary_object():
+    #
+    # Returns:
+    #     (file): A file object to a named temporary file.
+    #
+    # Create a named temporary file with 0o0644 access rights.
+    @contextlib.contextmanager
+    def _temporary_object(self, *, is_exec=False):
+        with utils._tempnamedfile(dir=self.tmpdir) as f:
+            access = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+            if is_exec and not self._disable_exec:
+                access |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+            os.chmod(f.name, access)
+            yield f
+
     # _ensure_blob():
     #
     # Fetch and add blob if it's not already local.
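
A standalone equivalent of the new _temporary_object() helper, for illustration only: the mode is fixed before any content is written, so the object is born with its final permissions and never needs a chmod after linking:

    import os
    import stat
    import tempfile

    def temporary_object(tmpdir, *, is_exec=False, disable_exec=False):
        # 0o644 base mode, plus exec bits when requested and not disabled
        f = tempfile.NamedTemporaryFile(dir=tmpdir)
        access = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        if is_exec and not disable_exec:
            access |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        os.chmod(f.name, access)
        return f

    with temporary_object(tempfile.gettempdir(), is_exec=True) as f:
        print(oct(stat.S_IMODE(os.stat(f.name).st_mode)))   # 0o755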
@@ -838,27 +851,27 @@ class CASCache():
     # Returns:
     #     (str): The path of the object
     #
-    def _ensure_blob(self, remote, digest):
-        objpath = self.objpath(digest)
+    def _ensure_blob(self, remote, digest, is_exec=False):
+        objpath = self.objpath(digest, is_exec=is_exec)
         if os.path.exists(objpath):
             # already in local repository
             return objpath
 
-        with utils._tempnamedfile(dir=self.tmpdir) as f:
+        with self._temporary_object(is_exec=is_exec) as f:
             remote._fetch_blob(digest, f)
 
-            added_digest = self.add_object(path=f.name, link_directly=True)
+            added_digest = self.add_object(path=f.name, link_directly=True, is_exec=is_exec)
             assert added_digest.hash == digest.hash
 
         return objpath
 
     def _batch_download_complete(self, batch):
-        for digest, data in batch.send():
-            with utils._tempnamedfile(dir=self.tmpdir) as f:
+        for digest, data, is_exec in batch.send():
+            with self._temporary_object(is_exec=is_exec) as f:
                 f.write(data)
                 f.flush()
 
-                added_digest = self.add_object(path=f.name, link_directly=True)
+                added_digest = self.add_object(path=f.name, link_directly=True, is_exec=is_exec)
                 assert added_digest.hash == digest.hash
 
     # Helper function for _fetch_directory().
@@ -872,8 +885,9 @@ class CASCache():
         return _CASBatchRead(remote)
 
     # Helper function for _fetch_directory().
-    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
-        in_local_cache = os.path.exists(self.objpath(digest))
+    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue,
+                              *, recursive=False, is_exec=False):
+        in_local_cache = os.path.exists(self.objpath(digest, is_exec=is_exec))
 
         if in_local_cache:
             # Skip download, already in local cache.
@@ -881,14 +895,14 @@ class CASCache():
         elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
               not remote.batch_read_supported):
             # Too large for batch request, download in independent request.
-            self._ensure_blob(remote, digest)
+            self._ensure_blob(remote, digest, is_exec=is_exec)
             in_local_cache = True
         else:
-            if not batch.add(digest):
+            if not batch.add(digest, is_exec=is_exec):
                 # Not enough space left in batch request.
                 # Complete pending batch first.
                 batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
-                batch.add(digest)
+                batch.add(digest, is_exec=is_exec)
 
         if recursive:
             if in_local_cache:
@@ -936,11 +950,13 @@ class CASCache():
             for dirnode in directory.directories:
                 if dirnode.name not in excluded_subdirs:
                     batch = self._fetch_directory_node(remote, dirnode.digest, batch,
-                                                       fetch_queue, fetch_next_queue, recursive=True)
+                                                       fetch_queue, fetch_next_queue,
+                                                       recursive=True)
 
             for filenode in directory.files:
                 batch = self._fetch_directory_node(remote, filenode.digest, batch,
-                                                   fetch_queue, fetch_next_queue)
+                                                   fetch_queue, fetch_next_queue,
+                                                   is_exec=filenode.is_executable)
 
         # Fetch final batch
         self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
@@ -962,7 +978,7 @@ class CASCache():
         tree.children.extend([tree.root])
         for directory in tree.children:
             for filenode in directory.files:
-                self._ensure_blob(remote, filenode.digest)
+                self._ensure_blob(remote, filenode.digest, is_exec=filenode.is_executable)
 
         # place directory blob only in final location when we've downloaded
         # all referenced blobs to avoid dangling references in the repository
@@ -975,22 +991,28 @@ class CASCache():
     def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
         required_blobs = self._required_blobs(digest)
 
+        executable = {}
+
         missing_blobs = dict()
         # Limit size of FindMissingBlobs request
        for required_blobs_group in _grouper(required_blobs, 512):
             request = remote_execution_pb2.FindMissingBlobsRequest(instance_name=remote.spec.instance_name)
 
-            for required_digest in required_blobs_group:
+            for is_exec, required_digest in required_blobs_group:
                 d = request.blob_digests.add()
                 d.hash = required_digest.hash
                 d.size_bytes = required_digest.size_bytes
+                if required_digest.hash not in executable:
+                    executable[required_digest.hash] = set()
+                executable[required_digest.hash].add(is_exec)
 
             response = remote.cas.FindMissingBlobs(request)
             for missing_digest in response.missing_blob_digests:
                 d = remote_execution_pb2.Digest()
                 d.hash = missing_digest.hash
                 d.size_bytes = missing_digest.size_bytes
-                missing_blobs[d.hash] = d
+                for is_exec in executable[missing_digest.hash]:
+                    missing_blobs[d.hash] = (is_exec, d)
 
         # Upload any blobs missing on the server
         self._send_blobs(remote, missing_blobs.values(), u_uid)
@@ -998,8 +1020,8 @@ class CASCache():
     def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
         batch = _CASBatchUpdate(remote)
 
-        for digest in digests:
-            with open(self.objpath(digest), 'rb') as f:
+        for is_exec, digest in digests:
+            with open(self.objpath(digest, is_exec=is_exec), 'rb') as f:
                 assert os.fstat(f.fileno()).st_size == digest.size_bytes
 
                 if (digest.size_bytes >= remote.max_batch_total_size_bytes or
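
Sketch of the bookkeeping _send_directory() now performs (hypothetical hash values): the same content may be referenced both as an executable and as plain data, so the flags seen for each hash are collected into a set before deciding which objects to upload. dict.setdefault condenses the "if hash not in ..." pattern used in the patch:

    # (is_exec, digest-hash) pairs, as now yielded by _required_blobs()
    required_blobs = [(False, 'ab12'), (True, 'ab12'), (False, 'cd34')]

    executable = {}
    for is_exec, digest_hash in required_blobs:
        executable.setdefault(digest_hash, set()).add(is_exec)

    print(executable)   # {'ab12': {False, True}, 'cd34': {False}}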

=====================================
buildstream/_cas/casremote.py
=====================================
@@ -306,8 +306,9 @@ class _CASBatchRead():
         self._request = remote_execution_pb2.BatchReadBlobsRequest()
         self._size = 0
         self._sent = False
+        self._is_exec = {}
 
-    def add(self, digest):
+    def add(self, digest, *, is_exec=False):
         assert not self._sent
 
         new_batch_size = self._size + digest.size_bytes
@@ -319,6 +320,9 @@ class _CASBatchRead():
         request_digest.hash = digest.hash
         request_digest.size_bytes = digest.size_bytes
         self._size = new_batch_size
+        if digest.hash not in self._is_exec:
+            self._is_exec[digest.hash] = set()
+        self._is_exec[digest.hash].add(is_exec)
         return True
 
     def send(self):
@@ -341,7 +345,8 @@ class _CASBatchRead():
                 raise CASRemoteError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
                     response.digest.hash, response.digest.size_bytes, len(response.data)))
 
-            yield (response.digest, response.data)
+            for is_exec in self._is_exec[response.digest.hash]:
+                yield (response.digest, response.data, is_exec)
 
 
 # Represents a batch of blobs queued for upload.
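
Sketch of what the _CASBatchRead changes amount to (illustrative data, not the real protobuf objects): each blob is still downloaded once, but send() replays it once per distinct is_exec flag recorded by add(), so _batch_download_complete() can materialize both the plain object and its '.exec' twin:

    is_exec_by_hash = {'ab12': {False, True}}          # recorded by add()
    downloaded = {'ab12': b'#!/bin/sh\necho hello\n'}  # one fetch per digest

    for digest_hash, data in downloaded.items():
        for is_exec in is_exec_by_hash[digest_hash]:
            print('store', digest_hash, 'with is_exec =', is_exec)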

=====================================
buildstream/_cas/casserver.py
=====================================
@@ -61,7 +61,7 @@ class ArtifactTooLargeException(Exception):
 def create_server(repo, *, enable_push,
                   max_head_size=int(10e9),
                   min_head_size=int(2e9)):
-    cas = CASCache(os.path.abspath(repo))
+    cas = CASCache(os.path.abspath(repo), disable_exec=True)
 
     # Use max_workers default from Python 3.5+
     max_workers = (os.cpu_count() or 1) * 5

=====================================
buildstream/_loader/loader.py
=====================================
@@ -20,8 +20,6 @@
 import os
 from functools import cmp_to_key
 from collections.abc import Mapping
-import tempfile
-import shutil
 
 from .._exceptions import LoadError, LoadErrorReason
 from .. import Consistency
@@ -49,12 +47,10 @@ from .._message import Message, MessageType
 #    context (Context): The Context object
 #    project (Project): The toplevel Project object
 #    parent (Loader): A parent Loader object, in the case this is a junctioned Loader
-#    tempdir (str): A directory to cleanup with the Loader, given to the loader by a parent
-#                   loader in the case that this loader is a subproject loader.
 #
 class Loader():
 
-    def __init__(self, context, project, *, parent=None, tempdir=None):
+    def __init__(self, context, project, *, parent=None):
 
         # Ensure we have an absolute path for the base directory
         basedir = project.element_path
@@ -73,7 +69,6 @@ class Loader():
         self._options = project.options      # Project options (OptionPool)
         self._basedir = basedir              # Base project directory
         self._first_pass_options = project.first_pass_config.options  # Project options (OptionPool)
-        self._tempdir = tempdir              # A directory to cleanup
         self._parent = parent                # The parent loader
 
         self._meta_elements = {}  # Dict of resolved meta elements by name
@@ -159,30 +154,6 @@ class Loader():
 
         return ret
 
-    # cleanup():
-    #
-    # Remove temporary checkout directories of subprojects
-    #
-    def cleanup(self):
-        if self._parent and not self._tempdir:
-            # already done
-            return
-
-        # recurse
-        for loader in self._loaders.values():
-            # value may be None with nested junctions without overrides
-            if loader is not None:
-                loader.cleanup()
-
-        if not self._parent:
-            # basedir of top-level loader is never a temporary directory
-            return
-
-        # safe guard to not accidentally delete directories outside builddir
-        if self._tempdir.startswith(self._context.builddir + os.sep):
-            if os.path.exists(self._tempdir):
-                shutil.rmtree(self._tempdir)
-
     ###########################################
     #   Private Methods                       #
     ###########################################
@@ -540,23 +511,28 @@ class Loader():
                                 "Subproject has no ref for junction: {}".format(filename),
                                 detail=detail)
 
-        if len(sources) == 1 and sources[0]._get_local_path():
+        workspace = element._get_workspace()
+        if workspace:
+            # If a workspace is open, load it from there instead
+            basedir = workspace.get_absolute_path()
+        elif len(sources) == 1 and sources[0]._get_local_path():
             # Optimization for junctions with a single local source
             basedir = sources[0]._get_local_path()
-            tempdir = None
         else:
             # Stage sources
-            os.makedirs(self._context.builddir, exist_ok=True)
-            basedir = tempfile.mkdtemp(prefix="{}-".format(element.normal_name), dir=self._context.builddir)
-            element._stage_sources_at(basedir, mount_workspaces=False)
-            tempdir = basedir
+            element._update_state()
+            basedir = os.path.join(self.project.directory, ".bst", "staged-junctions",
+                                   filename, element._get_cache_key())
+            if not os.path.exists(basedir):
+                os.makedirs(basedir, exist_ok=True)
+                element._stage_sources_at(basedir, mount_workspaces=False)
 
         # Load the project
         project_dir = os.path.join(basedir, element.path)
         try:
             from .._project import Project
             project = Project(project_dir, self._context, junction=element,
-                              parent_loader=self, tempdir=tempdir)
+                              parent_loader=self)
         except LoadError as e:
             if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
                 raise LoadError(reason=LoadErrorReason.INVALID_JUNCTION,
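
For illustration, the new staging location for a junction's sources (path components hypothetical): it is keyed by the element's cache key, so repeated loads of the same junction reuse one checkout and there is no temporary directory left to clean up, which is why Loader.cleanup() could be removed above:

    import os

    project_directory = '/home/user/project'   # stands in for self.project.directory
    filename = 'junction.bst'                  # the junction element's name
    cache_key = '0a1b2c3d'                     # stands in for element._get_cache_key()

    basedir = os.path.join(project_directory, '.bst', 'staged-junctions',
                           filename, cache_key)
    if not os.path.exists(basedir):
        os.makedirs(basedir, exist_ok=True)
        # element._stage_sources_at(basedir, mount_workspaces=False) runs here
    print(basedir)   # /home/user/project/.bst/staged-junctions/junction.bst/0a1b2c3d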

=====================================
buildstream/_project.py
=====================================
@@ -91,7 +91,7 @@ class ProjectConfig:
 class Project():
 
     def __init__(self, directory, context, *, junction=None, cli_options=None,
-                 default_mirror=None, parent_loader=None, tempdir=None):
+                 default_mirror=None, parent_loader=None):
 
         # The project name
         self.name = None
@@ -147,7 +147,7 @@ class Project():
         self._project_includes = None
 
         profile_start(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-'))
-        self._load(parent_loader=parent_loader, tempdir=tempdir)
+        self._load(parent_loader=parent_loader)
         profile_end(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-'))
 
         self._partially_loaded = True
@@ -389,8 +389,6 @@ class Project():
     # Cleans up resources used loading elements
     #
     def cleanup(self):
-        self.loader.cleanup()
-
         # Reset the element loader state
         Element._reset_load_state()
 
@@ -439,7 +437,7 @@ class Project():
     #
     # Raises: LoadError if there was a problem with the project.conf
     #
-    def _load(self, parent_loader=None, tempdir=None):
+    def _load(self, parent_loader=None):
 
         # Load builtin default
         projectfile = os.path.join(self.directory, _PROJECT_CONF_FILE)
@@ -505,8 +503,7 @@ class Project():
         self._fatal_warnings = _yaml.node_get(pre_config_node, list, 'fatal-warnings', default_value=[])
 
         self.loader = Loader(self._context, self,
-                             parent=parent_loader,
-                             tempdir=tempdir)
+                             parent=parent_loader)
 
         self._project_includes = Includes(self.loader, copy_tree=False)
 

=====================================
tests/frontend/buildcheckout.py
=====================================
@@ -2,6 +2,8 @@ import os
 import tarfile
 import hashlib
 import pytest
+import shutil
+import stat
 import subprocess
 from tests.testutils.site import IS_WINDOWS
 from tests.testutils import create_repo, ALL_REPO_KINDS, generate_junction
@@ -709,3 +711,34 @@ def test_build_checkout_cross_junction(datafiles, cli, tmpdir):
 
     filename = os.path.join(checkout, 'etc', 'animal.conf')
     assert os.path.exists(filename)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_access_rights(datafiles, cli):
+    project = str(datafiles)
+    checkout = os.path.join(cli.directory, 'checkout')
+
+    shutil.copyfile(os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello'),
+                    os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello-2'))
+    os.chmod(os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello'),
+             0o0755)
+    os.chmod(os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello-2'),
+             0o0644)
+
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    result.assert_success()
+
+    checkout_args = ['artifact', 'checkout', 'target.bst',
+                     '--directory', checkout]
+
+    # Now check it out
+    result = cli.run(project=project, args=checkout_args)
+    result.assert_success()
+
+    st = os.lstat(os.path.join(checkout, 'usr', 'bin', 'hello'))
+    assert stat.S_ISREG(st.st_mode)
+    assert stat.S_IMODE(st.st_mode) == 0o0755
+
+    st = os.lstat(os.path.join(checkout, 'usr', 'bin', 'hello-2'))
+    assert stat.S_ISREG(st.st_mode)
+    assert stat.S_IMODE(st.st_mode) == 0o0644

=====================================
tests/frontend/pull.py
=====================================
@@ -1,5 +1,6 @@
 import os
 import shutil
+import stat
 import pytest
 from buildstream.plugintestutils import cli
 from tests.testutils import create_artifact_share, generate_junction
@@ -462,3 +463,74 @@ def test_build_remote_option(caplog, cli, tmpdir, datafiles):
     assert shareproject.repo not in result.stderr
     assert shareuser.repo not in result.stderr
     assert sharecli.repo in result.stderr
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_pull_access_rights(caplog, cli, tmpdir, datafiles):
+    project = str(datafiles)
+    checkout = os.path.join(str(tmpdir), 'checkout')
+
+    # Work-around datafiles not preserving mode
+    os.chmod(os.path.join(project, 'files/bin-files/usr/bin/hello'), 0o0755)
+
+    # We need a big file that does not go into a batch to test a different
+    # code path
+    os.makedirs(os.path.join(project, 'files/dev-files/usr/share'), exist_ok=True)
+    with open(os.path.join(project, 'files/dev-files/usr/share/big-file'), 'w') as f:
+        buf = ' ' * 4096
+        for _ in range(1024):
+            f.write(buf)
+
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+        cli.configure({
+            'artifacts': {'url': share.repo, 'push': True}
+        })
+        result = cli.run(project=project, args=['build', 'compose-all.bst'])
+        result.assert_success()
+
+        result = cli.run(project=project,
+                         args=['artifact', 'checkout',
+                               '--hardlinks', '--no-integrate',
+                               'compose-all.bst',
+                               '--directory', checkout])
+        result.assert_success()
+
+        st = os.lstat(os.path.join(checkout, 'usr/include/pony.h'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
+
+        st = os.lstat(os.path.join(checkout, 'usr/bin/hello'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0755
+
+        st = os.lstat(os.path.join(checkout, 'usr/share/big-file'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
+
+        shutil.rmtree(checkout)
+
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
+
+        result = cli.run(project=project, args=['artifact', 'pull', 'compose-all.bst'])
+        result.assert_success()
+
+        result = cli.run(project=project,
+                         args=['artifact', 'checkout',
+                               '--hardlinks', '--no-integrate',
+                               'compose-all.bst',
+                               '--directory', checkout])
+        result.assert_success()
+
+        st = os.lstat(os.path.join(checkout, 'usr/include/pony.h'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
+
+        st = os.lstat(os.path.join(checkout, 'usr/bin/hello'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0755
+
+        st = os.lstat(os.path.join(checkout, 'usr/share/big-file'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644

=====================================
tests/testutils/artifactshare.py
=====================================
@@ -49,7 +49,7 @@ class ArtifactShare():
 
         os.makedirs(self.repodir)
 
-        self.cas = CASCache(self.repodir)
+        self.cas = CASCache(self.repodir, disable_exec=True)
 
         self.total_space = total_space
         self.free_space = free_space