Martin Blanchard pushed to branch mablanch/135-monitoring-bus at BuildGrid / buildgrid
Commits:
- 
886e3ff4
by Laurence Urhegyi at 2018-11-22T18:18:58Z
- 
131c6d87
by Finn at 2018-11-23T09:01:50Z
- 
2360d613
by Finn at 2018-11-23T09:01:50Z
- 
15e7a095
by Finn at 2018-11-23T09:01:50Z
- 
b21c1258
by Finn at 2018-11-23T09:01:50Z
- 
7e184bf9
by Finn at 2018-11-23T09:01:50Z
- 
38ed83ba
by Finn at 2018-11-23T09:01:50Z
- 
207fdeab
by Martin Blanchard at 2018-11-23T13:05:25Z
- 
beabf7e9
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
eb522334
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
0c56829f
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
6c89ca1e
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
9485a0ee
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
a172b8e6
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
19ec2ca2
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
2ed8382a
by Martin Blanchard at 2018-11-23T13:05:26Z
- 
5fd284f2
by Martin Blanchard at 2018-11-23T13:05:26Z
23 changed files:
- + COMMITTERS.md
- CONTRIBUTING.rst
- − MAINTAINERS
- buildgrid/_app/bots/buildbox.py
- buildgrid/_app/bots/host.py
- buildgrid/_app/commands/cmd_bot.py
- buildgrid/_app/commands/cmd_cas.py
- buildgrid/_app/commands/cmd_execute.py
- buildgrid/_app/commands/cmd_operation.py
- buildgrid/_app/commands/cmd_server.py
- buildgrid/_enums.py
- + buildgrid/_protos/buildgrid/__init__.py
- + buildgrid/_protos/buildgrid/v2/__init__.py
- + buildgrid/_protos/buildgrid/v2/monitoring.proto
- + buildgrid/_protos/buildgrid/v2/monitoring_pb2.py
- + buildgrid/_protos/buildgrid/v2/monitoring_pb2_grpc.py
- + buildgrid/server/_monitoring.py
- buildgrid/server/cas/instance.py
- buildgrid/server/cas/service.py
- buildgrid/server/instance.py
- tests/cas/test_services.py
- tests/server_instance.py
- + tests/utils/server.py
Changes:
| 1 | +## COMMITTERS
 | |
| 2 | + | |
| 3 | +| Name | Email |
 | |
| 4 | +| -------- | -------- | 
 | |
| 5 | +|  Carter Sande  | <carter.sande@duodecima.technology> |
 | |
| 6 | +|  Ed Baunton  | <edbaunton@gmail.com> |
 | |
| 7 | +|  Laurence Urhegyi  | <laurence.urhegyi@codethink.co.uk> |
 | |
| 8 | +|  Finn Ball  | <finn.ball@codethink.co.uk> |
 | |
| 9 | +|  Paul Sherwood  | <paul.sherwood@codethink.co.uk> |
 | |
| 10 | +|  James Ennis  | <james.ennis@codethink.com> |
 | |
| 11 | +|  Jim MacArthur  | <jim.macarthur@codethink.co.uk> |
 | |
| 12 | +|  Juerg Billeter  | <juerg.billeter@codethink.co.uk> |
 | |
| 13 | +|  Martin Blanchard  | <martin.blanchard@codethink.co.uk> |
 | |
| 14 | +|  Marios Hadjimichael  | <mhadjimichae@bloomberg.net> |
 | |
| 15 | +|  Raoul Hidalgo Charman  | <raoul.hidalgocharman@codethink.co.uk> |
 | |
| 16 | +|  Rohit Kothur  | <rkothur@bloomberg.net> | 
| ... | ... | @@ -32,40 +32,31 @@ side effects and quirks the feature may have introduced. More on this below in | 
| 32 | 32 |  | 
| 33 | 33 |  .. _BuildGrid mailing list: https://lists.buildgrid.build/cgi-bin/mailman/listinfo/buildgrid
 | 
| 34 | 34 |  | 
| 35 | - | |
| 36 | 35 |  .. _patch-submissions:
 | 
| 37 | 36 |  | 
| 38 | 37 |  Patch submissions
 | 
| 39 | 38 |  -----------------
 | 
| 40 | 39 |  | 
| 41 | -We are running `trunk based development`_. The idea behind this is that merge
 | |
| 42 | -requests to the trunk will be small and made often, thus making the review and
 | |
| 43 | -merge process as fast as possible. We do not want to end up with a huge backlog
 | |
| 44 | -of outstanding merge requests. If possible, it is preferred that merge requests
 | |
| 45 | -address specific points and clearly outline what problem they are solving.
 | |
| 46 | - | |
| 47 | -Branches must be submitted as merge requests (MR) on GitLab and should be
 | |
| 48 | -associated with an issue, whenever possible. If it's a small change, we'll
 | |
| 49 | -accept an MR without it being associated to an issue, but generally we prefer an
 | |
| 50 | -issue to be raised in advance. This is so that we can track the work that is
 | |
| 40 | +Branches must be submitted as merge requests (MR) on GitLab and should have a 
 | |
| 41 | +corresponding issue raised in advance (whenever possible). If it's a small change,
 | |
| 42 | +an MR without it being associated to an issue is okay, but generally we prefer an
 | |
| 43 | +issue to be raised in advance so that we can track the work that is
 | |
| 51 | 44 |  currently in progress on the project.
 | 
| 52 | 45 |  | 
| 46 | +When submitting a merge request, please obtain a review from another committer 
 | |
| 47 | +who is familiar with the area of the code base which the branch affects. An 
 | |
| 48 | +approval from another committer who is not the patch author will be needed 
 | |
| 49 | +before any merge (we use Gitlab's 'approval' feature for this).
 | |
| 50 | + | |
| 53 | 51 |  Below is a list of patch submission good practices:
 | 
| 54 | 52 |  | 
| 55 | 53 |  - Each commit should address a specific issue number in the commit message. This
 | 
| 56 | 54 |    is really important for provenance reasons.
 | 
| 57 | -- Merge requests that are not yet ready for review must be prefixed with the
 | |
| 58 | -  ``WIP:`` identifier, but if we stick to trunk based development then the
 | |
| 59 | -  ``WIP:`` identifier will not stay around for very long on a merge request.
 | |
| 60 | -- When a merge request is ready for review, please find someone willing to do
 | |
| 61 | -  the review (ideally a maintainer) and assign them the MR, leaving a comment
 | |
| 62 | -  asking for their review.
 | |
| 55 | +- Merge requests that are not yet ready for review should be prefixed with the
 | |
| 56 | +  ``WIP:`` identifier.
 | |
| 63 | 57 |  - Submitted branches should not contain a history of work done.
 | 
| 64 | 58 |  - Unit tests should be a separate commit.
 | 
| 65 | 59 |  | 
| 66 | -.. _trunk based development: https://trunkbaseddevelopment.com
 | |
| 67 | - | |
| 68 | - | |
| 69 | 60 |  Commit messages
 | 
| 70 | 61 |  ~~~~~~~~~~~~~~~
 | 
| 71 | 62 |  | 
| ... | ... | @@ -89,6 +80,57 @@ For more tips, please read `The seven rules of a great Git commit message`_. | 
| 89 | 80 |  | 
| 90 | 81 |  .. _The seven rules of a great Git commit message: https://chris.beams.io/posts/git-commit/#seven-rules
 | 
| 91 | 82 |  | 
| 83 | +.. _committer-access:
 | |
| 84 | + | |
| 85 | +Committer access
 | |
| 86 | +----------------
 | |
| 87 | + | |
| 88 | +Committers in the BuildGrid project are those folks to whom the right to 
 | |
| 89 | +directly commit changes to our version controlled resources has been granted. 
 | |
| 90 | +While every contribution is 
 | |
| 91 | +valued regardless of its source, not every person who contributes code to the 
 | |
| 92 | +project will earn commit access. The `COMMITTERS`_ file lists all committers.
 | |
| 93 | + | |
| 94 | +.. _COMMITTERS: https://gitlab.com/BuildGrid/buildgrid/blob/master/COMMITTERS.md
 | |
| 95 | +.. _Subversion: http://subversion.apache.org/docs/community-guide/roles.html#committers
 | |
| 96 | + | |
| 97 | + | |
| 98 | +How commit access is granted
 | |
| 99 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 | |
| 100 | + | |
| 101 | +After someone has successfully contributed a few non-trivial patches, some full
 | |
| 102 | +committer, usually whoever has reviewed and applied the most patches from that
 | |
| 103 | +contributor, proposes them for commit access. This proposal is sent only to the
 | |
| 104 | +other full committers -- the ensuing discussion is private, so that everyone can
 | |
| 105 | +feel comfortable speaking their minds. Assuming there are no objections, the
 | |
| 106 | +contributor is granted commit access. The decision is made by consensus; there
 | |
| 107 | +are no formal rules governing the procedure, though generally if someone strongly
 | |
| 108 | +objects the access is not offered, or is offered on a provisional basis.
 | |
| 109 | + | |
| 110 | +This of course relies on contributors being responsive and showing willingness
 | |
| 111 | +to address any problems that may arise after landing patches. However, the primary
 | |
| 112 | +criterion for commit access is good judgment.
 | |
| 113 | + | |
| 114 | +You do not have to be a technical wizard, or demonstrate deep knowledge of the
 | |
| 115 | +entire codebase to become a committer. You just need to know what you don't
 | |
| 116 | +know. If your patches adhere to the guidelines in this file, adhere to all the usual
 | |
| 117 | +unquantifiable rules of coding (code should be readable, robust, maintainable, etc.),
 | |
| 118 | +and respect the Hippocratic Principle of "first, do no harm", then you will probably
 | |
| 119 | +get commit access pretty quickly. The size, complexity, and quantity of your patches
 | |
| 120 | +do not matter as much as the degree of care you show in avoiding bugs and minimizing
 | |
| 121 | +unnecessary impact on the rest of the code. Many full committers are people who have
 | |
| 122 | +not made major code contributions, but rather lots of small, clean fixes, each of
 | |
| 123 | +which was an unambiguous improvement to the code. (Of course, this does not mean the
 | |
| 124 | +project needs a bunch of very trivial patches whose only purpose is to gain commit
 | |
| 125 | +access; knowing what's worth a patch post and what's not is part of showing good
 | |
| 126 | +judgement.)
 | |
| 127 | + | |
| 128 | +When submitting a merge request, please obtain a review from another committer
 | |
| 129 | +who is familiar with the area of the code base which the branch affects. Asking on
 | |
| 130 | +Slack is probably the best way to go about this. An approval from a committer
 | |
| 131 | +who is not the patch author will be needed before any merge (we use Gitlab's
 | |
| 132 | +'approval' feature for this).
 | |
| 133 | + | |
| 92 | 134 |  | 
| 93 | 135 |  .. _coding-style:
 | 
| 94 | 136 |  | 
| ... | ... | @@ -198,35 +240,6 @@ trunk. | 
| 198 | 240 |  | 
| 199 | 241 |  .. _coverage report: https://buildgrid.gitlab.io/buildgrid/coverage/
 | 
| 200 | 242 |  | 
| 201 | - | |
| 202 | -.. _committer-access:
 | |
| 203 | - | |
| 204 | -Committer access
 | |
| 205 | -----------------
 | |
| 206 | - | |
| 207 | -We'll hand out commit access to anyone who has successfully landed a single
 | |
| 208 | -patch to the code base. Please request this via Slack or the mailing list.
 | |
| 209 | - | |
| 210 | -This of course relies on contributors being responsive and showing willingness 
 | |
| 211 | -to address any problems that may arise after landing branches.
 | |
| 212 | - | |
| 213 | -When submitting a merge request, please obtain a review from another committer 
 | |
| 214 | -who is familiar with the area of the code base which the branch effects. An 
 | |
| 215 | -approval from another committer who is not the patch author will be needed 
 | |
| 216 | -before any merge (we use gitlab's 'approval' feature for this).
 | |
| 217 | - | |
| 218 | -What we are expecting of committers here in general is basically to escalate the
 | |
| 219 | -review in cases of uncertainty.
 | |
| 220 | - | |
| 221 | -.. note::
 | |
| 222 | - | |
| 223 | -   We don't have any detailed policy for "bad actors", but will of course handle
 | |
| 224 | -   things on a case by case basis - commit access should not result in commit
 | |
| 225 | -   wars or be used as a tool to subvert the project when disagreements arise.
 | |
| 226 | -   Such incidents (if any) would surely lead to temporary suspension of commit
 | |
| 227 | -   rights.
 | |
| 228 | - | |
| 229 | - | |
| 230 | 243 |  .. _gitlab-features:
 | 
| 231 | 244 |  | 
| 232 | 245 |  GitLab features
 | 
| 1 | -Finn Ball
 | |
| 2 | -E-mail: finn ball codethink co uk
 | |
| 3 | -Userid: finnball | 
| ... | ... | @@ -13,6 +13,7 @@ | 
| 13 | 13 |  # limitations under the License.
 | 
| 14 | 14 |  | 
| 15 | 15 |  | 
| 16 | +import logging
 | |
| 16 | 17 |  import os
 | 
| 17 | 18 |  import subprocess
 | 
| 18 | 19 |  import tempfile
 | 
| ... | ... | @@ -29,7 +30,8 @@ def work_buildbox(context, lease): | 
| 29 | 30 |      """
 | 
| 30 | 31 |      local_cas_directory = context.local_cas
 | 
| 31 | 32 |      # instance_name = context.parent
 | 
| 32 | -    logger = context.logger
 | |
| 33 | + | |
| 34 | +    logger = logging.getLogger(__name__)
 | |
| 33 | 35 |  | 
| 34 | 36 |      action_digest = remote_execution_pb2.Digest()
 | 
| 35 | 37 |  | 
| ... | ... | @@ -13,6 +13,7 @@ | 
| 13 | 13 |  # limitations under the License.
 | 
| 14 | 14 |  | 
| 15 | 15 |  | 
| 16 | +import logging
 | |
| 16 | 17 |  import os
 | 
| 17 | 18 |  import subprocess
 | 
| 18 | 19 |  import tempfile
 | 
| ... | ... | @@ -26,7 +27,7 @@ def work_host_tools(context, lease): | 
| 26 | 27 |      """Executes a lease for a build action, using host tools.
 | 
| 27 | 28 |      """
 | 
| 28 | 29 |      instance_name = context.parent
 | 
| 29 | -    logger = context.logger
 | |
| 30 | +    logger = logging.getLogger(__name__)
 | |
| 30 | 31 |  | 
| 31 | 32 |      action_digest = remote_execution_pb2.Digest()
 | 
| 32 | 33 |      action_result = remote_execution_pb2.ActionResult()
 | 
| ... | ... | @@ -20,7 +20,6 @@ Bot command | 
| 20 | 20 |  Create a bot interface and request work
 | 
| 21 | 21 |  """
 | 
| 22 | 22 |  | 
| 23 | -import logging
 | |
| 24 | 23 |  from pathlib import Path, PurePath
 | 
| 25 | 24 |  import sys
 | 
| 26 | 25 |  from urllib.parse import urlparse
 | 
| ... | ... | @@ -120,8 +119,7 @@ def cli(context, parent, update_period, remote, client_key, client_cert, server_ | 
| 120 | 119 |          context.cas_client_cert = context.client_cert
 | 
| 121 | 120 |          context.cas_server_cert = context.server_cert
 | 
| 122 | 121 |  | 
| 123 | -    context.logger = logging.getLogger(__name__)
 | |
| 124 | -    context.logger.debug("Starting for remote {}".format(context.remote))
 | |
| 122 | +    click.echo("Starting for remote=[{}]".format(context.remote))
 | |
| 125 | 123 |  | 
| 126 | 124 |      interface = bot_interface.BotInterface(context.channel)
 | 
| 127 | 125 |  | 
| ... | ... | @@ -20,7 +20,6 @@ Execute command | 
| 20 | 20 |  Request work to be executed and monitor status of jobs.
 | 
| 21 | 21 |  """
 | 
| 22 | 22 |  | 
| 23 | -import logging
 | |
| 24 | 23 |  import os
 | 
| 25 | 24 |  import sys
 | 
| 26 | 25 |  from urllib.parse import urlparse
 | 
| ... | ... | @@ -63,8 +62,7 @@ def cli(context, remote, instance_name, client_key, client_cert, server_cert): | 
| 63 | 62 |  | 
| 64 | 63 |          context.channel = grpc.secure_channel(context.remote, credentials)
 | 
| 65 | 64 |  | 
| 66 | -    context.logger = logging.getLogger(__name__)
 | |
| 67 | -    context.logger.debug("Starting for remote {}".format(context.remote))
 | |
| 65 | +    click.echo("Starting for remote=[{}]".format(context.remote))
 | |
| 68 | 66 |  | 
| 69 | 67 |  | 
| 70 | 68 |  @cli.command('upload-dummy', short_help="Upload a dummy action. Should be used with `execute dummy-request`")
 | 
| ... | ... | @@ -75,7 +73,7 @@ def upload_dummy(context): | 
| 75 | 73 |          action_digest = uploader.put_message(action)
 | 
| 76 | 74 |  | 
| 77 | 75 |      if action_digest.ByteSize():
 | 
| 78 | -        click.echo('Success: Pushed digest "{}/{}"'
 | |
| 76 | +        click.echo('Success: Pushed digest=[{}/{}]'
 | |
| 79 | 77 |                     .format(action_digest.hash, action_digest.size_bytes))
 | 
| 80 | 78 |      else:
 | 
| 81 | 79 |          click.echo("Error: Failed pushing empty message.", err=True)
 | 
| ... | ... | @@ -92,7 +90,7 @@ def upload_file(context, file_path, verify): | 
| 92 | 90 |          for path in file_path:
 | 
| 93 | 91 |              if not os.path.isabs(path):
 | 
| 94 | 92 |                  path = os.path.abspath(path)
 | 
| 95 | -            context.logger.debug("Queueing {}".format(path))
 | |
| 93 | +            click.echo("Queueing path=[{}]".format(path))
 | |
| 96 | 94 |  | 
| 97 | 95 |              file_digest = uploader.upload_file(path, queue=True)
 | 
| 98 | 96 |  | 
| ... | ... | @@ -102,12 +100,12 @@ def upload_file(context, file_path, verify): | 
| 102 | 100 |      for file_digest in sent_digests:
 | 
| 103 | 101 |          file_path = os.path.relpath(files_map[file_digest.hash])
 | 
| 104 | 102 |          if verify and file_digest.size_bytes != os.stat(file_path).st_size:
 | 
| 105 | -            click.echo('Error: Failed to verify "{}"'.format(file_path), err=True)
 | |
| 103 | +            click.echo("Error: Failed to verify '{}'".format(file_path), err=True)
 | |
| 106 | 104 |          elif file_digest.ByteSize():
 | 
| 107 | -            click.echo('Success: Pushed "{}" with digest "{}/{}"'
 | |
| 105 | +            click.echo("Success: Pushed path=[{}] with digest=[{}/{}]"
 | |
| 108 | 106 |                         .format(file_path, file_digest.hash, file_digest.size_bytes))
 | 
| 109 | 107 |          else:
 | 
| 110 | -            click.echo('Error: Failed pushing "{}"'.format(file_path), err=True)
 | |
| 108 | +            click.echo("Error: Failed pushing path=[{}]".format(file_path), err=True)
 | |
| 111 | 109 |  | 
| 112 | 110 |  | 
| 113 | 111 |  @cli.command('upload-dir', short_help="Upload a directory to the CAS server.")
 | 
| ... | ... | @@ -121,7 +119,7 @@ def upload_directory(context, directory_path, verify): | 
| 121 | 119 |          for node, blob, path in merkle_tree_maker(directory_path):
 | 
| 122 | 120 |              if not os.path.isabs(path):
 | 
| 123 | 121 |                  path = os.path.abspath(path)
 | 
| 124 | -            context.logger.debug("Queueing {}".format(path))
 | |
| 122 | +            click.echo("Queueing path=[{}]".format(path))
 | |
| 125 | 123 |  | 
| 126 | 124 |              node_digest = uploader.put_blob(blob, digest=node.digest, queue=True)
 | 
| 127 | 125 |  | 
| ... | ... | @@ -134,12 +132,12 @@ def upload_directory(context, directory_path, verify): | 
| 134 | 132 |              node_path = os.path.relpath(node_path)
 | 
| 135 | 133 |          if verify and (os.path.isfile(node_path) and
 | 
| 136 | 134 |                         node_digest.size_bytes != os.stat(node_path).st_size):
 | 
| 137 | -            click.echo('Error: Failed to verify "{}"'.format(node_path), err=True)
 | |
| 135 | +            click.echo("Error: Failed to verify path=[{}]".format(node_path), err=True)
 | |
| 138 | 136 |          elif node_digest.ByteSize():
 | 
| 139 | -            click.echo('Success: Pushed "{}" with digest "{}/{}"'
 | |
| 137 | +            click.echo("Success: Pushed path=[{}] with digest=[{}/{}]"
 | |
| 140 | 138 |                         .format(node_path, node_digest.hash, node_digest.size_bytes))
 | 
| 141 | 139 |          else:
 | 
| 142 | -            click.echo('Error: Failed pushing "{}"'.format(node_path), err=True)
 | |
| 140 | +            click.echo("Error: Failed pushing path=[{}]".format(node_path), err=True)
 | |
| 143 | 141 |  | 
| 144 | 142 |  | 
| 145 | 143 |  def _create_digest(digest_string):
 | 
| ... | ... | @@ -160,8 +158,8 @@ def _create_digest(digest_string): | 
| 160 | 158 |  @pass_context
 | 
| 161 | 159 |  def download_file(context, digest_string, file_path, verify):
 | 
| 162 | 160 |      if os.path.exists(file_path):
 | 
| 163 | -        click.echo('Error: Invalid value for "file-path": ' +
 | |
| 164 | -                   'Path "{}" already exists.'.format(file_path), err=True)
 | |
| 161 | +        click.echo("Error: Invalid value for " +
 | |
| 162 | +                   "path=[{}] already exists.".format(file_path), err=True)
 | |
| 165 | 163 |          return
 | 
| 166 | 164 |  | 
| 167 | 165 |      digest = _create_digest(digest_string)
 | 
| ... | ... | @@ -171,11 +169,11 @@ def download_file(context, digest_string, file_path, verify): | 
| 171 | 169 |      if verify:
 | 
| 172 | 170 |          file_digest = create_digest(read_file(file_path))
 | 
| 173 | 171 |          if file_digest != digest:
 | 
| 174 | -            click.echo('Error: Failed to verify "{}"'.format(file_path), err=True)
 | |
| 172 | +            click.echo("Error: Failed to verify path=[{}]".format(file_path), err=True)
 | |
| 175 | 173 |              return
 | 
| 176 | 174 |  | 
| 177 | 175 |      if os.path.isfile(file_path):
 | 
| 178 | -        click.echo('Success: Pulled "{}" from digest "{}/{}"'
 | |
| 176 | +        click.echo("Success: Pulled path=[{}] from digest=[{}/{}]"
 | |
| 179 | 177 |                     .format(file_path, digest.hash, digest.size_bytes))
 | 
| 180 | 178 |      else:
 | 
| 181 | 179 |          click.echo('Error: Failed pulling "{}"'.format(file_path), err=True)
 | 
| ... | ... | @@ -190,8 +188,8 @@ def download_file(context, digest_string, file_path, verify): | 
| 190 | 188 |  def download_directory(context, digest_string, directory_path, verify):
 | 
| 191 | 189 |      if os.path.exists(directory_path):
 | 
| 192 | 190 |          if not os.path.isdir(directory_path) or os.listdir(directory_path):
 | 
| 193 | -            click.echo('Error: Invalid value for "directory-path": ' +
 | |
| 194 | -                       'Path "{}" already exists.'.format(directory_path), err=True)
 | |
| 191 | +            click.echo("Error: Invalid value, " +
 | |
| 192 | +                       "path=[{}] already exists.".format(directory_path), err=True)
 | |
| 195 | 193 |              return
 | 
| 196 | 194 |  | 
| 197 | 195 |      digest = _create_digest(digest_string)
 | 
| ... | ... | @@ -204,11 +202,11 @@ def download_directory(context, digest_string, directory_path, verify): | 
| 204 | 202 |              if node.DESCRIPTOR is remote_execution_pb2.DirectoryNode.DESCRIPTOR:
 | 
| 205 | 203 |                  last_directory_node = node
 | 
| 206 | 204 |          if last_directory_node.digest != digest:
 | 
| 207 | -            click.echo('Error: Failed to verify "{}"'.format(directory_path), err=True)
 | |
| 205 | +            click.echo("Error: Failed to verify path=[{}]".format(directory_path), err=True)
 | |
| 208 | 206 |              return
 | 
| 209 | 207 |  | 
| 210 | 208 |      if os.path.isdir(directory_path):
 | 
| 211 | -        click.echo('Success: Pulled "{}" from digest "{}/{}"'
 | |
| 209 | +        click.echo("Success: Pulled path=[{}] from digest=[{}/{}]"
 | |
| 212 | 210 |                     .format(directory_path, digest.hash, digest.size_bytes))
 | 
| 213 | 211 |      else:
 | 
| 214 | -        click.echo('Error: Failed pulling "{}"'.format(directory_path), err=True) | |
| 212 | +        click.echo("Error: Failed pulling path=[{}]".format(directory_path), err=True) | 
| ... | ... | @@ -20,7 +20,6 @@ Execute command | 
| 20 | 20 |  Request work to be executed and monitor status of jobs.
 | 
| 21 | 21 |  """
 | 
| 22 | 22 |  | 
| 23 | -import logging
 | |
| 24 | 23 |  import os
 | 
| 25 | 24 |  import stat
 | 
| 26 | 25 |  import sys
 | 
| ... | ... | @@ -64,8 +63,7 @@ def cli(context, remote, instance_name, client_key, client_cert, server_cert): | 
| 64 | 63 |  | 
| 65 | 64 |          context.channel = grpc.secure_channel(context.remote, credentials)
 | 
| 66 | 65 |  | 
| 67 | -    context.logger = logging.getLogger(__name__)
 | |
| 68 | -    context.logger.debug("Starting for remote {}".format(context.remote))
 | |
| 66 | +    click.echo("Starting for remote=[{}]".format(context.remote))
 | |
| 69 | 67 |  | 
| 70 | 68 |  | 
| 71 | 69 |  @cli.command('request-dummy', short_help="Send a dummy action.")
 | 
| ... | ... | @@ -76,7 +74,7 @@ def cli(context, remote, instance_name, client_key, client_cert, server_cert): | 
| 76 | 74 |  @pass_context
 | 
| 77 | 75 |  def request_dummy(context, number, wait_for_completion):
 | 
| 78 | 76 |  | 
| 79 | -    context.logger.info("Sending execution request...")
 | |
| 77 | +    click.echo("Sending execution request...")
 | |
| 80 | 78 |      action = remote_execution_pb2.Action(do_not_cache=True)
 | 
| 81 | 79 |      action_digest = create_digest(action.SerializeToString())
 | 
| 82 | 80 |  | 
| ... | ... | @@ -96,7 +94,7 @@ def request_dummy(context, number, wait_for_completion): | 
| 96 | 94 |              result = None
 | 
| 97 | 95 |              for stream in response:
 | 
| 98 | 96 |                  result = stream
 | 
| 99 | -                context.logger.info(result)
 | |
| 97 | +                click.echo(result)
 | |
| 100 | 98 |  | 
| 101 | 99 |              if not result.done:
 | 
| 102 | 100 |                  click.echo("Result did not return True." +
 | 
| ... | ... | @@ -104,7 +102,7 @@ def request_dummy(context, number, wait_for_completion): | 
| 104 | 102 |                  sys.exit(-1)
 | 
| 105 | 103 |  | 
| 106 | 104 |          else:
 | 
| 107 | -            context.logger.info(next(response))
 | |
| 105 | +            click.echo(next(response))
 | |
| 108 | 106 |  | 
| 109 | 107 |  | 
| 110 | 108 |  @cli.command('command', short_help="Send a command to be executed.")
 | 
| ... | ... | @@ -132,12 +130,12 @@ def run_command(context, input_root, commands, output_file, output_directory): | 
| 132 | 130 |  | 
| 133 | 131 |          command_digest = uploader.put_message(command, queue=True)
 | 
| 134 | 132 |  | 
| 135 | -        context.logger.info('Sent command: {}'.format(command_digest))
 | |
| 133 | +        click.echo("Sent command=[{}]".format(command_digest))
 | |
| 136 | 134 |  | 
| 137 | 135 |          # TODO: Check for missing blobs
 | 
| 138 | 136 |          input_root_digest = uploader.upload_directory(input_root)
 | 
| 139 | 137 |  | 
| 140 | -        context.logger.info('Sent input: {}'.format(input_root_digest))
 | |
| 138 | +        click.echo("Sent input=[{}]".format(input_root_digest))
 | |
| 141 | 139 |  | 
| 142 | 140 |          action = remote_execution_pb2.Action(command_digest=command_digest,
 | 
| 143 | 141 |                                               input_root_digest=input_root_digest,
 | 
| ... | ... | @@ -145,7 +143,7 @@ def run_command(context, input_root, commands, output_file, output_directory): | 
| 145 | 143 |  | 
| 146 | 144 |          action_digest = uploader.put_message(action, queue=True)
 | 
| 147 | 145 |  | 
| 148 | -        context.logger.info("Sent action: {}".format(action_digest))
 | |
| 146 | +        click.echo("Sent action=[{}]".format(action_digest))
 | |
| 149 | 147 |  | 
| 150 | 148 |      request = remote_execution_pb2.ExecuteRequest(instance_name=context.instance_name,
 | 
| 151 | 149 |                                                    action_digest=action_digest,
 | 
| ... | ... | @@ -154,7 +152,7 @@ def run_command(context, input_root, commands, output_file, output_directory): | 
| 154 | 152 |  | 
| 155 | 153 |      stream = None
 | 
| 156 | 154 |      for stream in response:
 | 
| 157 | -        context.logger.info(stream)
 | |
| 155 | +        click.echo(stream)
 | |
| 158 | 156 |  | 
| 159 | 157 |      execute_response = remote_execution_pb2.ExecuteResponse()
 | 
| 160 | 158 |      stream.response.Unpack(execute_response)
 | 
| ... | ... | @@ -21,7 +21,6 @@ Check the status of operations | 
| 21 | 21 |  """
 | 
| 22 | 22 |  | 
| 23 | 23 |  from collections import OrderedDict
 | 
| 24 | -import logging
 | |
| 25 | 24 |  from operator import attrgetter
 | 
| 26 | 25 |  from urllib.parse import urlparse
 | 
| 27 | 26 |  import sys
 | 
| ... | ... | @@ -67,8 +66,7 @@ def cli(context, remote, instance_name, client_key, client_cert, server_cert): | 
| 67 | 66 |  | 
| 68 | 67 |          context.channel = grpc.secure_channel(context.remote, credentials)
 | 
| 69 | 68 |  | 
| 70 | -    context.logger = logging.getLogger(__name__)
 | |
| 71 | -    context.logger.debug("Starting for remote {}".format(context.remote))
 | |
| 69 | +    click.echo("Starting for remote=[{}]".format(context.remote))
 | |
| 72 | 70 |  | 
| 73 | 71 |  | 
| 74 | 72 |  def _print_operation_status(operation, print_details=False):
 | 
| ... | ... | @@ -20,8 +20,6 @@ Server command | 
| 20 | 20 |  Create a BuildGrid server.
 | 
| 21 | 21 |  """
 | 
| 22 | 22 |  | 
| 23 | -import asyncio
 | |
| 24 | -import logging
 | |
| 25 | 23 |  import sys
 | 
| 26 | 24 |  | 
| 27 | 25 |  import click
 | 
| ... | ... | @@ -35,7 +33,7 @@ from ..settings import parser | 
| 35 | 33 |  @click.group(name='server', short_help="Start a local server instance.")
 | 
| 36 | 34 |  @pass_context
 | 
| 37 | 35 |  def cli(context):
 | 
| 38 | -    context.logger = logging.getLogger(__name__)
 | |
| 36 | +    pass
 | |
| 39 | 37 |  | 
| 40 | 38 |  | 
| 41 | 39 |  @cli.command('start', short_help="Setup a new server instance.")
 | 
| ... | ... | @@ -52,18 +50,14 @@ def start(context, config): | 
| 52 | 50 |          click.echo("ERROR: Could not parse config: {}.\n".format(str(e)), err=True)
 | 
| 53 | 51 |          sys.exit(-1)
 | 
| 54 | 52 |  | 
| 55 | -    loop = asyncio.get_event_loop()
 | |
| 56 | 53 |      try:
 | 
| 57 | 54 |          server.start()
 | 
| 58 | -        loop.run_forever()
 | |
| 59 | 55 |  | 
| 60 | 56 |      except KeyboardInterrupt:
 | 
| 61 | 57 |          pass
 | 
| 62 | 58 |  | 
| 63 | 59 |      finally:
 | 
| 64 | -        context.logger.info("Stopping server")
 | |
| 65 | 60 |          server.stop()
 | 
| 66 | -        loop.close()
 | |
| 67 | 61 |  | 
| 68 | 62 |  | 
| 69 | 63 |  def _create_server_from_config(config):
 | 
| ... | ... | @@ -16,9 +16,13 @@ | 
| 16 | 16 |  from enum import Enum
 | 
| 17 | 17 |  | 
| 18 | 18 |  from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
 | 
| 19 | +from buildgrid._protos.buildgrid.v2 import monitoring_pb2
 | |
| 19 | 20 |  from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2
 | 
| 20 | 21 |  | 
| 21 | 22 |  | 
| 23 | +# RWAPI enumerations
 | |
| 24 | +# From google/devtools/remoteworkers/v1test2/bots.proto:
 | |
| 25 | + | |
| 22 | 26 |  class BotStatus(Enum):
 | 
| 23 | 27 |      # Initially unknown state.
 | 
| 24 | 28 |      BOT_STATUS_UNSPECIFIED = bots_pb2.BotStatus.Value('BOT_STATUS_UNSPECIFIED')
 | 
| ... | ... | @@ -45,6 +49,9 @@ class LeaseState(Enum): | 
| 45 | 49 |      CANCELLED = bots_pb2.LeaseState.Value('CANCELLED')
 | 
| 46 | 50 |  | 
| 47 | 51 |  | 
| 52 | +# REAPI enumerations
 | |
| 53 | +# From build/bazel/remote/execution/v2/remote_execution.proto:
 | |
| 54 | + | |
| 48 | 55 |  class OperationStage(Enum):
 | 
| 49 | 56 |      # Initially unknown stage.
 | 
| 50 | 57 |      UNKNOWN = remote_execution_pb2.ExecuteOperationMetadata.Stage.Value('UNKNOWN')
 | 
| ... | ... | @@ -56,3 +63,41 @@ class OperationStage(Enum): | 
| 56 | 63 |      EXECUTING = remote_execution_pb2.ExecuteOperationMetadata.Stage.Value('EXECUTING')
 | 
| 57 | 64 |      # Finished execution.
 | 
| 58 | 65 |      COMPLETED = remote_execution_pb2.ExecuteOperationMetadata.Stage.Value('COMPLETED')
 | 
| 66 | + | |
| 67 | + | |
| 68 | +# Internal enumerations
 | |
| 69 | +# From buildgrid.v2/monitoring.proto:
 | |
| 70 | + | |
| 71 | +class LogRecordLevel(Enum):
 | |
| 72 | +    # Initially unknown level.
 | |
| 73 | +    NOTSET = monitoring_pb2.LogRecord.Level.Value('NOTSET')
 | |
| 74 | +    # Debug message severity level.
 | |
| 75 | +    DEBUG = monitoring_pb2.LogRecord.Level.Value('DEBUG')
 | |
| 76 | +    # Information message severity level.
 | |
| 77 | +    INFO = monitoring_pb2.LogRecord.Level.Value('INFO')
 | |
| 78 | +    # Warning message severity level.
 | |
| 79 | +    WARNING = monitoring_pb2.LogRecord.Level.Value('WARNING')
 | |
| 80 | +    # Error message severity level.
 | |
| 81 | +    ERROR = monitoring_pb2.LogRecord.Level.Value('ERROR')
 | |
| 82 | +    # Critical message severity level.
 | |
| 83 | +    CRITICAL = monitoring_pb2.LogRecord.Level.Value('CRITICAL')
 | |
| 84 | + | |
| 85 | + | |
| 86 | +class MetricRecordDomain(Enum):
 | |
| 87 | +    # Initially unknown domain.
 | |
| 88 | +    UNKNOWN = monitoring_pb2.MetricRecord.Domain.Value('UNKNOWN')
 | |
| 89 | +    # A server state related metric.
 | |
| 90 | +    STATE = monitoring_pb2.MetricRecord.Domain.Value('STATE')
 | |
| 91 | +    # A build execution related metric.
 | |
| 92 | +    BUILD = monitoring_pb2.MetricRecord.Domain.Value('BUILD')
 | |
| 93 | + | |
| 94 | + | |
class MetricRecordType(Enum):
    """Value types for monitoring bus metric records.

    Mirrors the ``buildgrid.v2.MetricRecord.Type`` protobuf enumeration,
    using the value constants the generated message class exposes directly.
    """
    NONE = monitoring_pb2.MetricRecord.NONE        # no value carried
    COUNTER = monitoring_pb2.MetricRecord.COUNTER  # a metric for counting
    TIMER = monitoring_pb2.MetricRecord.TIMER      # a metric measuring a duration
    GAUGE = monitoring_pb2.MetricRecord.GAUGE      # a metric with an arbitrary value
| 1 | +// Copyright (C) 2018 Bloomberg LP
 | |
| 2 | +//
 | |
| 3 | +// Licensed under the Apache License, Version 2.0 (the "License");
 | |
| 4 | +// you may not use this file except in compliance with the License.
 | |
| 5 | +// You may obtain a copy of the License at
 | |
| 6 | +//
 | |
| 7 | +//  <http://www.apache.org/licenses/LICENSE-2.0>
 | |
| 8 | +//
 | |
| 9 | +// Unless required by applicable law or agreed to in writing, software
 | |
| 10 | +// distributed under the License is distributed on an "AS IS" BASIS,
 | |
| 11 | +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | |
| 12 | +// See the License for the specific language governing permissions and
 | |
| 13 | +// limitations under the License.
 | |
| 14 | + | |
syntax = "proto3";

package buildgrid.v2;

// Only well-known types are needed by the messages below. The previous
// import of "google/api/annotations.proto" was unused: this file defines
// no service and no HTTP annotations, so it has been dropped.
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
 | |
| 22 | + | |
// Envelope for every message published on the monitoring bus.
message BusMessage {
  // The position of this message in the bus stream.
  int64 sequence_number = 1;

  // The carried message: exactly one of the record kinds below is set.
  oneof record {
    LogRecord log_record = 2;
    MetricRecord metric_record = 3;
  }
}
 | |
| 33 | + | |
// A single, human-readable log entry streamed over the monitoring bus.
message LogRecord {
  // When the record has been created.
  google.protobuf.Timestamp creation_timestamp = 1;

  enum Level {
    // Initially unknown severity level.
    NOTSET = 0;
    // Debug message severity level.
    DEBUG = 1;
    // Information message severity level.
    INFO = 2;
    // Warning message severity level.
    WARNING = 3;
    // Error message severity level.
    ERROR = 4;
    // Critical message severity level.
    CRITICAL = 5;
  }

  // The domain name for the record.
  string domain = 2;

  // The severity level of the record.
  Level level = 3;

  // The human-readable record's message.
  string message = 4;

  // An optional set of additional metadata, as free-form key/value pairs.
  map<string, string> metadata = 5;
}
 | |
| 64 | + | |
// A single measurement streamed over the monitoring bus.
message MetricRecord {
  // When the metric has been created.
  google.protobuf.Timestamp creation_timestamp = 1;

  enum Domain {
    // Initially unknown domain.
    UNKNOWN = 0;
    // A server state related metric.
    STATE = 1;
    // A build execution related metric.
    BUILD = 2;
  }

  // The domain for the record.
  Domain domain = 2;

  enum Type {
    // Initially unknown type.
    NONE = 0;
    // A metric for counting.
    COUNTER = 1;
    // A metric for measuring a duration.
    TIMER = 2;
    // A metric with an arbitrary value.
    GAUGE = 3;
  }

  // The type of metric, see Type.
  Type type = 3;

  // The name identifying the metric.
  string name = 4;

  // The carried value; which field is set depends on the metric's type.
  oneof data {
    // Set for Type.COUNTER metrics.
    int32 count = 5;
    // Set for Type.TIMER metrics.
    google.protobuf.Duration duration = 6;
    // Set for Type.GAUGE metrics.
    int32 value = 7;
  }

  // An optional set of additional metadata, as free-form key/value pairs.
  map<string, string> metadata = 8;
}
| \ No newline at end of file | 
| 1 | +# Generated by the protocol buffer compiler.  DO NOT EDIT!
 | |
| 2 | +# source: buildgrid/v2/monitoring.proto
 | |
| 3 | + | |
| 4 | +import sys
 | |
| 5 | +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 | |
| 6 | +from google.protobuf import descriptor as _descriptor
 | |
| 7 | +from google.protobuf import message as _message
 | |
| 8 | +from google.protobuf import reflection as _reflection
 | |
| 9 | +from google.protobuf import symbol_database as _symbol_database
 | |
| 10 | +# @@protoc_insertion_point(imports)
 | |
| 11 | + | |
| 12 | +_sym_db = _symbol_database.Default()
 | |
| 13 | + | |
| 14 | + | |
| 15 | +from buildgrid._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
 | |
| 16 | +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
 | |
| 17 | +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
 | |
| 18 | + | |
| 19 | + | |
| 20 | +DESCRIPTOR = _descriptor.FileDescriptor(
 | |
| 21 | +  name='buildgrid/v2/monitoring.proto',
 | |
| 22 | +  package='buildgrid.v2',
 | |
| 23 | +  syntax='proto3',
 | |
| 24 | +  serialized_options=None,
 | |
| 25 | +  serialized_pb=_b('\n\x1d\x62uildgrid/v2/monitoring.proto\x12\x0c\x62uildgrid.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x93\x01\n\nBusMessage\x12\x17\n\x0fsequence_number\x18\x01 \x01(\x03\x12-\n\nlog_record\x18\x02 \x01(\x0b\x32\x17.buildgrid.v2.LogRecordH\x00\x12\x33\n\rmetric_record\x18\x03 \x01(\x0b\x32\x1a.buildgrid.v2.MetricRecordH\x00\x42\x08\n\x06record\"\xcc\x02\n\tLogRecord\x12\x36\n\x12\x63reation_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06\x64omain\x18\x02 \x01(\t\x12,\n\x05level\x18\x03 \x01(\x0e\x32\x1d.buildgrid.v2.LogRecord.Level\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x37\n\x08metadata\x18\x05 \x03(\x0b\x32%.buildgrid.v2.LogRecord.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"N\n\x05Level\x12\n\n\x06NOTSET\x10\x00\x12\t\n\x05\x44\x45\x42UG\x10\x01\x12\x08\n\x04INFO\x10\x02\x12\x0b\n\x07WARNING\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0c\n\x08\x43RITICAL\x10\x05\"\xde\x03\n\x0cMetricRecord\x12\x36\n\x12\x63reation_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\x06\x64omain\x18\x02 \x01(\x0e\x32!.buildgrid.v2.MetricRecord.Domain\x12-\n\x04type\x18\x03 \x01(\x0e\x32\x1f.buildgrid.v2.MetricRecord.Type\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0f\n\x05\x63ount\x18\x05 \x01(\x05H\x00\x12-\n\x08\x64uration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x0f\n\x05value\x18\x07 \x01(\x05H\x00\x12:\n\x08metadata\x18\x08 \x03(\x0b\x32(.buildgrid.v2.MetricRecord.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"+\n\x06\x44omain\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05STATE\x10\x01\x12\t\n\x05\x42UILD\x10\x02\"3\n\x04Type\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\t\n\x05TIMER\x10\x02\x12\t\n\x05GAUGE\x10\x03\x42\x06\n\x04\x64\x61tab\x06proto3')
 | |
| 26 | +  ,
 | |
| 27 | +  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
 | |
| 28 | + | |
| 29 | + | |
| 30 | + | |
| 31 | +_LOGRECORD_LEVEL = _descriptor.EnumDescriptor(
 | |
| 32 | +  name='Level',
 | |
| 33 | +  full_name='buildgrid.v2.LogRecord.Level',
 | |
| 34 | +  filename=None,
 | |
| 35 | +  file=DESCRIPTOR,
 | |
| 36 | +  values=[
 | |
| 37 | +    _descriptor.EnumValueDescriptor(
 | |
| 38 | +      name='NOTSET', index=0, number=0,
 | |
| 39 | +      serialized_options=None,
 | |
| 40 | +      type=None),
 | |
| 41 | +    _descriptor.EnumValueDescriptor(
 | |
| 42 | +      name='DEBUG', index=1, number=1,
 | |
| 43 | +      serialized_options=None,
 | |
| 44 | +      type=None),
 | |
| 45 | +    _descriptor.EnumValueDescriptor(
 | |
| 46 | +      name='INFO', index=2, number=2,
 | |
| 47 | +      serialized_options=None,
 | |
| 48 | +      type=None),
 | |
| 49 | +    _descriptor.EnumValueDescriptor(
 | |
| 50 | +      name='WARNING', index=3, number=3,
 | |
| 51 | +      serialized_options=None,
 | |
| 52 | +      type=None),
 | |
| 53 | +    _descriptor.EnumValueDescriptor(
 | |
| 54 | +      name='ERROR', index=4, number=4,
 | |
| 55 | +      serialized_options=None,
 | |
| 56 | +      type=None),
 | |
| 57 | +    _descriptor.EnumValueDescriptor(
 | |
| 58 | +      name='CRITICAL', index=5, number=5,
 | |
| 59 | +      serialized_options=None,
 | |
| 60 | +      type=None),
 | |
| 61 | +  ],
 | |
| 62 | +  containing_type=None,
 | |
| 63 | +  serialized_options=None,
 | |
| 64 | +  serialized_start=547,
 | |
| 65 | +  serialized_end=625,
 | |
| 66 | +)
 | |
| 67 | +_sym_db.RegisterEnumDescriptor(_LOGRECORD_LEVEL)
 | |
| 68 | + | |
| 69 | +_METRICRECORD_DOMAIN = _descriptor.EnumDescriptor(
 | |
| 70 | +  name='Domain',
 | |
| 71 | +  full_name='buildgrid.v2.MetricRecord.Domain',
 | |
| 72 | +  filename=None,
 | |
| 73 | +  file=DESCRIPTOR,
 | |
| 74 | +  values=[
 | |
| 75 | +    _descriptor.EnumValueDescriptor(
 | |
| 76 | +      name='UNKNOWN', index=0, number=0,
 | |
| 77 | +      serialized_options=None,
 | |
| 78 | +      type=None),
 | |
| 79 | +    _descriptor.EnumValueDescriptor(
 | |
| 80 | +      name='STATE', index=1, number=1,
 | |
| 81 | +      serialized_options=None,
 | |
| 82 | +      type=None),
 | |
| 83 | +    _descriptor.EnumValueDescriptor(
 | |
| 84 | +      name='BUILD', index=2, number=2,
 | |
| 85 | +      serialized_options=None,
 | |
| 86 | +      type=None),
 | |
| 87 | +  ],
 | |
| 88 | +  containing_type=None,
 | |
| 89 | +  serialized_options=None,
 | |
| 90 | +  serialized_start=1002,
 | |
| 91 | +  serialized_end=1045,
 | |
| 92 | +)
 | |
| 93 | +_sym_db.RegisterEnumDescriptor(_METRICRECORD_DOMAIN)
 | |
| 94 | + | |
| 95 | +_METRICRECORD_TYPE = _descriptor.EnumDescriptor(
 | |
| 96 | +  name='Type',
 | |
| 97 | +  full_name='buildgrid.v2.MetricRecord.Type',
 | |
| 98 | +  filename=None,
 | |
| 99 | +  file=DESCRIPTOR,
 | |
| 100 | +  values=[
 | |
| 101 | +    _descriptor.EnumValueDescriptor(
 | |
| 102 | +      name='NONE', index=0, number=0,
 | |
| 103 | +      serialized_options=None,
 | |
| 104 | +      type=None),
 | |
| 105 | +    _descriptor.EnumValueDescriptor(
 | |
| 106 | +      name='COUNTER', index=1, number=1,
 | |
| 107 | +      serialized_options=None,
 | |
| 108 | +      type=None),
 | |
| 109 | +    _descriptor.EnumValueDescriptor(
 | |
| 110 | +      name='TIMER', index=2, number=2,
 | |
| 111 | +      serialized_options=None,
 | |
| 112 | +      type=None),
 | |
| 113 | +    _descriptor.EnumValueDescriptor(
 | |
| 114 | +      name='GAUGE', index=3, number=3,
 | |
| 115 | +      serialized_options=None,
 | |
| 116 | +      type=None),
 | |
| 117 | +  ],
 | |
| 118 | +  containing_type=None,
 | |
| 119 | +  serialized_options=None,
 | |
| 120 | +  serialized_start=1047,
 | |
| 121 | +  serialized_end=1098,
 | |
| 122 | +)
 | |
| 123 | +_sym_db.RegisterEnumDescriptor(_METRICRECORD_TYPE)
 | |
| 124 | + | |
| 125 | + | |
| 126 | +_BUSMESSAGE = _descriptor.Descriptor(
 | |
| 127 | +  name='BusMessage',
 | |
| 128 | +  full_name='buildgrid.v2.BusMessage',
 | |
| 129 | +  filename=None,
 | |
| 130 | +  file=DESCRIPTOR,
 | |
| 131 | +  containing_type=None,
 | |
| 132 | +  fields=[
 | |
| 133 | +    _descriptor.FieldDescriptor(
 | |
| 134 | +      name='sequence_number', full_name='buildgrid.v2.BusMessage.sequence_number', index=0,
 | |
| 135 | +      number=1, type=3, cpp_type=2, label=1,
 | |
| 136 | +      has_default_value=False, default_value=0,
 | |
| 137 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 138 | +      is_extension=False, extension_scope=None,
 | |
| 139 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 140 | +    _descriptor.FieldDescriptor(
 | |
| 141 | +      name='log_record', full_name='buildgrid.v2.BusMessage.log_record', index=1,
 | |
| 142 | +      number=2, type=11, cpp_type=10, label=1,
 | |
| 143 | +      has_default_value=False, default_value=None,
 | |
| 144 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 145 | +      is_extension=False, extension_scope=None,
 | |
| 146 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 147 | +    _descriptor.FieldDescriptor(
 | |
| 148 | +      name='metric_record', full_name='buildgrid.v2.BusMessage.metric_record', index=2,
 | |
| 149 | +      number=3, type=11, cpp_type=10, label=1,
 | |
| 150 | +      has_default_value=False, default_value=None,
 | |
| 151 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 152 | +      is_extension=False, extension_scope=None,
 | |
| 153 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 154 | +  ],
 | |
| 155 | +  extensions=[
 | |
| 156 | +  ],
 | |
| 157 | +  nested_types=[],
 | |
| 158 | +  enum_types=[
 | |
| 159 | +  ],
 | |
| 160 | +  serialized_options=None,
 | |
| 161 | +  is_extendable=False,
 | |
| 162 | +  syntax='proto3',
 | |
| 163 | +  extension_ranges=[],
 | |
| 164 | +  oneofs=[
 | |
| 165 | +    _descriptor.OneofDescriptor(
 | |
| 166 | +      name='record', full_name='buildgrid.v2.BusMessage.record',
 | |
| 167 | +      index=0, containing_type=None, fields=[]),
 | |
| 168 | +  ],
 | |
| 169 | +  serialized_start=143,
 | |
| 170 | +  serialized_end=290,
 | |
| 171 | +)
 | |
| 172 | + | |
| 173 | + | |
| 174 | +_LOGRECORD_METADATAENTRY = _descriptor.Descriptor(
 | |
| 175 | +  name='MetadataEntry',
 | |
| 176 | +  full_name='buildgrid.v2.LogRecord.MetadataEntry',
 | |
| 177 | +  filename=None,
 | |
| 178 | +  file=DESCRIPTOR,
 | |
| 179 | +  containing_type=None,
 | |
| 180 | +  fields=[
 | |
| 181 | +    _descriptor.FieldDescriptor(
 | |
| 182 | +      name='key', full_name='buildgrid.v2.LogRecord.MetadataEntry.key', index=0,
 | |
| 183 | +      number=1, type=9, cpp_type=9, label=1,
 | |
| 184 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 185 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 186 | +      is_extension=False, extension_scope=None,
 | |
| 187 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 188 | +    _descriptor.FieldDescriptor(
 | |
| 189 | +      name='value', full_name='buildgrid.v2.LogRecord.MetadataEntry.value', index=1,
 | |
| 190 | +      number=2, type=9, cpp_type=9, label=1,
 | |
| 191 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 192 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 193 | +      is_extension=False, extension_scope=None,
 | |
| 194 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 195 | +  ],
 | |
| 196 | +  extensions=[
 | |
| 197 | +  ],
 | |
| 198 | +  nested_types=[],
 | |
| 199 | +  enum_types=[
 | |
| 200 | +  ],
 | |
| 201 | +  serialized_options=_b('8\001'),
 | |
| 202 | +  is_extendable=False,
 | |
| 203 | +  syntax='proto3',
 | |
| 204 | +  extension_ranges=[],
 | |
| 205 | +  oneofs=[
 | |
| 206 | +  ],
 | |
| 207 | +  serialized_start=498,
 | |
| 208 | +  serialized_end=545,
 | |
| 209 | +)
 | |
| 210 | + | |
| 211 | +_LOGRECORD = _descriptor.Descriptor(
 | |
| 212 | +  name='LogRecord',
 | |
| 213 | +  full_name='buildgrid.v2.LogRecord',
 | |
| 214 | +  filename=None,
 | |
| 215 | +  file=DESCRIPTOR,
 | |
| 216 | +  containing_type=None,
 | |
| 217 | +  fields=[
 | |
| 218 | +    _descriptor.FieldDescriptor(
 | |
| 219 | +      name='creation_timestamp', full_name='buildgrid.v2.LogRecord.creation_timestamp', index=0,
 | |
| 220 | +      number=1, type=11, cpp_type=10, label=1,
 | |
| 221 | +      has_default_value=False, default_value=None,
 | |
| 222 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 223 | +      is_extension=False, extension_scope=None,
 | |
| 224 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 225 | +    _descriptor.FieldDescriptor(
 | |
| 226 | +      name='domain', full_name='buildgrid.v2.LogRecord.domain', index=1,
 | |
| 227 | +      number=2, type=9, cpp_type=9, label=1,
 | |
| 228 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 229 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 230 | +      is_extension=False, extension_scope=None,
 | |
| 231 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 232 | +    _descriptor.FieldDescriptor(
 | |
| 233 | +      name='level', full_name='buildgrid.v2.LogRecord.level', index=2,
 | |
| 234 | +      number=3, type=14, cpp_type=8, label=1,
 | |
| 235 | +      has_default_value=False, default_value=0,
 | |
| 236 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 237 | +      is_extension=False, extension_scope=None,
 | |
| 238 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 239 | +    _descriptor.FieldDescriptor(
 | |
| 240 | +      name='message', full_name='buildgrid.v2.LogRecord.message', index=3,
 | |
| 241 | +      number=4, type=9, cpp_type=9, label=1,
 | |
| 242 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 243 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 244 | +      is_extension=False, extension_scope=None,
 | |
| 245 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 246 | +    _descriptor.FieldDescriptor(
 | |
| 247 | +      name='metadata', full_name='buildgrid.v2.LogRecord.metadata', index=4,
 | |
| 248 | +      number=5, type=11, cpp_type=10, label=3,
 | |
| 249 | +      has_default_value=False, default_value=[],
 | |
| 250 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 251 | +      is_extension=False, extension_scope=None,
 | |
| 252 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 253 | +  ],
 | |
| 254 | +  extensions=[
 | |
| 255 | +  ],
 | |
| 256 | +  nested_types=[_LOGRECORD_METADATAENTRY, ],
 | |
| 257 | +  enum_types=[
 | |
| 258 | +    _LOGRECORD_LEVEL,
 | |
| 259 | +  ],
 | |
| 260 | +  serialized_options=None,
 | |
| 261 | +  is_extendable=False,
 | |
| 262 | +  syntax='proto3',
 | |
| 263 | +  extension_ranges=[],
 | |
| 264 | +  oneofs=[
 | |
| 265 | +  ],
 | |
| 266 | +  serialized_start=293,
 | |
| 267 | +  serialized_end=625,
 | |
| 268 | +)
 | |
| 269 | + | |
| 270 | + | |
| 271 | +_METRICRECORD_METADATAENTRY = _descriptor.Descriptor(
 | |
| 272 | +  name='MetadataEntry',
 | |
| 273 | +  full_name='buildgrid.v2.MetricRecord.MetadataEntry',
 | |
| 274 | +  filename=None,
 | |
| 275 | +  file=DESCRIPTOR,
 | |
| 276 | +  containing_type=None,
 | |
| 277 | +  fields=[
 | |
| 278 | +    _descriptor.FieldDescriptor(
 | |
| 279 | +      name='key', full_name='buildgrid.v2.MetricRecord.MetadataEntry.key', index=0,
 | |
| 280 | +      number=1, type=9, cpp_type=9, label=1,
 | |
| 281 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 282 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 283 | +      is_extension=False, extension_scope=None,
 | |
| 284 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 285 | +    _descriptor.FieldDescriptor(
 | |
| 286 | +      name='value', full_name='buildgrid.v2.MetricRecord.MetadataEntry.value', index=1,
 | |
| 287 | +      number=2, type=9, cpp_type=9, label=1,
 | |
| 288 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 289 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 290 | +      is_extension=False, extension_scope=None,
 | |
| 291 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 292 | +  ],
 | |
| 293 | +  extensions=[
 | |
| 294 | +  ],
 | |
| 295 | +  nested_types=[],
 | |
| 296 | +  enum_types=[
 | |
| 297 | +  ],
 | |
| 298 | +  serialized_options=_b('8\001'),
 | |
| 299 | +  is_extendable=False,
 | |
| 300 | +  syntax='proto3',
 | |
| 301 | +  extension_ranges=[],
 | |
| 302 | +  oneofs=[
 | |
| 303 | +  ],
 | |
| 304 | +  serialized_start=498,
 | |
| 305 | +  serialized_end=545,
 | |
| 306 | +)
 | |
| 307 | + | |
| 308 | +_METRICRECORD = _descriptor.Descriptor(
 | |
| 309 | +  name='MetricRecord',
 | |
| 310 | +  full_name='buildgrid.v2.MetricRecord',
 | |
| 311 | +  filename=None,
 | |
| 312 | +  file=DESCRIPTOR,
 | |
| 313 | +  containing_type=None,
 | |
| 314 | +  fields=[
 | |
| 315 | +    _descriptor.FieldDescriptor(
 | |
| 316 | +      name='creation_timestamp', full_name='buildgrid.v2.MetricRecord.creation_timestamp', index=0,
 | |
| 317 | +      number=1, type=11, cpp_type=10, label=1,
 | |
| 318 | +      has_default_value=False, default_value=None,
 | |
| 319 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 320 | +      is_extension=False, extension_scope=None,
 | |
| 321 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 322 | +    _descriptor.FieldDescriptor(
 | |
| 323 | +      name='domain', full_name='buildgrid.v2.MetricRecord.domain', index=1,
 | |
| 324 | +      number=2, type=14, cpp_type=8, label=1,
 | |
| 325 | +      has_default_value=False, default_value=0,
 | |
| 326 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 327 | +      is_extension=False, extension_scope=None,
 | |
| 328 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 329 | +    _descriptor.FieldDescriptor(
 | |
| 330 | +      name='type', full_name='buildgrid.v2.MetricRecord.type', index=2,
 | |
| 331 | +      number=3, type=14, cpp_type=8, label=1,
 | |
| 332 | +      has_default_value=False, default_value=0,
 | |
| 333 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 334 | +      is_extension=False, extension_scope=None,
 | |
| 335 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 336 | +    _descriptor.FieldDescriptor(
 | |
| 337 | +      name='name', full_name='buildgrid.v2.MetricRecord.name', index=3,
 | |
| 338 | +      number=4, type=9, cpp_type=9, label=1,
 | |
| 339 | +      has_default_value=False, default_value=_b("").decode('utf-8'),
 | |
| 340 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 341 | +      is_extension=False, extension_scope=None,
 | |
| 342 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 343 | +    _descriptor.FieldDescriptor(
 | |
| 344 | +      name='count', full_name='buildgrid.v2.MetricRecord.count', index=4,
 | |
| 345 | +      number=5, type=5, cpp_type=1, label=1,
 | |
| 346 | +      has_default_value=False, default_value=0,
 | |
| 347 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 348 | +      is_extension=False, extension_scope=None,
 | |
| 349 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 350 | +    _descriptor.FieldDescriptor(
 | |
| 351 | +      name='duration', full_name='buildgrid.v2.MetricRecord.duration', index=5,
 | |
| 352 | +      number=6, type=11, cpp_type=10, label=1,
 | |
| 353 | +      has_default_value=False, default_value=None,
 | |
| 354 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 355 | +      is_extension=False, extension_scope=None,
 | |
| 356 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 357 | +    _descriptor.FieldDescriptor(
 | |
| 358 | +      name='value', full_name='buildgrid.v2.MetricRecord.value', index=6,
 | |
| 359 | +      number=7, type=5, cpp_type=1, label=1,
 | |
| 360 | +      has_default_value=False, default_value=0,
 | |
| 361 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 362 | +      is_extension=False, extension_scope=None,
 | |
| 363 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 364 | +    _descriptor.FieldDescriptor(
 | |
| 365 | +      name='metadata', full_name='buildgrid.v2.MetricRecord.metadata', index=7,
 | |
| 366 | +      number=8, type=11, cpp_type=10, label=3,
 | |
| 367 | +      has_default_value=False, default_value=[],
 | |
| 368 | +      message_type=None, enum_type=None, containing_type=None,
 | |
| 369 | +      is_extension=False, extension_scope=None,
 | |
| 370 | +      serialized_options=None, file=DESCRIPTOR),
 | |
| 371 | +  ],
 | |
| 372 | +  extensions=[
 | |
| 373 | +  ],
 | |
| 374 | +  nested_types=[_METRICRECORD_METADATAENTRY, ],
 | |
| 375 | +  enum_types=[
 | |
| 376 | +    _METRICRECORD_DOMAIN,
 | |
| 377 | +    _METRICRECORD_TYPE,
 | |
| 378 | +  ],
 | |
| 379 | +  serialized_options=None,
 | |
| 380 | +  is_extendable=False,
 | |
| 381 | +  syntax='proto3',
 | |
| 382 | +  extension_ranges=[],
 | |
| 383 | +  oneofs=[
 | |
| 384 | +    _descriptor.OneofDescriptor(
 | |
| 385 | +      name='data', full_name='buildgrid.v2.MetricRecord.data',
 | |
| 386 | +      index=0, containing_type=None, fields=[]),
 | |
| 387 | +  ],
 | |
| 388 | +  serialized_start=628,
 | |
| 389 | +  serialized_end=1106,
 | |
| 390 | +)
 | |
| 391 | + | |
| 392 | +_BUSMESSAGE.fields_by_name['log_record'].message_type = _LOGRECORD
 | |
| 393 | +_BUSMESSAGE.fields_by_name['metric_record'].message_type = _METRICRECORD
 | |
| 394 | +_BUSMESSAGE.oneofs_by_name['record'].fields.append(
 | |
| 395 | +  _BUSMESSAGE.fields_by_name['log_record'])
 | |
| 396 | +_BUSMESSAGE.fields_by_name['log_record'].containing_oneof = _BUSMESSAGE.oneofs_by_name['record']
 | |
| 397 | +_BUSMESSAGE.oneofs_by_name['record'].fields.append(
 | |
| 398 | +  _BUSMESSAGE.fields_by_name['metric_record'])
 | |
| 399 | +_BUSMESSAGE.fields_by_name['metric_record'].containing_oneof = _BUSMESSAGE.oneofs_by_name['record']
 | |
| 400 | +_LOGRECORD_METADATAENTRY.containing_type = _LOGRECORD
 | |
| 401 | +_LOGRECORD.fields_by_name['creation_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
 | |
| 402 | +_LOGRECORD.fields_by_name['level'].enum_type = _LOGRECORD_LEVEL
 | |
| 403 | +_LOGRECORD.fields_by_name['metadata'].message_type = _LOGRECORD_METADATAENTRY
 | |
| 404 | +_LOGRECORD_LEVEL.containing_type = _LOGRECORD
 | |
| 405 | +_METRICRECORD_METADATAENTRY.containing_type = _METRICRECORD
 | |
| 406 | +_METRICRECORD.fields_by_name['creation_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
 | |
| 407 | +_METRICRECORD.fields_by_name['domain'].enum_type = _METRICRECORD_DOMAIN
 | |
| 408 | +_METRICRECORD.fields_by_name['type'].enum_type = _METRICRECORD_TYPE
 | |
| 409 | +_METRICRECORD.fields_by_name['duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
 | |
| 410 | +_METRICRECORD.fields_by_name['metadata'].message_type = _METRICRECORD_METADATAENTRY
 | |
| 411 | +_METRICRECORD_DOMAIN.containing_type = _METRICRECORD
 | |
| 412 | +_METRICRECORD_TYPE.containing_type = _METRICRECORD
 | |
| 413 | +_METRICRECORD.oneofs_by_name['data'].fields.append(
 | |
| 414 | +  _METRICRECORD.fields_by_name['count'])
 | |
| 415 | +_METRICRECORD.fields_by_name['count'].containing_oneof = _METRICRECORD.oneofs_by_name['data']
 | |
| 416 | +_METRICRECORD.oneofs_by_name['data'].fields.append(
 | |
| 417 | +  _METRICRECORD.fields_by_name['duration'])
 | |
| 418 | +_METRICRECORD.fields_by_name['duration'].containing_oneof = _METRICRECORD.oneofs_by_name['data']
 | |
| 419 | +_METRICRECORD.oneofs_by_name['data'].fields.append(
 | |
| 420 | +  _METRICRECORD.fields_by_name['value'])
 | |
| 421 | +_METRICRECORD.fields_by_name['value'].containing_oneof = _METRICRECORD.oneofs_by_name['data']
 | |
| 422 | +DESCRIPTOR.message_types_by_name['BusMessage'] = _BUSMESSAGE
 | |
| 423 | +DESCRIPTOR.message_types_by_name['LogRecord'] = _LOGRECORD
 | |
| 424 | +DESCRIPTOR.message_types_by_name['MetricRecord'] = _METRICRECORD
 | |
| 425 | +_sym_db.RegisterFileDescriptor(DESCRIPTOR)
 | |
| 426 | + | |
| 427 | +BusMessage = _reflection.GeneratedProtocolMessageType('BusMessage', (_message.Message,), dict(
 | |
| 428 | +  DESCRIPTOR = _BUSMESSAGE,
 | |
| 429 | +  __module__ = 'buildgrid.v2.monitoring_pb2'
 | |
| 430 | +  # @@protoc_insertion_point(class_scope:buildgrid.v2.BusMessage)
 | |
| 431 | +  ))
 | |
| 432 | +_sym_db.RegisterMessage(BusMessage)
 | |
| 433 | + | |
| 434 | +LogRecord = _reflection.GeneratedProtocolMessageType('LogRecord', (_message.Message,), dict(
 | |
| 435 | + | |
| 436 | +  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
 | |
| 437 | +    DESCRIPTOR = _LOGRECORD_METADATAENTRY,
 | |
| 438 | +    __module__ = 'buildgrid.v2.monitoring_pb2'
 | |
| 439 | +    # @@protoc_insertion_point(class_scope:buildgrid.v2.LogRecord.MetadataEntry)
 | |
| 440 | +    ))
 | |
| 441 | +  ,
 | |
| 442 | +  DESCRIPTOR = _LOGRECORD,
 | |
| 443 | +  __module__ = 'buildgrid.v2.monitoring_pb2'
 | |
| 444 | +  # @@protoc_insertion_point(class_scope:buildgrid.v2.LogRecord)
 | |
| 445 | +  ))
 | |
| 446 | +_sym_db.RegisterMessage(LogRecord)
 | |
| 447 | +_sym_db.RegisterMessage(LogRecord.MetadataEntry)
 | |
| 448 | + | |
| 449 | +MetricRecord = _reflection.GeneratedProtocolMessageType('MetricRecord', (_message.Message,), dict(
 | |
| 450 | + | |
| 451 | +  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
 | |
| 452 | +    DESCRIPTOR = _METRICRECORD_METADATAENTRY,
 | |
| 453 | +    __module__ = 'buildgrid.v2.monitoring_pb2'
 | |
| 454 | +    # @@protoc_insertion_point(class_scope:buildgrid.v2.MetricRecord.MetadataEntry)
 | |
| 455 | +    ))
 | |
| 456 | +  ,
 | |
| 457 | +  DESCRIPTOR = _METRICRECORD,
 | |
| 458 | +  __module__ = 'buildgrid.v2.monitoring_pb2'
 | |
| 459 | +  # @@protoc_insertion_point(class_scope:buildgrid.v2.MetricRecord)
 | |
| 460 | +  ))
 | |
| 461 | +_sym_db.RegisterMessage(MetricRecord)
 | |
| 462 | +_sym_db.RegisterMessage(MetricRecord.MetadataEntry)
 | |
| 463 | + | |
| 464 | + | |
| 465 | +_LOGRECORD_METADATAENTRY._options = None
 | |
| 466 | +_METRICRECORD_METADATAENTRY._options = None
 | |
| 467 | +# @@protoc_insertion_point(module_scope) | 
| 1 | +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
 | |
| 2 | +import grpc
 | |
| 3 | + | 
| 1 | +# Copyright (C) 2018 Bloomberg LP
 | |
| 2 | +#
 | |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License");
 | |
| 4 | +# you may not use this file except in compliance with the License.
 | |
| 5 | +# You may obtain a copy of the License at
 | |
| 6 | +#
 | |
| 7 | +#  <http://www.apache.org/licenses/LICENSE-2.0>
 | |
| 8 | +#
 | |
| 9 | +# Unless required by applicable law or agreed to in writing, software
 | |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS,
 | |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | |
| 12 | +# See the License for the specific language governing permissions and
 | |
| 13 | +# limitations under the License.
 | |
| 14 | + | |
| 15 | + | |
| 16 | +import asyncio
 | |
| 17 | +from enum import Enum
 | |
| 18 | +import sys
 | |
| 19 | + | |
| 20 | +from google.protobuf import json_format
 | |
| 21 | + | |
| 22 | +from buildgrid._protos.buildgrid.v2 import monitoring_pb2
 | |
| 23 | + | |
| 24 | + | |
class MonitoringOutputType(Enum):
    """Kinds of endpoint the monitoring bus can stream records to."""
    STDOUT = 'stdout'  # standard output stream
    FILE = 'file'      # on-disk file
    SOCKET = 'socket'  # UNIX domain socket
 | |
| 32 | + | |
| 33 | + | |
class MonitoringOutputFormat(Enum):
    """Wire formats the monitoring bus can serialise records into."""
    BINARY = 'binary'  # protobuf binary format
    JSON = 'json'      # JSON format
 | |
| 39 | + | |
| 40 | + | |
class MonitoringBus:
    """Asynchronous dispatcher for monitoring records.

    Collects ``LogRecord`` and ``MetricRecord`` protobuf messages on an
    :mod:`asyncio` queue and streams them, wrapped in ``BusMessage``
    envelopes, to one of three sinks: standard output, an on-disk file
    or a UNIX domain socket (see :class:`MonitoringOutputType`).
    """

    def __init__(self, event_loop,
                 endpoint_type=MonitoringOutputType.SOCKET, endpoint_location=None,
                 serialisation_format=MonitoringOutputFormat.BINARY):
        """Initializes a new :class:`MonitoringBus` instance.

        Args:
            event_loop (asyncio.AbstractEventLoop): The event loop the
                streaming worker task will run on.
            endpoint_type (MonitoringOutputType): The kind of output sink.
            endpoint_location (str): File path or socket path for the
                FILE and SOCKET sinks; ignored for STDOUT.
            serialisation_format (MonitoringOutputFormat): Wire format
                used for published messages.
        """
        self.__event_loop = event_loop
        self.__streaming_task = None

        self.__message_queue = asyncio.Queue(loop=self.__event_loop)
        self.__sequence_number = 1

        self.__output_location = None
        self.__async_output = False
        self.__json_output = False

        if endpoint_type == MonitoringOutputType.FILE:
            self.__output_location = endpoint_location

        elif endpoint_type == MonitoringOutputType.SOCKET:
            self.__output_location = endpoint_location
            self.__async_output = True

        if serialisation_format == MonitoringOutputFormat.JSON:
            self.__json_output = True

    # --- Public API ---

    def start(self):
        """Starts the monitoring bus worker task."""
        if self.__streaming_task is not None:
            return

        self.__streaming_task = asyncio.ensure_future(
            self._streaming_worker(), loop=self.__event_loop)

    def stop(self):
        """Cancels the monitoring bus worker task."""
        if self.__streaming_task is None:
            return

        self.__streaming_task.cancel()

    async def send_record(self, record):
        """Publishes a record onto the bus asynchronously.

        Args:
            record (Message): The ``LogRecord`` or ``MetricRecord`` to send.
        """
        await self.__message_queue.put(record)

    def send_record_nowait(self, record):
        """Publishes a record onto the bus without waiting.

        Args:
            record (Message): The ``LogRecord`` or ``MetricRecord`` to send.

        Raises:
            asyncio.QueueFull: If the underlying queue is bounded and full.
        """
        self.__message_queue.put_nowait(record)

    # --- Private API ---

    async def _streaming_worker(self):
        """Handles bus messages streaming work."""
        async def __streaming_worker(end_points):
            """Dequeues one record and writes it to every end-point.

            Returns:
                bool: True if a message was dispatched, False if the
                dequeued record's type was not recognized.
            """
            record = await self.__message_queue.get()

            message = monitoring_pb2.BusMessage()
            message.sequence_number = self.__sequence_number

            if record.DESCRIPTOR is monitoring_pb2.LogRecord.DESCRIPTOR:
                message.log_record.CopyFrom(record)

            elif record.DESCRIPTOR is monitoring_pb2.MetricRecord.DESCRIPTOR:
                message.metric_record.CopyFrom(record)

            else:
                return False

            if self.__json_output:
                binary_message = json_format.MessageToJson(message).encode()
            else:
                binary_message = message.SerializeToString()

            for end_point in end_points:
                end_point.write(binary_message)

            return True

        output_writers, output_file, output_server = [], None, None

        async def __client_connected_callback(reader, writer):
            # Register newly connected socket clients for future writes.
            output_writers.append(writer)

        try:
            if self.__async_output and self.__output_location:
                # Keep a handle on the server so it can be shut down on
                # cancellation (it was previously leaked):
                output_server = await asyncio.start_unix_server(
                    __client_connected_callback, path=self.__output_location,
                    loop=self.__event_loop)

                while True:
                    if await __streaming_worker(output_writers):
                        self.__sequence_number += 1

                        for writer in output_writers:
                            await writer.drain()

            elif self.__output_location:
                output_file = open(self.__output_location, mode='wb')

                output_writers.append(output_file)

                while True:
                    # Pass the writer list, not `iter(output_file)`: a
                    # write-only binary file cannot be iterated and would
                    # raise io.UnsupportedOperation.
                    if await __streaming_worker(output_writers):
                        self.__sequence_number += 1

                        # Flush so records are visible before shutdown:
                        output_file.flush()

            else:
                output_writers.append(sys.stdout.buffer)

                while True:
                    if await __streaming_worker(output_writers):
                        self.__sequence_number += 1

        except asyncio.CancelledError:
            if output_server is not None:
                output_server.close()
                await output_server.wait_closed()

            if output_file is not None:
                output_file.close()

            elif output_writers:
                for writer in output_writers:
                    # Never close the process-wide standard output stream
                    # (it also has no wait_closed() method):
                    if writer is sys.stdout.buffer:
                        continue
                    writer.close()
                    await writer.wait_closed()
| ... | ... | @@ -24,7 +24,7 @@ import logging | 
| 24 | 24 |  from buildgrid._exceptions import InvalidArgumentError, NotFoundError, OutOfRangeError
 | 
| 25 | 25 |  from buildgrid._protos.google.bytestream import bytestream_pb2
 | 
| 26 | 26 |  from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
 | 
| 27 | -from buildgrid.settings import HASH
 | |
| 27 | +from buildgrid.settings import HASH, HASH_LENGTH
 | |
| 28 | 28 |  | 
| 29 | 29 |  | 
| 30 | 30 |  class ContentAddressableStorageInstance:
 | 
| ... | ... | @@ -71,15 +71,12 @@ class ByteStreamInstance: | 
| 71 | 71 |      def register_instance_with_server(self, instance_name, server):
 | 
| 72 | 72 |          server.add_bytestream_instance(self, instance_name)
 | 
| 73 | 73 |  | 
| 74 | -    def read(self, path, read_offset, read_limit):
 | |
| 75 | -        storage = self._storage
 | |
| 76 | - | |
| 77 | -        if path[0] == "blobs":
 | |
| 78 | -            path = [""] + path
 | |
| 74 | +    def read(self, digest_hash, digest_size, read_offset, read_limit):
 | |
| 75 | +        if len(digest_hash) != HASH_LENGTH or not digest_size.isdigit():
 | |
| 76 | +            raise InvalidArgumentError("Invalid digest [{}/{}]"
 | |
| 77 | +                                       .format(digest_hash, digest_size))
 | |
| 79 | 78 |  | 
| 80 | -        # Parse/verify resource name.
 | |
| 81 | -        # Read resource names look like "[instance/]blobs/abc123hash/99".
 | |
| 82 | -        digest = re_pb2.Digest(hash=path[2], size_bytes=int(path[3]))
 | |
| 79 | +        digest = re_pb2.Digest(hash=digest_hash, size_bytes=int(digest_size))
 | |
| 83 | 80 |  | 
| 84 | 81 |          # Check the given read offset and limit.
 | 
| 85 | 82 |          if read_offset < 0 or read_offset > digest.size_bytes:
 | 
| ... | ... | @@ -95,7 +92,7 @@ class ByteStreamInstance: | 
| 95 | 92 |              raise InvalidArgumentError("Negative read_limit is invalid")
 | 
| 96 | 93 |  | 
| 97 | 94 |          # Read the blob from storage and send its contents to the client.
 | 
| 98 | -        result = storage.get_blob(digest)
 | |
| 95 | +        result = self._storage.get_blob(digest)
 | |
| 99 | 96 |          if result is None:
 | 
| 100 | 97 |              raise NotFoundError("Blob not found")
 | 
| 101 | 98 |  | 
| ... | ... | @@ -110,51 +107,35 @@ class ByteStreamInstance: | 
| 110 | 107 |                  data=result.read(min(self.BLOCK_SIZE, bytes_remaining)))
 | 
| 111 | 108 |              bytes_remaining -= self.BLOCK_SIZE
 | 
| 112 | 109 |  | 
| 113 | -    def write(self, requests):
 | |
| 114 | -        storage = self._storage
 | |
| 110 | +    def write(self, digest_hash, digest_size, first_block, other_blocks):
 | |
| 111 | +        if len(digest_hash) != HASH_LENGTH or not digest_size.isdigit():
 | |
| 112 | +            raise InvalidArgumentError("Invalid digest [{}/{}]"
 | |
| 113 | +                                       .format(digest_hash, digest_size))
 | |
| 115 | 114 |  | 
| 116 | -        first_request = next(requests)
 | |
| 117 | -        path = first_request.resource_name.split("/")
 | |
| 115 | +        digest = re_pb2.Digest(hash=digest_hash, size_bytes=int(digest_size))
 | |
| 118 | 116 |  | 
| 119 | -        if path[0] == "uploads":
 | |
| 120 | -            path = [""] + path
 | |
| 121 | - | |
| 122 | -        digest = re_pb2.Digest(hash=path[4], size_bytes=int(path[5]))
 | |
| 123 | -        write_session = storage.begin_write(digest)
 | |
| 117 | +        write_session = self._storage.begin_write(digest)
 | |
| 124 | 118 |  | 
| 125 | 119 |          # Start the write session and write the first request's data.
 | 
| 126 | -        write_session.write(first_request.data)
 | |
| 127 | -        hash_ = HASH(first_request.data)
 | |
| 128 | -        bytes_written = len(first_request.data)
 | |
| 129 | -        finished = first_request.finish_write
 | |
| 130 | - | |
| 131 | -        # Handle subsequent write requests.
 | |
| 132 | -        while not finished:
 | |
| 133 | - | |
| 134 | -            for request in requests:
 | |
| 135 | -                if finished:
 | |
| 136 | -                    raise InvalidArgumentError("Write request sent after write finished")
 | |
| 137 | - | |
| 138 | -                elif request.write_offset != bytes_written:
 | |
| 139 | -                    raise InvalidArgumentError("Invalid write offset")
 | |
| 120 | +        write_session.write(first_block)
 | |
| 140 | 121 |  | 
| 141 | -                elif request.resource_name and request.resource_name != first_request.resource_name:
 | |
| 142 | -                    raise InvalidArgumentError("Resource name changed mid-write")
 | |
| 122 | +        computed_hash = HASH(first_block)
 | |
| 123 | +        bytes_written = len(first_block)
 | |
| 143 | 124 |  | 
| 144 | -                finished = request.finish_write
 | |
| 145 | -                bytes_written += len(request.data)
 | |
| 146 | -                if bytes_written > digest.size_bytes:
 | |
| 147 | -                    raise InvalidArgumentError("Wrote too much data to blob")
 | |
| 125 | +        # Handle subsequent write requests.
 | |
| 126 | +        for next_block in other_blocks:
 | |
| 127 | +            write_session.write(next_block)
 | |
| 148 | 128 |  | 
| 149 | -                write_session.write(request.data)
 | |
| 150 | -                hash_.update(request.data)
 | |
| 129 | +            computed_hash.update(next_block)
 | |
| 130 | +            bytes_written += len(next_block)
 | |
| 151 | 131 |  | 
| 152 | 132 |          # Check that the data matches the provided digest.
 | 
| 153 | -        if bytes_written != digest.size_bytes or not finished:
 | |
| 133 | +        if bytes_written != digest.size_bytes:
 | |
| 154 | 134 |              raise NotImplementedError("Cannot close stream before finishing write")
 | 
| 155 | 135 |  | 
| 156 | -        elif hash_.hexdigest() != digest.hash:
 | |
| 136 | +        elif computed_hash.hexdigest() != digest.hash:
 | |
| 157 | 137 |              raise InvalidArgumentError("Data does not match hash")
 | 
| 158 | 138 |  | 
| 159 | -        storage.commit_write(digest, write_session)
 | |
| 139 | +        self._storage.commit_write(digest, write_session)
 | |
| 140 | + | |
| 160 | 141 |          return bytestream_pb2.WriteResponse(committed_size=bytes_written) | 
| ... | ... | @@ -21,7 +21,6 @@ Implements the Content Addressable Storage API and ByteStream API. | 
| 21 | 21 |  """
 | 
| 22 | 22 |  | 
| 23 | 23 |  | 
| 24 | -from itertools import tee
 | |
| 25 | 24 |  import logging
 | 
| 26 | 25 |  | 
| 27 | 26 |  import grpc
 | 
| ... | ... | @@ -115,27 +114,30 @@ class ByteStreamService(bytestream_pb2_grpc.ByteStreamServicer): | 
| 115 | 114 |      def Read(self, request, context):
 | 
| 116 | 115 |          self.__logger.debug("Read request from [%s]", context.peer())
 | 
| 117 | 116 |  | 
| 117 | +        names = request.resource_name.split('/')
 | |
| 118 | + | |
| 118 | 119 |          try:
 | 
| 119 | -            path = request.resource_name.split("/")
 | |
| 120 | -            instance_name = path[0]
 | |
| 120 | +            instance_name = ''
 | |
| 121 | +            # Format: "{instance_name}/blobs/{hash}/{size}":
 | |
| 122 | +            if len(names) < 3 or names[-3] != 'blobs':
 | |
| 123 | +                raise InvalidArgumentError("Invalid resource name: [{}]"
 | |
| 124 | +                                           .format(request.resource_name))
 | |
| 121 | 125 |  | 
| 122 | -            # TODO: Decide on default instance name
 | |
| 123 | -            if path[0] == "blobs":
 | |
| 124 | -                if len(path) < 3 or not path[2].isdigit():
 | |
| 125 | -                    raise InvalidArgumentError("Invalid resource name: [{}]".format(request.resource_name))
 | |
| 126 | -                instance_name = ""
 | |
| 126 | +            elif names[0] != 'blobs':
 | |
| 127 | +                index = names.index('blobs')
 | |
| 128 | +                instance_name = '/'.join(names[:index])
 | |
| 129 | +                names = names[index:]
 | |
| 127 | 130 |  | 
| 128 | -            elif path[1] == "blobs":
 | |
| 129 | -                if len(path) < 4 or not path[3].isdigit():
 | |
| 130 | -                    raise InvalidArgumentError("Invalid resource name: [{}]".format(request.resource_name))
 | |
| 131 | +            if len(names) < 3:
 | |
| 132 | +                raise InvalidArgumentError("Invalid resource name: [{}]"
 | |
| 133 | +                                           .format(request.resource_name))
 | |
| 131 | 134 |  | 
| 132 | -            else:
 | |
| 133 | -                raise InvalidArgumentError("Invalid resource name: [{}]".format(request.resource_name))
 | |
| 135 | +            hash_, size_bytes = names[1], names[2]
 | |
| 134 | 136 |  | 
| 135 | 137 |              instance = self._get_instance(instance_name)
 | 
| 136 | -            yield from instance.read(path,
 | |
| 137 | -                                     request.read_offset,
 | |
| 138 | -                                     request.read_limit)
 | |
| 138 | + | |
| 139 | +            yield from instance.read(hash_, size_bytes,
 | |
| 140 | +                                     request.read_offset, request.read_limit)
 | |
| 139 | 141 |  | 
| 140 | 142 |          except InvalidArgumentError as e:
 | 
| 141 | 143 |              self.__logger.error(e)
 | 
| ... | ... | @@ -158,31 +160,31 @@ class ByteStreamService(bytestream_pb2_grpc.ByteStreamServicer): | 
| 158 | 160 |      def Write(self, requests, context):
 | 
| 159 | 161 |          self.__logger.debug("Write request from [%s]", context.peer())
 | 
| 160 | 162 |  | 
| 161 | -        try:
 | |
| 162 | -            requests, request_probe = tee(requests, 2)
 | |
| 163 | -            first_request = next(request_probe)
 | |
| 164 | - | |
| 165 | -            path = first_request.resource_name.split("/")
 | |
| 163 | +        request = next(requests)
 | |
| 164 | +        names = request.resource_name.split('/')
 | |
| 166 | 165 |  | 
| 167 | -            instance_name = path[0]
 | |
| 166 | +        try:
 | |
| 167 | +            instance_name = ''
 | |
| 168 | +            # Format: "{instance_name}/uploads/{uuid}/blobs/{hash}/{size}/{anything}":
 | |
| 169 | +            if len(names) < 5 or 'uploads' not in names or 'blobs' not in names:
 | |
| 170 | +                raise InvalidArgumentError("Invalid resource name: [{}]"
 | |
| 171 | +                                           .format(request.resource_name))
 | |
| 168 | 172 |  | 
| 169 | -            # TODO: Sort out no instance name
 | |
| 170 | -            if path[0] == "uploads":
 | |
| 171 | -                if len(path) < 5 or path[2] != "blobs" or not path[4].isdigit():
 | |
| 172 | -                    raise InvalidArgumentError("Invalid resource name: [{}]".format(first_request.resource_name))
 | |
| 173 | -                instance_name = ""
 | |
| 173 | +            elif names[0] != 'uploads':
 | |
| 174 | +                index = names.index('uploads')
 | |
| 175 | +                instance_name = '/'.join(names[:index])
 | |
| 176 | +                names = names[index:]
 | |
| 174 | 177 |  | 
| 175 | -            elif path[1] == "uploads":
 | |
| 176 | -                if len(path) < 6 or path[3] != "blobs" or not path[5].isdigit():
 | |
| 177 | -                    raise InvalidArgumentError("Invalid resource name: [{}]".format(first_request.resource_name))
 | |
| 178 | +            if len(names) < 5:
 | |
| 179 | +                raise InvalidArgumentError("Invalid resource name: [{}]"
 | |
| 180 | +                                           .format(request.resource_name))
 | |
| 178 | 181 |  | 
| 179 | -            else:
 | |
| 180 | -                raise InvalidArgumentError("Invalid resource name: [{}]".format(first_request.resource_name))
 | |
| 182 | +            _, hash_, size_bytes = names[1], names[3], names[4]
 | |
| 181 | 183 |  | 
| 182 | 184 |              instance = self._get_instance(instance_name)
 | 
| 183 | -            response = instance.write(requests)
 | |
| 184 | 185 |  | 
| 185 | -            return response
 | |
| 186 | +            return instance.write(hash_, size_bytes, request.data,
 | |
| 187 | +                                  [request.data for request in requests])
 | |
| 186 | 188 |  | 
| 187 | 189 |          except NotImplementedError as e:
 | 
| 188 | 190 |              self.__logger.error(e)
 | 
| ... | ... | @@ -13,18 +13,21 @@ | 
| 13 | 13 |  # limitations under the License.
 | 
| 14 | 14 |  | 
| 15 | 15 |  | 
| 16 | +import asyncio
 | |
| 16 | 17 |  from concurrent import futures
 | 
| 17 | 18 |  import logging
 | 
| 18 | 19 |  import os
 | 
| 20 | +import signal
 | |
| 19 | 21 |  | 
| 20 | 22 |  import grpc
 | 
| 21 | 23 |  | 
| 22 | -from .cas.service import ByteStreamService, ContentAddressableStorageService
 | |
| 23 | -from .actioncache.service import ActionCacheService
 | |
| 24 | -from .execution.service import ExecutionService
 | |
| 25 | -from .operations.service import OperationsService
 | |
| 26 | -from .bots.service import BotsService
 | |
| 27 | -from .referencestorage.service import ReferenceStorageService
 | |
| 24 | +from buildgrid.server.actioncache.service import ActionCacheService
 | |
| 25 | +from buildgrid.server.bots.service import BotsService
 | |
| 26 | +from buildgrid.server.cas.service import ByteStreamService, ContentAddressableStorageService
 | |
| 27 | +from buildgrid.server.execution.service import ExecutionService
 | |
| 28 | +from buildgrid.server._monitoring import MonitoringBus, MonitoringOutputType, MonitoringOutputFormat
 | |
| 29 | +from buildgrid.server.operations.service import OperationsService
 | |
| 30 | +from buildgrid.server.referencestorage.service import ReferenceStorageService
 | |
| 28 | 31 |  | 
| 29 | 32 |  | 
| 30 | 33 |  class BuildGridServer:
 | 
| ... | ... | @@ -34,7 +37,7 @@ class BuildGridServer: | 
| 34 | 37 |      requisite services.
 | 
| 35 | 38 |      """
 | 
| 36 | 39 |  | 
| 37 | -    def __init__(self, max_workers=None):
 | |
| 40 | +    def __init__(self, max_workers=None, monitor=False):
 | |
| 38 | 41 |          """Initializes a new :class:`BuildGridServer` instance.
 | 
| 39 | 42 |  | 
| 40 | 43 |          Args:
 | 
| ... | ... | @@ -46,9 +49,11 @@ class BuildGridServer: | 
| 46 | 49 |              # Use max_workers default from Python 3.5+
 | 
| 47 | 50 |              max_workers = (os.cpu_count() or 1) * 5
 | 
| 48 | 51 |  | 
| 49 | -        server = grpc.server(futures.ThreadPoolExecutor(max_workers))
 | |
| 52 | +        self.__grpc_executor = futures.ThreadPoolExecutor(max_workers)
 | |
| 53 | +        self.__grpc_server = grpc.server(self.__grpc_executor)
 | |
| 50 | 54 |  | 
| 51 | -        self._server = server
 | |
| 55 | +        self.__main_loop = asyncio.get_event_loop()
 | |
| 56 | +        self.__monitoring_bus = None
 | |
| 52 | 57 |  | 
| 53 | 58 |          self._execution_service = None
 | 
| 54 | 59 |          self._bots_service = None
 | 
| ... | ... | @@ -58,15 +63,34 @@ class BuildGridServer: | 
| 58 | 63 |          self._cas_service = None
 | 
| 59 | 64 |          self._bytestream_service = None
 | 
| 60 | 65 |  | 
| 66 | +        self._is_instrumented = monitor
 | |
| 67 | + | |
| 68 | +        if self._is_instrumented:
 | |
| 69 | +            self.__monitoring_bus = MonitoringBus(
 | |
| 70 | +                self.__main_loop, endpoint_type=MonitoringOutputType.STDOUT,
 | |
| 71 | +                serialisation_format=MonitoringOutputFormat.JSON)
 | |
| 72 | + | |
| 73 | +    # --- Public API ---
 | |
| 74 | + | |
| 61 | 75 |      def start(self):
 | 
| 62 | -        """Starts the server.
 | |
| 63 | -        """
 | |
| 64 | -        self._server.start()
 | |
| 76 | +        """Starts the BuildGrid server."""
 | |
| 77 | +        self.__grpc_server.start()
 | |
| 65 | 78 |  | 
| 66 | -    def stop(self, grace=0):
 | |
| 67 | -        """Stops the server.
 | |
| 68 | -        """
 | |
| 69 | -        self._server.stop(grace)
 | |
| 79 | +        if self._is_instrumented:
 | |
| 80 | +            self.__monitoring_bus.start()
 | |
| 81 | + | |
| 82 | +        self.__main_loop.add_signal_handler(signal.SIGTERM, self.stop)
 | |
| 83 | + | |
| 84 | +        self.__main_loop.run_forever()
 | |
| 85 | + | |
| 86 | +    def stop(self):
 | |
| 87 | +        """Stops the BuildGrid server."""
 | |
| 88 | +        if self._is_instrumented:
 | |
| 89 | +            self.__monitoring_bus.stop()
 | |
| 90 | + | |
| 91 | +        self.__main_loop.stop()
 | |
| 92 | + | |
| 93 | +        self.__grpc_server.stop(None)
 | |
| 70 | 94 |  | 
| 71 | 95 |      def add_port(self, address, credentials):
 | 
| 72 | 96 |          """Adds a port to the server.
 | 
| ... | ... | @@ -77,14 +101,19 @@ class BuildGridServer: | 
| 77 | 101 |          Args:
 | 
| 78 | 102 |              address (str): The address with port number.
 | 
| 79 | 103 |              credentials (:obj:`grpc.ChannelCredentials`): Credentials object.
 | 
| 104 | + | |
| 105 | +        Returns:
 | |
| 106 | +            int: Number of the bound port.
 | |
| 80 | 107 |          """
 | 
| 81 | 108 |          if credentials is not None:
 | 
| 82 | 109 |              self.__logger.info("Adding secure connection on: [%s]", address)
 | 
| 83 | -            self._server.add_secure_port(address, credentials)
 | |
| 110 | +            port_number = self.__grpc_server.add_secure_port(address, credentials)
 | |
| 84 | 111 |  | 
| 85 | 112 |          else:
 | 
| 86 | 113 |              self.__logger.info("Adding insecure connection on [%s]", address)
 | 
| 87 | -            self._server.add_insecure_port(address)
 | |
| 114 | +            port_number = self.__grpc_server.add_insecure_port(address)
 | |
| 115 | + | |
| 116 | +        return port_number
 | |
| 88 | 117 |  | 
| 89 | 118 |      def add_execution_instance(self, instance, instance_name):
 | 
| 90 | 119 |          """Adds an :obj:`ExecutionInstance` to the service.
 | 
| ... | ... | @@ -96,7 +125,7 @@ class BuildGridServer: | 
| 96 | 125 |              instance_name (str): Instance name.
 | 
| 97 | 126 |          """
 | 
| 98 | 127 |          if self._execution_service is None:
 | 
| 99 | -            self._execution_service = ExecutionService(self._server)
 | |
| 128 | +            self._execution_service = ExecutionService(self.__grpc_server)
 | |
| 100 | 129 |  | 
| 101 | 130 |          self._execution_service.add_instance(instance_name, instance)
 | 
| 102 | 131 |  | 
| ... | ... | @@ -110,7 +139,7 @@ class BuildGridServer: | 
| 110 | 139 |              instance_name (str): Instance name.
 | 
| 111 | 140 |          """
 | 
| 112 | 141 |          if self._bots_service is None:
 | 
| 113 | -            self._bots_service = BotsService(self._server)
 | |
| 142 | +            self._bots_service = BotsService(self.__grpc_server)
 | |
| 114 | 143 |  | 
| 115 | 144 |          self._bots_service.add_instance(instance_name, instance)
 | 
| 116 | 145 |  | 
| ... | ... | @@ -124,7 +153,7 @@ class BuildGridServer: | 
| 124 | 153 |              instance_name (str): Instance name.
 | 
| 125 | 154 |          """
 | 
| 126 | 155 |          if self._operations_service is None:
 | 
| 127 | -            self._operations_service = OperationsService(self._server)
 | |
| 156 | +            self._operations_service = OperationsService(self.__grpc_server)
 | |
| 128 | 157 |  | 
| 129 | 158 |          self._operations_service.add_instance(instance_name, instance)
 | 
| 130 | 159 |  | 
| ... | ... | @@ -138,7 +167,7 @@ class BuildGridServer: | 
| 138 | 167 |              instance_name (str): Instance name.
 | 
| 139 | 168 |          """
 | 
| 140 | 169 |          if self._reference_storage_service is None:
 | 
| 141 | -            self._reference_storage_service = ReferenceStorageService(self._server)
 | |
| 170 | +            self._reference_storage_service = ReferenceStorageService(self.__grpc_server)
 | |
| 142 | 171 |  | 
| 143 | 172 |          self._reference_storage_service.add_instance(instance_name, instance)
 | 
| 144 | 173 |  | 
| ... | ... | @@ -152,7 +181,7 @@ class BuildGridServer: | 
| 152 | 181 |              instance_name (str): Instance name.
 | 
| 153 | 182 |          """
 | 
| 154 | 183 |          if self._action_cache_service is None:
 | 
| 155 | -            self._action_cache_service = ActionCacheService(self._server)
 | |
| 184 | +            self._action_cache_service = ActionCacheService(self.__grpc_server)
 | |
| 156 | 185 |  | 
| 157 | 186 |          self._action_cache_service.add_instance(instance_name, instance)
 | 
| 158 | 187 |  | 
| ... | ... | @@ -166,7 +195,7 @@ class BuildGridServer: | 
| 166 | 195 |              instance_name (str): Instance name.
 | 
| 167 | 196 |          """
 | 
| 168 | 197 |          if self._cas_service is None:
 | 
| 169 | -            self._cas_service = ContentAddressableStorageService(self._server)
 | |
| 198 | +            self._cas_service = ContentAddressableStorageService(self.__grpc_server)
 | |
| 170 | 199 |  | 
| 171 | 200 |          self._cas_service.add_instance(instance_name, instance)
 | 
| 172 | 201 |  | 
| ... | ... | @@ -180,6 +209,12 @@ class BuildGridServer: | 
| 180 | 209 |              instance_name (str): Instance name.
 | 
| 181 | 210 |          """
 | 
| 182 | 211 |          if self._bytestream_service is None:
 | 
| 183 | -            self._bytestream_service = ByteStreamService(self._server)
 | |
| 212 | +            self._bytestream_service = ByteStreamService(self.__grpc_server)
 | |
| 184 | 213 |  | 
| 185 | 214 |          self._bytestream_service.add_instance(instance_name, instance)
 | 
| 215 | + | |
| 216 | +    # --- Public API: Monitoring ---
 | |
| 217 | + | |
| 218 | +    @property
 | |
| 219 | +    def is_instrumented(self):
 | |
| 220 | +        return self._is_instrumented | 
| ... | ... | @@ -137,7 +137,7 @@ def test_bytestream_write(mocked, instance, extra_data): | 
| 137 | 137 |          bytestream_pb2.WriteRequest(data=b'def', write_offset=3, finish_write=True)
 | 
| 138 | 138 |      ]
 | 
| 139 | 139 |  | 
| 140 | -    response = servicer.Write(requests, context)
 | |
| 140 | +    response = servicer.Write(iter(requests), context)
 | |
| 141 | 141 |      assert response.committed_size == 6
 | 
| 142 | 142 |      assert len(storage.data) == 1
 | 
| 143 | 143 |      assert (hash_, 6) in storage.data
 | 
| ... | ... | @@ -159,7 +159,7 @@ def test_bytestream_write_rejects_wrong_hash(mocked): | 
| 159 | 159 |          bytestream_pb2.WriteRequest(resource_name=resource_name, data=data, finish_write=True)
 | 
| 160 | 160 |      ]
 | 
| 161 | 161 |  | 
| 162 | -    servicer.Write(requests, context)
 | |
| 162 | +    servicer.Write(iter(requests), context)
 | |
| 163 | 163 |      context.set_code.assert_called_once_with(grpc.StatusCode.INVALID_ARGUMENT)
 | 
| 164 | 164 |  | 
| 165 | 165 |      assert len(storage.data) is 0
 | 
| ... | ... | @@ -13,19 +13,24 @@ | 
| 13 | 13 |  # limitations under the License.
 | 
| 14 | 14 |  | 
| 15 | 15 |  | 
| 16 | -from buildgrid._app.settings import parser
 | |
| 17 | -from buildgrid._app.commands.cmd_server import _create_server_from_config
 | |
| 18 | -from buildgrid.server.cas.service import ByteStreamService, ContentAddressableStorageService
 | |
| 19 | -from buildgrid.server.actioncache.service import ActionCacheService
 | |
| 20 | -from buildgrid.server.execution.service import ExecutionService
 | |
| 21 | -from buildgrid.server.operations.service import OperationsService
 | |
| 22 | -from buildgrid.server.bots.service import BotsService
 | |
| 23 | -from buildgrid.server.referencestorage.service import ReferenceStorageService
 | |
| 16 | +import grpc
 | |
| 17 | + | |
| 18 | +from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
 | |
| 19 | +from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2_grpc
 | |
| 20 | +from buildgrid._protos.buildstream.v2 import buildstream_pb2
 | |
| 21 | +from buildgrid._protos.buildstream.v2 import buildstream_pb2_grpc
 | |
| 22 | +from buildgrid._protos.google.bytestream import bytestream_pb2
 | |
| 23 | +from buildgrid._protos.google.bytestream import bytestream_pb2_grpc
 | |
| 24 | +from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2
 | |
| 25 | +from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grpc
 | |
| 26 | +from buildgrid._protos.google.longrunning import operations_pb2
 | |
| 27 | +from buildgrid._protos.google.longrunning import operations_pb2_grpc
 | |
| 24 | 28 |  | 
| 25 | 29 |  from .utils.cas import run_in_subprocess
 | 
| 30 | +from .utils.server import serve
 | |
| 26 | 31 |  | 
| 27 | 32 |  | 
| 28 | -config = """
 | |
| 33 | +CONFIGURATION = """
 | |
| 29 | 34 |  server:
 | 
| 30 | 35 |    - !channel
 | 
| 31 | 36 |      port: 50051
 | 
| ... | ... | @@ -72,24 +77,102 @@ instances: | 
| 72 | 77 |  | 
def test_create_server():
    """Start a server from CONFIGURATION and probe every expected gRPC service.

    Each service is exercised from a subprocess; a service answering with
    gRPC UNIMPLEMENTED or an unexpected message type marks the test failed.
    """
    # Actual test function, to be run in a subprocess:
    def __test_create_server(queue, remote):
        # Open a channel to the remote server:
        channel = grpc.insecure_channel(remote)

        def __check_service(stub_call, request, expected_descriptor, streaming=False):
            # Probe one service endpoint. UNIMPLEMENTED status or a response
            # of the wrong message type reports failure through the queue.
            # Any other RPC error is tolerated: the service is registered but
            # legitimately rejected the mostly-empty probe request.
            try:
                response = stub_call(request)
                if streaming:
                    # Streaming calls only raise/respond on first iteration:
                    response = next(response)
                assert response.DESCRIPTOR is expected_descriptor
            except grpc.RpcError as e:
                if e.code() == grpc.StatusCode.UNIMPLEMENTED:
                    queue.put(False)
            except AssertionError:
                queue.put(False)

        __check_service(remote_execution_pb2_grpc.ExecutionStub(channel).Execute,
                        remote_execution_pb2.ExecuteRequest(instance_name='main'),
                        operations_pb2.Operation.DESCRIPTOR, streaming=True)

        __check_service(remote_execution_pb2_grpc.ActionCacheStub(channel).GetActionResult,
                        remote_execution_pb2.GetActionResultRequest(instance_name='main'),
                        remote_execution_pb2.ActionResult.DESCRIPTOR)

        __check_service(remote_execution_pb2_grpc.ContentAddressableStorageStub(channel).BatchUpdateBlobs,
                        remote_execution_pb2.BatchUpdateBlobsRequest(instance_name='main'),
                        remote_execution_pb2.BatchUpdateBlobsResponse.DESCRIPTOR)

        __check_service(buildstream_pb2_grpc.ReferenceStorageStub(channel).GetReference,
                        buildstream_pb2.GetReferenceRequest(instance_name='main'),
                        buildstream_pb2.GetReferenceResponse.DESCRIPTOR)

        __check_service(bytestream_pb2_grpc.ByteStreamStub(channel).Read,
                        bytestream_pb2.ReadRequest(),
                        bytestream_pb2.ReadResponse.DESCRIPTOR, streaming=True)

        __check_service(operations_pb2_grpc.OperationsStub(channel).ListOperations,
                        operations_pb2.ListOperationsRequest(name='main'),
                        operations_pb2.ListOperationsResponse.DESCRIPTOR)

        __check_service(bots_pb2_grpc.BotsStub(channel).CreateBotSession,
                        bots_pb2.CreateBotSessionRequest(),
                        bots_pb2.BotSession.DESCRIPTOR)

        # True is put unconditionally, as in the original flow: the parent is
        # expected to read the FIRST queued value, so an earlier False still
        # signals failure.
        queue.put(True)

    with serve(CONFIGURATION) as server:
        assert run_in_subprocess(__test_create_server, server.remote)
| 1 | +# Copyright (C) 2018 Bloomberg LP
 | |
| 2 | +#
 | |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License");
 | |
| 4 | +# you may not use this file except in compliance with the License.
 | |
| 5 | +# You may obtain a copy of the License at
 | |
| 6 | +#
 | |
| 7 | +#  <http://www.apache.org/licenses/LICENSE-2.0>
 | |
| 8 | +#
 | |
| 9 | +# Unless required by applicable law or agreed to in writing, software
 | |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS,
 | |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | |
| 12 | +# See the License for the specific language governing permissions and
 | |
| 13 | +# limitations under the License.
 | |
| 14 | + | |
| 15 | + | |
from contextlib import contextmanager
import multiprocessing
import signal

import pytest_cov
# Explicitly import the submodule: `import pytest_cov` alone does not
# guarantee `pytest_cov.embed` is loaded.
import pytest_cov.embed

from buildgrid._app.settings import parser
from buildgrid.server.instance import BuildGridServer
| 25 | + | |
@contextmanager
def serve(configuration):
    """Yield a test :class:`Server` running *configuration*, stopping it on exit."""
    instance = Server(configuration)
    try:
        yield instance
    finally:
        # Always tear the subprocess down, even if the caller's body raised:
        instance.quit()
| 33 | + | |
| 34 | + | |
class Server:
    """Run a BuildGrid server in a child process for testing purposes.

    The child binds an OS-assigned port and reports it back through a
    multiprocessing queue; `remote` then holds a connectable address.
    """

    def __init__(self, configuration):
        """Spawn a server subprocess from a YAML *configuration* string.

        Raises:
            queue.Empty: if the subprocess fails to report a port in time
                (e.g. it crashed during start-up).
        """
        self.configuration = configuration

        self.__queue = multiprocessing.Queue()
        self.__process = multiprocessing.Process(
            target=Server.serve,
            args=(self.__queue, self.configuration))
        self.__process.start()

        # Bounded wait: without a timeout, a subprocess that dies before
        # publishing its port would hang the whole test session here.
        self.port = self.__queue.get(timeout=10)
        self.remote = 'localhost:{}'.format(self.port)

    @classmethod
    def serve(cls, queue, configuration):
        """Subprocess entry-point: parse *configuration*, register the
        configured instances, bind a random port, publish it through
        *queue* and serve until SIGTERM."""
        # Flush coverage data when the parent terminates this process:
        pytest_cov.embed.cleanup_on_sigterm()

        server = BuildGridServer()

        def __signal_handler(signum, frame):
            server.stop()

        # SIGINT is reserved for the parent/test-runner; SIGTERM triggers a
        # clean server shutdown:
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, __signal_handler)

        instances = parser.get_parser().safe_load(configuration)['instances']
        for instance in instances:
            instance_name = instance['name']
            for service in instance['services']:
                service.register_instance_with_server(instance_name, server)

        # Port 0 lets the OS pick any free port:
        port = server.add_port('localhost:0', None)

        queue.put(port)

        server.start()

    def quit(self):
        """Terminate the server subprocess and wait for it to exit."""
        if self.__process:
            self.__process.terminate()
            self.__process.join()
