# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from ..client import BaseClient
from ..client import createApiClient
from ..client import config
from ..client import createTemporaryCredentials
from ..client import createSession
_defaultConfig = config
class Queue(BaseClient):
"""
The queue service is responsible for accepting tasks and tracking their state
as they are executed by workers, in order to ensure they are eventually
resolved.
## Artifact Storage Types
* **Object artifacts** contain arbitrary data, stored via the object service.
* **Redirect artifacts** will redirect the caller to a URL when fetched,
with a 303 (See Other) response. Clients will not apply any kind of
authentication to that URL.
* **Link artifacts** will be treated as if the caller requested the linked
artifact on the same task. Links may be chained, but cycles are forbidden.
The caller must have scopes for the linked artifact, or a 403 response will
be returned.
* **Error artifacts** consist only of metadata which the queue will
store for you. These artifacts are meant to indicate that the
worker or the task failed to generate a specific artifact that it
would otherwise have uploaded. For example, docker-worker will upload an
error artifact if the file it was supposed to upload doesn't exist or
turns out to be a directory. Clients requesting an error artifact will
get a `424` (Failed Dependency) response. This is mainly designed to
ensure that dependent tasks can distinguish between artifacts that were
supposed to be generated and artifacts whose names are misspelled.
* **S3 artifacts** are used for static files which will be
stored on S3. When creating an S3 artifact the queue will return a
pre-signed URL to which you can do a `PUT` request to upload your
artifact. Note that `PUT` request **must** specify the `content-length`
header and **must** give the `content-type` header the same value as in
the request to `createArtifact`. S3 artifacts will be deprecated soon,
and users should prefer object artifacts instead.
## Artifact immutability
Generally speaking, an artifact cannot be overwritten once created.
However, repeating the request with the same properties will succeed,
as the operation is idempotent.
This is useful if you need to refresh a signed URL while uploading.
Do not abuse this to overwrite artifacts created by another entity,
such as a worker-host overwriting an artifact created by worker-code.
The queue defines the following *immutability special cases*:
* A `reference` artifact can replace an existing `reference` artifact.
* A `link` artifact can replace an existing `reference` artifact.
* Any artifact's `expires` can be extended (made later, but not earlier).
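## Example
A minimal sketch of constructing this client and checking a task's status.
The root URL, credentials, and task ID below are placeholders; pass whatever
options your deployment requires:

    import taskcluster
    # Build a Queue client pointed at a Taskcluster deployment.
    queue = taskcluster.Queue({
        'rootUrl': 'https://tc.example.com',
        'credentials': {'clientId': '...', 'accessToken': '...'},
    })
    status = queue.status('SOME_TASK_ID')
    print(status['status']['state'])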
"""
classOptions = {
}
serviceName = 'queue'
apiVersion = 'v1'
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def task(self, *args, **kwargs):
"""
Get Task Definition
This end-point will return the task definition. Notice that the task
definition may have been modified by the queue: if an optional property is
not specified, the queue may provide a default value.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
def status(self, *args, **kwargs):
"""
Get task status
Get task status structure from `taskId`
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["status"], *args, **kwargs)
def listTaskGroup(self, *args, **kwargs):
"""
List Task Group
List tasks sharing the same `taskGroupId`.
As a task-group may contain an unbounded number of tasks, this end-point
may return a `continuationToken`. To continue listing tasks you must call
`listTaskGroup` again with the `continuationToken` as the
query-string option `continuationToken`.
By default this end-point will try to return up to 1000 members in one
request. But it **may return less**, even if more tasks are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listTaskGroup` with the last `continuationToken` until you
get a result without a `continuationToken`.
If you are not interested in listing all the members at once, you may
use the query-string option `limit` to return fewer.
This method is ``stable``
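A paging sketch (the task group ID is a placeholder; this assumes query-string
options are passed as keyword arguments and that members are returned under
`tasks`, per the list-task-group response schema):

    members = []
    token = None
    while True:
        kwargs = {'continuationToken': token} if token else {}
        result = queue.listTaskGroup('SOME_TASK_GROUP_ID', **kwargs)
        members.extend(result['tasks'])
        token = result.get('continuationToken')
        if not token:
            break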
"""
return self._makeApiCall(self.funcinfo["listTaskGroup"], *args, **kwargs)
def listDependentTasks(self, *args, **kwargs):
"""
List Dependent Tasks
List tasks that depend on the given `taskId`.
As many tasks from different task-groups may depend on a single task,
this end-point may return a `continuationToken`. To continue listing
tasks you must call `listDependentTasks` again with the
`continuationToken` as the query-string option `continuationToken`.
By default this end-point will try to return up to 1000 tasks in one
request. But it **may return less**, even if more tasks are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listDependentTasks` with the last `continuationToken` until
you get a result without a `continuationToken`.
If you are not interested in listing all the tasks at once, you may
use the query-string option `limit` to return fewer.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listDependentTasks"], *args, **kwargs)
def createTask(self, *args, **kwargs):
"""
Create New Task
Create a new task. This is an **idempotent** operation, so repeat it if
you get an internal server error or the network connection is dropped.
**Task `deadline`**: the deadline property can be no more than 5 days
into the future. This is to limit the amount of pending tasks not being
taken care of. Ideally, you should use a much shorter deadline.
**Task expiration**: the `expires` property must be greater than the
task `deadline`. If not provided it will default to `deadline` + one
year. Notice that artifacts created by a task must expire before the
task's expiration.
**Task specific routing-keys**: using the `task.routes` property you may
define task specific routing-keys. If a task has a task specific
routing-key: `<route>`, then when the AMQP message about the task is
published, the message will be CC'ed with the routing-key:
`route.<route>`. This is useful if you want another component to listen
for completed tasks you have posted. The caller must have scope
`queue:route:<route>` for each route.
**Dependencies**: any tasks referenced in `task.dependencies` must have
already been created at the time of this call.
**Scopes**: Note that the scopes required to complete this API call depend
on the content of the `scopes`, `routes`, `schedulerId`, `priority`,
`provisionerId`, and `workerType` properties of the task definition.
This method is ``stable``
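An illustrative sketch of a minimal task definition; the authoritative list of
required properties is the create-task request schema, and all values below
are placeholders:

    import datetime
    import taskcluster

    now = datetime.datetime.utcnow()
    task_id = taskcluster.slugId()  # assumed helper for a random URL-safe id
    queue.createTask(task_id, {
        'taskQueueId': 'proj-example/tutorial',
        'created': now.isoformat() + 'Z',
        'deadline': (now + datetime.timedelta(hours=3)).isoformat() + 'Z',
        'payload': {},  # interpreted by the worker implementation
        'metadata': {
            'name': 'example task',
            'description': 'illustrative only',
            'owner': 'nobody@example.com',
            'source': 'https://example.com/',
        },
    })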
"""
return self._makeApiCall(self.funcinfo["createTask"], *args, **kwargs)
def scheduleTask(self, *args, **kwargs):
"""
Schedule Defined Task
scheduleTask will schedule a task to be executed, even if it has
unresolved dependencies. A task would otherwise only be scheduled if
its dependencies were resolved.
This is useful if you have defined a task that depends on itself or on
some other task that has not been resolved, but you wish the task to be
scheduled immediately.
This will announce the task as pending and workers will be allowed to
claim it and resolve the task.
**Note** this operation is **idempotent** and will not fail or complain
if called with a `taskId` that is already scheduled, or even resolved.
To reschedule a task previously resolved, use `rerunTask`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
def rerunTask(self, *args, **kwargs):
"""
Rerun a Resolved Task
This method _reruns_ a previously resolved task, even if it was
_completed_. This is useful if your task completes unsuccessfully, and
you just want to run it from scratch again. This will also reset the
number of `retries` allowed. It will schedule a task that is _unscheduled_
regardless of the state of its dependencies.
This method is deprecated in favour of creating a new task with the same
task definition (but with a new taskId).
Remember that `retries` in the task status counts the number of runs that
the queue has started because the worker stopped responding, for example
because a spot node died.
**Remark** this operation is idempotent: if it is invoked for a task that
is `pending` or `running`, it will just return the current task status.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
def cancelTask(self, *args, **kwargs):
"""
Cancel Task
This method will cancel a task that is either `unscheduled`, `pending` or
`running`. It will resolve the current run as `exception` with
`reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
it doesn't have any runs, an initial run will be added and resolved as
described above. Hence, after canceling a task, it cannot be scheduled
with `queue.scheduleTask`, but a new run can be created with
`queue.rerunTask`. These semantics are equivalent to calling
`queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent, if you try to cancel a task that
isn't `unscheduled`, `pending` or `running`, this operation will just
return the current task status.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
def claimWork(self, *args, **kwargs):
"""
Claim Work
Claim pending task(s) for the given task queue.
If any work is available (even if fewer than the requested number of
tasks), this will return immediately. Otherwise, it will block for tens of
seconds waiting for work. If no work appears, it will return an empty
list of tasks. Callers should sleep a short while (to avoid denial of
service in an error condition) and call the endpoint again. This is a
simple implementation of "long polling".
This method is ``stable``
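A long-polling sketch (identifiers are placeholders; the request body follows
the claim-work request schema, and `handle_claim` is a hypothetical function):

    import time

    while True:
        result = queue.claimWork('proj-example/tutorial', {
            'tasks': 4,                     # claim at most 4 tasks per call
            'workerGroup': 'example-group',
            'workerId': 'example-worker',
        })
        if not result['tasks']:
            time.sleep(5)   # brief pause before polling again
            continue
        for claim in result['tasks']:
            handle_claim(claim)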
"""
return self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
def claimTask(self, *args, **kwargs):
"""
Claim Task
Claim a task; this method was never fully documented.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["claimTask"], *args, **kwargs)
def reclaimTask(self, *args, **kwargs):
"""
Reclaim task
Refresh the claim for a specific `runId` for given `taskId`. This updates
the `takenUntil` property and returns a new set of temporary credentials
for performing requests on behalf of the task. These credentials should
be used in place of the credentials returned by `claimWork`.
The `reclaimTask` request serves to:
* Postpone `takenUntil` preventing the queue from resolving
`claim-expired`,
* Refresh temporary credentials used for processing the task, and
* Abort execution if the task/run have been resolved.
If the `takenUntil` timestamp is exceeded the queue will resolve the run
as _exception_ with reason `claim-expired`, and proceed to retry the
task. This ensures that tasks are retried, even if workers disappear
without warning.
If the task is resolved, this end-point will return `409` reporting
`RequestConflict`. This typically happens if the task has been canceled
or the `task.deadline` has been exceeded. If reclaiming fails, workers
should abort the task and forget about the given `runId`. There is no
need to resolve the run or upload artifacts.
This method is ``stable``
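A reclaim sketch for a worker loop; a real worker would call this shortly
before the current `takenUntil`. The exception class and its `status_code`
attribute are assumptions about the client library:

    from taskcluster.exceptions import TaskclusterRestFailure

    try:
        reclaim = queue.reclaimTask('SOME_TASK_ID', 0)
        credentials = reclaim['credentials']  # use for further task calls
    except TaskclusterRestFailure as e:
        if e.status_code == 409:
            # Task/run already resolved: abort work, forget this runId.
            pass
        else:
            raise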
"""
return self._makeApiCall(self.funcinfo["reclaimTask"], *args, **kwargs)
def reportCompleted(self, *args, **kwargs):
"""
Report Run Completed
Report a task completed, resolving the run as `completed`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
def reportFailed(self, *args, **kwargs):
"""
Report Run Failed
Report a run failed, resolving the run as `failed`. Use this to resolve
a run that failed because the task specific code behaved unexpectedly.
For example the task exited non-zero, or didn't produce expected output.
Do not use this if the task couldn't be run because of a malformed
payload, or some other unexpected condition. In these cases we have a task
exception, which should be reported with `reportException`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
def reportException(self, *args, **kwargs):
"""
Report Task Exception
Resolve a run as _exception_. Generally, you will want to report tasks as
failed instead of exception. You should `reportException` if,
* The `task.payload` is invalid,
* Non-existent resources are referenced,
* Declared actions cannot be executed due to unavailable resources,
* The worker had to shutdown prematurely,
* The worker experienced an unknown error, or,
* The task explicitly requested a retry.
Do not use this to signal that some user-specified code crashed for any
reason specific to this code. If user-specific code hits a resource that
is temporarily unavailable, the worker should report the task as _failed_.
This method is ``stable``
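A sketch of resolving a run as exception; valid `reason` values are defined by
the task-exception request schema (`worker-shutdown` is one such value):

    queue.reportException('SOME_TASK_ID', 0, {'reason': 'worker-shutdown'})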
"""
return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
def createArtifact(self, *args, **kwargs):
"""
Create Artifact
This API end-point creates an artifact for a specific run of a task. This
should **only** be used by a worker currently operating on this task, or
from a process running within the task (i.e. on the worker).
All artifacts must specify when they expire. The queue will
automatically take care of deleting artifacts past their
expiration point. This feature makes it feasible to upload large
intermediate artifacts from data processing applications, as the
artifacts can be set to expire a few days later.
This method is ``stable``
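A sketch of creating an `s3` artifact and uploading to the returned
pre-signed URL. Field names follow the post-artifact request/response schemas
as described in the class docstring; the `requests` library and the `putUrl`
response property are assumptions:

    import requests

    data = b'hello artifact'
    created = queue.createArtifact('SOME_TASK_ID', 0, 'public/hello.txt', {
        'storageType': 's3',
        'contentType': 'text/plain',
        'expires': '2030-01-01T00:00:00.000Z',
    })
    # The PUT must use the same content-type passed to createArtifact.
    requests.put(created['putUrl'], data=data,
                 headers={'Content-Type': 'text/plain',
                          'Content-Length': str(len(data))})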
"""
return self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs)
def finishArtifact(self, *args, **kwargs):
"""
Finish Artifact
This endpoint marks an artifact as present for the given task, and
should be called when the artifact data is fully uploaded.
The storage types `reference`, `link`, and `error` do not need to
be finished, as they are finished immediately by `createArtifact`.
The storage type `s3` does not support this functionality and cannot
be finished. In all such cases, calling this method is an input error
(400).
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["finishArtifact"], *args, **kwargs)
def getArtifact(self, *args, **kwargs):
"""
Get Artifact Data from Run
Get artifact by `<name>` from a specific run.
**Artifact Access**, in order to get an artifact you need the scope
`queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
To allow access to fetch artifacts with a client like `curl` or a web
browser, without using Taskcluster credentials, include a scope in the
`anonymous` role. The convention is to include
`queue:get-artifact:public/*`.
**Response**: the HTTP response to this method is a 303 redirect to the
URL from which the artifact can be downloaded. The body of that response
contains the data described in the output schema, containing the same URL.
Callers are encouraged to use whichever method of gathering the URL is
most convenient. Standard HTTP clients will follow the redirect, while
API client libraries will return the JSON body.
In order to download an artifact the following must be done (a sketch
appears at the end of this description):
1. Obtain the queue URL. Building a signed URL with a Taskcluster client is
recommended.
1. Make a GET request which does not follow redirects.
1. In all cases, if specified, the
x-taskcluster-location-{content,transfer}-{sha256,length} values must be
validated to be equal to the Content-Length and SHA-256 checksum of the
final artifact downloaded, as well as any intermediate redirects.
1. If this response is a 500-series error, retry using an exponential
backoff. No more than 5 retries should be attempted.
1. If this response is a 400-series error, treat it appropriately for
your context. This might be an error in responding to this request or
an Error storage type body. This request should not be retried.
1. If this response is a 200-series response, the response body is the artifact.
If the x-taskcluster-location-{content,transfer}-{sha256,length} and
x-taskcluster-location-content-encoding are specified, they should match
this response body
1. If the response type is a 300-series redirect, the artifact will be at the
location specified by the `Location` header. There are multiple artifact storage
types which use a 300-series redirect.
1. For all redirects followed, the user must verify that the content-sha256, content-length,
transfer-sha256, transfer-length and content-encoding match every further request. The final
artifact must also be validated against the values specified in the original queue response
1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference`
must not occur
**Headers**
The following important headers are set on the response to this method:
* location: the url of the artifact if a redirect is to be performed
* x-taskcluster-artifact-storage-type: the storage type. Example: s3
This method is ``stable``
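A download sketch using a signed URL; `buildSignedUrl` is assumed to be
available on the client, the `requests` library is illustrative, and
validation of the x-taskcluster-location-* headers is elided:

    import requests

    url = queue.buildSignedUrl('getArtifact', 'SOME_TASK_ID', 0,
                               'public/logs/live.log')
    response = requests.get(url, allow_redirects=False)
    while response.status_code in (302, 303):
        # Follow redirects manually so headers can be checked per hop.
        response = requests.get(response.headers['Location'],
                                allow_redirects=False)
    response.raise_for_status()
    data = response.content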
"""
return self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
def getLatestArtifact(self, *args, **kwargs):
"""
Get Artifact Data from Latest Run
Get artifact by `<name>` from the last run of a task.
**Artifact Access**, in order to get an artifact you need the scope
`queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
To allow access to fetch artifacts with a client like `curl` or a web
browser, without using Taskcluster credentials, include a scope in the
`anonymous` role. The convention is to include
`queue:get-artifact:public/*`.
**API Clients**, this method will redirect you to the artifact, if it is
stored externally. Either way, the response may not be JSON. So API
client users might want to generate a signed URL for this end-point and
use that URL with a normal HTTP client.
**Remark**, this end-point is slightly slower than
`queue.getArtifact`, so consider that if you already know the `runId` of
the latest run. Otherwise, just use the most convenient API end-point.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getLatestArtifact"], *args, **kwargs)
def listArtifacts(self, *args, **kwargs):
"""
Get Artifacts from Run
Returns a list of artifacts and associated meta-data for a given run.
As a task may have many artifacts, paging may be necessary. If this
end-point returns a `continuationToken`, you should call the end-point
again with the `continuationToken` as the query-string option:
`continuationToken`.
By default this end-point will list up to 1000 artifacts in a single page;
you may limit this with the query-string parameter `limit`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
def listLatestArtifacts(self, *args, **kwargs):
"""
Get Artifacts from Latest Run
Returns a list of artifacts and associated meta-data for the latest run
from the given task.
As a task may have many artifacts, paging may be necessary. If this
end-point returns a `continuationToken`, you should call the end-point
again with the `continuationToken` as the query-string option:
`continuationToken`.
By default this end-point will list up to 1000 artifacts in a single page;
you may limit this with the query-string parameter `limit`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listLatestArtifacts"], *args, **kwargs)
def artifactInfo(self, *args, **kwargs):
"""
Get Artifact Information From Run
Returns associated metadata for a given artifact, in the given task run.
The metadata is the same as that returned from `listArtifacts`, and does
not grant access to the artifact data.
Note that this method does *not* automatically follow link artifacts.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["artifactInfo"], *args, **kwargs)
def latestArtifactInfo(self, *args, **kwargs):
"""
Get Artifact Information From Latest Run
Returns associated metadata for a given artifact, in the latest run of the
task. The metadata is the same as that returned from `listArtifacts`,
and does not grant access to the artifact data.
Note that this method does *not* automatically follow link artifacts.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["latestArtifactInfo"], *args, **kwargs)
def artifact(self, *args, **kwargs):
"""
Get Artifact Content From Run
Returns information about the content of the artifact, in the given task run.
Depending on the storage type, the endpoint returns the content of the artifact
or enough information to access that content.
This method follows link artifacts, so it will not return content
for a link artifact.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["artifact"], *args, **kwargs)
def latestArtifact(self, *args, **kwargs):
"""
Get Artifact Content From Latest Run
Returns information about the content of the artifact, in the latest task run.
Depending on the storage type, the endpoint returns the content of the artifact
or enough information to access that content.
This method follows link artifacts, so it will not return content
for a link artifact.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["latestArtifact"], *args, **kwargs)
def listProvisioners(self, *args, **kwargs):
"""
Get a list of all active provisioners
Get all active provisioners.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 provisioners in a single
page. You may limit this with the query-string parameter `limit`.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
def getProvisioner(self, *args, **kwargs):
"""
Get an active provisioner
Get an active provisioner.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
def declareProvisioner(self, *args, **kwargs):
"""
Update a provisioner
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
possessed. For example, a request to update the `my-provisioner`
provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
`queue:declare-provisioner:my-provisioner#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
def pendingTasks(self, *args, **kwargs):
"""
Get Number of Pending Tasks
Get an approximate number of pending tasks for the given `taskQueueId`.
The underlying Azure Storage Queues only promise to give us an estimate.
Furthermore, we cache the result in memory for 20 seconds, so consumers
should by no means expect this to be an accurate number.
It is, however, a solid estimate of the number of pending tasks.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs)
def listWorkerTypes(self, *args, **kwargs):
"""
Get a list of all active worker-types
Get all active worker-types for the given provisioner.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 worker-types in a single
page. You may limit this with the query-string parameter `limit`.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
def getWorkerType(self, *args, **kwargs):
"""
Get a worker-type
Get a worker-type from a provisioner.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getWorkerType"], *args, **kwargs)
def declareWorkerType(self, *args, **kwargs):
"""
Update a worker-type
Declare a workerType, supplying some details about it.
`declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
possessed. For example, a request to update the `highmem` worker-type within the `my-provisioner`
provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
`queue:declare-worker-type:my-provisioner/highmem#description`.
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
def listTaskQueues(self, *args, **kwargs):
"""
Get a list of all active task queues
Get all active task queues.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 task queues in a single
page. You may limit this with the query-string parameter `limit`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listTaskQueues"], *args, **kwargs)
def getTaskQueue(self, *args, **kwargs):
"""
Get a task queue
Get a task queue.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getTaskQueue"], *args, **kwargs)
def listWorkers(self, *args, **kwargs):
"""
Get a list of all active workers of a workerType
Get a list of all active workers of a workerType.
`listWorkers` allows a response to be filtered by quarantined and non-quarantined workers.
To filter the query, you should call the end-point with `quarantined` as a query-string option with a
true or false value.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 workers in a single
page. You may limit this with the query-string parameter `limit`.
This method is ``experimental``
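A filtering sketch (identifiers are placeholders; this assumes query-string
options are passed as keyword arguments and results are listed under
`workers`):

    result = queue.listWorkers('proj-example', 'tutorial', quarantined='true')
    for worker in result.get('workers', []):
        print(worker['workerGroup'], worker['workerId'])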
"""
return self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
def getWorker(self, *args, **kwargs):
"""
Get a worker
Get a worker from a worker-type.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getWorker"], *args, **kwargs)
def quarantineWorker(self, *args, **kwargs):
"""
Quarantine a worker
Quarantine a worker
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
def declareWorker(self, *args, **kwargs):
"""
Declare a worker
Declare a worker, supplying some details about it.
`declareWorker` allows updating one or more properties of a worker as long as the required scopes are
possessed.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs)
funcinfo = {
"artifact": {
'args': ['taskId', 'runId', 'name'],
'method': 'get',
'name': 'artifact',
'output': 'v1/artifact-content-response.json#',
'route': '/task/<taskId>/runs/<runId>/artifact-content/<name>',
'stability': 'stable',
},
"artifactInfo": {
'args': ['taskId', 'runId', 'name'],
'method': 'get',
'name': 'artifactInfo',
'output': 'v1/artifact-response.json#',
'route': '/task/<taskId>/runs/<runId>/artifact-info/<name>',
'stability': 'stable',
},
"cancelTask": {
'args': ['taskId'],
'method': 'post',
'name': 'cancelTask',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/cancel',
'stability': 'stable',
},
"claimTask": {
'args': ['taskId', 'runId'],
'input': 'v1/task-claim-request.json#',
'method': 'post',
'name': 'claimTask',
'output': 'v1/task-claim-response.json#',
'route': '/task/<taskId>/runs/<runId>/claim',
'stability': 'deprecated',
},
"claimWork": {
'args': ['taskQueueId'],
'input': 'v1/claim-work-request.json#',
'method': 'post',
'name': 'claimWork',
'output': 'v1/claim-work-response.json#',
'route': '/claim-work/<taskQueueId>',
'stability': 'stable',
},
"createArtifact": {
'args': ['taskId', 'runId', 'name'],
'input': 'v1/post-artifact-request.json#',
'method': 'post',
'name': 'createArtifact',
'output': 'v1/post-artifact-response.json#',
'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
'stability': 'stable',
},
"createTask": {
'args': ['taskId'],
'input': 'v1/create-task-request.json#',
'method': 'put',
'name': 'createTask',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>',
'stability': 'stable',
},
"declareProvisioner": {
'args': ['provisionerId'],
'input': 'v1/update-provisioner-request.json#',
'method': 'put',
'name': 'declareProvisioner',
'output': 'v1/provisioner-response.json#',
'route': '/provisioners/<provisionerId>',
'stability': 'deprecated',
},
"declareWorker": {
'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
'input': 'v1/update-worker-request.json#',
'method': 'put',
'name': 'declareWorker',
'output': 'v1/worker-response.json#',
'route': '/provisioners/<provisionerId>/worker-types/<workerType>/<workerGroup>/<workerId>',
'stability': 'experimental',
},
"declareWorkerType": {
'args': ['provisionerId', 'workerType'],
'input': 'v1/update-workertype-request.json#',
'method': 'put',
'name': 'declareWorkerType',
'output': 'v1/workertype-response.json#',
'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
'stability': 'deprecated',
},
"finishArtifact": {
'args': ['taskId', 'runId', 'name'],
'input': 'v1/finish-artifact-request.json#',
'method': 'put',
'name': 'finishArtifact',
'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
'stability': 'stable',
},
"getArtifact": {
'args': ['taskId', 'runId', 'name'],
'method': 'get',
'name': 'getArtifact',
'output': 'v1/get-artifact-response.json#',
'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
'stability': 'stable',
},
"getLatestArtifact": {
'args': ['taskId', 'name'],
'method': 'get',
'name': 'getLatestArtifact',
'output': 'v1/get-artifact-response.json#',
'route': '/task/<taskId>/artifacts/<name>',
'stability': 'stable',
},
"getProvisioner": {
'args': ['provisionerId'],
'method': 'get',
'name': 'getProvisioner',
'output': 'v1/provisioner-response.json#',
'route': '/provisioners/<provisionerId>',
'stability': 'deprecated',
},
"getTaskQueue": {
'args': ['taskQueueId'],
'method': 'get',
'name': 'getTaskQueue',
'output': 'v1/taskqueue-response.json#',
'route': '/task-queues/<taskQueueId>',
'stability': 'stable',
},
"getWorker": {
'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
'method': 'get',
'name': 'getWorker',
'output': 'v1/worker-response.json#',
'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
'stability': 'experimental',
},
"getWorkerType": {
'args': ['provisionerId', 'workerType'],
'method': 'get',
'name': 'getWorkerType',
'output': 'v1/workertype-response.json#',
'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
'stability': 'deprecated',
},
"latestArtifact": {
'args': ['taskId', 'name'],
'method': 'get',
'name': 'latestArtifact',
'output': 'v1/artifact-content-response.json#',
'route': '/task/<taskId>/artifact-content/<name>',
'stability': 'stable',
},
"latestArtifactInfo": {
'args': ['taskId', 'name'],
'method': 'get',
'name': 'latestArtifactInfo',
'output': 'v1/artifact-response.json#',
'route': '/task/<taskId>/artifact-info/<name>',
'stability': 'stable',
},
"listArtifacts": {
'args': ['taskId', 'runId'],
'method': 'get',
'name': 'listArtifacts',
'output': 'v1/list-artifacts-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/task/<taskId>/runs/<runId>/artifacts',
'stability': 'stable',
},
"listDependentTasks": {
'args': ['taskId'],
'method': 'get',
'name': 'listDependentTasks',
'output': 'v1/list-dependent-tasks-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/task/<taskId>/dependents',
'stability': 'stable',
},
"listLatestArtifacts": {
'args': ['taskId'],
'method': 'get',
'name': 'listLatestArtifacts',
'output': 'v1/list-artifacts-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/task/<taskId>/artifacts',
'stability': 'stable',
},
"listProvisioners": {
'args': [],
'method': 'get',
'name': 'listProvisioners',
'output': 'v1/list-provisioners-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/provisioners',
'stability': 'deprecated',
},
"listTaskGroup": {
'args': ['taskGroupId'],
'method': 'get',
'name': 'listTaskGroup',
'output': 'v1/list-task-group-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/task-group/<taskGroupId>/list',
'stability': 'stable',
},
"listTaskQueues": {
'args': [],
'method': 'get',
'name': 'listTaskQueues',
'output': 'v1/list-taskqueues-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/task-queues',
'stability': 'stable',
},
"listWorkerTypes": {
'args': ['provisionerId'],
'method': 'get',
'name': 'listWorkerTypes',
'output': 'v1/list-workertypes-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/provisioners/<provisionerId>/worker-types',
'stability': 'deprecated',
},
"listWorkers": {
'args': ['provisionerId', 'workerType'],
'method': 'get',
'name': 'listWorkers',
'output': 'v1/list-workers-response.json#',
'query': ['continuationToken', 'limit', 'quarantined'],
'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers',
'stability': 'experimental',
},
"pendingTasks": {
'args': ['taskQueueId'],
'method': 'get',
'name': 'pendingTasks',
'output': 'v1/pending-tasks-response.json#',
'route': '/pending/<taskQueueId>',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"quarantineWorker": {
'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
'input': 'v1/quarantine-worker-request.json#',
'method': 'put',
'name': 'quarantineWorker',
'output': 'v1/worker-response.json#',
'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
'stability': 'experimental',
},
"reclaimTask": {
'args': ['taskId', 'runId'],
'method': 'post',
'name': 'reclaimTask',
'output': 'v1/task-reclaim-response.json#',
'route': '/task/<taskId>/runs/<runId>/reclaim',
'stability': 'stable',
},
"reportCompleted": {
'args': ['taskId', 'runId'],
'method': 'post',
'name': 'reportCompleted',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/runs/<runId>/completed',
'stability': 'stable',
},
"reportException": {
'args': ['taskId', 'runId'],
'input': 'v1/task-exception-request.json#',
'method': 'post',
'name': 'reportException',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/runs/<runId>/exception',
'stability': 'stable',
},
"reportFailed": {
'args': ['taskId', 'runId'],
'method': 'post',
'name': 'reportFailed',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/runs/<runId>/failed',
'stability': 'stable',
},
"rerunTask": {
'args': ['taskId'],
'method': 'post',
'name': 'rerunTask',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/rerun',
'stability': 'stable',
},
"scheduleTask": {
'args': ['taskId'],
'method': 'post',
'name': 'scheduleTask',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/schedule',
'stability': 'stable',
},
"status": {
'args': ['taskId'],
'method': 'get',
'name': 'status',
'output': 'v1/task-status-response.json#',
'route': '/task/<taskId>/status',
'stability': 'stable',
},
"task": {
'args': ['taskId'],
'method': 'get',
'name': 'task',
'output': 'v1/task.json#',
'route': '/task/<taskId>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Queue']