apiVersion: v1
kind: ConfigMap
metadata:
  # If you want to use this config map by default, name it "artifact-repositories".
  # Otherwise, you can provide a reference to a different config map in
  # `artifactRepositoryRef.configMap`.
  name: artifact-repositories
  annotations:
    # v3.0 and after - if you want to use a specific key, put that key into this annotation.
    workflows.argoproj.io/default-artifact-repository: my-artifact-repository
data:
  # Each key under `data` defines one artifact repository.
  my-artifact-repository: |
    s3:
      bucket: lixd-argo
      endpoint: minio.argo-dev.svc:9000
      insecure: true
      accessKeySecret:
        name: my-s3-secret
        key: accessKey
      secretKeySecret:
        name: my-s3-secret
        key: secretKey
  # You can define multiple repositories, e.g.:
  # my-artifact-repository2: ...
Data 中的每一个 Key 对应一个 Repository, 然后使用 workflows.argoproj.io/default-artifact-repository annotation 来指定默认使用哪个 artifactRepository。
spec:artifactRepositoryRef:configMap:my-artifact-repository# default is "artifact-repositories"key:v2-s3-artifact-repository# default can be set by the `workflows.argoproj.io/default-artifact-repository` annotation in config map.
templates:- name:artifact-exampleinputs:artifacts:- name:my-input-artifactpath:/my-input-artifacts3:endpoint:s3.amazonaws.combucket:my-aws-bucket-namekey:path/in/bucket/my-input-artifact.tgzaccessKeySecret:name:my-aws-s3-credentialskey:accessKeysecretKeySecret:name:my-aws-s3-credentialskey:secretKeyoutputs:artifacts:- name:my-output-artifactpath:/my-output-artifacts3:endpoint:storage.googleapis.combucket:my-gcs-bucket-name# NOTE that, by default, all output artifacts are automatically tarred and# gzipped before saving. So as a best practice, .tgz or .tar.gz# should be incorporated into the key name so the resulting file# has an accurate file extension.key:path/in/bucket/my-output-artifact.tgzaccessKeySecret:name:my-gcs-s3-credentialskey:accessKeysecretKeySecret:name:my-gcs-s3-credentialskey:secretKeyregion:my-GCS-storage-bucket-regioncontainer:image:debian:latestcommand:[sh, -c]args:["cp -r /my-input-artifact /my-output-artifact"]
apiVersion:argoproj.io/v1alpha1kind:Workflowmetadata:generateName:artifact-passing-spec:entrypoint:artifact-exampletemplates:- name:artifact-examplesteps:- - name:generate-artifacttemplate:whalesay- - name:consume-artifacttemplate:print-messagearguments:artifacts:# bind message to the hello-art artifact# generated by the generate-artifact step- name:messagefrom:"{{steps.generate-artifact.outputs.artifacts.hello-art}}"- name:whalesaycontainer:image:docker/whalesay:latestcommand:[sh, -c]args:["cowsay hello world | tee /tmp/hello_world.txt"]outputs:artifacts:# generate hello-art artifact from /tmp/hello_world.txt# artifacts can be directories as well as files- name:hello-artpath:/tmp/hello_world.txt- name:print-messageinputs:artifacts:# unpack the message input artifact# and put it at /tmp/message- name:messagepath:/tmp/messagecontainer:image:alpine:latestcommand:[sh, -c]args:["cat /tmp/message"]
第一个步骤,通过 tee 命令创建了一个文件并通过 outputs 进行输出,由于指定的是 artifacts,因此这个文件会被存储到 S3。
artifact-passing-vzp2r-1469537892:
  boundaryID: artifact-passing-vzp2r
  displayName: generate-artifact
  finishedAt: "2024-03-29T08:42:34Z"
  hostNodeName: lixd-argo
  id: artifact-passing-vzp2r-1469537892
  message: 'Error (exit code 1): You need to configure artifact storage. More
    information on how to do this can be found in the docs: https://argo-workflows.readthedocs.io/en/release-3.5/configure-artifact-repository/'
  name: artifact-passing-vzp2r[0].generate-artifact
artifact 压缩
默认情况下,所有的 artifact 会被打成 tar 包并 gzip 压缩,可以通过 archive 字段来配置压缩情况:
<... snipped ...>outputs:artifacts:# default behavior - tar+gzip default compression.- name:hello-art-1path:/tmp/hello_world.txt# disable archiving entirely - upload the file / directory as is.# this is useful when the container layout matches the desired target repository layout. - name:hello-art-2path:/tmp/hello_world.txtarchive:none:{}# customize the compression behavior (disabling it here).# this is useful for files with varying compression benefits, # e.g. disabling compression for a cached build workspace and large binaries, # or increasing compression for "perfect" textual data - like a json/xml export of a large database.- name:hello-art-3path:/tmp/hello_world.txtarchive:tar:# no compression (also accepts the standard gzip 1 to 9 values)compressionLevel:0<... snipped ...>
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: artifact-gc-
spec:
  entrypoint: main
  artifactGC:
    strategy: OnWorkflowDeletion # default Strategy set here applies to all Artifacts by default
  templates:
    - name: main
      container:
        image: argoproj/argosay:v2
        command:
          - sh
          - -c
        args:
          - |
            echo "can throw this away" > /tmp/temporary-artifact.txt
            echo "keep this" > /tmp/keep-this.txt
      outputs:
        artifacts:
          - name: temporary-artifact
            path: /tmp/temporary-artifact.txt
            s3:
              key: temporary-artifact.txt
          - name: keep-this
            path: /tmp/keep-this.txt
            s3:
              key: keep-this.txt
            artifactGC:
              strategy: Never # optional override for an Artifact
核心部分如下:
spec:entrypoint:main# 为 Workflow 中的所有 artifact 统一配置artifactGC:strategy:OnWorkflowDeletion # default Strategy set here applies to all Artifacts by default# 单独指定 artifact 的回收策略outputs:artifacts:- name:temporary-artifactartifactGC:strategy:Never # optional override for an Artifact
apiVersion:argoproj.io/v1alpha1kind:Workflowmetadata:generateName:hardwired-artifact-spec:entrypoint:hardwired-artifacttemplates:- name:hardwired-artifactinputs:artifacts:# Check out the main branch of the argo repo and place it at /src# revision can be anything that git checkout accepts: branch, commit, tag, etc.- name:argo-sourcepath:/srcgit:repo:https://github.com/argoproj/argo-workflows.gitrevision:"main"# Download kubectl 1.8.0 and place it at /bin/kubectl- name:kubectlpath:/bin/kubectlmode:0755http:url:https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl# Copy an s3 compatible artifact repository bucket (such as AWS, GCS and MinIO) and place it at /s3- name:objectspath:/s3s3:endpoint:storage.googleapis.combucket:my-bucket-namekey:path/in/bucketaccessKeySecret:name:my-s3-credentialskey:accessKeysecretKeySecret:name:my-s3-credentialskey:secretKeycontainer:image:debiancommand:[sh, -c]args:["ls -l /src /bin/kubectl /s3"]