@@ -1,166 +1,116 @@
-def build_image(image_name) {
-  hash = sh(
+def ecr_push(full_name) {
+  aws_account_id = sh(
     returnStdout: true,
-    script: 'git log -1 --format=\'%h\''
+    script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
+    label: 'Get AWS ID'
   ).trim()
-  def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
-  sh(
-    script: "${docker_build} ${image_name} --spec ${full_name}",
-    label: 'Build docker image'
-  )
+
+  def ecr_name = "${aws_account_id}.{{ aws_ecr_url }}/${full_name}"
+  try {
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: '''
+          set -eux
+          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO
+        ''',
+        label: 'Log in to ECR'
+      )
+      sh(
+        script: """
+          set -x
+          docker tag ${full_name} \$AWS_ECR_REPO/${full_name}
+          docker push \$AWS_ECR_REPO/${full_name}
+        """,
+        label: 'Upload image to ECR'
+      )
+    }
+  } finally {
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: 'docker logout $AWS_ECR_REPO',
+        label: 'Clean up login credentials'
+      )
+    }
+  }
+  return ecr_name
+}
+
+def ecr_pull(full_name) {
   aws_account_id = sh(
     returnStdout: true,
     script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
     label: 'Get AWS ID'
   ).trim()

   try {
-    // Use a credential so Jenkins knows to scrub the AWS account ID which is nice
-    // (but so we don't have to rely it being hardcoded in Jenkins)
-    withCredentials([string(
-      credentialsId: 'aws-account-id',
-      variable: '_ACCOUNT_ID_DO_NOT_USE',
-    )]) {
-      withEnv([
-        "AWS_ACCOUNT_ID=${aws_account_id}",
-        'AWS_DEFAULT_REGION=us-west-2']) {
-        sh(
-          script: '''
-            set -x
-            aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com
-          ''',
-          label: 'Log in to ECR'
-        )
-        sh(
-          script: """
-            set -x
-            docker tag ${full_name} \$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name}
-            docker push \$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name}
-          """,
-          label: 'Upload image to ECR'
-        )
-      }
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: '''
+          set -eux
+          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO
+        ''',
+        label: 'Log in to ECR'
+      )
+      sh(
+        script: """
+          set -eux
+          docker pull ${full_name}
+        """,
+        label: 'Pull image from ECR'
+      )
     }
   } finally {
-    sh(
-      script: 'rm -f ~/.docker/config.json',
-      label: 'Clean up login credentials'
-    )
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: 'docker logout $AWS_ECR_REPO',
+        label: 'Clean up login credentials'
+      )
+    }
   }
+}
+
+def build_image(image_name) {
+  hash = sh(
+    returnStdout: true,
+    script: 'git log -1 --format=\'%h\''
+  ).trim()
+  def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
   sh(
-    script: "docker rmi ${full_name}",
-    label: 'Remove docker image'
+    script: "${docker_build} ${image_name} --spec ${full_name}",
+    label: 'Build docker image'
   )
+  return ecr_push(full_name)
 }

+
 def build_docker_images() {
   stage('Docker Image Build') {
-    // TODO in a follow up PR: Find ecr tag and use in subsequent builds
-    parallel 'ci-lint': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_lint')
-        }
-      }
-    }, 'ci-cpu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_cpu')
+    parallel(
+      {% for image in images %}
+      '{{ image.name }}': {
+        node('{{ image.platform }}') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            // We're purposefully not setting the built images here since they
+            // are not yet being uploaded to tlcpack
+            // {{ image.name }} = build_image('{{ image.name }}')
+            build_image('{{ image.name }}')
+          }
         }
-      }
-    }, 'ci-gpu': {
-      node('GPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_gpu')
-        }
-      }
-    }, 'ci-qemu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_qemu')
-        }
-      }
-    }, 'ci-i386': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_i386')
-        }
-      }
-    }, 'ci-arm': {
-      node('ARM') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_arm')
-        }
-      }
-    }, 'ci-wasm': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_wasm')
-        }
-      }
-    }, 'ci-hexagon': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          docker_init('none')
-          init_git()
-          build_image('ci_hexagon')
-        }
-      }
-    }
-  }
-  // // TODO: Once we are able to use the built images, enable this step
-  // // If the docker images changed, we need to run the image build before the lint
-  // // can run since it requires a base docker image. Most of the time the images
-  // // aren't build though so it's faster to use the same node that checks for
-  // // docker changes to run the lint in the usual case.
-  // stage('Sanity Check (re-run)') {
-  //   timeout(time: max_time, unit: 'MINUTES') {
-  //     node('CPU') {
-  //       ws({{ m.per_exec_ws('tvm/sanity') }}) {
-  //         init_git()
-  //         sh (
-  //           script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
-  //           label: 'Run lint',
-  //         )
-  //       }
-  //     }
-  //   }
-  // }
-}
-
-// Run make. First try to do an incremental make from a previous workspace in hope to
-// accelerate the compilation. If something is wrong, clean the workspace and then
-// build from scratch.
-def make(docker_type, path, make_flag) {
-  timeout(time: max_time, unit: 'MINUTES') {
-    try {
-      cmake_build(docker_type, path, make_flag)
-      // always run cpp test when build
-    } catch (hudson.AbortException ae) {
-      // script exited due to user abort, directly throw instead of retry
-      if (ae.getMessage().contains('script returned exit code 143')) {
-        throw ae
-      }
-      echo 'Incremental compilation failed. Fall back to build from scratch'
-      sh (
-        script: "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}",
-        label: 'Clear old cmake workspace',
-      )
-      cmake_build(docker_type, path, make_flag)
-    }
+      },
+      {% endfor %}
+    )
   }
 }
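
For reference, `build_image` now returns the ECR name produced by `ecr_push`, so a later stage can retrieve the exact image that was pushed via `ecr_pull`. Below is a minimal usage sketch under that assumption; the `built_images` map, the `ci_cpu` image name, the second stage, and the `CPU` node label are illustrative only and are not part of this change.

// Illustrative wiring only: assumes this template's helpers (build_image,
// ecr_pull, init_git) and globals (max_time) are in scope, and that the
// node has AWS credentials for `aws sts get-caller-identity`.
def built_images = [:]

stage('Docker Image Build') {
  node('CPU') {
    timeout(time: max_time, unit: 'MINUTES') {
      init_git()
      // build_image() builds, pushes, and returns the full ECR name
      built_images['ci_cpu'] = build_image('ci_cpu')
    }
  }
}

stage('Use built image') {
  node('CPU') {
    timeout(time: max_time, unit: 'MINUTES') {
      // Pull the image back by the name ecr_push() returned earlier
      ecr_pull(built_images['ci_cpu'])
    }
  }
}

This mirrors the commented-out `{{ image.name }} = build_image('{{ image.name }}')` line in the diff, which is left disabled until the built images are actually consumed downstream.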