diff --git a/.gitignore b/.gitignore index fafc6168..d5499d1e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,27 +1,170 @@ -.envrc - -.DS_Store -*.html -search_index.json -!google34cb7dffb22dd501.html - -blog/concourse.zip -blog/assets/css/* -!blog/assets/css/*.gitkeep -blog/assets/images/* -!blog/assets/images/.gitkeep -blog/assets/js/* -!blog/assets/js/.gitkeep - -!discourse/*/*.html - -### Node ### -node_modules -yarn-error.log - -### Elm ### -# elm-package generated files -elm-stuff -# elm-repl generated files -repl-temp-* -elm.js +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +.idea/ +*.iml + +mkdocs.yml +/node_modules/ + +overrides/assets/javascripts/*.min.js* +overrides/assets/stylesheets/*.css* \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..31fe20d7 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "docs/libs/examples"] + path = docs/libs/examples + url = https://github.com/concourse/examples.git diff --git a/Makefile b/.old/Makefile similarity index 100% rename from Makefile rename to .old/Makefile diff --git a/Staticfile b/.old/Staticfile similarity index 100% rename from Staticfile rename to .old/Staticfile diff --git a/_config.yml b/.old/_config.yml similarity index 100% rename from _config.yml rename to .old/_config.yml diff --git a/blog/assets/css/.gitkeep b/.old/blog/assets/css/.gitkeep similarity index 100% rename from blog/assets/css/.gitkeep rename to .old/blog/assets/css/.gitkeep diff --git a/blog/assets/images/.gitkeep b/.old/blog/assets/images/.gitkeep similarity index 100% rename from blog/assets/images/.gitkeep rename to .old/blog/assets/images/.gitkeep diff --git a/blog/assets/js/.gitkeep b/.old/blog/assets/js/.gitkeep similarity index 100% rename from blog/assets/js/.gitkeep rename to .old/blog/assets/js/.gitkeep diff --git a/blog/author.hbs b/.old/blog/author.hbs similarity index 100% rename from blog/author.hbs rename to .old/blog/author.hbs diff --git a/blog/default.hbs b/.old/blog/default.hbs similarity index 100% rename from blog/default.hbs rename to .old/blog/default.hbs diff --git a/blog/index.hbs b/.old/blog/index.hbs similarity index 100% rename from blog/index.hbs rename to .old/blog/index.hbs diff --git a/blog/package.json b/.old/blog/package.json similarity index 100% rename from blog/package.json rename to .old/blog/package.json diff --git a/blog/partials/navigation.hbs b/.old/blog/partials/navigation.hbs similarity index 100% rename from blog/partials/navigation.hbs rename to .old/blog/partials/navigation.hbs diff 
--git a/blog/partials/post-meta.hbs b/.old/blog/partials/post-meta.hbs similarity index 100% rename from blog/partials/post-meta.hbs rename to .old/blog/partials/post-meta.hbs diff --git a/blog/partials/posts.hbs b/.old/blog/partials/posts.hbs similarity index 100% rename from blog/partials/posts.hbs rename to .old/blog/partials/posts.hbs diff --git a/blog/post.hbs b/.old/blog/post.hbs similarity index 100% rename from blog/post.hbs rename to .old/blog/post.hbs diff --git a/blog/tag.hbs b/.old/blog/tag.hbs similarity index 100% rename from blog/tag.hbs rename to .old/blog/tag.hbs diff --git a/.old/ci/build b/.old/ci/build new file mode 100644 index 00000000..a60f170d --- /dev/null +++ b/.old/ci/build @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e -x + +export GOPATH=$PWD/gopath +export PATH=$PWD/gopath/bin:$PATH + +git config --global user.email "concourseteam+concourse-github-bot@gmail.com" +git config --global user.name "Concourse Bot" + +pushd docs + if [ -e .git ] && git remote | grep origin >/dev/null; then + # undo single-branch clone and fetch gh-pages + git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" + git fetch origin gh-pages + + ref=$(git rev-parse HEAD) + git checkout gh-pages + git pull + git merge --no-edit $ref + + # clear out old content + git rm *.html || true + git rm search_index.json || true + fi + + ./scripts/build + + if [ -e .git ]; then + git add -A + git commit --allow-empty -m "build" + fi +popd diff --git a/.old/ci/build.yml b/.old/ci/build.yml new file mode 100644 index 00000000..341f67be --- /dev/null +++ b/.old/ci/build.yml @@ -0,0 +1,23 @@ +--- +platform: linux + +image_resource: + type: registry-image + source: {repository: golang} + +inputs: +- name: docs + +caches: +- path: gopath + +outputs: +- name: built-docs + path: docs + +params: + ANALYTICS_ID: + GITHUB_TOKEN: + +run: + path: docs/ci/build diff --git a/.old/ci/prs-pipeline.yml b/.old/ci/prs-pipeline.yml new file mode 100644 index 00000000..e3be6d33 --- 
/dev/null +++ b/.old/ci/prs-pipeline.yml @@ -0,0 +1,51 @@ +--- +resource_types: +- name: pull-request + type: registry-image + source: {repository: teliaoss/github-pr-resource} + +resources: +- name: docs-master + type: git + icon: github + source: + uri: https://github.com/concourse/docs + +- name: docs-pr + type: pull-request + icon: source-pull + source: + repository: concourse/docs + access_token: ((pull_requests_access_token)) + +jobs: +- name: build + public: true + on_failure: + put: docs-pr + inputs: [docs-pr] + params: {path: docs-pr, status: failure, context: build} + tags: [pr] + on_success: + put: docs-pr + inputs: [docs-pr] + params: {path: docs-pr, status: success, context: build} + tags: [pr] + plan: + - in_parallel: + - get: docs-pr + trigger: true + version: every + tags: [pr] + - get: docs-master + tags: [pr] + - put: docs-pr + params: {path: docs-pr, status: pending, context: build} + tags: [pr] + - task: build + file: docs-master/ci/build.yml + input_mapping: {docs: docs-pr} + params: + GITHUB_TOKEN: ((concourse_github_dummy.access_token)) + tags: [pr] + diff --git a/css/assets/iosevka-bold.ttf b/.old/css/assets/iosevka-bold.ttf similarity index 100% rename from css/assets/iosevka-bold.ttf rename to .old/css/assets/iosevka-bold.ttf diff --git a/css/assets/iosevka-bold.woff b/.old/css/assets/iosevka-bold.woff similarity index 100% rename from css/assets/iosevka-bold.woff rename to .old/css/assets/iosevka-bold.woff diff --git a/css/assets/iosevka-bold.woff2 b/.old/css/assets/iosevka-bold.woff2 similarity index 100% rename from css/assets/iosevka-bold.woff2 rename to .old/css/assets/iosevka-bold.woff2 diff --git a/css/assets/iosevka-bolditalic.ttf b/.old/css/assets/iosevka-bolditalic.ttf similarity index 100% rename from css/assets/iosevka-bolditalic.ttf rename to .old/css/assets/iosevka-bolditalic.ttf diff --git a/css/assets/iosevka-bolditalic.woff b/.old/css/assets/iosevka-bolditalic.woff similarity index 100% rename from 
css/assets/iosevka-bolditalic.woff rename to .old/css/assets/iosevka-bolditalic.woff diff --git a/css/assets/iosevka-bolditalic.woff2 b/.old/css/assets/iosevka-bolditalic.woff2 similarity index 100% rename from css/assets/iosevka-bolditalic.woff2 rename to .old/css/assets/iosevka-bolditalic.woff2 diff --git a/css/assets/iosevka-boldoblique.ttf b/.old/css/assets/iosevka-boldoblique.ttf similarity index 100% rename from css/assets/iosevka-boldoblique.ttf rename to .old/css/assets/iosevka-boldoblique.ttf diff --git a/css/assets/iosevka-boldoblique.woff b/.old/css/assets/iosevka-boldoblique.woff similarity index 100% rename from css/assets/iosevka-boldoblique.woff rename to .old/css/assets/iosevka-boldoblique.woff diff --git a/css/assets/iosevka-boldoblique.woff2 b/.old/css/assets/iosevka-boldoblique.woff2 similarity index 100% rename from css/assets/iosevka-boldoblique.woff2 rename to .old/css/assets/iosevka-boldoblique.woff2 diff --git a/css/assets/iosevka-extralight.ttf b/.old/css/assets/iosevka-extralight.ttf similarity index 100% rename from css/assets/iosevka-extralight.ttf rename to .old/css/assets/iosevka-extralight.ttf diff --git a/css/assets/iosevka-extralight.woff b/.old/css/assets/iosevka-extralight.woff similarity index 100% rename from css/assets/iosevka-extralight.woff rename to .old/css/assets/iosevka-extralight.woff diff --git a/css/assets/iosevka-extralight.woff2 b/.old/css/assets/iosevka-extralight.woff2 similarity index 100% rename from css/assets/iosevka-extralight.woff2 rename to .old/css/assets/iosevka-extralight.woff2 diff --git a/css/assets/iosevka-extralightitalic.ttf b/.old/css/assets/iosevka-extralightitalic.ttf similarity index 100% rename from css/assets/iosevka-extralightitalic.ttf rename to .old/css/assets/iosevka-extralightitalic.ttf diff --git a/css/assets/iosevka-extralightitalic.woff b/.old/css/assets/iosevka-extralightitalic.woff similarity index 100% rename from css/assets/iosevka-extralightitalic.woff rename to 
.old/css/assets/iosevka-extralightitalic.woff diff --git a/css/assets/iosevka-extralightitalic.woff2 b/.old/css/assets/iosevka-extralightitalic.woff2 similarity index 100% rename from css/assets/iosevka-extralightitalic.woff2 rename to .old/css/assets/iosevka-extralightitalic.woff2 diff --git a/css/assets/iosevka-extralightoblique.ttf b/.old/css/assets/iosevka-extralightoblique.ttf similarity index 100% rename from css/assets/iosevka-extralightoblique.ttf rename to .old/css/assets/iosevka-extralightoblique.ttf diff --git a/css/assets/iosevka-extralightoblique.woff b/.old/css/assets/iosevka-extralightoblique.woff similarity index 100% rename from css/assets/iosevka-extralightoblique.woff rename to .old/css/assets/iosevka-extralightoblique.woff diff --git a/css/assets/iosevka-extralightoblique.woff2 b/.old/css/assets/iosevka-extralightoblique.woff2 similarity index 100% rename from css/assets/iosevka-extralightoblique.woff2 rename to .old/css/assets/iosevka-extralightoblique.woff2 diff --git a/css/assets/iosevka-heavy.ttf b/.old/css/assets/iosevka-heavy.ttf similarity index 100% rename from css/assets/iosevka-heavy.ttf rename to .old/css/assets/iosevka-heavy.ttf diff --git a/css/assets/iosevka-heavy.woff b/.old/css/assets/iosevka-heavy.woff similarity index 100% rename from css/assets/iosevka-heavy.woff rename to .old/css/assets/iosevka-heavy.woff diff --git a/css/assets/iosevka-heavy.woff2 b/.old/css/assets/iosevka-heavy.woff2 similarity index 100% rename from css/assets/iosevka-heavy.woff2 rename to .old/css/assets/iosevka-heavy.woff2 diff --git a/css/assets/iosevka-heavyitalic.ttf b/.old/css/assets/iosevka-heavyitalic.ttf similarity index 100% rename from css/assets/iosevka-heavyitalic.ttf rename to .old/css/assets/iosevka-heavyitalic.ttf diff --git a/css/assets/iosevka-heavyitalic.woff b/.old/css/assets/iosevka-heavyitalic.woff similarity index 100% rename from css/assets/iosevka-heavyitalic.woff rename to .old/css/assets/iosevka-heavyitalic.woff diff --git 
a/css/assets/iosevka-heavyitalic.woff2 b/.old/css/assets/iosevka-heavyitalic.woff2 similarity index 100% rename from css/assets/iosevka-heavyitalic.woff2 rename to .old/css/assets/iosevka-heavyitalic.woff2 diff --git a/css/assets/iosevka-heavyoblique.ttf b/.old/css/assets/iosevka-heavyoblique.ttf similarity index 100% rename from css/assets/iosevka-heavyoblique.ttf rename to .old/css/assets/iosevka-heavyoblique.ttf diff --git a/css/assets/iosevka-heavyoblique.woff b/.old/css/assets/iosevka-heavyoblique.woff similarity index 100% rename from css/assets/iosevka-heavyoblique.woff rename to .old/css/assets/iosevka-heavyoblique.woff diff --git a/css/assets/iosevka-heavyoblique.woff2 b/.old/css/assets/iosevka-heavyoblique.woff2 similarity index 100% rename from css/assets/iosevka-heavyoblique.woff2 rename to .old/css/assets/iosevka-heavyoblique.woff2 diff --git a/css/assets/iosevka-italic.ttf b/.old/css/assets/iosevka-italic.ttf similarity index 100% rename from css/assets/iosevka-italic.ttf rename to .old/css/assets/iosevka-italic.ttf diff --git a/css/assets/iosevka-italic.woff b/.old/css/assets/iosevka-italic.woff similarity index 100% rename from css/assets/iosevka-italic.woff rename to .old/css/assets/iosevka-italic.woff diff --git a/css/assets/iosevka-italic.woff2 b/.old/css/assets/iosevka-italic.woff2 similarity index 100% rename from css/assets/iosevka-italic.woff2 rename to .old/css/assets/iosevka-italic.woff2 diff --git a/css/assets/iosevka-light.ttf b/.old/css/assets/iosevka-light.ttf similarity index 100% rename from css/assets/iosevka-light.ttf rename to .old/css/assets/iosevka-light.ttf diff --git a/css/assets/iosevka-light.woff b/.old/css/assets/iosevka-light.woff similarity index 100% rename from css/assets/iosevka-light.woff rename to .old/css/assets/iosevka-light.woff diff --git a/css/assets/iosevka-light.woff2 b/.old/css/assets/iosevka-light.woff2 similarity index 100% rename from css/assets/iosevka-light.woff2 rename to 
.old/css/assets/iosevka-light.woff2 diff --git a/css/assets/iosevka-lightitalic.ttf b/.old/css/assets/iosevka-lightitalic.ttf similarity index 100% rename from css/assets/iosevka-lightitalic.ttf rename to .old/css/assets/iosevka-lightitalic.ttf diff --git a/css/assets/iosevka-lightitalic.woff b/.old/css/assets/iosevka-lightitalic.woff similarity index 100% rename from css/assets/iosevka-lightitalic.woff rename to .old/css/assets/iosevka-lightitalic.woff diff --git a/css/assets/iosevka-lightitalic.woff2 b/.old/css/assets/iosevka-lightitalic.woff2 similarity index 100% rename from css/assets/iosevka-lightitalic.woff2 rename to .old/css/assets/iosevka-lightitalic.woff2 diff --git a/css/assets/iosevka-lightoblique.ttf b/.old/css/assets/iosevka-lightoblique.ttf similarity index 100% rename from css/assets/iosevka-lightoblique.ttf rename to .old/css/assets/iosevka-lightoblique.ttf diff --git a/css/assets/iosevka-lightoblique.woff b/.old/css/assets/iosevka-lightoblique.woff similarity index 100% rename from css/assets/iosevka-lightoblique.woff rename to .old/css/assets/iosevka-lightoblique.woff diff --git a/css/assets/iosevka-lightoblique.woff2 b/.old/css/assets/iosevka-lightoblique.woff2 similarity index 100% rename from css/assets/iosevka-lightoblique.woff2 rename to .old/css/assets/iosevka-lightoblique.woff2 diff --git a/css/assets/iosevka-medium.ttf b/.old/css/assets/iosevka-medium.ttf similarity index 100% rename from css/assets/iosevka-medium.ttf rename to .old/css/assets/iosevka-medium.ttf diff --git a/css/assets/iosevka-medium.woff b/.old/css/assets/iosevka-medium.woff similarity index 100% rename from css/assets/iosevka-medium.woff rename to .old/css/assets/iosevka-medium.woff diff --git a/css/assets/iosevka-medium.woff2 b/.old/css/assets/iosevka-medium.woff2 similarity index 100% rename from css/assets/iosevka-medium.woff2 rename to .old/css/assets/iosevka-medium.woff2 diff --git a/css/assets/iosevka-mediumitalic.ttf b/.old/css/assets/iosevka-mediumitalic.ttf 
similarity index 100% rename from css/assets/iosevka-mediumitalic.ttf rename to .old/css/assets/iosevka-mediumitalic.ttf diff --git a/css/assets/iosevka-mediumitalic.woff b/.old/css/assets/iosevka-mediumitalic.woff similarity index 100% rename from css/assets/iosevka-mediumitalic.woff rename to .old/css/assets/iosevka-mediumitalic.woff diff --git a/css/assets/iosevka-mediumitalic.woff2 b/.old/css/assets/iosevka-mediumitalic.woff2 similarity index 100% rename from css/assets/iosevka-mediumitalic.woff2 rename to .old/css/assets/iosevka-mediumitalic.woff2 diff --git a/css/assets/iosevka-mediumoblique.ttf b/.old/css/assets/iosevka-mediumoblique.ttf similarity index 100% rename from css/assets/iosevka-mediumoblique.ttf rename to .old/css/assets/iosevka-mediumoblique.ttf diff --git a/css/assets/iosevka-mediumoblique.woff b/.old/css/assets/iosevka-mediumoblique.woff similarity index 100% rename from css/assets/iosevka-mediumoblique.woff rename to .old/css/assets/iosevka-mediumoblique.woff diff --git a/css/assets/iosevka-mediumoblique.woff2 b/.old/css/assets/iosevka-mediumoblique.woff2 similarity index 100% rename from css/assets/iosevka-mediumoblique.woff2 rename to .old/css/assets/iosevka-mediumoblique.woff2 diff --git a/css/assets/iosevka-oblique.ttf b/.old/css/assets/iosevka-oblique.ttf similarity index 100% rename from css/assets/iosevka-oblique.ttf rename to .old/css/assets/iosevka-oblique.ttf diff --git a/css/assets/iosevka-oblique.woff b/.old/css/assets/iosevka-oblique.woff similarity index 100% rename from css/assets/iosevka-oblique.woff rename to .old/css/assets/iosevka-oblique.woff diff --git a/css/assets/iosevka-oblique.woff2 b/.old/css/assets/iosevka-oblique.woff2 similarity index 100% rename from css/assets/iosevka-oblique.woff2 rename to .old/css/assets/iosevka-oblique.woff2 diff --git a/css/assets/iosevka-regular.ttf b/.old/css/assets/iosevka-regular.ttf similarity index 100% rename from css/assets/iosevka-regular.ttf rename to 
.old/css/assets/iosevka-regular.ttf diff --git a/css/assets/iosevka-regular.woff b/.old/css/assets/iosevka-regular.woff similarity index 100% rename from css/assets/iosevka-regular.woff rename to .old/css/assets/iosevka-regular.woff diff --git a/css/assets/iosevka-regular.woff2 b/.old/css/assets/iosevka-regular.woff2 similarity index 100% rename from css/assets/iosevka-regular.woff2 rename to .old/css/assets/iosevka-regular.woff2 diff --git a/css/assets/iosevka-thin.ttf b/.old/css/assets/iosevka-thin.ttf similarity index 100% rename from css/assets/iosevka-thin.ttf rename to .old/css/assets/iosevka-thin.ttf diff --git a/css/assets/iosevka-thin.woff b/.old/css/assets/iosevka-thin.woff similarity index 100% rename from css/assets/iosevka-thin.woff rename to .old/css/assets/iosevka-thin.woff diff --git a/css/assets/iosevka-thin.woff2 b/.old/css/assets/iosevka-thin.woff2 similarity index 100% rename from css/assets/iosevka-thin.woff2 rename to .old/css/assets/iosevka-thin.woff2 diff --git a/css/assets/iosevka-thinitalic.ttf b/.old/css/assets/iosevka-thinitalic.ttf similarity index 100% rename from css/assets/iosevka-thinitalic.ttf rename to .old/css/assets/iosevka-thinitalic.ttf diff --git a/css/assets/iosevka-thinitalic.woff b/.old/css/assets/iosevka-thinitalic.woff similarity index 100% rename from css/assets/iosevka-thinitalic.woff rename to .old/css/assets/iosevka-thinitalic.woff diff --git a/css/assets/iosevka-thinitalic.woff2 b/.old/css/assets/iosevka-thinitalic.woff2 similarity index 100% rename from css/assets/iosevka-thinitalic.woff2 rename to .old/css/assets/iosevka-thinitalic.woff2 diff --git a/css/assets/iosevka-thinoblique.ttf b/.old/css/assets/iosevka-thinoblique.ttf similarity index 100% rename from css/assets/iosevka-thinoblique.ttf rename to .old/css/assets/iosevka-thinoblique.ttf diff --git a/css/assets/iosevka-thinoblique.woff b/.old/css/assets/iosevka-thinoblique.woff similarity index 100% rename from css/assets/iosevka-thinoblique.woff rename to 
.old/css/assets/iosevka-thinoblique.woff diff --git a/css/assets/iosevka-thinoblique.woff2 b/.old/css/assets/iosevka-thinoblique.woff2 similarity index 100% rename from css/assets/iosevka-thinoblique.woff2 rename to .old/css/assets/iosevka-thinoblique.woff2 diff --git a/css/blog.css b/.old/css/blog.css similarity index 100% rename from css/blog.css rename to .old/css/blog.css diff --git a/css/booklit.css b/.old/css/booklit.css similarity index 100% rename from css/booklit.css rename to .old/css/booklit.css diff --git a/css/iosevka.css b/.old/css/iosevka.css similarity index 97% rename from css/iosevka.css rename to .old/css/iosevka.css index 6eaf2848..5d43d85d 100644 --- a/css/iosevka.css +++ b/.old/css/iosevka.css @@ -1,116 +1,116 @@ -@font-face { - font-family: 'Iosevka'; - font-weight: 100; - src: url('assets/iosevka-thin.woff2') format('woff2'), url('assets/iosevka-thin.woff') format('woff'), url('assets/iosevka-thin.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 100; - font-style: italic; - src: url('assets/iosevka-thinitalic.woff2') format('woff2'), url('assets/iosevka-thinitalic.woff') format('woff'), url('assets/iosevka-thinitalic.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 100; - font-style: oblique; - src: url('assets/iosevka-thinoblique.woff2') format('woff2'), url('assets/iosevka-thinoblique.woff') format('woff'), url('assets/iosevka-thinoblique.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 200; - src: url('assets/iosevka-extralight.woff2') format('woff2'), url('assets/iosevka-extralight.woff') format('woff'), url('assets/iosevka-extralight.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 200; - font-style: italic; - src: url('assets/iosevka-extralightitalic.woff2') format('woff2'), url('assets/iosevka-extralightitalic.woff') format('woff'), url('assets/iosevka-extralightitalic.ttf') format('truetype'); -} 
-@font-face { - font-family: 'Iosevka'; - font-weight: 200; - font-style: oblique; - src: url('assets/iosevka-extralightoblique.woff2') format('woff2'), url('assets/iosevka-extralightoblique.woff') format('woff'), url('assets/iosevka-extralightoblique.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 300; - src: url('assets/iosevka-light.woff2') format('woff2'), url('assets/iosevka-light.woff') format('woff'), url('assets/iosevka-light.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 300; - font-style: italic; - src: url('assets/iosevka-lightitalic.woff2') format('woff2'), url('assets/iosevka-lightitalic.woff') format('woff'), url('assets/iosevka-lightitalic.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 300; - font-style: oblique; - src: url('assets/iosevka-lightoblique.woff2') format('woff2'), url('assets/iosevka-lightoblique.woff') format('woff'), url('assets/iosevka-lightoblique.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - src: url('assets/iosevka-regular.woff2') format('woff2'), url('assets/iosevka-regular.woff') format('woff'), url('assets/iosevka-regular.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: italic; - src: url('assets/iosevka-italic.woff2') format('woff2'), url('assets/iosevka-italic.woff') format('woff'), url('assets/iosevka-italic.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: oblique; - src: url('assets/iosevka-oblique.woff2') format('woff2'), url('assets/iosevka-oblique.woff') format('woff'), url('assets/iosevka-oblique.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 500; - src: url('assets/iosevka-medium.woff2') format('woff2'), url('assets/iosevka-medium.woff') format('woff'), url('assets/iosevka-medium.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: italic; - font-weight: 
500; - src: url('assets/iosevka-mediumitalic.woff2') format('woff2'), url('assets/iosevka-mediumitalic.woff') format('woff'), url('assets/iosevka-mediumitalic.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: oblique; - font-weight: 500; - src: url('assets/iosevka-mediumoblique.woff2') format('woff2'), url('assets/iosevka-mediumoblique.woff') format('woff'), url('assets/iosevka-mediumoblique.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 700; - src: url('assets/iosevka-bold.woff2') format('woff2'), url('assets/iosevka-bold.woff') format('woff'), url('assets/iosevka-bold.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: italic; - font-weight: 700; - src: url('assets/iosevka-bolditalic.woff2') format('woff2'), url('assets/iosevka-bolditalic.woff') format('woff'), url('assets/iosevka-bolditalic.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: oblique; - font-weight: 700; - src: url('assets/iosevka-boldoblique.woff2') format('woff2'), url('assets/iosevka-boldoblique.woff') format('woff'), url('assets/iosevka-boldoblique.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-weight: 900; - src: url('assets/iosevka-heavy.woff2') format('woff2'), url('assets/iosevka-heavy.woff') format('woff'), url('assets/iosevka-heavy.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: italic; - font-weight: 900; - src: url('assets/iosevka-heavyitalic.woff2') format('woff2'), url('assets/iosevka-heavyitalic.woff') format('woff'), url('assets/iosevka-heavyitalic.ttf') format('truetype'); -} -@font-face { - font-family: 'Iosevka'; - font-style: oblique; - font-weight: 900; - src: url('assets/iosevka-heavyoblique.woff2') format('woff2'), url('assets/iosevka-heavyoblique.woff') format('woff'), url('assets/iosevka-heavyoblique.ttf') format('truetype'); +@font-face { + font-family: 'Iosevka'; + font-weight: 
100; + src: url('assets/iosevka-thin.woff2') format('woff2'), url('assets/iosevka-thin.woff') format('woff'), url('assets/iosevka-thin.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 100; + font-style: italic; + src: url('assets/iosevka-thinitalic.woff2') format('woff2'), url('assets/iosevka-thinitalic.woff') format('woff'), url('assets/iosevka-thinitalic.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 100; + font-style: oblique; + src: url('assets/iosevka-thinoblique.woff2') format('woff2'), url('assets/iosevka-thinoblique.woff') format('woff'), url('assets/iosevka-thinoblique.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 200; + src: url('assets/iosevka-extralight.woff2') format('woff2'), url('assets/iosevka-extralight.woff') format('woff'), url('assets/iosevka-extralight.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 200; + font-style: italic; + src: url('assets/iosevka-extralightitalic.woff2') format('woff2'), url('assets/iosevka-extralightitalic.woff') format('woff'), url('assets/iosevka-extralightitalic.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 200; + font-style: oblique; + src: url('assets/iosevka-extralightoblique.woff2') format('woff2'), url('assets/iosevka-extralightoblique.woff') format('woff'), url('assets/iosevka-extralightoblique.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 300; + src: url('assets/iosevka-light.woff2') format('woff2'), url('assets/iosevka-light.woff') format('woff'), url('assets/iosevka-light.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 300; + font-style: italic; + src: url('assets/iosevka-lightitalic.woff2') format('woff2'), url('assets/iosevka-lightitalic.woff') format('woff'), url('assets/iosevka-lightitalic.ttf') format('truetype'); +} +@font-face { + font-family: 
'Iosevka'; + font-weight: 300; + font-style: oblique; + src: url('assets/iosevka-lightoblique.woff2') format('woff2'), url('assets/iosevka-lightoblique.woff') format('woff'), url('assets/iosevka-lightoblique.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + src: url('assets/iosevka-regular.woff2') format('woff2'), url('assets/iosevka-regular.woff') format('woff'), url('assets/iosevka-regular.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: italic; + src: url('assets/iosevka-italic.woff2') format('woff2'), url('assets/iosevka-italic.woff') format('woff'), url('assets/iosevka-italic.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: oblique; + src: url('assets/iosevka-oblique.woff2') format('woff2'), url('assets/iosevka-oblique.woff') format('woff'), url('assets/iosevka-oblique.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 500; + src: url('assets/iosevka-medium.woff2') format('woff2'), url('assets/iosevka-medium.woff') format('woff'), url('assets/iosevka-medium.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: italic; + font-weight: 500; + src: url('assets/iosevka-mediumitalic.woff2') format('woff2'), url('assets/iosevka-mediumitalic.woff') format('woff'), url('assets/iosevka-mediumitalic.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: oblique; + font-weight: 500; + src: url('assets/iosevka-mediumoblique.woff2') format('woff2'), url('assets/iosevka-mediumoblique.woff') format('woff'), url('assets/iosevka-mediumoblique.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 700; + src: url('assets/iosevka-bold.woff2') format('woff2'), url('assets/iosevka-bold.woff') format('woff'), url('assets/iosevka-bold.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: italic; + font-weight: 700; + src: 
url('assets/iosevka-bolditalic.woff2') format('woff2'), url('assets/iosevka-bolditalic.woff') format('woff'), url('assets/iosevka-bolditalic.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: oblique; + font-weight: 700; + src: url('assets/iosevka-boldoblique.woff2') format('woff2'), url('assets/iosevka-boldoblique.woff') format('woff'), url('assets/iosevka-boldoblique.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-weight: 900; + src: url('assets/iosevka-heavy.woff2') format('woff2'), url('assets/iosevka-heavy.woff') format('woff'), url('assets/iosevka-heavy.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: italic; + font-weight: 900; + src: url('assets/iosevka-heavyitalic.woff2') format('woff2'), url('assets/iosevka-heavyitalic.woff') format('woff'), url('assets/iosevka-heavyitalic.ttf') format('truetype'); +} +@font-face { + font-family: 'Iosevka'; + font-style: oblique; + font-weight: 900; + src: url('assets/iosevka-heavyoblique.woff2') format('woff2'), url('assets/iosevka-heavyoblique.woff') format('woff'), url('assets/iosevka-heavyoblique.ttf') format('truetype'); } \ No newline at end of file diff --git a/css/normalize.css b/.old/css/normalize.css similarity index 100% rename from css/normalize.css rename to .old/css/normalize.css diff --git a/css/pipeline.css b/.old/css/pipeline.css similarity index 100% rename from css/pipeline.css rename to .old/css/pipeline.css diff --git a/css/prism.css b/.old/css/prism.css similarity index 100% rename from css/prism.css rename to .old/css/prism.css diff --git a/elm.json b/.old/elm.json similarity index 100% rename from elm.json rename to .old/elm.json diff --git a/elm/Query.elm b/.old/elm/Query.elm similarity index 100% rename from elm/Query.elm rename to .old/elm/Query.elm diff --git a/elm/Search.elm b/.old/elm/Search.elm similarity index 100% rename from elm/Search.elm rename to .old/elm/Search.elm diff --git a/favicon.ico 
b/.old/favicon.ico similarity index 100% rename from favicon.ico rename to .old/favicon.ico diff --git a/go.mod b/.old/go.mod similarity index 100% rename from go.mod rename to .old/go.mod diff --git a/go.sum b/.old/go.sum similarity index 100% rename from go.sum rename to .old/go.sum diff --git a/go/docs/colors.go b/.old/go/docs/colors.go similarity index 100% rename from go/docs/colors.go rename to .old/go/docs/colors.go diff --git a/go/docs/db.go b/.old/go/docs/db.go similarity index 100% rename from go/docs/db.go rename to .old/go/docs/db.go diff --git a/go/docs/examples.go b/.old/go/docs/examples.go similarity index 100% rename from go/docs/examples.go rename to .old/go/docs/examples.go diff --git a/go/docs/github.go b/.old/go/docs/github.go similarity index 100% rename from go/docs/github.go rename to .old/go/docs/github.go diff --git a/go/docs/metrics.go b/.old/go/docs/metrics.go similarity index 100% rename from go/docs/metrics.go rename to .old/go/docs/metrics.go diff --git a/go/docs/plugin.go b/.old/go/docs/plugin.go similarity index 100% rename from go/docs/plugin.go rename to .old/go/docs/plugin.go diff --git a/go/docs/rfcs.go b/.old/go/docs/rfcs.go similarity index 100% rename from go/docs/rfcs.go rename to .old/go/docs/rfcs.go diff --git a/go/docs/schema.go b/.old/go/docs/schema.go similarity index 100% rename from go/docs/schema.go rename to .old/go/docs/schema.go diff --git a/go/docs/splash.go b/.old/go/docs/splash.go similarity index 100% rename from go/docs/splash.go rename to .old/go/docs/splash.go diff --git a/go/resource-type-list/data.go b/.old/go/resource-type-list/data.go similarity index 100% rename from go/resource-type-list/data.go rename to .old/go/resource-type-list/data.go diff --git a/go/resource-type-list/plugin.go b/.old/go/resource-type-list/plugin.go similarity index 100% rename from go/resource-type-list/plugin.go rename to .old/go/resource-type-list/plugin.go diff --git a/html/banner.tmpl b/.old/html/banner.tmpl similarity index 
100% rename from html/banner.tmpl rename to .old/html/banner.tmpl diff --git a/html/basic-pipeline-svg.tmpl b/.old/html/basic-pipeline-svg.tmpl similarity index 100% rename from html/basic-pipeline-svg.tmpl rename to .old/html/basic-pipeline-svg.tmpl diff --git a/html/blue-button.tmpl b/.old/html/blue-button.tmpl similarity index 100% rename from html/blue-button.tmpl rename to .old/html/blue-button.tmpl diff --git a/html/button.tmpl b/.old/html/button.tmpl similarity index 100% rename from html/button.tmpl rename to .old/html/button.tmpl diff --git a/html/code-block.tmpl b/.old/html/code-block.tmpl similarity index 100% rename from html/code-block.tmpl rename to .old/html/code-block.tmpl diff --git a/html/code-lines.tmpl b/.old/html/code-lines.tmpl similarity index 100% rename from html/code-lines.tmpl rename to .old/html/code-lines.tmpl diff --git a/html/code-window.tmpl b/.old/html/code-window.tmpl similarity index 100% rename from html/code-window.tmpl rename to .old/html/code-window.tmpl diff --git a/html/coffee.tmpl b/.old/html/coffee.tmpl similarity index 100% rename from html/coffee.tmpl rename to .old/html/coffee.tmpl diff --git a/html/community-image.tmpl b/.old/html/community-image.tmpl similarity index 100% rename from html/community-image.tmpl rename to .old/html/community-image.tmpl diff --git a/html/definition.tmpl b/.old/html/definition.tmpl similarity index 100% rename from html/definition.tmpl rename to .old/html/definition.tmpl diff --git a/html/diagram.tmpl b/.old/html/diagram.tmpl similarity index 100% rename from html/diagram.tmpl rename to .old/html/diagram.tmpl diff --git a/html/dim.tmpl b/.old/html/dim.tmpl similarity index 100% rename from html/dim.tmpl rename to .old/html/dim.tmpl diff --git a/html/docs-header.tmpl b/.old/html/docs-header.tmpl similarity index 100% rename from html/docs-header.tmpl rename to .old/html/docs-header.tmpl diff --git a/html/download-links.tmpl b/.old/html/download-links.tmpl similarity index 100% rename from 
html/download-links.tmpl rename to .old/html/download-links.tmpl diff --git a/html/example-toggle.tmpl b/.old/html/example-toggle.tmpl similarity index 100% rename from html/example-toggle.tmpl rename to .old/html/example-toggle.tmpl diff --git a/html/example.tmpl b/.old/html/example.tmpl similarity index 100% rename from html/example.tmpl rename to .old/html/example.tmpl diff --git a/html/font-awesome.tmpl b/.old/html/font-awesome.tmpl similarity index 100% rename from html/font-awesome.tmpl rename to .old/html/font-awesome.tmpl diff --git a/html/frame.tmpl b/.old/html/frame.tmpl similarity index 100% rename from html/frame.tmpl rename to .old/html/frame.tmpl diff --git a/html/getting-started.tmpl b/.old/html/getting-started.tmpl similarity index 100% rename from html/getting-started.tmpl rename to .old/html/getting-started.tmpl diff --git a/html/github-issue-link.tmpl b/.old/html/github-issue-link.tmpl similarity index 100% rename from html/github-issue-link.tmpl rename to .old/html/github-issue-link.tmpl diff --git a/html/github-pr-link.tmpl b/.old/html/github-pr-link.tmpl similarity index 100% rename from html/github-pr-link.tmpl rename to .old/html/github-pr-link.tmpl diff --git a/html/github-release-link.tmpl b/.old/html/github-release-link.tmpl similarity index 100% rename from html/github-release-link.tmpl rename to .old/html/github-release-link.tmpl diff --git a/html/github-user-link.tmpl b/.old/html/github-user-link.tmpl similarity index 100% rename from html/github-user-link.tmpl rename to .old/html/github-user-link.tmpl diff --git a/html/head.tmpl b/.old/html/head.tmpl similarity index 100% rename from html/head.tmpl rename to .old/html/head.tmpl diff --git a/html/highlighted-block.tmpl b/.old/html/highlighted-block.tmpl similarity index 100% rename from html/highlighted-block.tmpl rename to .old/html/highlighted-block.tmpl diff --git a/html/highlighted.tmpl b/.old/html/highlighted.tmpl similarity index 100% rename from html/highlighted.tmpl rename to 
.old/html/highlighted.tmpl diff --git a/html/index-page.tmpl b/.old/html/index-page.tmpl similarity index 100% rename from html/index-page.tmpl rename to .old/html/index-page.tmpl diff --git a/html/inline-header.tmpl b/.old/html/inline-header.tmpl similarity index 100% rename from html/inline-header.tmpl rename to .old/html/inline-header.tmpl diff --git a/html/label.tmpl b/.old/html/label.tmpl similarity index 100% rename from html/label.tmpl rename to .old/html/label.tmpl diff --git a/html/light-button.tmpl b/.old/html/light-button.tmpl similarity index 100% rename from html/light-button.tmpl rename to .old/html/light-button.tmpl diff --git a/html/linux-icon.tmpl b/.old/html/linux-icon.tmpl similarity index 100% rename from html/linux-icon.tmpl rename to .old/html/linux-icon.tmpl diff --git a/html/osx-icon.tmpl b/.old/html/osx-icon.tmpl similarity index 100% rename from html/osx-icon.tmpl rename to .old/html/osx-icon.tmpl diff --git a/html/page-region.tmpl b/.old/html/page-region.tmpl similarity index 100% rename from html/page-region.tmpl rename to .old/html/page-region.tmpl diff --git a/html/page-toc.tmpl b/.old/html/page-toc.tmpl similarity index 100% rename from html/page-toc.tmpl rename to .old/html/page-toc.tmpl diff --git a/html/page.tmpl b/.old/html/page.tmpl similarity index 100% rename from html/page.tmpl rename to .old/html/page.tmpl diff --git a/html/pipeline-image.tmpl b/.old/html/pipeline-image.tmpl similarity index 100% rename from html/pipeline-image.tmpl rename to .old/html/pipeline-image.tmpl diff --git a/html/pipeline-svg.tmpl b/.old/html/pipeline-svg.tmpl similarity index 100% rename from html/pipeline-svg.tmpl rename to .old/html/pipeline-svg.tmpl diff --git a/html/prometheus-metric.tmpl b/.old/html/prometheus-metric.tmpl similarity index 100% rename from html/prometheus-metric.tmpl rename to .old/html/prometheus-metric.tmpl diff --git a/html/quick-start.tmpl b/.old/html/quick-start.tmpl similarity index 100% rename from html/quick-start.tmpl 
rename to .old/html/quick-start.tmpl diff --git a/html/resources-table-cell.tmpl b/.old/html/resources-table-cell.tmpl similarity index 100% rename from html/resources-table-cell.tmpl rename to .old/html/resources-table-cell.tmpl diff --git a/html/resources-table-header.tmpl b/.old/html/resources-table-header.tmpl similarity index 100% rename from html/resources-table-header.tmpl rename to .old/html/resources-table-header.tmpl diff --git a/html/resources-table.tmpl b/.old/html/resources-table.tmpl similarity index 100% rename from html/resources-table.tmpl rename to .old/html/resources-table.tmpl diff --git a/html/rfc-questions.tmpl b/.old/html/rfc-questions.tmpl similarity index 100% rename from html/rfc-questions.tmpl rename to .old/html/rfc-questions.tmpl diff --git a/html/rfc-reaction.tmpl b/.old/html/rfc-reaction.tmpl similarity index 100% rename from html/rfc-reaction.tmpl rename to .old/html/rfc-reaction.tmpl diff --git a/html/rfc-status.tmpl b/.old/html/rfc-status.tmpl similarity index 100% rename from html/rfc-status.tmpl rename to .old/html/rfc-status.tmpl diff --git a/html/rfc.tmpl b/.old/html/rfc.tmpl similarity index 100% rename from html/rfc.tmpl rename to .old/html/rfc.tmpl diff --git a/html/rfcs-table.tmpl b/.old/html/rfcs-table.tmpl similarity index 100% rename from html/rfcs-table.tmpl rename to .old/html/rfcs-table.tmpl diff --git a/html/schema-attribute-name.tmpl b/.old/html/schema-attribute-name.tmpl similarity index 100% rename from html/schema-attribute-name.tmpl rename to .old/html/schema-attribute-name.tmpl diff --git a/html/schema-attribute.tmpl b/.old/html/schema-attribute.tmpl similarity index 100% rename from html/schema-attribute.tmpl rename to .old/html/schema-attribute.tmpl diff --git a/html/schema-group.tmpl b/.old/html/schema-group.tmpl similarity index 100% rename from html/schema-group.tmpl rename to .old/html/schema-group.tmpl diff --git a/html/schema-one-of.tmpl b/.old/html/schema-one-of.tmpl similarity index 100% rename from 
html/schema-one-of.tmpl rename to .old/html/schema-one-of.tmpl diff --git a/html/schema-scalar.tmpl b/.old/html/schema-scalar.tmpl similarity index 100% rename from html/schema-scalar.tmpl rename to .old/html/schema-scalar.tmpl diff --git a/html/schema.tmpl b/.old/html/schema.tmpl similarity index 100% rename from html/schema.tmpl rename to .old/html/schema.tmpl diff --git a/html/section.tmpl b/.old/html/section.tmpl similarity index 100% rename from html/section.tmpl rename to .old/html/section.tmpl diff --git a/html/side-by-side.tmpl b/.old/html/side-by-side.tmpl similarity index 100% rename from html/side-by-side.tmpl rename to .old/html/side-by-side.tmpl diff --git a/html/sidebar-right.tmpl b/.old/html/sidebar-right.tmpl similarity index 100% rename from html/sidebar-right.tmpl rename to .old/html/sidebar-right.tmpl diff --git a/html/sidebar.tmpl b/.old/html/sidebar.tmpl similarity index 100% rename from html/sidebar.tmpl rename to .old/html/sidebar.tmpl diff --git a/html/slack-blurb.tmpl b/.old/html/slack-blurb.tmpl similarity index 100% rename from html/slack-blurb.tmpl rename to .old/html/slack-blurb.tmpl diff --git a/html/soft-line.tmpl b/.old/html/soft-line.tmpl similarity index 100% rename from html/soft-line.tmpl rename to .old/html/soft-line.tmpl diff --git a/html/splash-example.tmpl b/.old/html/splash-example.tmpl similarity index 100% rename from html/splash-example.tmpl rename to .old/html/splash-example.tmpl diff --git a/html/splash-intro.tmpl b/.old/html/splash-intro.tmpl similarity index 100% rename from html/splash-intro.tmpl rename to .old/html/splash-intro.tmpl diff --git a/html/step-with-line.tmpl b/.old/html/step-with-line.tmpl similarity index 100% rename from html/step-with-line.tmpl rename to .old/html/step-with-line.tmpl diff --git a/html/step.tmpl b/.old/html/step.tmpl similarity index 100% rename from html/step.tmpl rename to .old/html/step.tmpl diff --git a/html/talk.tmpl b/.old/html/talk.tmpl similarity index 100% rename from 
html/talk.tmpl rename to .old/html/talk.tmpl diff --git a/html/titled-codeblock.tmpl b/.old/html/titled-codeblock.tmpl similarity index 100% rename from html/titled-codeblock.tmpl rename to .old/html/titled-codeblock.tmpl diff --git a/html/toc.tmpl b/.old/html/toc.tmpl similarity index 100% rename from html/toc.tmpl rename to .old/html/toc.tmpl diff --git a/html/top.tmpl b/.old/html/top.tmpl similarity index 100% rename from html/top.tmpl rename to .old/html/top.tmpl diff --git a/html/trademark-guidelines.tmpl b/.old/html/trademark-guidelines.tmpl similarity index 100% rename from html/trademark-guidelines.tmpl rename to .old/html/trademark-guidelines.tmpl diff --git a/html/truitt.tmpl b/.old/html/truitt.tmpl similarity index 100% rename from html/truitt.tmpl rename to .old/html/truitt.tmpl diff --git a/html/value-prop-left.tmpl b/.old/html/value-prop-left.tmpl similarity index 100% rename from html/value-prop-left.tmpl rename to .old/html/value-prop-left.tmpl diff --git a/html/value-prop-right.tmpl b/.old/html/value-prop-right.tmpl similarity index 100% rename from html/value-prop-right.tmpl rename to .old/html/value-prop-right.tmpl diff --git a/html/video.tmpl b/.old/html/video.tmpl similarity index 100% rename from html/video.tmpl rename to .old/html/video.tmpl diff --git a/html/warning.tmpl b/.old/html/warning.tmpl similarity index 100% rename from html/warning.tmpl rename to .old/html/warning.tmpl diff --git a/html/windows-icon.tmpl b/.old/html/windows-icon.tmpl similarity index 100% rename from html/windows-icon.tmpl rename to .old/html/windows-icon.tmpl diff --git a/images/booklit.svg b/.old/images/booklit.svg similarity index 100% rename from images/booklit.svg rename to .old/images/booklit.svg diff --git a/images/concourse-pipeline.png b/.old/images/concourse-pipeline.png similarity index 100% rename from images/concourse-pipeline.png rename to .old/images/concourse-pipeline.png diff --git a/images/countdown.gif b/.old/images/countdown.gif similarity index 
100% rename from images/countdown.gif rename to .old/images/countdown.gif diff --git a/images/icons/arrow-down-box.svg b/.old/images/icons/arrow-down-box.svg similarity index 100% rename from images/icons/arrow-down-box.svg rename to .old/images/icons/arrow-down-box.svg diff --git a/images/icons/chevron-down.svg b/.old/images/icons/chevron-down.svg similarity index 100% rename from images/icons/chevron-down.svg rename to .old/images/icons/chevron-down.svg diff --git a/images/icons/chevron-up.svg b/.old/images/icons/chevron-up.svg similarity index 100% rename from images/icons/chevron-up.svg rename to .old/images/icons/chevron-up.svg diff --git a/images/icons/github-box.svg b/.old/images/icons/github-box.svg similarity index 100% rename from images/icons/github-box.svg rename to .old/images/icons/github-box.svg diff --git a/images/icons/project.svg b/.old/images/icons/project.svg similarity index 100% rename from images/icons/project.svg rename to .old/images/icons/project.svg diff --git a/images/icons/tag.svg b/.old/images/icons/tag.svg similarity index 100% rename from images/icons/tag.svg rename to .old/images/icons/tag.svg diff --git a/images/life.gif b/.old/images/life.gif similarity index 100% rename from images/life.gif rename to .old/images/life.gif diff --git a/images/logo-white.svg b/.old/images/logo-white.svg similarity index 100% rename from images/logo-white.svg rename to .old/images/logo-white.svg diff --git a/images/search_icon.svg b/.old/images/search_icon.svg similarity index 100% rename from images/search_icon.svg rename to .old/images/search_icon.svg diff --git a/images/technically.gif b/.old/images/technically.gif similarity index 100% rename from images/technically.gif rename to .old/images/technically.gif diff --git a/images/tutorial/github-release.png b/.old/images/tutorial/github-release.png similarity index 100% rename from images/tutorial/github-release.png rename to .old/images/tutorial/github-release.png diff --git 
a/images/tutorial/hello-world-first-build.mp4 b/.old/images/tutorial/hello-world-first-build.mp4 similarity index 100% rename from images/tutorial/hello-world-first-build.mp4 rename to .old/images/tutorial/hello-world-first-build.mp4 diff --git a/images/tutorial/hello-world-passing-artifacts.png b/.old/images/tutorial/hello-world-passing-artifacts.png similarity index 100% rename from images/tutorial/hello-world-passing-artifacts.png rename to .old/images/tutorial/hello-world-passing-artifacts.png diff --git a/images/tutorial/hello-world-success.png b/.old/images/tutorial/hello-world-success.png similarity index 100% rename from images/tutorial/hello-world-success.png rename to .old/images/tutorial/hello-world-success.png diff --git a/images/tutorial/hello-world-trigger-job.png b/.old/images/tutorial/hello-world-trigger-job.png similarity index 100% rename from images/tutorial/hello-world-trigger-job.png rename to .old/images/tutorial/hello-world-trigger-job.png diff --git a/images/tutorial/resource-pipeline-done.png b/.old/images/tutorial/resource-pipeline-done.png similarity index 100% rename from images/tutorial/resource-pipeline-done.png rename to .old/images/tutorial/resource-pipeline-done.png diff --git a/images/tutorial/resource-pipeline-fix-tests.mp4 b/.old/images/tutorial/resource-pipeline-fix-tests.mp4 similarity index 100% rename from images/tutorial/resource-pipeline-fix-tests.mp4 rename to .old/images/tutorial/resource-pipeline-fix-tests.mp4 diff --git a/images/tutorial/resource-pipeline-tests-trigger.mp4 b/.old/images/tutorial/resource-pipeline-tests-trigger.mp4 similarity index 100% rename from images/tutorial/resource-pipeline-tests-trigger.mp4 rename to .old/images/tutorial/resource-pipeline-tests-trigger.mp4 diff --git a/images/tutorial/resource-publish-job.png b/.old/images/tutorial/resource-publish-job.png similarity index 100% rename from images/tutorial/resource-publish-job.png rename to .old/images/tutorial/resource-publish-job.png diff --git 
a/images/tutorial/resource-publish-logs.png b/.old/images/tutorial/resource-publish-logs.png similarity index 100% rename from images/tutorial/resource-publish-logs.png rename to .old/images/tutorial/resource-publish-logs.png diff --git a/images/tutorial/resource-version-web-ui.png b/.old/images/tutorial/resource-version-web-ui.png similarity index 100% rename from images/tutorial/resource-version-web-ui.png rename to .old/images/tutorial/resource-version-web-ui.png diff --git a/images/tutorial/resources-pipeline-tests-trigger.png b/.old/images/tutorial/resources-pipeline-tests-trigger.png similarity index 100% rename from images/tutorial/resources-pipeline-tests-trigger.png rename to .old/images/tutorial/resources-pipeline-tests-trigger.png diff --git a/images/tutorial/resources-pipeline-tests.png b/.old/images/tutorial/resources-pipeline-tests.png similarity index 100% rename from images/tutorial/resources-pipeline-tests.png rename to .old/images/tutorial/resources-pipeline-tests.png diff --git a/images/tutorial/trigger-tests-job-get-repo.mp4 b/.old/images/tutorial/trigger-tests-job-get-repo.mp4 similarity index 100% rename from images/tutorial/trigger-tests-job-get-repo.mp4 rename to .old/images/tutorial/trigger-tests-job-get-repo.mp4 diff --git a/images/tutorial/trigger-tests-job-go-test.mp4 b/.old/images/tutorial/trigger-tests-job-go-test.mp4 similarity index 100% rename from images/tutorial/trigger-tests-job-go-test.mp4 rename to .old/images/tutorial/trigger-tests-job-go-test.mp4 diff --git a/images/tutorial/view-check-status.png b/.old/images/tutorial/view-check-status.png similarity index 100% rename from images/tutorial/view-check-status.png rename to .old/images/tutorial/view-check-status.png diff --git a/images/tutorial/welcome-screen.png b/.old/images/tutorial/welcome-screen.png similarity index 100% rename from images/tutorial/welcome-screen.png rename to .old/images/tutorial/welcome-screen.png diff --git a/images/vapenaysh.png 
b/.old/images/vapenaysh.png similarity index 100% rename from images/vapenaysh.png rename to .old/images/vapenaysh.png diff --git a/images/whoami.gif b/.old/images/whoami.gif similarity index 100% rename from images/whoami.gif rename to .old/images/whoami.gif diff --git a/images/wow.gif b/.old/images/wow.gif similarity index 100% rename from images/wow.gif rename to .old/images/wow.gif diff --git a/js/prism.js b/.old/js/prism.js similarity index 100% rename from js/prism.js rename to .old/js/prism.js diff --git a/js/search.js b/.old/js/search.js similarity index 100% rename from js/search.js rename to .old/js/search.js diff --git a/less/base.less b/.old/less/base.less similarity index 100% rename from less/base.less rename to .old/less/base.less diff --git a/less/blog.less b/.old/less/blog.less similarity index 100% rename from less/blog.less rename to .old/less/blog.less diff --git a/less/booklit.less b/.old/less/booklit.less similarity index 100% rename from less/booklit.less rename to .old/less/booklit.less diff --git a/less/colors.less b/.old/less/colors.less similarity index 100% rename from less/colors.less rename to .old/less/colors.less diff --git a/less/discourse/common.less b/.old/less/discourse/common.less similarity index 100% rename from less/discourse/common.less rename to .old/less/discourse/common.less diff --git a/less/discourse/desktop.less b/.old/less/discourse/desktop.less similarity index 100% rename from less/discourse/desktop.less rename to .old/less/discourse/desktop.less diff --git a/less/discourse/mobile.less b/.old/less/discourse/mobile.less similarity index 100% rename from less/discourse/mobile.less rename to .old/less/discourse/mobile.less diff --git a/less/icons.less b/.old/less/icons.less similarity index 100% rename from less/icons.less rename to .old/less/icons.less diff --git a/less/index.less b/.old/less/index.less similarity index 100% rename from less/index.less rename to .old/less/index.less diff --git a/less/mixins.less 
b/.old/less/mixins.less similarity index 100% rename from less/mixins.less rename to .old/less/mixins.less diff --git a/less/pipeline.less b/.old/less/pipeline.less similarity index 100% rename from less/pipeline.less rename to .old/less/pipeline.less diff --git a/less/prism.less b/.old/less/prism.less similarity index 100% rename from less/prism.less rename to .old/less/prism.less diff --git a/less/schema.less b/.old/less/schema.less similarity index 100% rename from less/schema.less rename to .old/less/schema.less diff --git a/less/search.less b/.old/less/search.less similarity index 100% rename from less/search.less rename to .old/less/search.less diff --git a/less/vars.less b/.old/less/vars.less similarity index 100% rename from less/vars.less rename to .old/less/vars.less diff --git a/lit/docs.lit b/.old/lit/docs.lit similarity index 100% rename from lit/docs.lit rename to .old/lit/docs.lit diff --git a/lit/docs/auth.lit b/.old/lit/docs/auth.lit similarity index 100% rename from lit/docs/auth.lit rename to .old/lit/docs/auth.lit diff --git a/lit/docs/auth/configuring.lit b/.old/lit/docs/auth/configuring.lit similarity index 100% rename from lit/docs/auth/configuring.lit rename to .old/lit/docs/auth/configuring.lit diff --git a/lit/docs/auth/configuring/bitbucket.lit b/.old/lit/docs/auth/configuring/bitbucket.lit similarity index 100% rename from lit/docs/auth/configuring/bitbucket.lit rename to .old/lit/docs/auth/configuring/bitbucket.lit diff --git a/lit/docs/auth/configuring/cf.lit b/.old/lit/docs/auth/configuring/cf.lit similarity index 100% rename from lit/docs/auth/configuring/cf.lit rename to .old/lit/docs/auth/configuring/cf.lit diff --git a/lit/docs/auth/configuring/generic-oauth.lit b/.old/lit/docs/auth/configuring/generic-oauth.lit similarity index 100% rename from lit/docs/auth/configuring/generic-oauth.lit rename to .old/lit/docs/auth/configuring/generic-oauth.lit diff --git a/lit/docs/auth/configuring/generic-oidc.lit 
b/.old/lit/docs/auth/configuring/generic-oidc.lit similarity index 100% rename from lit/docs/auth/configuring/generic-oidc.lit rename to .old/lit/docs/auth/configuring/generic-oidc.lit diff --git a/lit/docs/auth/configuring/generic-saml.lit b/.old/lit/docs/auth/configuring/generic-saml.lit similarity index 100% rename from lit/docs/auth/configuring/generic-saml.lit rename to .old/lit/docs/auth/configuring/generic-saml.lit diff --git a/lit/docs/auth/configuring/github.lit b/.old/lit/docs/auth/configuring/github.lit similarity index 100% rename from lit/docs/auth/configuring/github.lit rename to .old/lit/docs/auth/configuring/github.lit diff --git a/lit/docs/auth/configuring/gitlab.lit b/.old/lit/docs/auth/configuring/gitlab.lit similarity index 100% rename from lit/docs/auth/configuring/gitlab.lit rename to .old/lit/docs/auth/configuring/gitlab.lit diff --git a/lit/docs/auth/configuring/ldap.lit b/.old/lit/docs/auth/configuring/ldap.lit similarity index 100% rename from lit/docs/auth/configuring/ldap.lit rename to .old/lit/docs/auth/configuring/ldap.lit diff --git a/lit/docs/auth/configuring/local.lit b/.old/lit/docs/auth/configuring/local.lit similarity index 100% rename from lit/docs/auth/configuring/local.lit rename to .old/lit/docs/auth/configuring/local.lit diff --git a/lit/docs/auth/configuring/microsoft.lit b/.old/lit/docs/auth/configuring/microsoft.lit similarity index 100% rename from lit/docs/auth/configuring/microsoft.lit rename to .old/lit/docs/auth/configuring/microsoft.lit diff --git a/lit/docs/auth/main-team.lit b/.old/lit/docs/auth/main-team.lit similarity index 100% rename from lit/docs/auth/main-team.lit rename to .old/lit/docs/auth/main-team.lit diff --git a/lit/docs/auth/managing-teams.lit b/.old/lit/docs/auth/managing-teams.lit similarity index 100% rename from lit/docs/auth/managing-teams.lit rename to .old/lit/docs/auth/managing-teams.lit diff --git a/lit/docs/auth/roles.lit b/.old/lit/docs/auth/roles.lit similarity index 100% rename from 
lit/docs/auth/roles.lit rename to .old/lit/docs/auth/roles.lit diff --git a/lit/docs/builds.lit b/.old/lit/docs/builds.lit similarity index 100% rename from lit/docs/builds.lit rename to .old/lit/docs/builds.lit diff --git a/lit/docs/config.lit b/.old/lit/docs/config.lit similarity index 100% rename from lit/docs/config.lit rename to .old/lit/docs/config.lit diff --git a/lit/docs/config/basic-schemas.lit b/.old/lit/docs/config/basic-schemas.lit similarity index 100% rename from lit/docs/config/basic-schemas.lit rename to .old/lit/docs/config/basic-schemas.lit diff --git a/lit/docs/config/vars/caching.lit b/.old/lit/docs/config/vars/caching.lit similarity index 100% rename from lit/docs/config/vars/caching.lit rename to .old/lit/docs/config/vars/caching.lit diff --git a/lit/docs/config/yaml.lit b/.old/lit/docs/config/yaml.lit similarity index 100% rename from lit/docs/config/yaml.lit rename to .old/lit/docs/config/yaml.lit diff --git a/lit/docs/fly.lit b/.old/lit/docs/fly.lit similarity index 100% rename from lit/docs/fly.lit rename to .old/lit/docs/fly.lit diff --git a/lit/docs/getting-started.lit b/.old/lit/docs/getting-started.lit similarity index 100% rename from lit/docs/getting-started.lit rename to .old/lit/docs/getting-started.lit diff --git a/lit/docs/getting-started/hello-world.lit b/.old/lit/docs/getting-started/hello-world.lit similarity index 100% rename from lit/docs/getting-started/hello-world.lit rename to .old/lit/docs/getting-started/hello-world.lit diff --git a/lit/docs/getting-started/inputs-outputs.lit b/.old/lit/docs/getting-started/inputs-outputs.lit similarity index 100% rename from lit/docs/getting-started/inputs-outputs.lit rename to .old/lit/docs/getting-started/inputs-outputs.lit diff --git a/lit/docs/getting-started/quickstart.lit b/.old/lit/docs/getting-started/quickstart.lit similarity index 100% rename from lit/docs/getting-started/quickstart.lit rename to .old/lit/docs/getting-started/quickstart.lit diff --git 
a/lit/docs/getting-started/resources.lit b/.old/lit/docs/getting-started/resources.lit similarity index 100% rename from lit/docs/getting-started/resources.lit rename to .old/lit/docs/getting-started/resources.lit diff --git a/lit/docs/guides/container-images.lit b/.old/lit/docs/guides/container-images.lit similarity index 100% rename from lit/docs/guides/container-images.lit rename to .old/lit/docs/guides/container-images.lit diff --git a/lit/docs/guides/git.lit b/.old/lit/docs/guides/git.lit similarity index 100% rename from lit/docs/guides/git.lit rename to .old/lit/docs/guides/git.lit diff --git a/lit/docs/guides/pipeline-guides.lit b/.old/lit/docs/guides/pipeline-guides.lit similarity index 100% rename from lit/docs/guides/pipeline-guides.lit rename to .old/lit/docs/guides/pipeline-guides.lit diff --git a/lit/docs/how-to-guides.lit b/.old/lit/docs/how-to-guides.lit similarity index 100% rename from lit/docs/how-to-guides.lit rename to .old/lit/docs/how-to-guides.lit diff --git a/lit/docs/install.lit b/.old/lit/docs/install.lit similarity index 100% rename from lit/docs/install.lit rename to .old/lit/docs/install.lit diff --git a/lit/docs/install/concourse-cli.lit b/.old/lit/docs/install/concourse-cli.lit similarity index 100% rename from lit/docs/install/concourse-cli.lit rename to .old/lit/docs/install/concourse-cli.lit diff --git a/lit/docs/install/db.lit b/.old/lit/docs/install/db.lit similarity index 100% rename from lit/docs/install/db.lit rename to .old/lit/docs/install/db.lit diff --git a/lit/docs/install/generating-keys.lit b/.old/lit/docs/install/generating-keys.lit similarity index 100% rename from lit/docs/install/generating-keys.lit rename to .old/lit/docs/install/generating-keys.lit diff --git a/lit/docs/install/upgrading.lit b/.old/lit/docs/install/upgrading.lit similarity index 100% rename from lit/docs/install/upgrading.lit rename to .old/lit/docs/install/upgrading.lit diff --git a/lit/docs/install/web.lit b/.old/lit/docs/install/web.lit 
similarity index 100% rename from lit/docs/install/web.lit rename to .old/lit/docs/install/web.lit diff --git a/lit/docs/install/worker.lit b/.old/lit/docs/install/worker.lit similarity index 100% rename from lit/docs/install/worker.lit rename to .old/lit/docs/install/worker.lit diff --git a/lit/docs/internals.lit b/.old/lit/docs/internals.lit similarity index 100% rename from lit/docs/internals.lit rename to .old/lit/docs/internals.lit diff --git a/lit/docs/internals/build-tracker.lit b/.old/lit/docs/internals/build-tracker.lit similarity index 100% rename from lit/docs/internals/build-tracker.lit rename to .old/lit/docs/internals/build-tracker.lit diff --git a/lit/docs/internals/checker.lit b/.old/lit/docs/internals/checker.lit similarity index 100% rename from lit/docs/internals/checker.lit rename to .old/lit/docs/internals/checker.lit diff --git a/lit/docs/internals/garbage-collector.lit b/.old/lit/docs/internals/garbage-collector.lit similarity index 100% rename from lit/docs/internals/garbage-collector.lit rename to .old/lit/docs/internals/garbage-collector.lit diff --git a/lit/docs/internals/scheduler.lit b/.old/lit/docs/internals/scheduler.lit similarity index 100% rename from lit/docs/internals/scheduler.lit rename to .old/lit/docs/internals/scheduler.lit diff --git a/lit/docs/jobs.lit b/.old/lit/docs/jobs.lit similarity index 100% rename from lit/docs/jobs.lit rename to .old/lit/docs/jobs.lit diff --git a/lit/docs/jobs/managing.lit b/.old/lit/docs/jobs/managing.lit similarity index 100% rename from lit/docs/jobs/managing.lit rename to .old/lit/docs/jobs/managing.lit diff --git a/lit/docs/observation.lit b/.old/lit/docs/observation.lit similarity index 100% rename from lit/docs/observation.lit rename to .old/lit/docs/observation.lit diff --git a/lit/docs/operation.lit b/.old/lit/docs/operation.lit similarity index 100% rename from lit/docs/operation.lit rename to .old/lit/docs/operation.lit diff --git a/lit/docs/operation/administration.lit 
b/.old/lit/docs/operation/administration.lit similarity index 100% rename from lit/docs/operation/administration.lit rename to .old/lit/docs/operation/administration.lit diff --git a/lit/docs/operation/container-placement.lit b/.old/lit/docs/operation/container-placement.lit similarity index 100% rename from lit/docs/operation/container-placement.lit rename to .old/lit/docs/operation/container-placement.lit diff --git a/lit/docs/operation/creds.lit b/.old/lit/docs/operation/creds.lit similarity index 100% rename from lit/docs/operation/creds.lit rename to .old/lit/docs/operation/creds.lit diff --git a/lit/docs/operation/creds/aws-secretsmanager.lit b/.old/lit/docs/operation/creds/aws-secretsmanager.lit similarity index 100% rename from lit/docs/operation/creds/aws-secretsmanager.lit rename to .old/lit/docs/operation/creds/aws-secretsmanager.lit diff --git a/lit/docs/operation/creds/aws-ssm.lit b/.old/lit/docs/operation/creds/aws-ssm.lit similarity index 100% rename from lit/docs/operation/creds/aws-ssm.lit rename to .old/lit/docs/operation/creds/aws-ssm.lit diff --git a/lit/docs/operation/creds/caching.lit b/.old/lit/docs/operation/creds/caching.lit similarity index 100% rename from lit/docs/operation/creds/caching.lit rename to .old/lit/docs/operation/creds/caching.lit diff --git a/lit/docs/operation/creds/credhub.lit b/.old/lit/docs/operation/creds/credhub.lit similarity index 100% rename from lit/docs/operation/creds/credhub.lit rename to .old/lit/docs/operation/creds/credhub.lit diff --git a/lit/docs/operation/creds/cyberark-conjur.lit b/.old/lit/docs/operation/creds/cyberark-conjur.lit similarity index 100% rename from lit/docs/operation/creds/cyberark-conjur.lit rename to .old/lit/docs/operation/creds/cyberark-conjur.lit diff --git a/lit/docs/operation/creds/idtoken.lit b/.old/lit/docs/operation/creds/idtoken.lit similarity index 100% rename from lit/docs/operation/creds/idtoken.lit rename to .old/lit/docs/operation/creds/idtoken.lit diff --git 
a/lit/docs/operation/creds/kubernetes.lit b/.old/lit/docs/operation/creds/kubernetes.lit similarity index 100% rename from lit/docs/operation/creds/kubernetes.lit rename to .old/lit/docs/operation/creds/kubernetes.lit diff --git a/lit/docs/operation/creds/redacting.lit b/.old/lit/docs/operation/creds/redacting.lit similarity index 100% rename from lit/docs/operation/creds/redacting.lit rename to .old/lit/docs/operation/creds/redacting.lit diff --git a/lit/docs/operation/creds/retry.lit b/.old/lit/docs/operation/creds/retry.lit similarity index 100% rename from lit/docs/operation/creds/retry.lit rename to .old/lit/docs/operation/creds/retry.lit diff --git a/lit/docs/operation/creds/vault.lit b/.old/lit/docs/operation/creds/vault.lit similarity index 100% rename from lit/docs/operation/creds/vault.lit rename to .old/lit/docs/operation/creds/vault.lit diff --git a/lit/docs/operation/encryption.lit b/.old/lit/docs/operation/encryption.lit similarity index 100% rename from lit/docs/operation/encryption.lit rename to .old/lit/docs/operation/encryption.lit diff --git a/lit/docs/operation/global-resources.lit b/.old/lit/docs/operation/global-resources.lit similarity index 100% rename from lit/docs/operation/global-resources.lit rename to .old/lit/docs/operation/global-resources.lit diff --git a/lit/docs/operation/metrics.lit b/.old/lit/docs/operation/metrics.lit similarity index 100% rename from lit/docs/operation/metrics.lit rename to .old/lit/docs/operation/metrics.lit diff --git a/lit/docs/operation/opa.lit b/.old/lit/docs/operation/opa.lit similarity index 100% rename from lit/docs/operation/opa.lit rename to .old/lit/docs/operation/opa.lit diff --git a/lit/docs/operation/performance-tuning.lit b/.old/lit/docs/operation/performance-tuning.lit similarity index 100% rename from lit/docs/operation/performance-tuning.lit rename to .old/lit/docs/operation/performance-tuning.lit diff --git a/lit/docs/operation/security-hardening.lit 
b/.old/lit/docs/operation/security-hardening.lit similarity index 100% rename from lit/docs/operation/security-hardening.lit rename to .old/lit/docs/operation/security-hardening.lit diff --git a/lit/docs/operation/tracing.lit b/.old/lit/docs/operation/tracing.lit similarity index 100% rename from lit/docs/operation/tracing.lit rename to .old/lit/docs/operation/tracing.lit diff --git a/lit/docs/pipelines.lit b/.old/lit/docs/pipelines.lit similarity index 100% rename from lit/docs/pipelines.lit rename to .old/lit/docs/pipelines.lit diff --git a/lit/docs/pipelines/configuring.lit b/.old/lit/docs/pipelines/configuring.lit similarity index 100% rename from lit/docs/pipelines/configuring.lit rename to .old/lit/docs/pipelines/configuring.lit diff --git a/lit/docs/pipelines/instance-groups.lit b/.old/lit/docs/pipelines/instance-groups.lit similarity index 100% rename from lit/docs/pipelines/instance-groups.lit rename to .old/lit/docs/pipelines/instance-groups.lit diff --git a/lit/docs/pipelines/managing.lit b/.old/lit/docs/pipelines/managing.lit similarity index 100% rename from lit/docs/pipelines/managing.lit rename to .old/lit/docs/pipelines/managing.lit diff --git a/lit/docs/resource-types.lit b/.old/lit/docs/resource-types.lit similarity index 100% rename from lit/docs/resource-types.lit rename to .old/lit/docs/resource-types.lit diff --git a/lit/docs/resource-types/implementing.lit b/.old/lit/docs/resource-types/implementing.lit similarity index 100% rename from lit/docs/resource-types/implementing.lit rename to .old/lit/docs/resource-types/implementing.lit diff --git a/lit/docs/resource-types/managing.lit b/.old/lit/docs/resource-types/managing.lit similarity index 100% rename from lit/docs/resource-types/managing.lit rename to .old/lit/docs/resource-types/managing.lit diff --git a/lit/docs/resources.lit b/.old/lit/docs/resources.lit similarity index 100% rename from lit/docs/resources.lit rename to .old/lit/docs/resources.lit diff --git 
a/lit/docs/resources/managing.lit b/.old/lit/docs/resources/managing.lit similarity index 100% rename from lit/docs/resources/managing.lit rename to .old/lit/docs/resources/managing.lit diff --git a/lit/docs/resources/versions.lit b/.old/lit/docs/resources/versions.lit similarity index 100% rename from lit/docs/resources/versions.lit rename to .old/lit/docs/resources/versions.lit diff --git a/lit/docs/steps.lit b/.old/lit/docs/steps.lit similarity index 100% rename from lit/docs/steps.lit rename to .old/lit/docs/steps.lit diff --git a/lit/docs/tasks.lit b/.old/lit/docs/tasks.lit similarity index 100% rename from lit/docs/tasks.lit rename to .old/lit/docs/tasks.lit diff --git a/lit/docs/tasks/environment.lit b/.old/lit/docs/tasks/environment.lit similarity index 100% rename from lit/docs/tasks/environment.lit rename to .old/lit/docs/tasks/environment.lit diff --git a/lit/docs/tasks/running.lit b/.old/lit/docs/tasks/running.lit similarity index 100% rename from lit/docs/tasks/running.lit rename to .old/lit/docs/tasks/running.lit diff --git a/lit/docs/vars.lit b/.old/lit/docs/vars.lit similarity index 100% rename from lit/docs/vars.lit rename to .old/lit/docs/vars.lit diff --git a/lit/ecosystem.lit b/.old/lit/ecosystem.lit similarity index 100% rename from lit/ecosystem.lit rename to .old/lit/ecosystem.lit diff --git a/lit/examples.lit b/.old/lit/examples.lit similarity index 100% rename from lit/examples.lit rename to .old/lit/examples.lit diff --git a/lit/examples/attic/dockerhub.lit b/.old/lit/examples/attic/dockerhub.lit similarity index 100% rename from lit/examples/attic/dockerhub.lit rename to .old/lit/examples/attic/dockerhub.lit diff --git a/lit/examples/git-triggered.lit b/.old/lit/examples/git-triggered.lit similarity index 100% rename from lit/examples/git-triggered.lit rename to .old/lit/examples/git-triggered.lit diff --git a/lit/examples/golang-lib.lit b/.old/lit/examples/golang-lib.lit similarity index 100% rename from lit/examples/golang-lib.lit 
rename to .old/lit/examples/golang-lib.lit diff --git a/lit/examples/hello-world.lit b/.old/lit/examples/hello-world.lit similarity index 100% rename from lit/examples/hello-world.lit rename to .old/lit/examples/hello-world.lit diff --git a/lit/examples/hooks.lit b/.old/lit/examples/hooks.lit similarity index 100% rename from lit/examples/hooks.lit rename to .old/lit/examples/hooks.lit diff --git a/lit/examples/java.lit b/.old/lit/examples/java.lit similarity index 100% rename from lit/examples/java.lit rename to .old/lit/examples/java.lit diff --git a/lit/examples/manual-trigger.lit b/.old/lit/examples/manual-trigger.lit similarity index 100% rename from lit/examples/manual-trigger.lit rename to .old/lit/examples/manual-trigger.lit diff --git a/lit/examples/nodejs.lit b/.old/lit/examples/nodejs.lit similarity index 100% rename from lit/examples/nodejs.lit rename to .old/lit/examples/nodejs.lit diff --git a/lit/examples/php.lit b/.old/lit/examples/php.lit similarity index 100% rename from lit/examples/php.lit rename to .old/lit/examples/php.lit diff --git a/lit/examples/pipeline-vars.lit b/.old/lit/examples/pipeline-vars.lit similarity index 100% rename from lit/examples/pipeline-vars.lit rename to .old/lit/examples/pipeline-vars.lit diff --git a/lit/examples/rails.lit b/.old/lit/examples/rails.lit similarity index 100% rename from lit/examples/rails.lit rename to .old/lit/examples/rails.lit diff --git a/lit/examples/serial-job.lit b/.old/lit/examples/serial-job.lit similarity index 100% rename from lit/examples/serial-job.lit rename to .old/lit/examples/serial-job.lit diff --git a/lit/examples/set-pipelines.lit b/.old/lit/examples/set-pipelines.lit similarity index 100% rename from lit/examples/set-pipelines.lit rename to .old/lit/examples/set-pipelines.lit diff --git a/lit/examples/task-inputs-outputs.lit b/.old/lit/examples/task-inputs-outputs.lit similarity index 100% rename from lit/examples/task-inputs-outputs.lit rename to 
.old/lit/examples/task-inputs-outputs.lit diff --git a/lit/examples/time-triggered.lit b/.old/lit/examples/time-triggered.lit similarity index 100% rename from lit/examples/time-triggered.lit rename to .old/lit/examples/time-triggered.lit diff --git a/lit/index.lit b/.old/lit/index.lit similarity index 100% rename from lit/index.lit rename to .old/lit/index.lit diff --git a/lit/project.lit b/.old/lit/project.lit similarity index 100% rename from lit/project.lit rename to .old/lit/project.lit diff --git a/lit/support.lit b/.old/lit/support.lit similarity index 100% rename from lit/support.lit rename to .old/lit/support.lit diff --git a/.old/package.json b/.old/package.json new file mode 100644 index 00000000..a8d011ea --- /dev/null +++ b/.old/package.json @@ -0,0 +1,21 @@ +{ + "name": "docs-search", + "version": "0.0.1", + "repository": "https://github.com/concourse/docs", + "author": "Alex Suraci ", + "license": "MIT", + "scripts": { + "start": "node index.js", + "build": "make" + }, + "dependencies": { + "elm": "0.19.1-6", + "gscan": "^5.0.0", + "less": "^3.9.0" + }, + "devDependencies": { + "elm-format": "^0.8.8", + "elm-oracle": "^1.1.1" + }, + "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e" +} diff --git a/robots.txt b/.old/robots.txt similarity index 100% rename from robots.txt rename to .old/robots.txt diff --git a/scripts/build b/.old/scripts/build old mode 100755 new mode 100644 similarity index 100% rename from scripts/build rename to .old/scripts/build diff --git a/scripts/deploy-examples b/.old/scripts/deploy-examples old mode 100755 new mode 100644 similarity index 100% rename from scripts/deploy-examples rename to .old/scripts/deploy-examples diff --git a/yarn.lock b/.old/yarn.lock similarity index 100% rename from yarn.lock rename to .old/yarn.lock diff --git a/README.md b/README.md index 05a9f942..09f6c4f7 100644 --- 
a/README.md +++ b/README.md @@ -1,13 +1,7 @@ # Concourse Docs This is where you will find the source for the Concourse website and overall -documentation. All of our docs are written using the [Booklit documentation -engine](https://vito.github.io/booklit/). - -**Table of Contents** -* [Building the Docs Locally](#building-the-docs-locally) -* [Docs Styling](#docs-styling) -* [Content Layout](#content-layout) +documentation. # Examples Status @@ -29,57 +23,7 @@ engine](https://vito.github.io/booklit/). ## Prerequisites -* Have Go v1.11.2+ installed and configured. You can find the relevant - instructions for your platform of choice here: [Go Getting - Started](https://golang.org/doc/install) - -* Clone this repository: - [https://github.com/concourse/docs](https://github.com/concourse/docs) - -## Compiling the Docs - -You can compile the Concourse docs by running: - -```bash -./scripts/build -``` - -The `build` script will instruct Booklit to compile all the files under `lit/` -as `html` files. The files will then be dumped into your current working -directory, i.e. the root of this repo. - -## Viewing the docs in your browser - -To run a server that will rebuild the docs as needed, pass `-s (port)` like so: - -```bash -./scripts/build -s 8000 -``` - -You will be now be able to see the rendered site if you navigate to -[http://localhost:8000](http://localhost:8000). - -# Docs Styling - -You can find all of the styling assets for the Concourse website and documentation under the [`css/`](./css) folder. - -If you are planning to make changes to the site, [`css/booklit.css`](./css/booklit.css) is usually a good place to start. - -# Content Layout - -All of the website content can be found under the [`lit/`](./lit) folder of this repository. - -The content layout for the site is qute simple, and for the most part self-explanatory. 
If you want to change a specific page on the website you can usually jump straight to it by looking for the `.lit` version of the page. For example you can make changes to https://concourse-ci.org/fly.html by editing `lit/fly.lit`. - -* [`html/docs-header.tmpl`](./html/docs-header.tmpl) L1 navigation header for the Concourse website and docs. -* [`lit/index.lit`](./lit/index.lit) The Concourse Homepage -* [`lit/docs/`](./lit/docs/) This is where you'll find most of the documentation listed under https://concourse-ci.org/docs.html - -The Resource Types live in a separate repository: [concourse/resource-types](https://github.com/concourse/resource-types). - -# Updating the Blog Theme +Required Dependencies: -```sh -$ make blog/concourse.zip -# and then upload it in the Ghost admin UI -``` +- ytt by Carvel +- \ No newline at end of file diff --git a/base.yml b/base.yml new file mode 100644 index 00000000..8fb126bd --- /dev/null +++ b/base.yml @@ -0,0 +1,263 @@ +site_name: Concourse +site_url: https://aviator-labs.github.io/docs-ci/ + +repo_url: https://github.com/concourse/concourse +repo_name: concourse/concourse + +edit_uri: https://github.com/Aviator-Labs/docs-ci/edit/main/docs/ + +watch: + - docs + - overrides + +markdown_extensions: + - admonition + - def_list + - tables + - footnotes + - attr_list + - md_in_html + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + - pymdownx.superfences + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.snippets: + base_path: + - "." 
# Current directory + - "docs" # docs directory + - pymdownx.tabbed: + alternate_style: true + slugify: !!python/object/apply:pymdownx.slugs.slugify + kwds: + case: lower + +extra_css: + - assets/stylesheets/mwc-theme.css + +extra_javascript: + - assets/javascripts/mwc-bundle.min.js + +exclude_docs: | + libs/**/*.md + libs/examples/apps + libs/examples/terraform + libs/examples/misc + libs/examples/LICENSE + +theme: + name: material + custom_dir: overrides + font: + text: Roboto Slab + features: + - navigation.tabs # Top-level tabs + - navigation.tabs.sticky # Keep tabs visible + - navigation.indexes # Section index pages + - navigation.instant # SPA-like navigation + - navigation.top # Back to top button + - navigation.footer # Previous/next links + - content.action.edit # Edit Page in GitHub + - content.action.view # View raw Page in GitHub + - content.code.copy # Copy code + - attr_list # Attribute Lists + - md_in_html # Markdown in HTML + - pymdownx.blocks.caption # Caption + + palette: + - scheme: default + primary: custom + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate + primary: custom + toggle: + icon: material/brightness-4 + name: Switch to light mode + + logo: assets/logo-white.svg + favicon: assets/favicon.ico + +copyright: >- + Copyright © 2025 The Linux Foundation ™ – + Change cookie settings + +extra: + analytics: + provider: goattracker + consent: + title: Cookie consent + description: >- + We use cookies to recognize your repeated visits. With your consent, you're helping us to + make our documentation better. 
+ cookies: + analytics: + name: Goat Tracker Analytics + checked: true + social: + - icon: fontawesome/brands/github + link: https://github.com/concourse + - icon: fontawesome/brands/docker + link: https://hub.docker.com/u/concourse + - icon: fontawesome/brands/discord + link: https://discord.gg/MeRxXKW + - icon: fontawesome/brands/bluesky + link: https://bsky.app/profile/concourse-ci.org + - icon: fontawesome/solid/paper-plane + link: mailto:contact@concourse-ci.org + version: 7.14.2 + +not_in_nav: | + index.md + +nav: + - Docs: + - docs/index.md + - Getting Started: + - docs/getting-started/index.md + - docs/getting-started/quick-start.md + - docs/getting-started/hello-world.md + - docs/getting-started/inputs-outputs.md + - docs/getting-started/resources.md + - Install: + - docs/install/index.md + - docs/install/running-postgres.md + - docs/install/concourse-cli.md + - docs/install/generating-keys.md + - docs/install/running-web.md + - docs/install/running-worker.md + - docs/install/upgrading-concourse.md + - Auth & Teams: + - docs/auth-and-teams/index.md + - docs/auth-and-teams/main-team.md + - Configuring Auth: + - docs/auth-and-teams/configuring/index.md + - docs/auth-and-teams/configuring/local-user.md + - docs/auth-and-teams/configuring/github.md + - docs/auth-and-teams/configuring/gitlab.md + - docs/auth-and-teams/configuring/bitbucket-cloud.md + - docs/auth-and-teams/configuring/cf-uaa.md + - docs/auth-and-teams/configuring/ldap.md + - docs/auth-and-teams/configuring/microsoft.md + - docs/auth-and-teams/configuring/generic-oidc.md + - docs/auth-and-teams/configuring/generic-oauth.md + - docs/auth-and-teams/configuring/generic-saml.md + - docs/auth-and-teams/managing-teams.md + - docs/auth-and-teams/user-roles.md + - docs/auth-and-teams/exposing.md + - docs/auth-and-teams/caveats.md + - docs/fly.md + - docs/config-basics.md + - Pipelines: + - docs/pipelines/index.md + - docs/pipelines/setting-pipelines.md + - docs/pipelines/managing-pipelines.md + 
- docs/pipelines/grouping-pipelines.md + - docs/vars.md + - Resources: + - docs/resources/index.md + - docs/resources/resource-versions.md + - docs/resources/managing-resources.md + - Resource Types: + - docs/resource-types/index.md + - docs/resource-types/implementing.md + - docs/resource-types/managing-types.md + - docs/jobs.md + - Steps: + - docs/steps/index.md + - docs/steps/get.md + - docs/steps/put.md + - docs/steps/task.md + - docs/steps/set-pipeline.md + - docs/steps/load-var.md + - docs/steps/in-parallel.md + - docs/steps/do.md + - docs/steps/try.md + - Modifiers and Hooks: + - docs/steps/modifier-and-hooks/index.md + - docs/steps/modifier-and-hooks/across.md + - docs/steps/modifier-and-hooks/timeout.md + - docs/steps/modifier-and-hooks/attempts.md + - docs/steps/modifier-and-hooks/tags.md + - docs/steps/modifier-and-hooks/on-success.md + - docs/steps/modifier-and-hooks/on-failure.md + - docs/steps/modifier-and-hooks/on-abort.md + - docs/steps/modifier-and-hooks/on-error.md + - docs/steps/modifier-and-hooks/ensure.md + - docs/tasks.md + - docs/builds.md + - How-To Guides: + - docs/how-to/index.md + - Pipeline Guides: + - docs/how-to/pipeline-guides/index.md + - docs/how-to/pipeline-guides/managing-pipeline-configs.md + - docs/how-to/pipeline-guides/common-pipeline.md + - docs/how-to/pipeline-guides/task-inputs-outputs.md + - docs/how-to/pipeline-guides/gated-pipelines.md + - docs/how-to/pipeline-guides/time-triggered-pipelines.md + - docs/how-to/pipeline-guides/manual-approval.md + - Git Guides: + - docs/how-to/git-guides/index.md + - docs/how-to/git-guides/basic.md + - docs/how-to/git-guides/multi-branch.md + - Container Image Guides: + - docs/how-to/container-image-guides/index.md + - docs/how-to/container-image-guides/build-push.md + - docs/how-to/container-image-guides/build-use.md + - Operation: + - docs/operation/index.md + - docs/operation/metrics.md + - docs/operation/tracing.md + - docs/operation/encryption.md + - Credential Management: + - 
docs/operation/creds/index.md + - docs/operation/creds/vault.md + - docs/operation/creds/credhub.md + - docs/operation/creds/aws-ssm.md + - docs/operation/creds/aws-secrets.md + - docs/operation/creds/kubernetes.md + - docs/operation/creds/conjur.md + - docs/operation/creds/id-token.md + - docs/operation/creds/caching.md + - docs/operation/creds/redacting.md + - docs/operation/creds/retrying-failed.md + - docs/operation/security-hardening.md + - docs/operation/container-placement.md + - docs/operation/opa-integration.md + - docs/operation/tuning.md + - docs/operation/global-resources.md + - docs/operation/administration.md + - docs/observation.md + - Internals: + - docs/internals/index.md + - docs/internals/checker.md + - docs/internals/scheduler.md + - docs/internals/build-tracker.md + - docs/internals/garbage-collector.md + - Examples: + - examples/index.md + - examples/hello-world.md + - examples/serial-job.md + - examples/pipeline-vars.md + - examples/set-pipeline.md + - examples/inputs-outputs.md + - examples/time-triggered.md + - examples/git-triggered.md + - examples/manually-triggered.md + - examples/job-and-task-hooks.md + - examples/golang-lib.md + - examples/rails-app.md + - examples/java-app.md + - examples/nodejs-app.md + - examples/php-app.md + - project/index.md + - ecosystem/index.md + - support/index.md + - Blog: + - blog/index.md \ No newline at end of file diff --git a/ci/build b/ci/build index a60f170d..5313ca07 100755 --- a/ci/build +++ b/ci/build @@ -2,31 +2,44 @@ set -e -x -export GOPATH=$PWD/gopath -export PATH=$PWD/gopath/bin:$PATH +apk add git git config --global user.email "concourseteam+concourse-github-bot@gmail.com" git config --global user.name "Concourse Bot" +# Change to the docs repo pushd docs - if [ -e .git ] && git remote | grep origin >/dev/null; then - # undo single-branch clone and fetch gh-pages - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" - git fetch origin gh-pages + # Install dependencies + pip 
install mkdocs-material mkdocs-redirects mkdocs-glightbox nodeenv + nodeenv env --node=22.21.1 + npm ci - ref=$(git rev-parse HEAD) - git checkout gh-pages - git pull - git merge --no-edit $ref + # Remove apps before build + rm -rf docs/libs/examples/apps/ + rm -rf docs/libs/examples/**/*.md - # clear out old content - git rm *.html || true - git rm search_index.json || true - fi + # Create site directory + npm run build - ./scripts/build + # Copy well-known for BlueSky + cp -R .well-known site/.well-known if [ -e .git ]; then + git checkout gh-pages + git rm -rf . + + rm -rf venv/ node_modules/ env/ .cache/ overrides/ docs/ .idea/ + + mv site/{.,}* . + rmdir site + + # Create CNAME file + echo -n "concourse-ci.org" > CNAME + + # Create robots.txt file + echo "User-agent: *" > robots.txt + echo -n "Disallow: /single-page.html" >> robots.txt + git add -A git commit --allow-empty -m "build" fi diff --git a/ci/build.yml b/ci/build.yml index 341f67be..59a2f3d8 100644 --- a/ci/build.yml +++ b/ci/build.yml @@ -3,17 +3,16 @@ platform: linux image_resource: type: registry-image - source: {repository: golang} + source: + repository: python + tag: 3.14-alpine3.21 inputs: -- name: docs - -caches: -- path: gopath + - name: docs outputs: -- name: built-docs - path: docs + - name: built-docs + path: docs params: ANALYTICS_ID: diff --git a/ci/prs-pipeline.yml b/ci/prs-pipeline.yml index e3be6d33..e6dd0bd5 100644 --- a/ci/prs-pipeline.yml +++ b/ci/prs-pipeline.yml @@ -1,51 +1,73 @@ --- resource_types: -- name: pull-request - type: registry-image - source: {repository: teliaoss/github-pr-resource} + - name: pull-request + type: registry-image + source: + repository: teliaoss/github-pr-resource resources: -- name: docs-master - type: git - icon: github - source: - uri: https://github.com/concourse/docs + - name: docs-master + type: git + icon: github + source: + uri: https://github.com/concourse/docs -- name: docs-pr - type: pull-request - icon: source-pull - 
source: - repository: concourse/docs - access_token: ((pull_requests_access_token)) + - name: docs-pr + type: pull-request + icon: source-pull + source: + repository: concourse/docs + access_token: ((pull_requests_access_token)) jobs: -- name: build - public: true - on_failure: - put: docs-pr - inputs: [docs-pr] - params: {path: docs-pr, status: failure, context: build} - tags: [pr] - on_success: - put: docs-pr - inputs: [docs-pr] - params: {path: docs-pr, status: success, context: build} - tags: [pr] - plan: - - in_parallel: - - get: docs-pr - trigger: true - version: every - tags: [pr] - - get: docs-master - tags: [pr] - - put: docs-pr - params: {path: docs-pr, status: pending, context: build} - tags: [pr] - - task: build - file: docs-master/ci/build.yml - input_mapping: {docs: docs-pr} - params: - GITHUB_TOKEN: ((concourse_github_dummy.access_token)) - tags: [pr] + - name: build + public: true + plan: + - in_parallel: + - get: docs-pr + trigger: true + version: every + tags: + - pr + - get: docs-master + tags: + - pr + - put: docs-pr + params: + path: docs-pr + status: pending + context: build + tags: + - pr + + - task: build + file: docs-master/ci/build.yml + input_mapping: + docs: docs-pr + params: + GITHUB_TOKEN: ((concourse_github_dummy.access_token)) + tags: + - pr + + on_failure: + put: docs-pr + inputs: + - docs-pr + params: + path: docs-pr + status: failure + context: build + tags: + - pr + + on_success: + put: docs-pr + inputs: + - docs-pr + params: + path: docs-pr + status: success + context: build + tags: + - pr diff --git a/docs/assets/favicon.ico b/docs/assets/favicon.ico new file mode 100644 index 00000000..81db1589 Binary files /dev/null and b/docs/assets/favicon.ico differ diff --git a/docs/assets/logo-white.svg b/docs/assets/logo-white.svg new file mode 100644 index 00000000..7bab9d5d --- /dev/null +++ b/docs/assets/logo-white.svg @@ -0,0 +1,22 @@ + + + + Concourse_logo_white + Created with Sketch. 
+ + + + + + + + + + + + \ No newline at end of file diff --git a/docs/blog/.authors.yml b/docs/blog/.authors.yml new file mode 100644 index 00000000..eee39838 --- /dev/null +++ b/docs/blog/.authors.yml @@ -0,0 +1,5 @@ +authors: + taylorsilva: + name: Taylor Silva + description: Maintainer + avatar: https://github.com/taylorsilva.png \ No newline at end of file diff --git a/docs/blog/index.md b/docs/blog/index.md new file mode 100644 index 00000000..c58f16c5 --- /dev/null +++ b/docs/blog/index.md @@ -0,0 +1,2 @@ +# Blog + diff --git a/docs/blog/posts/2017/2017-09-29-the-concourse-crew--2017.md b/docs/blog/posts/2017/2017-09-29-the-concourse-crew--2017.md new file mode 100644 index 00000000..d0f5860a --- /dev/null +++ b/docs/blog/posts/2017/2017-09-29-the-concourse-crew--2017.md @@ -0,0 +1,34 @@ +--- +layout: post +title: The Concourse Crew (2017) +date: 2017-09-29 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/The-Concourse-Crew--2017-/1-Q5Wx-Lltp5MDzh1rE2Yw0g.png" alt="Concourse logo" +position="center" >}} + + + +In 2014 the Concourse CI project started with just two engineers; Alex Suraci and Chris Brown. At the time, both Alex +and Chris were working on the [Pivotal](https://medium.com/u/44756b810893) Cloud Foundry team. Over time, they became +increasingly frustrated by existing CI/CD solutions. In response, Alex and Chris worked on designing a new CI/CD system +in their spare time; imagining a new type of CI/CD system that would treat pipelines as first class citizens. After +building some early prototypes and seeing early success internally within Pivotal, Concourse was released as an open +source project with sponsorship from Pivotal. + +Fast forward to 2017 and the Concourse team has grown considerably. We now have 6 full time engineers (soon to be 8), a +product manager (that’s me!) and a product designer. 
The Concourse team is distributed across two countries (US and +Canada); with a majority of the team working out of the Pivotal office in Toronto. Alex is still around and remains a +key contributor to the project. + +The Concourse open source community has grown considerably as well. We do our best to keep engaged with everyone +through [GitHub issues](https://github.com/concourse/concourse/issues), [Slack](https://concourseci.slack.com/) +and [StackOverflow](https://stackoverflow.com/questions/tagged/concourse). And now, using Medium we are going to try to +do a better job at covering the bigger topics like: the Concourse roadmap, the Concourse philosophy, and more generally +“how things work”. + +If you have any specific comments that you’d like us to cover, comment below, hit me up on Twitter (@pioverpi) or reach +out on the Concourse Slack (@jma) + diff --git a/docs/blog/posts/2017/2017-10-03-how-the-concourse-team-organize-issues.md b/docs/blog/posts/2017/2017-10-03-how-the-concourse-team-organize-issues.md new file mode 100644 index 00000000..d30c3b1a --- /dev/null +++ b/docs/blog/posts/2017/2017-10-03-how-the-concourse-team-organize-issues.md @@ -0,0 +1,132 @@ +--- +title: How the Concourse Team Organize Issues +date: 2017-10-03 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/How-the-Concourse-Team-Organize-Issues/1--19t4s8wuBf9tUCiluO_mQ.png" alt="" +width="20%" >}} + + + +As the Concourse team continues to grow in size and in the # of incoming issues, the team has been experimenting with +new ways of managing our backlog. So far we have tried three different setups: + +1. GitHub + issues + [Customs](https://github.com/vito/customs)/[Tracksuit](https://github.com/vito/tracksuit) + [Pivotal Tracker](https://www.pivotaltracker.com/) +2. GitHub issues + aggressive labelling + [CodeTree](https://codetree.com/) +3. 
**GitHub issues +** [**GitHub Projects**](https://github.com/blog/2272-introducing-projects-for-organizations) + +We’ve been using the third setup, GitHub issues + GitHub Projects, for the past few months and we’ve been mildly happy +with the experience. + +### GitHub Issues + +All issues are reported in through the [concourse/concourse](https://github.com/concourse/concourse) repo. Issues can +include community-reported bugs, feature requests, technical chores, features, etc. If you want something done against +the Concourse codebase, it gets reported there. + +Relying on a single location for all issues has the benefit of consolidating our backlogs; making it easier for the +community to submit issues and track its progress. However with over 400 open issues against Concourse, the question +becomes: _how does the team decide what to work on first?_ + +Our first approach was to simply prioritize issues that were slated for the next release and burn through the list +top-down. This naive approach became problematic for our growing team because of the incredible breadth of problems that +Concourse covers. One day an engineer could be working on [Elm](http://elm-lang.org/) and the next they would be working +on our garbage collector. Even with [pairing](https://en.wikipedia.org/wiki/Extreme_programming), new engineers found it +very frustrating to master the codebase as they were constantly context-switching through thematically different issues. + +To address this, [Alex Suraci](https://medium.com/u/263a63b2f209) bucketed our issues into five “projects”: + +- Operations +- Runtime +- Integrations +- Core +- UX + +By bucketing our issues into projects, engineers can now spend more time in thematically similar problem spaces in the +Concourse codebase. 
+ +### GitHub Projects + +{{< image src="/images/downloaded_images/How-the-Concourse-Team-Organize-Issues/1-5wA-RflsG_zFAyYMw0O95w.png" alt="A +snapshot of the Concourse UX project" width="100%">}} + +Our Concourse Projects manifest themselves as GitHub Projects in the Concourse GitHub organization. Annoyingly, GitHub +doesn’t (yet?) allow us to share these projects publicly. + +Each project has an engineering “anchor” assigned to it. The anchor is responsible for deeply understanding the issues +under that project and usually sticks on the project for a long period of time. Each project also has its own roadmap +and short-term goals. + +Every week we have an Iteration Planning Meeting (IPM) where we discuss the backlog for each project team. This is where +our team discusses what’s been done, what’s in-flight, and what the upcoming issues are for the week ahead. + +* * * + +I hope this post gives everyone in the community a bit of insight into how the Concourse team manages incoming issues +and incoming work. We’re hoping that Github announces improvements to the GitHub Projects system in their upcoming +conference, GitHub Universe. If we aren’t able to make our projects public in the near future, the Concourse team is +committed to looking into alternative tools to publicly share our roadmap. + +For those who are interested, I’ve also listed the specifics of our Five Concourse Projects below: + +## The Five Concourse Projects + +### Operations + +Ensuring Concourse is deployable and manageable in various environments, and able to meet organizations' authorization +requirements. + +Subject matter: + +- Various deployment scenarios (BOSH, binaries, Docker, Kubernetes, Windows, Darwin) +- Understanding resource demands of Concourse, both minimum requirements and "at scale" +- Systems knowledge to support and improve all of the above +- Multi-tenant operator demands (auth, inspectability) + +### Runtime + +Bring the theory to life. 
How do we go from a declarative configuration to efficiently running things across a pool of +VMs? + +Subject matter: + +- Containers: what & why, what is their “cost” +- Copy-on-write volume management +- Scheduling to most efficiently utilize a pool of VMs +- Safely managing containers/volumes/etc. across VMs without leaking resources + +### Integration + +Defining interfaces and patterns for how Concourse interacts with the real world. + +Subject matter: + +- Supporting the core set of resources +- Establishing and documenting patterns for resources, watching out for anti-patterns +- Defining the best interfaces for extending Concourse; recognizing resources alone may not be enough + +### Concourse Core + +Pushing Concourse concepts forward by distilling customer needs into abstract primitives. + +Subject matter: + +- Recognize that most feature requests are valid, but should not be implemented “as-is” +- Networking and pattern-matching; peel back the layers of the GitHub onion to identify common ground between various + issues +- Leave space for innovation and re-framing our existing concepts +- Define the REST API, and how pipelines/tasks are configured + +### UX + +The face of Concourse — web UI, fly, and user research to find the best representations of what Concourse does and how +users want to interact with it. + +Subject matter: + +- Tie Concourse’s pretentious high-level super-abstract concepts to users’ needs around automation. +- Consume Concourse’s API and ensure it doesn’t get too bogged down in specific user flows. +- Drive feedback into the rest of the project through user research. 
diff --git a/docs/blog/posts/2017/2017-10-26-build-page-improvements.md b/docs/blog/posts/2017/2017-10-26-build-page-improvements.md new file mode 100644 index 00000000..55ff8c64 --- /dev/null +++ b/docs/blog/posts/2017/2017-10-26-build-page-improvements.md @@ -0,0 +1,43 @@ +--- +title: Build Page Improvements +date: 2017-10-26 +--- + +{{< image src="/images/downloaded_images/Build-Page-Improvements/1-vjvvVZAw9nO4yRrveU0Ojg.gif" alt="GIF demoing build +page changes" width="50%" >}} + + + +[Concourse v3.6.0](https://concourse-ci.org/downloads.html#v360) comes with two new features on the build output page: +timestamps and keyboard shortcuts. + +## Timestamps and Output Sharing + +When looking at the build page, you will now see timestamps reported against each line of output using your browser’s +reported timezone. As you hover over the timestamp, you can select single line of output or you can SHIFT select +multiple lines of output. You’ll also notice that the build page URL is updated to reflect the lines you have selected. +You can use this URL to share specific build outputs with your team members. + +This feature addresses +issue [#361](https://github.com/concourse/concourse/issues/361), [#838](https://github.com/concourse/concourse/issues/838) +and [#1423](https://github.com/concourse/concourse/issues/1423). Thank you for your patience! + +## Keyboard Shortcuts + +{{< image src="/images/downloaded_images/Build-Page-Improvements/1-8-_eZ3qsDLB8Sqq5I-9vTw.png" alt="" width="50%" >}} + +The build page also supports basic vim-style keyboard shortcuts as well. You can bring up a handy reference menu using +`?` or `SHIFT + /` if you’re really having trouble finding it. 
+ +The supported keyboard shortcuts are: + +- `h` and `l` for previous / next build +- `j` and `k` for scrolling up and down +- `T` to trigger a new build +- `A` to abort the build +- `gg` to scroll back to the top of the page +- `G` to scroll to the bottom of the page +- `?` to toggle the keyboard hint on and off + +This feature closes out issue [#439](https://github.com/concourse/concourse/issues/439) + diff --git a/docs/blog/posts/2017/2017-11-01-sneak-peek--spatial-resources.md b/docs/blog/posts/2017/2017-11-01-sneak-peek--spatial-resources.md new file mode 100644 index 00000000..171d19cc --- /dev/null +++ b/docs/blog/posts/2017/2017-11-01-sneak-peek--spatial-resources.md @@ -0,0 +1,50 @@ +--- +title: 'Sneak Peek: Spatial Resources' +date: 2017-11-01 +--- + +{{< image src="/images/downloaded_images/Sneak-Peek--Spatial-Resources/1-agN3JPhVv4Fyfvp-2VQsRQ.png" alt="An early +visualization of Spatial Resources" width="100%" >}} + + + +If you’ve been paying close attention to our [issues on GitHub](https://github.com/concourse/concourse/issues/) you may +have noticed a small flurry of activity around one specific +issue: [#1707 Spike: spatial resource flows](https://github.com/concourse/concourse/issues/1707). + +### What are Spatial Resources Flows (aka Space)? + +The first reference to “spatial” resources came up in a proposal +between [Alex Suraci](https://medium.com/u/263a63b2f209) and [Christopher Hendrix](https://medium.com/u/9c1e9edb1d5e) +for Multi-branch workflows ( [#1172](https://github.com/concourse/concourse/issues/1172)). In that issue we focused +specifically on one recurring problem: it’s a real pain to deal with the Git resource when you have multiple branches +representing different streams of work. + +Over time we researched similar build paradigms and thought deeply about generalized solutions that would fit nicely +with the Concourse philosophy™: + +> Concourse makes it very easy to model changes over time. 
Resources do this for you; you point them at a given source +> of truth, and it’ll let you know everything that happened.Some workflows, however, can’t just be modeled as change over +> time. Multiple branches of a repo, or pull requests to a repository, are over _space_, not _time_. They are parallel +> pipelines, which today are hard to manage, and impossible to correlate (you cannot fan-in). +> Build matrixes are another example of wanting to run over many spaces (i.e. versions of a product). This can be done +> today, within a pipeline, but results in a massive pipeline with every combination explicitly configured... +> …(a spatial resource) introduces the ability to have arbitrary build matrixes within one pipeline, which should +> dramatically improve the experience for people testing many variations/combinations. + +### Would you like to know more? + +{{< image src="/images/downloaded_images/Sneak-Peek--Spatial-Resources/1-9vFBBZBYtFvCxsDnoMZszw.jpeg" alt="" width=" +100%" >}} + +There’s a few ways you can get involved as the Concourse team continues to build out spaces: + +1. Read the proposal for spatial resources on GitHub [here](https://github.com/concourse/concourse/issues/1707). Join in + on the discussion and ❤️ the proposal if you’re excited to see this happen! +2. We’re building out some common pipeline use cases to test out spaces. You can see our current list of proposals in + issue [#1766](https://github.com/concourse/concourse/issues/1766). If you’d like to add your own personal experiences + and pipeline use case, I’d encourage you to add your notes on the issue. +3. Spaces will introduce a lot of new abstractions into the pipeline visualization. We’re still experimenting with the + visualization of spaces in the pipeline view; and are looking for fresh new ideas on how to visualize this. 
If you + have a sketch, a doodle, or even a simple recommendation; drop us a line on + the [proposal](https://github.com/concourse/concourse/issues/1707)or on our [Slack](https://concourseci.slack.com). diff --git a/docs/blog/posts/2017/2017-12-01-concourse-at-springone-2017.md b/docs/blog/posts/2017/2017-12-01-concourse-at-springone-2017.md new file mode 100644 index 00000000..8cada953 --- /dev/null +++ b/docs/blog/posts/2017/2017-12-01-concourse-at-springone-2017.md @@ -0,0 +1,39 @@ +--- +title: Concourse at SpringOne 2017 +date: 2017-12-01 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-at-SpringOne-2017/1-JzJoM_Man8MYThde2qy7Zg.png" alt="" width=" +100%" >}} + + + +[Topher Bullock](https://medium.com/u/58876cdc2180), [Lindsay Auchinachie](https://medium.com/u/84b937bda3b6), [Alex Suraci](https://medium.com/u/263a63b2f209) +and I will be travelling to San Francisco next week to attend +the [SpringOne Platform 2017](https://springoneplatform.io/) Conference. Not only are there a lot of exciting Spring +talks this year, but there will be a lot of CI/CD talks from some amazing speakers. + +Unfortunately none of the Concourse core contribution team will be speaking at SpringOne this year; but that doesn’t +mean there’s a shortage of Concourse-related talks. 
Here’s a short list of talks from people who will be sharing their +real-world Concourse knowledge and experiences at SpringOne: + +- [Automated PCF Upgrades with Concourse](https://springoneplatform.io/sessions/automated-pcf-upgrades-with-concourse) + with Rich Ruedin from Express Scripts +- [Building Developer Pipelines with PKS, Harbor, Clair and Concourse](https://springoneplatform.io/sessions/building-developer-pipelines-with-pks-harbor-clair-and-concourse) + with Thomas Kraus and Merlin Glynn from VMWare +- [Concourse in the Real World: A Case Study in CI/CD and DevOps](https://springoneplatform.io/sessions/concourse-in-the-real-world-a-case-study-in-ci-cd-and-devops) + with Bryan Kelly and Greg Meyer from Cerner Corp +- [Ensuring Platform Security with Windows BOSH Add-ons and Runtime Config at Boeing](https://springoneplatform.io/sessions/ensuring-platform-security-with-windows-bosh-add-ons-and-runtime-config-at-boeing) + with Sheryl Maris, Brad Schaefbauer and James Coppock from Boeing +- [Enterprise CI/CD — Scaling the Build Pipeline at Home Depot](https://springoneplatform.io/sessions/enterprise-ci-cd-scaling-the-build-pipeline-at-home-depot) + with Matt MacKenny from The Home Depot +- [How to Continuously Deliver Your Platform with Concourse](https://springoneplatform.io/sessions/how-to-continuously-deliver-your-platform-with-concourse) + with Brian Kirland from Verizon and [Ryan Pei](https://medium.com/u/c6c6db72dbb) from Pivotal + +You can find a full list of sessions on the SpringOne sessions page [here](https://springoneplatform.io/sessions). Be +sure to check out the DevOps, CI, and CD sessions! + +Looking forward to seeing y’all there! 
+ diff --git a/docs/blog/posts/2018/2018-02-02-concourse-updates--jan-29---feb-2--2018-.md b/docs/blog/posts/2018/2018-02-02-concourse-updates--jan-29---feb-2--2018-.md new file mode 100644 index 00000000..9107ce5d --- /dev/null +++ b/docs/blog/posts/2018/2018-02-02-concourse-updates--jan-29---feb-2--2018-.md @@ -0,0 +1,56 @@ +--- +title: Concourse Updates (Jan 29 — Feb 2, 2018) +date: 2018-02-02 +categories: +- product-update +--- + +As a Product Manager at Pivotal, one of my responsibilities is to write weekly updates to let Pivots know what the +Concourse team has been up to for the past week. When the Concourse team got together earlier this month for our 2018 +planning, we decided that we should be sharing these updates with our community as a whole. So, without further ado, +here’s our first update of 2018! + + + +## Features + +**UX** + +- Fixed [https://github.com/concourse/concourse/issues/1978](https://github.com/concourse/concourse/issues/1978) +- We gave a shot at doing lazy-loading and pagination of builds, but it didn’t work very well. Reverting in lieu of some + more UX research on that + page [https://github.com/concourse/concourse/issues/1855](https://github.com/concourse/concourse/issues/1855) + +**Core** + +- Currently looking for additional feedback on use-cases for Spatial Resources. If you have an opinion on this, **PLEASE + ** jump on this issue and + comment: [https://github.com/concourse/concourse/issues/1766](https://github.com/concourse/concourse/issues/1766) +- Continued work on refactoring auth providers model in preparation for Users in Concourse. 
+ See [https://github.com/concourse/concourse/issues/1991](https://github.com/concourse/concourse/issues/1991) + and [https://github.com/concourse/concourse/issues/1888](https://github.com/concourse/concourse/issues/1888) + +**Runtime** + +- Wrapped up work on “Bind-mount certs to resource containers at + `/etc/ssl/certs`"[https://github.com/concourse/concourse/issues/1938](https://github.com/concourse/concourse/issues/1938). + This was a tough one. Look forward to a post from [Topher Bullock](https://medium.com/u/58876cdc2180) explaining some + of the nuances behind this implementation + +## Design Research + +- [Lindsay Auchinachie](https://medium.com/u/d3a12206d051) and [Sam Peinado](https://medium.com/u/8a529ac5b818) mocked + up a new “High Density” view of the Concourse + dashboard ([https://github.com/concourse/concourse/issues/1899](https://github.com/concourse/concourse/issues/1899)). + This new design would be an add-on to the current beta dashboard, and would be activated using a toggle in the status + bar. +- The design team is also beginning to research new designs to support adding comments to the current build page +- We’re also beginning to work on new designs for the [http://concourse.ci/](http://concourse.ci/) homepage! New year, + new look! + +## Feedback + +This is our first time posting updates publicly like this, so please let us know if they’re helpful by giving us a +“clap” or by responding to the story below! We also plan to announce a new portal where community members can follow +along with our progress in the coming weeks, so look forward to more information coming your way! 
+ diff --git a/docs/blog/posts/2018/2018-02-09-concourse-updates--feb-5---feb9-.md b/docs/blog/posts/2018/2018-02-09-concourse-updates--feb-5---feb9-.md new file mode 100644 index 00000000..0c7de190 --- /dev/null +++ b/docs/blog/posts/2018/2018-02-09-concourse-updates--feb-5---feb9-.md @@ -0,0 +1,43 @@ +--- +layout: post +title: Concourse Updates (Feb 5 — Feb 9) +date: 2018-02-09 +categories: + - product-update +--- + +We spent some time this week wrapping up additional testing on +our [certs management across workers](https://github.com/concourse/concourse/issues/1938). We also put down some of our work +on Spaces this week to play around with something fun: a high density dashboard view. A lot of you have been asking us +when Concourse v3.9.0 will be available, and the answer is: very soon! + + + +On to the update: + +## **Features** + +**UX** + +- High Density View! Original Git issue [#1899](https://github.com/concourse/concourse/issues/1899) and a demo version + of it up and running can be found in our [production environment](https://ci.concourse-ci.org/dashboard/hd) +- Merged PR [#227](https://github.com/concourse/atc/pull/227), thanks for the + contribution [SwamWithTurtles](https://github.com/SwamWithTurtles)! + +**Runtime** + +- Picked up [#2016 Move TSA beacon operations to ‘worker’](https://github.com/concourse/concourse/issues/2016). 
We need + to do this to fix some nasty behaviour we’ve observed in our large scale Concourse + installation [Pivotal](https://medium.com/u/44756b810893) + +**Core** + +- Continued breaking out our backend auth systems to use + Dex [#1888](https://github.com/concourse/concourse/issues/1888), ( + see [#1886](https://github.com/concourse/concourse/issues/1886)for additional background) + +## Design Research + +- Did some research and prototypes to see what build page commenting would look like and how it would behave in + Concourse +- Continuing some design prototyping for Concourse brand assets diff --git a/docs/blog/posts/2018/2018-02-16-concourse-update--feb-12-16-.md b/docs/blog/posts/2018/2018-02-16-concourse-update--feb-12-16-.md new file mode 100644 index 00000000..ec4b2421 --- /dev/null +++ b/docs/blog/posts/2018/2018-02-16-concourse-update--feb-12-16-.md @@ -0,0 +1,52 @@ +--- +layout: post +title: Concourse Update (Feb 12–16) +date: 2018-02-16 +categories: + - product-update +--- + +If you haven’t heard the news by now, we released Concourse v3.9.0 this week 🎉🎉🎉! Two of the top-line features in this +release are: + + + +- Concourse will now automatically propagate certificates from the worker machine into resource containers (GH + issue [#1027](http://github.com/concourse/concourse/issues/1027)) +- Improved btrfs volume driver stability. So if you’re getting hit hard by overlay weirdness, I’d suggest you give the + btrfs driver another shot! + +To find out what else we’ve packed into this release, I’d encourage you to read the full release notes on +the [concourse.ci/downloads](https://concourse-ci.org/downloads.html#v390) page! + +On to the update... + +## **Features** + +**UX** + +- Started to look at slow page-load times on the web-ui. The team identified that a large source of the pain came when + we introduced timestamps last year. 
We’ve since been able to drastically improve the load times on that + page [GH issue 1912](https://github.com/concourse/concourse/issues/1912) + +**Runtime** + +- As I mentioned last week, the Concourse team runs a relatively large installation of Concourse that is used by Pivotal + employees for internal projects. As a result of running this giant Concourse, we’ve discovered that our Garbage + Collector needs significant improvement in order to keep up with the workloads that we’ve been observing. GH + issue [#2016](https://github.com/concourse/concourse/issues/2016) has been consuming a lot of our thoughts and + feelings this week. + +**Core** + +- Same as last week: continued breaking out our backend auth systems to use + Dex [#1888](https://github.com/concourse/concourse/issues/1888), ( + see [#1886](https://github.com/concourse/concourse/issues/1886) for additional background) +- ^^ Refactoring our complex backend to support individual auth is going to take some time, and we recognize that :) + +## Design Research + +We’ve picked up design research work on Spatial Resources +again. [Lindsay Auchinachie](https://medium.com/u/d3a12206d051) and [Sam Peinado](https://medium.com/u/8a529ac5b818) are +currently exploring different ways to visualize the (potentially) dense permutations and combinations of work. + diff --git a/docs/blog/posts/2018/2018-02-23-concourse-update--feb-20-23-.md b/docs/blog/posts/2018/2018-02-23-concourse-update--feb-20-23-.md new file mode 100644 index 00000000..53dc8453 --- /dev/null +++ b/docs/blog/posts/2018/2018-02-23-concourse-update--feb-20-23-.md @@ -0,0 +1,40 @@ +--- +layout: post +title: Concourse Update (Feb 20–23) +date: 2018-02-23 +categories: + - product-update +--- + +Monday, Feb 19 was [Family Day](https://en.wikipedia.org/wiki/Family_Day_%28Canada%29) for us here in Canada, so it’s +been a relatively short work week for the Concourse team. 
With the release of v3.9.0 last week, we’ve gotten some +reports of new bugs and issues, so thanks to everyone who reported them in via our +GitHub [issues](https://github.com/concourse/concourse/issues) and Slack. Please make sure to check the updated release +notes ([here](https://concourse-ci.org/downloads.html#v390)) for the full details! We’re planning to cut a new patch +release early next week with some of the fixes to the reported issues. + + + +On to the update: + +## Features + +**UX** + +- We fixed issue #[1912](https://github.com/concourse/concourse/issues/1912) (slow build page due to timestamps)! The fix + for this should be rolled into the next patch release as well +- Started working on search hint and autocomplete on the Concourse + Dashboard [#1713](https://github.com/concourse/concourse/issues/1713) +- Tried adding buffering to fly outputs, it didn’t help [#1912](https://github.com/concourse/concourse/pull/1912) + +**Core** + +- Fixed an issue with noisy logging from skymarshall by lowering the log + level [#2044](https://github.com/concourse/concourse/pull/2044) +- SURPRISE: we’re still refactoring our backend to support users + +**Operations** + +- Pulled in PR [#2030](https://github.com/concourse/concourse/pull/2030) so we could fix the BOSH deployment issue where + the ATC will fail due to function esc not being defined [#2029](https://github.com/concourse/concourse/pull/2030) +- Fixed a CredHub integration bug [#2034](https://github.com/concourse/concourse/pull/2034) diff --git a/docs/blog/posts/2018/2018-03-02-concourse-update--feb-26---mar2-.md b/docs/blog/posts/2018/2018-03-02-concourse-update--feb-26---mar2-.md new file mode 100644 index 00000000..2f373103 --- /dev/null +++ b/docs/blog/posts/2018/2018-03-02-concourse-update--feb-26---mar2-.md @@ -0,0 +1,16 @@ +--- +layout: post +title: Concourse Update (Feb 26 — Mar 2) +date: 2018-03-02 +categories: +- 
product-update +--- + +After some wrestling with our production pipelines last week we managed to release a patch update in the form +of [Concourse v3.9.1](https://concourse-ci.org/downloads.html#v391). We’ve fixed some of the reported bugs from the +previous release (3.9.0) so definitely go and check it out! + +I don’t have much else regarding updates this week so here’s a fun fact for you to chew on: did you know that Concourse +uses Concourse to deploy Concourse? It’s true! You can check out our publishing pipelines +here: [https://ci.concourse-ci.org/teams/main/pipelines/main?groups=publish](https://ci.concourse-ci.org/teams/main/pipelines/main?groups=publish) + diff --git a/docs/blog/posts/2018/2018-03-08-we-re-switchin--domains-.md b/docs/blog/posts/2018/2018-03-08-we-re-switchin--domains-.md new file mode 100644 index 00000000..61308252 --- /dev/null +++ b/docs/blog/posts/2018/2018-03-08-we-re-switchin--domains-.md @@ -0,0 +1,55 @@ +--- +title: We’re switchin’ domains. +date: 2018-03-08 +--- + +**(UPDATE March 9 @ ~10 AM: The old domain appears to now be hosting a very old snapshot of our website. This is either +targeted or part of a phishing scam. Do not go to it.)** + + + +Well, that sucked. + +Wednesday morning I woke up to a ton of messages because Concourse’s site was gone, and in its place was a blank domain +registrar placeholder. + +Before you say anything, I totally remembered to renew the domain. It was due to expire in August. Not even close to +expiring. + +As far as I can tell, our registrar just didn’t do the one thing it’s supposed to do: renew the damned domain. They took +the money, bumped the expiry date on the website, and…apparently stopped there. They had literally one job and they +didn’t do it. 
+ +{{< youtube src="https://www.youtube.com/embed/4T2GmGSNvaM?start=39" >}} + +So some Joe Schmoe out of Macedonia went ahead and registered it somewhere else, presumably to act as part of some spam +network (the only thing set up were MX records). We contacted the new registrar’s abuse email and they basically told us +that the domain was registered normally, not transferred, and must have been available. And that there is nothing they +can do. + +I contacted our registrar, and the latest word is this: + +> We are contacting Domain Authorities in Ivory Coast to know more about this. I will contact you back as soon as +> possible. + +Soooo at this point I’m calling the domain a loss. I’m giving up pretty easily here for a reason: the .ci TLD is under +the authority of the Ivory Coast and has next to zero legitimate registrars willing to reserve domains for it. I could +tell from day one that my registrar was hot garbage, but didn’t find any other choices. + +It seems like the registrar messed up so badly that there’s not much leverage for getting it back. Even if I could get +it back, I don’t really want to deal with something like this again in the future. Luckily, before we got too big I went +ahead and registered concourse-ci.org, .net, and .com in case something like this happened. + +So here’s our new home: [https://concourse-ci.org](https://concourse-ci.org) + +I’d already been considering this switch for a while (anticipating trouble with .ci), but a more graceful transition +would have been nice. Unfortunately there is a ton of material pointing to the old website, and it’ll probably take time +for the new location to bubble up in Google search results. + +I want to highlight that this doesn’t seem to have been a targeted attack, but that you should be careful to not +accidentally go to the old domain or send any traffic or emails there. 
It may not be a targeted attack, but the new +owner still has full control over it, and they’re receiving a bunch of free traffic. I wouldn’t be surprised if they +wised up and pulled something nefarious. + +We really appreciate the support y’all have shown us, and all the folks who offered to help. Sorry for the trouble. + diff --git a/docs/blog/posts/2018/2018-03-09-concourse-update--mar-5-9-.md b/docs/blog/posts/2018/2018-03-09-concourse-update--mar-5-9-.md new file mode 100644 index 00000000..75501636 --- /dev/null +++ b/docs/blog/posts/2018/2018-03-09-concourse-update--mar-5-9-.md @@ -0,0 +1,48 @@ +--- +title: Concourse Update (Mar 5–9) +date: 2018-03-09 +categories: +- product-update +--- + +Whelp, that felt like a long week. If you haven’t heard the news by now you should definitely +read [Alex Suraci](https://medium.com/u/263a63b2f209)’s +post [regarding our domain](https://medium.com/concourse-ci/were-switchin-domains-5597dcd0b48b). + + + +I want to take this time to thank the Concourse fans out there who offered their help, support, and positive vibes +throughout the whole ordeal. The team here really appreciates it 🙏 + +Luckily for us this event didn’t consume our entire engineering team. We WERE able to get some issues resolved this week +and are planning for an imminent release of Concourse 3.9.2 + +On to the update: + +**UX:** + +- Resolved [#1841](https://github.com/concourse/concourse/issues/1841) “ANSI cursor escapees wreak havoc with Concourse + build output” +- Resolved [#1999](https://github.com/concourse/concourse/issues/1999) where buttons weren’t working on the build page + when using Firefox +- Experimented with adding scroll effects to pipeline names in the + Dashboard [#2026](https://github.com/concourse/concourse/issues/2026). It was hilarious and many jokes + were made. 
+- Brought back the /dashboard route to app [#2051](https://github.com/concourse/concourse/issues/2051), which should make + you be able to login again [#1801](https://github.com/concourse/concourse/issues/1801) + +**Docker Image Resource** + +- Fixed [#170](https://github.com/concourse/docker-image-resource/issues/170). According to GitHub, I’m the original + author of that, but I honestly can’t remember writing it. I _do_ remember that it was supposed to help a Concourse + user, so hurrah! + +**Runtime** + +- Resolved [#2031](https://github.com/concourse/concourse/issues/2031) “cannot\_invalidate\_during\_initialization + constraint bubbles up to the user” +- Resolved [#2059](https://github.com/concourse/concourse/issues/2059) + and [#2058](https://github.com/concourse/concourse/issues/2058), two similar issues that influenced our decision to + make a 3.9.2 patch release +- Resolved [#1499](https://github.com/concourse/concourse/issues/1499), tasks occasionally failing when interacting with + Vault diff --git a/docs/blog/posts/2018/2018-03-16-concourse-update--mar-12-16-.md b/docs/blog/posts/2018/2018-03-16-concourse-update--mar-12-16-.md new file mode 100644 index 00000000..57ed972a --- /dev/null +++ b/docs/blog/posts/2018/2018-03-16-concourse-update--mar-12-16-.md @@ -0,0 +1,46 @@ +--- +title: Concourse Update (Mar 12–16) +date: 2018-03-16 +categories: + - product-update +--- + +It’s been the first week in a long time where we were back to full strength and fully co-located. It was nice! 
+ + + +Oh, and Concourse v3.9.2 was released this week as well, [check it out!](https://concourse-ci.org/downloads.html#v392) + +On to our update: + +**UX** + +- Started to make our dashboard a bit more mobile friendly [#1712](https://github.com/concourse/concourse/issues/1712) +- Started to tackle the problem where our dashboard holds too many open + connections [#1806](https://github.com/concourse/concourse/issues/1806) + +**Core** + +- Removed a dependency to provide an external URL for fly + execute [#2069](https://github.com/concourse/concourse/issues/2069). To + quote [Alex Suraci](https://medium.com/u/263a63b2f209): + +> “It also makes the ‘getting started with Docker’ flow a bit complicated on platforms like Darwin where Docker is +> actually run via a Linux VM, in a separate namespace. fly execute can't be made to automatically work; the container IP +> would probably work for fly execute but isn't really what they should be setting as the external URL (as they can't +> reach it from their own machine).” + +- Continued our refactoring of Concourse APIs to support multiple teams + +**Runtime** + +- Completed work on [#2070](https://github.com/concourse/concourse/issues/2070), making it so that workers can retry + against multiple TSAs +- Picked up a reported issue around custom resources on tagged + workers [#1371](https://github.com/concourse/concourse/issues/1371). We’re not quite sure how it got so bad + +**Docs** + +We’ve been working on a new website! The focus of it is to make it less flashy, less marketing, and more content driven. 
+The design of it is still in progress but we want to start sharing it out very soon 👍 + diff --git a/docs/blog/posts/2018/2018-03-23-concourse-update--april-19-23-.md b/docs/blog/posts/2018/2018-03-23-concourse-update--april-19-23-.md new file mode 100644 index 00000000..bab282fd --- /dev/null +++ b/docs/blog/posts/2018/2018-03-23-concourse-update--april-19-23-.md @@ -0,0 +1,45 @@ +--- +title: Concourse Update (April 19–23) +date: 2018-03-23 +categories: + - product-update +--- + +Hi folks, + +Had an interesting week talking to customers about how we might improve their Concourse operations and deployments. More +info on that soon! + + + +On to the update: + +**UX** + +- Fixed an issue with timestamps [#2088](https://github.com/concourse/concourse/issues/2088) + +**Core** + +- Continued our refactoring of the API to support dex and + users [#1888](https://github.com/concourse/concourse/issues/1888) + +**Runtime** + +- Finished the issue around custom resources on tagged workers, it should work + now [#1371](https://github.com/concourse/concourse/issues/1371) +- Restricted the list of allowed TLS ciphers for more security + checkboxing [#1997](https://github.com/concourse/concourse/issues/1997) + +**Design** + +And now, some words from our Product Design team: + +> Lindsay Auchinachie and Sam Peinado are continuing work on the Space and Causality features in Concourse.The Space +> features give users the ability to have arbitrary build matrixes within one pipeline and of a resource to solve for the +> pain around people testing many variations/combinations. Causality allows users a view into what is going through the +> pipeline, and how far it has made it through the pipeline. Read the proposals for Spatial Resource Flows and Resource +> Causality Flow on GitHub. +> This week we shared out our Space framing and talked through feasibility and technical constraints of this with the +> engineering team. 
Started work on a small Invision with the current Ruby and Git use case. Pairing with engineering next +> week to define additional more complex uses cases for coding a prototype with real pipeline. + diff --git a/docs/blog/posts/2018/2018-03-29-a-renewed-focus---community-changes.md b/docs/blog/posts/2018/2018-03-29-a-renewed-focus---community-changes.md new file mode 100644 index 00000000..61935b74 --- /dev/null +++ b/docs/blog/posts/2018/2018-03-29-a-renewed-focus---community-changes.md @@ -0,0 +1,83 @@ +--- +layout: post +title: A renewed focus & community changes +date: 2018-03-29 +--- + +Phew, we’ve been busy for the past couple of months! There’s a lot to give y’all an update on. + + + +## New website + +First off, [check out our new website](https://concourse-ci.org)! We’ve completely redesigned it and redone how we +organize the documentation, in hopes that it’ll be much easier to find what you’re looking for. + +We also hope that the new style, language, and tone will feel a bit more inclusive and humble. For example, we got rid +of the “Concourse vs.” section — the effort it took to keep that up-to-date is better spent elsewhere. Use whatever dang +tool you want! Our old site, as pretty as it was, felt a bit too much like we were trying to sell a finished product. + +We’ve added an [“About” page](https://concourse-ci.org/about.html) which provides all the background and motivation you +should need to get a good idea of who we are and what we’re about. There’s also +a [“Contribute” section](https://concourse-ci.org/contribute.html) which contains reference material for developers as +well as general guidance. We’re also fleshing out an [“Operation” section](https://concourse-ci.org/operation.html) +which should help out those who are deploying Concourse for the first time or managing it at scale. + +In addition to these new sections, we’ve also consolidated many pages and simplified the organization. 
There are now +top-level sections for all the “things” you’ll be working with (Pipelines, Tasks, etc.), and each section contains the +schema right up-front with examples to the side. This should make the docs much more effective when used as a reference. + +Search is back, and we’ve made a lot better than it was before its unceremonious removal. Try searching “imgrespar” and +you’ll find image\_resource.params. It’s not full-text, but there’s always Google for that. I tried but it’s pretty slow +and janky. + +## Community platform changes + +Along with the new site, we’re changing a few things in an effort to foster a healthier, more collaborative community: + +- [There’s a new community forum!](https://discuss.concourse-ci.org/t/welcome-to-the-concourse-community/35) This will + be a much better format for support, long-form discussion, announcing cool new resource types, and whatever else y’all + want to talk about. +- [We’re switching from Slack to Discord!](https://discuss.concourse-ci.org/t/join-us-in-discord/34) We hope to have + this new chat platform be an organized place for contributors to have meaningful discussions, rather than a firehose + of help requests. There’s still a #need-help channel, but we’d prefer if most support went through the forums instead, + as persistent threads are much easier to keep tabs on and are much easier to find in Google search results. +- [We’ve got a publicly visible roadmap!](https://project.concourse-ci.org) This is thanks to a tool + called [Cadet](https://github.com/vito/cadet), which provides visibility into each of our GitHub projects (which are + normally hidden on GitHub). It also provides a networked view of issues and PRs that helps us identify the “boulders” + vs. the “pebbles” when it comes to understanding problem spaces to tackle. + +## Simpler deployment + +We’ve coordinated all this with the launch of 3.10.0, which simplifies how Concourse is deployed. 
We’ve made it easier +to spin up a single-instance Concourse via the quickstart command, which we’re in turn using for the quick intro on the +front page, via Docker Compose. We also no longer require you to configure an external URL (which was the main obstacle +in the way of a single-command intro). + +Instead of documenting four different deployment methods (and scaring away people in the process), we’re focusing on the +concourse binary distribution as the _lingua franca_ on the main site. It’s the most general and assumes the least about +how you want to deploy it. For platform-specific documentation, each GitHub repo will be the source of truth: + +- [Concourse BOSH Deployment](https://github.com/concourse/concourse-bosh-deployment) +- [Concourse Docker](https://github.com/concourse/concourse-docker) +- [Concourse Helm Chart](https://github.com/kubernetes/charts/tree/master/stable/concourse) (official soon) + +These repos are linked to by the [“Download” page](https://concourse-ci.org/download.html) as their own platform +alongside the binaries, so they should feel just as official, while not feeling like a necessary mental hurdle for +beginners. + +## HALP + +Lastly, I want to apologize for the recent slowdown in processing pull requests. I’ve been pretty focused on getting all +this out there, and it’s definitely taken away from my other duties. + +I hope that with our continued focus on community building in 2018, more of these responsibilities can be shared among a +broader, stronger network of contributors. If you’re interested in stepping up and helping out in a meaningful way, let +us know early and we can help! That’s part of the reason for introducing Discord and the forums. + +We’re still figuring things out, and hope to provide more structure to the contribution process for those who need it, +but a conversation is a great start. + +As always, thanks everyone for your patience and support. 
+ +Alex diff --git a/docs/blog/posts/2018/2018-04-06-concourse-updates--april-2-6-.md b/docs/blog/posts/2018/2018-04-06-concourse-updates--april-2-6-.md new file mode 100644 index 00000000..8fab9169 --- /dev/null +++ b/docs/blog/posts/2018/2018-04-06-concourse-updates--april-2-6-.md @@ -0,0 +1,41 @@ +--- +title: Concourse Updates (April 2–6) +date: 2018-04-06 +categories: + - product-update +--- + +If you haven’t done so already please check out [Alex Suraci](https://medium.com/u/263a63b2f209)’s recent update post +on “[A renewed focus & community changes](https://medium.com/concourse-ci/a-renewed-focus-community-changes-cbab8200ea05)”. +It covers all the recent changes that we’ve been making; starting with the new styling of +the [website](https://concourse-ci.org/), our new [discussion forum](https://discuss.concourse-ci.org/), and our +migration to [Discord chat](https://discordapp.com/invite/MeRxXKW). + + + +Specifically, we’ve been getting some mixed feedback on the new format of the site. Some folks love it, other folks miss +the highly visual styling of the old site. As always, the Concourse team is always open to hearing your feedback in the +usual channels. If you’d like, you can even open issues against the docs repo +itself [here](https://github.com/concourse/docs/issues). + +And now, on to the update: + +**UX:** + +- Finished up [#1806](https://github.com/concourse/concourse/issues/1806) where our dashboard keeps spamming the ATC and + the db with connection requests. + +**Core** + +- Started to spike on the spatial resource visualization, you can follow along + at [#2131](https://github.com/concourse/concourse/issues/2131) + +**Runtime** + +- Tackling the large story on adding batch volume & container deletion to a worker [#2109](http://2109) + +**PRs** + +Apologies to everyone who’s been waiting for feedback on their PRs. 
[Alex Suraci](https://medium.com/u/263a63b2f209) has +been working down the list this week; so we’re slowly making our way down the list and merging them in. + diff --git a/docs/blog/posts/2018/2018-04-13-concourse-update--april-9-13-.md b/docs/blog/posts/2018/2018-04-13-concourse-update--april-9-13-.md new file mode 100644 index 00000000..ba9bc427 --- /dev/null +++ b/docs/blog/posts/2018/2018-04-13-concourse-update--april-9-13-.md @@ -0,0 +1,34 @@ +--- +layout: post +title: Concourse Update (April 9–13) +date: 2018-04-13 +categories: + - product-update +--- + +Concourse v3.11.0 came out today! Go get +it: [https://concourse-ci.org/download.html#v3110](https://concourse-ci.org/download.html#v3110) + + + +On another note: I’ve noticed some interesting articles and guides come out on writing custom resources for Concourse. +There’s one on the Pivotal +blog [https://content.pivotal.io/blog/developing-a-custom-concourse-resource](https://content.pivotal.io/blog/developing-a-custom-concourse-resource) +and another from fellow Medium +writer [Shin Myung Yoon](https://medium.com/u/d58b0a9a1e17) ([https://itnext.io/writing-a-custom-resource-for-concourse-detecting-pull-request-close-merge-events-e40468eb2a81](https://itnext.io/writing-a-custom-resource-for-concourse-detecting-pull-request-close-merge-events-e40468eb2a81))! + +[Topher Bullock](https://medium.com/u/58876cdc2180) and I will also be travelling to Boston next week +for [CF Summit 2018](https://www.cloudfoundry.org/event/nasummit2018/). We’ll be around to meet some Pivotal PCF +customers and answer questions about Concourse. Make sure you visit some of the awesome talks on Concourse as well. 
You +can find some articles about it on the Cloud Foundry blog: + +- [https://www.cloudfoundry.org/blog/5-cloud-foundry-summit-sessions-developers-build-cicd-practice/](https://www.cloudfoundry.org/blog/5-cloud-foundry-summit-sessions-developers-build-cicd-practice/) +- [https://content.pivotal.io/blog/dialing-platform-ops-to-eleven-5-cloud-foundry-summit-sessions-that-operators-shouldnt-miss?\_lrsc=96881456-a782-4c9e-999a-3be986b65b16&utm\_source=employee-social&utm\_medium=twitter&utm\_campaign=employee\_advocacy](https://content.pivotal.io/blog/dialing-platform-ops-to-eleven-5-cloud-foundry-summit-sessions-that-operators-shouldnt-miss?_lrsc=96881456-a782-4c9e-999a-3be986b65b16&utm_source=employee-social&utm_medium=twitter&utm_campaign=employee_advocacy) + +Fun fact: we got some new stickers printed up and we’ll be handing some out at Summit: + +{{< image src="/images/downloaded_images/Concourse-Update--April-9-13-/1-cS-JeBD00f0h7vhlpYTE7w.png" alt="" width=" +20%" >}} + +{{< image src="/images/downloaded_images/Concourse-Update--April-9-13-/1-thrSlXnAzYewzqirHjukWA.png" alt="Whee!" width=" +20%" >}} \ No newline at end of file diff --git a/docs/blog/posts/2018/2018-04-27-concourse-update--april-23-27-.md b/docs/blog/posts/2018/2018-04-27-concourse-update--april-23-27-.md new file mode 100644 index 00000000..70b9cca8 --- /dev/null +++ b/docs/blog/posts/2018/2018-04-27-concourse-update--april-23-27-.md @@ -0,0 +1,64 @@ +--- +title: Concourse Update (April 23–27) +date: 2018-04-27 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--April-23-27-/1-1T4dM1zWpCx5NvHFnhK2Lw.jpeg" alt="" width=" +100%" >}} + + + +Well, that was fun! [Topher Bullock](https://medium.com/u/58876cdc2180) absolutely _killed it_ last week on the CF +Summit 2018 main stage with his demo of the experimental Concourse ❤ ️K8s runtime project. 
We also had a great time +talking to companies who were using Concourse to continuously do things in the cloud. One of my favorite talks was from +Jason Immerman and Derek Van Assche from +Zipcar ([Concourse All the Things, All the Time](https://cfna18.sched.com/event/DdZz/concourse-all-of-the-things-at-all-times-jason-immerman-zipcar-derek-van-assche-hs2-solutions?iframe=no&w=&sidebar=yes&bg=no)); +really inspirational stuff! + +Now, on to the update. + +We released [Concourse v3.12.0](https://concourse-ci.org/download.html#v3120) earlier this week. As usual it contains a +lot of new improvements; but notably this release fixes the earlier memory leak reported in v3.11.0. A few things to +highlight: + +- We’ve been doing some work behind the scenes to improve our GC behaviour on workers. To do this we’ve started work on + distributing container/volume garbage-collection across + workers ([#1959](https://github.com/concourse/concourse/issues/1959)). This release has the early signs of this work, + and we expect it to be done in just a few more weeks (tm). However, this DOES mean that you’ll need to open up port + 7799 on the worker in order to have workers behave properly with v3.12.0 (please see release notes for more details!) +- We also pulled in a change that made the git tag fetch behavior toggle-able…a lot of folks were hit by that so thanks + to GH user [mdomke](https://github.com/mdomke) for the quick change! +- We think we finally hunted down some weird UI issues where certain versions of Chrome / Safari didn’t let you click + into jobs. Let us know how that goes for ya’ll when you upgrade! 
+ +This week we’ve been cranking away at three key areas: + +- Working on the visualization to spatial + resources [https://github.com/concourse/concourse/issues/2131](https://github.com/concourse/concourse/issues/2131) +- Building out Users in Concourse with + dex [https://github.com/concourse/concourse/issues/1888](https://github.com/concourse/concourse/issues/1888) +- Distributing GC across + workers: [https://github.com/concourse/concourse/issues/1959](https://github.com/concourse/concourse/issues/1959) + +On the design front, our team has been working on something that we call “design snacks”; minor changes to the UI that +could make big improvements to the overall experience with the app. Given our current tracks of work, we may not be able +to pick them up right away, but at least the designs are attached to issues for contributors to pick up; if they felt so +inclined :D + +A few examples: + +- Concourse pipeline groups should be + responsive [https://github.com/concourse/concourse/issues/2130](https://github.com/concourse/concourse/issues/2130) +- Breadcrumb on Nav + Bar [https://github.com/concourse/concourse/issues/2139](https://github.com/concourse/concourse/issues/2139) +- Paused jobs should indicate they’re paused on the build page(scroll to the bottom for that + one [https://github.com/concourse/concourse/issues/1915](https://github.com/concourse/concourse/issues/1915) +- Build-level + commenting [https://github.com/concourse/concourse/issues/2025](https://github.com/concourse/concourse/issues/2025) + +As always, feel free to jump into the discussion on our +forum ([https://discuss.concourse-ci.org/](https://discuss.concourse-ci.org/)) or on +Discord ([https://discord.gg/MeRxXKW](https://discord.gg/MeRxXKW)) + diff --git a/docs/blog/posts/2018/2018-05-04-concourse-update--april-30---may-4-.md 
b/docs/blog/posts/2018/2018-05-04-concourse-update--april-30---may-4-.md new file mode 100644 index 00000000..0ad8f4bb --- /dev/null +++ b/docs/blog/posts/2018/2018-05-04-concourse-update--april-30---may-4-.md @@ -0,0 +1,48 @@ +--- +title: Concourse Update (April 30 — May 4) +date: 2018-05-04 +categories: + - product-update +--- + +I’ve gotten some questions about Freedom Friday from some readers after last week’s update. Well it turns out +that [Topher Bullock](https://medium.com/u/58876cdc2180) wrote a great article about it this week; you can read up on it +here: [https://medium.com/concourse-ci/freedom-fridays-319204dea834](https://medium.com/concourse-ci/freedom-fridays-319204dea834) + + + +We also released [Concourse v3.13.0](https://concourse-ci.org/download.html) earlier this week. Make sure you check it +out if you were hit by the accumulating logs issue introduced in v3.12.0. + +On to the update: + +## **Space** + +We’ve been building out some of the frontend code for representing Spaces as part +of [#2131](https://github.com/concourse/concourse/issues/2131). You can see some of the early visualizations below: + +{{< image src="/images/downloaded_images/Concourse-Update--April-30---May-4-/1-K13pFduQtcsPeX3VH6crQQ.png" alt="" +width="100%" >}} +{{< image src="/images/downloaded_images/Concourse-Update--April-30---May-4-/1-_ndF5rSNwVlKJWTj2_vxUQ.png" alt="" +width="100%" >}} +{{< image src="/images/downloaded_images/Concourse-Update--April-30---May-4-/1-kBELwDyhYQwPchw7J-O0eQ.png" alt="" +width="100%" >}} + +We now have the capability of testing Space end-to-end i.e. write the yml -\> fly sp -\> check out the web +visualization. + +EXCITING + +## Distributed GC on Workers + +We’ve been hacking away on master issue [#1959](https://github.com/concourse/concourse/issues/1959) for distributed GC. +If you’ve been following along closely you’ll notice that the number of boxes that we’ve checked has increased…and +that’s a good thing!
We’re in the final stretches of this work and will be prepping to test them in our internal +Concourse “Wings” very soon + +## User Auth + +As always, we continue to work on our User Auth master +issue [#1888](https://github.com/concourse/concourse/issues/1888). We’ve now transitioned into building out specific +auth connectors using the [dex](https://github.com/coreos/dex) library. We’ve completed the GitHub and CF connectors, +and are currently working on the generic OAuth provider diff --git a/docs/blog/posts/2018/2018-05-11-concourse-update--may-7-11-.md b/docs/blog/posts/2018/2018-05-11-concourse-update--may-7-11-.md new file mode 100644 index 00000000..85078151 --- /dev/null +++ b/docs/blog/posts/2018/2018-05-11-concourse-update--may-7-11-.md @@ -0,0 +1,23 @@ +--- +title: Concourse Update (May 7–11) +date: 2018-05-11 +categories: + - product-update +--- + +Hi folks, + +[Joshua Winters](https://medium.com/u/d6d52be6c4b0) has spent a lot of time refactoring Concourse so that it can finally +support Users. We’re finally at a point where we can share some our work with you, so I’d really encourage you to check +out his recent blog post [Oh, Auth](https://medium.com/concourse-ci/oh-auth-f4fe68438171) + + + +Outside of that, we’ve got a bunch of vacations going on this week, so it’s been more of the same three tracks of work: + +- Users +- Distributed GC on workers +- Spatial Resource flows + +See you next week! 
+ diff --git a/docs/blog/posts/2018/2018-05-18-concourse-update--may-14-18-.md b/docs/blog/posts/2018/2018-05-18-concourse-update--may-14-18-.md new file mode 100644 index 00000000..ed3531ae --- /dev/null +++ b/docs/blog/posts/2018/2018-05-18-concourse-update--may-14-18-.md @@ -0,0 +1,35 @@ +--- +title: Concourse Update (May 14–18) +date: 2018-05-18 +categories: + - product-update +--- + +In case you missed it, I’d encourage you to check out some of the recent posts +from [Shashwathi Reddy](https://medium.com/u/bca2c0ffce5e) +on “[My first month on Concourse](https://medium.com/concourse-ci/my-first-month-on-concourse-a75f72d21487)” +and [Joshua Winters](https://medium.com/u/d6d52be6c4b0) regarding upcoming changes to our +authentication; “[Oh, Auth](https://medium.com/concourse-ci/oh-auth-f4fe68438171)”. We’d love to hear your feedback! + + + +Heads up: the Concourse team will be taking Monday May 21st off +for [Victoria Day](https://en.wikipedia.org/wiki/Victoria_Day)holiday. + +And now, on to the update: + +**Core** + +- Continued banging our heads against new auth connectors with Dex. **Note:** We’ve started to centralize backwards-(in) + compatibilities with user auth in issue [#2218](https://github.com/concourse/concourse/issues/2218) +- We’ve stood up a new Concourse with our experimental Spaces work. We’re looking for volunteers who are interested in + trying out their pipelines before and after “space”. Tweet at me if you’re + interested [https://twitter.com/pioverpi](https://twitter.com/pioverpi)! + +**Runtime** + +- Completed all the volume collection work for distributed GC + in [#1959](https://github.com/concourse/concourse/issues/1959). 
We’re currently deploying this change to our internal + environments to see how it works at scale 🤞 +- Fixed issue [#2168](https://github.com/concourse/concourse/issues/2168), wherein “Duplicate resource type volumes + created over time” diff --git a/docs/blog/posts/2018/2018-05-25-concourse-update--may-22-25-.md b/docs/blog/posts/2018/2018-05-25-concourse-update--may-22-25-.md new file mode 100644 index 00000000..49fdde62 --- /dev/null +++ b/docs/blog/posts/2018/2018-05-25-concourse-update--may-22-25-.md @@ -0,0 +1,24 @@ +--- +title: Concourse Update (May 22–25) +date: 2018-05-25 +categories: + - product-update +--- + +It was a short week for us here in Canada, but we had a few interesting updates: + + + +- We attempted to deploy our [distributed GC changes](https://github.com/concourse/concourse/issues/1959) to our + internal environment “Wings” last Friday. Turns out that was an incredibly bad idea. The deployment failed + horrifically and we had to roll back all our changes. We’re still investigating why our code worked in + our [“prod” environment](https://ci.concourse-ci.org/) but failed when deployed onto Wings. We’re tracking this work + in issue [#2202](https://github.com/concourse/concourse/issues/2202). +- Our team conducted our first round of interviews on Spatial resources with Pivots in the Toronto office. We’re getting + a lot of interesting feedback and are making tweaks for next week’s batch of interviews +- In the mean time, we managed to work through some design snacks, addressing the lack + of [breadcrumbs](https://github.com/concourse/concourse/issues/2139) + and [responsive design on groups](https://github.com/concourse/concourse/issues/2130). +- Investigated migration paths forward with the new dex auth. Keep an eye on + issue [#2218](https://github.com/concourse/concourse/issues/2218) for more information on future incompatibilities + with this upgrade! 
diff --git a/docs/blog/posts/2018/2018-06-01-concourse-update--may-28---june-1-.md b/docs/blog/posts/2018/2018-06-01-concourse-update--may-28---june-1-.md new file mode 100644 index 00000000..c328541d --- /dev/null +++ b/docs/blog/posts/2018/2018-06-01-concourse-update--may-28---june-1-.md @@ -0,0 +1,48 @@ +--- +title: Concourse Update (May 28 — June 1) +date: 2018-06-01 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--May-28---June-1-/1-kJxF-3MOSqElyItFT2ec-A.png" alt="" width=" +100%" >}} + + + +If you’ve been experiencing “Aw Snap” errors on Chrome with Concourse 3.13.0 or 3.12.0 we traced the root cause to two +lines of CSS. This seems to happen only on Chrome 67; so a temporary workaround is to switch over to +Chrome [canary](https://www.google.com/chrome/browser/canary.html) or use Firefox/Safari/Edge. You can follow along in +our discussion at GitHub issue [#2236](https://github.com/concourse/concourse/issues/2236) + +Now, on to the update + +**Runtime** + +We were able to successfully test our distributed volume GC code on our Wings environment this week. Overall +we’ve seen a significant drop in Database Queries and a ~10% decrease in Web CPU usage. + +{{< image src="/images/downloaded_images/Concourse-Update--May-28---June-1-/1-GfBC0PNc6p2DOiGAbcxKnA.png" alt="" width=" +100%" >}} +{{< image src="/images/downloaded_images/Concourse-Update--May-28---June-1-/1-n8Ea93MfUmDIGaPLtdU37Q.png" alt="" width=" +100%" >}} + +Notice how the Database Queries now look like a sawtooth; this is a result of our new “mark and sweep” GC strategy on +workers. + +**Core** + +In an effort to make our new Users work backwards compatible and downgrade-able, we spent a good chunk of this week +figuring out down-migrations.
The conversation around this, and the compromises we’ve had to make can be found in Josh’s +follow up comment [here](https://github.com/concourse/concourse/issues/1888#issuecomment-392958566) + +**UX** + +{{< image src="/images/downloaded_images/Concourse-Update--May-28---June-1-/1-VzHW0teV3e1DfrqcYWc_-w.png" alt="" width=" +100%" >}} + +Check out the new breadcrumbs and responsive groups on our [prod environment](https://ci.concourse-ci.org/)! + +We’re still looking for users who would be interested in testing out our new spatial resources view! **Please reach out +to me over Twitter or @jma on Discord if you’re interested!** + diff --git a/docs/blog/posts/2018/2018-06-06-how-we-build-concourse.md b/docs/blog/posts/2018/2018-06-06-how-we-build-concourse.md new file mode 100644 index 00000000..8820bedf --- /dev/null +++ b/docs/blog/posts/2018/2018-06-06-how-we-build-concourse.md @@ -0,0 +1,88 @@ +--- +title: How We Build Concourse +date: 2018-06-06 +--- + +Building on some of our previous posts on the Concourse team mechanics¹, I wanted to spend some time going over how we +actually _build_ Concourse. + + + +### Tracking Features and Bugs + +Concourse tracks all of its bugs, features and epics through GitHub Issues. + +For items regarding the core functionality of Concourse itself, you can find the master issues list +here: [https://github.com/concourse/concourse/issues](https://github.com/concourse/concourse/issues) + +For issues regarding the Concourse website and documentation, you can find the backlog +here: [https://github.com/concourse/docs](https://github.com/concourse/docs) + +Concourse resources, both the ones [included with Concourse](https://concourse-ci.org/included-resources.html) and the +ones that are[community made](https://concourse-ci.org/community-resources.html), live in their own repositories. +Issues, bugs and features should be reported against the resource’s GitHub issues repo. 
+ +### Triage, Review and Prioritization + +We do our best to review and triage new issues that come into the Concourse repository on a daily basis. Triaging an +issue requires us to: + +- Identify whether the issue is a bug or new feature (aka enhancement) +- Identify whether the issue requires more investigation +- Apply relevant labels for easier search down-the road (e.g. web-ui, fly, security, etc.) +- Follow up with any questions or comments if issue was unclear +- Connect issues to related issues already entered previously +- If applicable, assign to one of the [Concourse GitHub Projects](https://github.com/orgs/concourse/projects) + +Issues assigned to a GitHub Project are automatically assigned into the project’s Icebox. The Concourse team follows a +very similar development approach +to [XP and Pivotal workflow](https://www.pivotaltracker.com/help/articles/workflow_overview/) where only active and +prioritized items are assigned to the Backlog, and all finished stories are required to be “Accepted” or “Rejected” by a +Product Manager or some other knowledgeable subject matter expert. + +### Design & Research + +Issues that require design and UX feedback are labeled with needs-design. These are usually picked up by our product +design team. + +We also use the label design-snack on bite-sized UX/UI issues that are ready to be picked up by an engineer. +design-snacks aren’t highly prioritized issues but are nonetheless very useful for Concourse users! + +Sometimes we work on big issues that require more research and testing before we can actually write issues. This work is +often tracked separately through various tools (both online and offline). We do our best to post updates in blog posts +and GitHub issues along the way. + +### Iteration Planning Meeting (IPM) + +The Concourse team conducts IPM every week on Monday afternoon. During this time we review each GitHub Project’s +backlog. 
This includes discussions on stories that were recently complete, are currently in flight, new stories added to +the backlog and any change in Backlog priorities. The Concourse team uses this custom-build project +view ([https://project.concourse-ci.org/](https://project.concourse-ci.org/)) as a way to quickly access the backlogs of +all our projects. + +### Acceptance + +Issues that are resolved are moved into the “Done” column of each project. This means that the issue is ready to be +reviewed for Acceptance. Typically, work that is ready for acceptance is reviewed on our “prod” instance, that +is, [https://ci.concourse-ci.org/](https://ci.concourse-ci.org/) The issue is typically reviewed by a product manager or +a subject matter expert who can determine whether the completed issue is acceptable for general distribution. Some +changes require additional load and/or “real-world” testing; in that case we deploy to Pivotal’s internal large-scale +Concourse “Wings”; which currently runs 3 ATCs, 38 workers and has \> 70 teams. + +Rejected issues are returned to the top of the GitHub Project Backlog and commented on for revision. + +### PR/Community + +PRs and “Community” work (e.g. answering questions on Discord, our Forum, in GitHub issues) is usually handled by a +dedicated person. Currently this person is Alex Suraci (Product Manager). In addition to helping the community, he is +building out new proposals for long-term changes to Concourse in +the [RFCs repo](https://github.com/concourse/rfcs/pulls). + +### What do we build? 
+ +In my next blog post, I plan on covering _what_ the core team is working on, how we make those decisions, and how we are +working to make those plans more obvious. + +¹ see The [Concourse Crew](https://medium.com/concourse-ci/the-concourse-crew-2017-fce7daeffe52) +and [How the Concourse Team Organizes Issues](https://medium.com/concourse-ci/how-the-concourse-team-organize-issues-9393f3d4151a) + diff --git a/docs/blog/posts/2018/2018-06-08-concourse-update--june-4-8-.md b/docs/blog/posts/2018/2018-06-08-concourse-update--june-4-8-.md new file mode 100644 index 00000000..09b1d860 --- /dev/null +++ b/docs/blog/posts/2018/2018-06-08-concourse-update--june-4-8-.md @@ -0,0 +1,48 @@ +--- +title: Concourse Update (June 4–8) +date: 2018-06-08 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--June-4-8-/1-XZfbfSSmYOJi2Ujc1uDP7Q.png" alt="" width=" +100%" >}} + + + +Big release this week! After lots of internal load testing on Wings we finally felt comfortable releasing Concourse 3.14.0. +In addition to the +new [Distributed Garbage Collection](https://medium.com/concourse-ci/distributed-garbage-collection-ae3867ab5438), +breadcrumbs, responsive groups, +and [Windows worker](https://github.com/concourse/concourse-bosh-deployment/blob/master/cluster/operations/windows-worker.yml), +we have 14 new features and a whole bunch of bug fixes. But wait! Don’t download that one; +get [Concourse v3.14.1](https://concourse-ci.org/download.html#v3141) instead. + +A few other updates. First, be sure to check out my write-up +on [How We Build Concourse](https://medium.com/concourse-ci/how-we-build-concourse-dd15939d92f2). I plan on writing more +posts like this in hopes of giving you more insight into the internals of the Concourse team. Hope you like it!
+ +And now, on to the update; starting with a note on [RFCs](https://github.com/concourse/rfcs): + +**RFCs** + +- We’re looking for feedback on how to improve our existing implementation of credential management. You can read more + about it in [issue #5](https://github.com/concourse/rfcs/issues/5). +- The RFC around [Resources v2](https://github.com/concourse/rfcs/pull/1) is moving along with some new changes. Thanks + to all the reviewers ([itsdalmo](https://github.com/itsdalmo), [cwlbraa](https://github.com/cwlbraa) + and [dprotaso](https://github.com/dprotaso)). I’d **REALLY** encourage y’all to read + the [full proposal](https://github.com/vito/rfcs/blob/resources-v2/01-resources-v2/proposal.md) and provide your + inputs; since we’ll be relying on these changes for new features like Spatial Resources. + +**UX** + +- We’re seriously, absolutely, most definitely tackling the slow performance on the build + page [#1543](https://github.com/concourse/concourse/issues/1543#issuecomment-394449918) +- Spatial Resource testing continues! Here’s a peek at our most recent iteration: + {{< image src="/images/downloaded_images/Concourse-Update--June-4-8-/1-C8RdmEmjBxrG5pzamGDMSg.png" alt="" width=" + 100%" >}} + +**Core** + +- Now that 3.14.1 is out, we’re now ready to rebase and merge in our Users change and prime that for release in 3.15.0.
+ Testing begins next week after we finish getting everything merged in diff --git a/docs/blog/posts/2018/2018-06-15-concourse-update--jun-11-15-.md b/docs/blog/posts/2018/2018-06-15-concourse-update--jun-11-15-.md new file mode 100644 index 00000000..c79de353 --- /dev/null +++ b/docs/blog/posts/2018/2018-06-15-concourse-update--jun-11-15-.md @@ -0,0 +1,48 @@ +--- +title: Concourse Update (Jun 11–15) +date: 2018-06-15 +categories: + - product-update +--- + +This was a post-release week, so we spent a lot of time merging in new code from the Users track, fixing our pipelines, +and working on some neglected issues. All in all a solid week’s worth of work! On to the update + + + +**UX** + +- Changed the behaviour of the breadcrumb so that clicking on the pipeline name resets the pipeline view and group + settings ([#2258](https://github.com/concourse/concourse/issues/2258)) +- Fixed a bug with the breadcrumb where it wouldn’t render whitespace + correctly ([#2267](https://github.com/concourse/concourse/issues/2267)) +- Fixed a bug with team name overflowing on breadcrumbs ([#2241](https://github.com/concourse/concourse/issues/2276)) +- Fixed a UI bug on the navigation arrows ([#2276](https://github.com/concourse/concourse/issues/2276)) +- Added JSON stdout to Fly CLI ([#952](https://github.com/concourse/concourse/issues/952)) + +We haven’t done work on this yet, but based on our observations and feedback from the community, we’re planning to push +the dashboard up to / level. This will require a few items of polish first; but you can refer to +issue [#2282](https://github.com/concourse/concourse/issues/2282) for details + +**Core** + +- Spent most of the week trying to rebase and merge in changes from the Users track. Our pipelines are finally green so + we’re ready to push some of that work into our local environments for broad testing. 
Be sure to check up + on [#2218](https://github.com/concourse/concourse/issues/2218) for any gotchas that might affect you! + +**Space** + +- Conducted two user interviews this week. We have only have one or two more interviews left next week. After that we’ll + be figuring out what our MVP might look light so we can start exposing that feature to adventurous Concourse users. + +**RFCs** + +As with last week, we’re looking for feedback on how to improve our existing implementation of credential management. +You can read more about it in [issue #5](https://github.com/concourse/rfcs/issues/5). + +The RFC around [Resources v2](https://github.com/concourse/rfcs/pull/1)is moving along with some new changes. Thanks to +all the reviewers ([itsdalmo](https://github.com/itsdalmo), [cwlbraa](https://github.com/cwlbraa) +and [dprotaso](https://github.com/dprotaso)). I’d **REALLY** encourage ya’ll to read +the [full proposal](https://github.com/vito/rfcs/blob/resources-v2/01-resources-v2/proposal.md)and provide your inputs; +since we’ll be relying on these changes for new features like Spatial Resources. + diff --git a/docs/blog/posts/2018/2018-06-22-concourse-update--jun-18-22-.md b/docs/blog/posts/2018/2018-06-22-concourse-update--jun-18-22-.md new file mode 100644 index 00000000..cb5ae224 --- /dev/null +++ b/docs/blog/posts/2018/2018-06-22-concourse-update--jun-18-22-.md @@ -0,0 +1,34 @@ +--- +title: Concourse Update (Jun 18–22) +date: 2018-06-22 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jun-18-22-/0-iPsCYY5ob7h-bSKD.jpg" alt="" width="100%" >}} + + + +It’s been a busy week for myself and [Topher Bullock](https://medium.com/u/58876cdc2180). We spent some time in Boston +meeting with some users operating large-scale Concourses. We learned a lot about the issues they were running into +operating Concourse a scale…and we ate a lot of Lobster! 
+ +On to the update: + +**UX:** + +- Made some improvements to the build page in issue [#1543](https://github.com/concourse/concourse/issues/1543) that + we’re hoping to test soon on our internal Concourse. You can read into some more of the details in + our [comments](https://github.com/concourse/concourse/issues/1543#issuecomment-398188077). +- Expanded our PR pipelines in [#2305](https://github.com/concourse/concourse/issues/2305) to run web tests as a part of + pulling in the PR for Shareable Search on dashboard [#2265](https://github.com/concourse/concourse/issues/2265). +- Started the move of the dashboard/ view to be the root level page ( + issue [#2282](https://github.com/concourse/concourse/issues/2282)) by adding Logout to the dashboard page + in [#1663](https://github.com/concourse/concourse/issues/1663) + +**Core** + +- Continued our struggle to finish off Users work with some fixes to migrations and breakages to our own testing + pipeline +- Continued with our user testing on Spatial Resources. 
We’re getting more confident with the designs, so we added a + story to bring those designs into beta/ in issue [#2292](https://github.com/concourse/concourse/issues/2292) diff --git a/docs/blog/posts/2018/2018-06-29-concourse-update--jun-25-29-.md b/docs/blog/posts/2018/2018-06-29-concourse-update--jun-25-29-.md new file mode 100644 index 00000000..22a5e20c --- /dev/null +++ b/docs/blog/posts/2018/2018-06-29-concourse-update--jun-25-29-.md @@ -0,0 +1,51 @@ +--- +layout: post +title: Concourse Update (Jun 25–29) +date: 2018-06-29 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jun-25-29-/1-eGvw-f2AjgJvsWN9pdikBg.gif" alt="" width=" +25%" >}} + + + +If you’ve been following along with our [Auth changes](https://medium.com/concourse-ci/oh-auth-f4fe68438171), you’ll +know that we’ve been doing a lot of work behind the scenes to make the upgrade into this new world as seamless as +possible. This week, we were able to do our first large-scale upgrade test against our Wings instance. The upgrade went +well and we were able to find a few more areas of polish before we push this feature. You can find our updated list of +future incompatibilities in GitHub issue [#2218](https://github.com/concourse/concourse/issues/2218). Having considered +the nature of the breaking changes, the next update of Concourse with Users will push us +into[4.0.0](https://github.com/concourse/concourse/issues/2218#issuecomment-401078612)!!! + +I also wanted to take this time to give a big **thank you** to all of the participants in our spatial resource +interview. 
If you’re curious to see the results of our research please read up +on [Lindsay Auchinachie](https://medium.com/u/84b937bda3b6)’s post +here:[Designing for Space in Concourse](https://medium.com/concourse-ci/designing-for-space-in-concourse-3037344644c6) + +If you’d like to get your hands on Space as soon as possible, then I’d encourage you to also read and comment on our +Resources v2 [RFC](https://github.com/concourse/rfcs/pull/1). [Alex Suraci](https://medium.com/u/263a63b2f209) made some +recent updates to the proposal so +definitely [check it out](https://github.com/concourse/rfcs/pull/1/files/3bc00098143d7f1d59c7c25b8614ddc545a05d81), or +read the [fully rendered proposal](https://github.com/vito/rfcs/blob/resources-v2/01-resources-v2/proposal.md). + +We’d like to get the Resources v2 RFC closed out soon so we can implement the resource changes necessary to tap into the +full potential of spatial resources! + +Some other updates: + +- We fixed a known issue [#2300](http://ourse/issues/2300) with the 3.14.x series whereby users noticed a significant + increase in CPU usage when connecting with CredHub. This has now been fixed +- Increased pipeline stability and fixed some flakes with our UI tests in topgun +- Fixed an [issue](https://github.com/concourse/concourse/issues/2313) where the auth token is shown in the address bar +- Picked up additional pipeline work by adding integration + for [web PRs](https://github.com/concourse/concourse/issues/2305) + +_Edit_ + +I ALMOST FORGOT! We also improved build page performance [#1543](https://github.com/concourse/concourse/issues/1543)! 
In +some instances we reduced the page load time from 25s to only 5s: + +{{< image src="/images/downloaded_images/Concourse-Update--Jun-25-29-/1-KEWandpQWRWRFcBvLRwbog.jpeg" alt="" width=" +100%" >}} \ No newline at end of file diff --git a/docs/blog/posts/2018/2018-07-06-concourse-updates--july-3-6-.md b/docs/blog/posts/2018/2018-07-06-concourse-updates--july-3-6-.md new file mode 100644 index 00000000..9926f86f --- /dev/null +++ b/docs/blog/posts/2018/2018-07-06-concourse-updates--july-3-6-.md @@ -0,0 +1,56 @@ +--- +title: Concourse Updates (July 3–6) +date: 2018-07-06 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Updates--July-3-6-/0-BrjbFvtgpagi0Ag3.png" alt="Concourse, beavers, +poutine and maple syrup" width="100%" >}} + + + +Since July 1st was the official day of [Canada’s birth](https://en.wikipedia.org/wiki/Canada_Day), the Concourse team +enjoyed a long weekend with no work on Monday. We were, however, able to get quite a bit done during this short week. + +A big win is that we added a k8s-testflight job to our official ci pipeline (check it +out [here](https://ci.concourse-ci.org/teams/main/pipelines/main/jobs/k8s-testflight)); this will let us know in advance +when we have broken +the [Concourse Kubernetes Helm Chart](https://github.com/kubernetes/charts/tree/master/stable/concourse). Shout out +to [Divya Dadlani](https://medium.com/u/7b8aac84a2b9), Jamie Klassen and Rui Yang for working on that in the Concourse +team! 
+ +Here’s also a few interesting reminders: + +- For Pivotal-supported releases of Concourse (aka Concourse for PCF) you can find a compatibility matrix of common + dependencies + here: [http://docs.pivotal.io/p-concourse/#Compatibility](http://docs.pivotal.io/p-concourse/#Compatibility) +- I realized this week that not a lot of people know about this; but [Alex Suraci](https://medium.com/u/263a63b2f209) + wrote up a series of [Concourse Anti-Patterns](https://github.com/concourse/concourse/wiki/Anti-Patterns) a while + back. Its definitely worth the read +- PLEASE take a second to review the upcoming Resource v2 [RFC](https://github.com/concourse/rfcs/pull/1) or its + rendered version [here](https://github.com/vito/rfcs/blob/resources-v2/01-resources-v2/proposal.md) +- **Concourse team is going to OSCON!** Come by the Pivotal booth 406 and say “hi”! + +Anyways, on to the update: + +**UX** + +- Fixed some minor UI issues across the + board: [#2333](https://github.com/concourse/concourse/issues/2333), [#2313](https://github.com/concourse/concourse/issues/2313), [#2291](https://github.com/concourse/concourse/issues/2291), + and [#2310](https://github.com/concourse/concourse/issues/2310) +- Continued our work in routing Dashboard page to the Home page + in [#2282](https://github.com/concourse/concourse/issues/2282). 
This, however, has turned into a bit of a scope creep + and we are now upgrading the entire UI to use the new dark theme: + {{< image src="/images/downloaded_images/Concourse-Updates--July-3-6-/1-Xp51wHexBz5wx1GcqaCvwA.png" alt="" width=" + 50%" >}} + +**Core** + +- Discovered some additional backward incompatibilities with the new user-based auth that would be _super annoying_ to + deal with; so we have addressed some of them + in[#2326,](https://github.com/concourse/concourse/issues/2326) [#2299](https://github.com/concourse/concourse/issues/2299) + and [#1810](https://github.com/concourse/concourse/issues/1810). **As always, you can read about future + incompatibilities with our new auth in issue** [**#2218**](https://github.com/concourse/concourse/issues/2218) +- [Alex Suraci](https://medium.com/u/263a63b2f209) had some time to pick up some low-hanging performance-improving fruit + in [#285](https://github.com/concourse/atc/pull/285) diff --git a/docs/blog/posts/2018/2018-07-13-concourse-update--jul-9-13-.md b/docs/blog/posts/2018/2018-07-13-concourse-update--jul-9-13-.md new file mode 100644 index 00000000..ed32cc91 --- /dev/null +++ b/docs/blog/posts/2018/2018-07-13-concourse-update--jul-9-13-.md @@ -0,0 +1,48 @@ +--- +layout: post +title: Concourse Update (Jul 9–13) +date: 2018-07-13 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jul-9-13-/1-AuH8VYkniNetbpZtRBjTuA.png" alt="DARK" width=" +100%" >}} + + + +We’re going dark themed for Concourse 4.0.0! In addition to the users work, we’re promoting the Dashboard to the / level +to take over the home page. You’ll also notice that we added pipeline play/pause capabilities to the dashboard, NEAT! + +To keep things consistent, we’re also propagating our new design to the existing pipeline views. 
You can play around +with this new nav structure on our own CI: [https://ci.concourse-ci.org/](https://ci.concourse-ci.org/) + +The team here is also planning to attend OSCON in Portland next week (July 18 & 19). Drop by the Pivotal booth to say hi +and grab a Concourse sticker! + +On to the update: + +**UX** + +- Fixed some resource alerting errors on the pipeline [#2333](https://github.com/concourse/concourse/issues/2333) +- Moved dashboard to home [#2282](https://github.com/concourse/concourse/issues/2282) +- Added Pause/Play pipeline buttons on homepage [#2365](https://github.com/concourse/concourse/issues/2365) +- Worked on dragging to re-order pipelines on the dashboard [#2364](https://github.com/concourse/concourse/issues/2333) +- Updated and propagated the new colours across the app [#2370](https://github.com/concourse/concourse/issues/2370) + +**Core** + +- Added health check APIs to verify credential managers are properly + configured [#2216](https://github.com/concourse/concourse/issues/2216) +- Even more db optimizations! yay! 
+ +**Runtime** + +- Picked up an oldie but a goodie: Concourse should support imposing limits on container + resources [#787](https://github.com/concourse/concourse/issues/787) + +**Operations** + +- Started to move our stemcells onto the + new [Xenial stemcells](https://github.com/concourse/concourse-bosh-deployment/issues/71) +- Switch upgrade/downgrade testing jobs to the binaries [#2371](https://github.com/concourse/concourse/issues/2371) diff --git a/docs/blog/posts/2018/2018-07-20-concourse-update--jul-16-20-.md b/docs/blog/posts/2018/2018-07-20-concourse-update--jul-16-20-.md new file mode 100644 index 00000000..04a779b8 --- /dev/null +++ b/docs/blog/posts/2018/2018-07-20-concourse-update--jul-16-20-.md @@ -0,0 +1,60 @@ +--- +layout: post +title: Concourse Update (Jul 16–20) +date: 2018-07-20 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jul-16-20-/1-CIxNgJ_FKbnacEpUI588nw.jpeg" alt="" width=" +100%" >}} + + + +This week, the Concourse team went out to Portland to attend OSCON +2018. [Topher Bullock](https://medium.com/u/58876cdc2180) gave a great intro to Concourse in the Open Source track. We +even met some of the Concourse fans in person! + +In other news, we’ve begun to sketch out what RBAC might look like in Concourse. Please check +out [#2389](https://github.com/concourse/concourse/issues/2389) when you have some time! + +On to the update: + +**UX** + +- Team has been working on adding drag and drop re-arranging for the dashboard + in [#2364](https://github.com/concourse/concourse/issues/2364) +- We also found a weird quirk with the new team creation flow, where you won’t see your team if it was just created and + has no pipelines. 
We will be addressing this in [#2382](https://github.com/concourse/concourse/issues/2382) +- Play/pause pipeline on the dashboard is mostly completed but was missing functionality when a search filter was + applied; so I had to reject that story for review in [#2365](https://github.com/concourse/concourse/issues/2365) +- [Lindsay Auchinachie](https://medium.com/u/84b937bda3b6) has also been entering some new UI polish issues to co-incide + with our new dark + theme: [#2370](https://github.com/concourse/concourse/issues/2370), [#2385](https://github.com/concourse/concourse/issues/2385), [#2387](https://github.com/concourse/concourse/issues/2387), [#2361](https://github.com/concourse/concourse/issues/2361) + +**Core** + +- Picked up some stories related our migrations, see [#2380](https://github.com/concourse/concourse/issues/2380) + and [#2074](https://github.com/concourse/concourse/issues/2074) +- Keen watchers of our repo will notice that we’ve added a note in + our [core backlog](https://cadet.cfapps.io/projects/Core#s-MDU6SXNzdWUzMzgzMjAxOTA%3D.s-MDU6SXNzdWUxOTA4MjM4NzU%3D.s-MDU6SXNzdWUzNDE1ODE1NTE%3D) + to start sketching out what additional work we need to get space moving along. +- Reminder to check out and comment on + the [Resources v2](https://github.com/vito/rfcs/blob/resources-v2/01-resources-v2/proposal.md) proposal! + +**Integrations** + +- We closed [#215](https://github.com/concourse/docker-image-resource/issues/215) in the docker-image-resource recently + after we discovered a regression with a newer version of Docker. This seems to only affect large-scale Concourse + installations that have reliability issues accessing and connecting to their local registries. 
A short-term fix is to + target older versions of the docker-image-resource + +**Runtime** + +- Addressed [#1516](https://github.com/concourse/concourse/issues/1516), wherein Concourse doesn’t run any jobs if Vault + misconfigured +- Did some work to begin imposing limits on containers in [#787](https://github.com/concourse/concourse/issues/787). + Please review this issue carefully if this affects you; since our initial resolution is very specific and requires you + to understand the nature of your worker vms +- Worked on [#2375](https://github.com/concourse/concourse/issues/2375) “Listing destroying volumes should not perform + any database write operations” :D diff --git a/docs/blog/posts/2018/2018-07-27-concourse-update--jul-23-27-.md b/docs/blog/posts/2018/2018-07-27-concourse-update--jul-23-27-.md new file mode 100644 index 00000000..359d311e --- /dev/null +++ b/docs/blog/posts/2018/2018-07-27-concourse-update--jul-23-27-.md @@ -0,0 +1,47 @@ +--- +title: Concourse Update (Jul 23–27) +date: 2018-07-27 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jul-23-27-/1-tfhJwBRSLe9wrc2-a7MwpQ.png" alt="Concourse v4 +Dashboard" width="100%" >}} + + + +I’m happy to announce that we released Concourse 4.0.0 this week! This was a HUGE release with over 28 new features and +fixes. I’d encourage you to read through the full list of changes on +our [Downloads page.](https://concourse-ci.org/download.html#v400) + +Why did this release warrant a bump in the major version? Well, if you’ve been following +along [closely](https://medium.com/concourse-ci/oh-auth-f4fe68438171) you’ll know that we had just finished our new auth +work in 4.0.0. Users are now central to the authentication flows, **not** teams. Practically speaking, the user-centric +auth flow means that you won’t need to re-login to see pipelines in other teams that you already have access to! 
+Underneath the hood though, _“We’re leveraging CoreOS’s Dex project for all the moving parts, which already supports a +ton of providers (Dex calls them “connectors”). The only delta required for Concourse to support a Dex connector is a +tiny bit of glue code in our new_ [_Skymarshal_](https://github.com/concourse/skymarshal) _component to provide +higher-level flags for our CLI.”_ + +We spent a lot of time near the end of this cycle trying to make these +changes [backwards compatible](https://github.com/concourse/concourse/issues/2218), but ultimately decided that the +changes were significant enough to warrant a bump in the major version. PLEASE _PLEASE **PLEASE**_ refer to our release +notes for all the breaking changes before executing your upgrade! + +{{< image src="/images/downloaded_images/Concourse-Update--Jul-23-27-/1-A7zDAYYisJzHjZldrxqneg.gif" alt="" width=" +50%" >}} + +The second big change you’ll notice in 4.0.0 is that the home (/) route now takes you to the dashboard. We’ve also +propagated the new colour scheme to the rest of the app and tightened up the fonts throughout the app. + +We hope you like it! + +So, what’s next? We’re focusing on three key areas: + +- [Resources v2](https://github.com/vito/rfcs/blob/resources-v2/01-resources-v2/proposal.md) and Spatial resources. + Please review and comment on the RFC! +- [Runtime efficiency](https://github.com/orgs/concourse/projects/23) & [Operational observability](https://github.com/orgs/concourse/projects/24) + into Concourse +- [Role based access control](https://github.com/concourse/rfcs/pull/6). That’s right, we’re finally doing it. Please + read the RFC for this change. 
You can also find a copy of our initial permission
  matrix [here](https://docs.google.com/spreadsheets/d/1np3hyJy3mVRfB2gcgKykz3QTQg5qEj28QgK523SEmao/edit#gid=0)
Now that we’ve release + 4.0.0, we’re taking some additional time to slow down to perform some additional polish and refactors +- Of note we have one regression which is prioritized highly “New resources are no longer highlighted in + UI” [#2423](https://github.com/concourse/concourse/issues/2423) +- A lot of folks have noticed that the sidebar has been removed, and a bid to bring it back has started with + issue [#2440](https://github.com/concourse/concourse/issues/2440) + +**Core** + +- We’ve been working on a track of stories around “pinning” a version of a resource across the + pipeline [#2439](https://github.com/concourse/concourse/issues/2439) + and [#2386](https://github.com/concourse/concourse/issues/2386) +- Database migrations have always been a headache for us and we’ve been looking at + issues [#2074](https://github.com/concourse/concourse/issues/2074) + and [#2452](https://github.com/concourse/concourse/issues/2439) + +**Runtime** + +- We finally got rid of Yeller support [#1819](https://github.com/concourse/concourse/issues/1819). I have no idea what + that did, or why it was there; but good riddance +- The much requested feature to stream build logs out is being worked + on [#2104](https://github.com/concourse/concourse/issues/2104) diff --git a/docs/blog/posts/2018/2018-08-10-concourse-update--aug-7-10-.md b/docs/blog/posts/2018/2018-08-10-concourse-update--aug-7-10-.md new file mode 100644 index 00000000..c5c9ff98 --- /dev/null +++ b/docs/blog/posts/2018/2018-08-10-concourse-update--aug-7-10-.md @@ -0,0 +1,40 @@ +--- +title: Concourse Update (Aug 7–10) +date: 2018-08-10 +categories: +- roadmap +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Aug-7-10-/1-QBeLayNVacbJGgBW-BhGSw.jpeg" alt="" width=" +35%" >}} + + + +As I mentioned last week, this was a short week for us in Canada due to +the [Civic Holiday](https://en.wikipedia.org/wiki/Civic_Holiday#Ontario). 
We did, however, manage to work on some pretty +cool stuff! + +With the release of 4.0.0, we’ve been shifting our new feature focus towards Operations and Runtime. We’re intentionally +slowing down on UX to focus on regressions and UI polish for existing screens. + +On to the update: + +**Core** + +- Continued our work on “pinning” a version of a resource across the pipeline. + Completed [#2439](https://github.com/concourse/concourse/issues/2439) but still + have [#2386](https://github.com/concourse/concourse/issues/2386) in flight + +**Runtime** + +- Wrapping up work around streaming build logs to an external + target [#2104](https://github.com/concourse/concourse/issues/2104) +- Investigated the issue around External Worker affinity on ATCs when using external + workers [#2312](https://github.com/concourse/concourse/issues/2312) +- Picked up the issue for adding a configurable timeout for resource and resource type + checks [#2352](https://github.com/concourse/concourse/issues/2352) + +**Operations** + +- In our continued exploration of k8s Helm Chart for Concourse, we’re looking into how we might auto-magically generate + helm chart parameters [#2472](https://github.com/concourse/concourse/issues/2472) diff --git a/docs/blog/posts/2018/2018-08-10-suspicious-volume-usage-on-workers.md b/docs/blog/posts/2018/2018-08-10-suspicious-volume-usage-on-workers.md new file mode 100644 index 00000000..cebbd6bd --- /dev/null +++ b/docs/blog/posts/2018/2018-08-10-suspicious-volume-usage-on-workers.md @@ -0,0 +1,80 @@ +--- +title: Suspicious Volume Usage on Workers +date: 2018-08-10 +--- + +As a Product Manger at Pivotal I’m often called on to help with our customer’s Concourse-related issues. I recently +spent some time hunting down an issue around suspiciously high volume usage on Concourse workers. It was an interesting +problem that I wanted to share with the broader Concourse community. 
+ + + +### Platform Management Pipelines + +One of the primary use case for Concourse within the Pivotal Cloud +Foundry ([PCF](https://www.google.com/search?q=pivotal+cloud+foundry&oq=pivotal+cloud+fou&aqs=chrome.0.0j69i60l2j69i65l2j69i60.3685j0j7&sourceid=chrome&ie=UTF-8)) +ecosystem is to automate the toil of manual maintenance against the platform; +specifically [PAS](https://pivotal.io/platform/pivotal-application-service) +and [PKS](https://pivotal.io/platform/pivotal-container-service). For the purposes of this issue, the specific details +of the pipeline doesn’t matter but an understanding the general flow of the pipeline will help frame problem: + +{{< image src="/images/downloaded_images/Suspicious-Volume-Usage-on-Workers/1-afxjY-fNHqW6BPik1xdGVQ.png" alt="A +simplified version of a pipeline used to pull updates from the Pivotal Network and apply the changes onto Ops Manager" +width="100%" >}} + +In these pipelines the pivnet-resource is responsible for monitoring new product versions +on [network.pivotal.io](https://network.pivotal.io/) (aka PivNet). When a new product version is released on PivNet, the +pivnet-resource picks it up and initiates a download. These files are relatively large, from 500mb to over 1 GB + +**Recreate _all_ the workers?** + +Over the course of the past year or so we would get sporadic reports of customers who used Concourse for PCF management +running out of space on their workers. The typical manifestation of it comes from a failed to stream in volume error. It +would appear that workers were running out of space; but it wasn’t clear why. To mitigate the issue Concourse operators +would be forced to periodically re-create their workers to get a “clean slate”. + +**But why?** + +Having to periodically recreate workers is a _huge_ pain and it doesn’t give operators a lot of confidence in Concourse +itself. The team decided to take a look into the root cause of this issue. 
We wanted to understand whether this was a +bug in the system and whether we could do something to address it. + +After some poking and prodding, I think we figured out what was happening in this specific scenario. Using the same +simplified pipeline above, consider the following scenario: + +{{< image src="/images/downloaded_images/Suspicious-Volume-Usage-on-Workers/1-m-1ouUbMQEVv9gPJ3wggug.png" alt="" width=" +100%" >}} + +- At t=0 the pipeline is configured and idling; monitoring the external system for a new version. +- At t=1 a new version of the “Metrics” product is released on PivNet, picked up by Concourse, downloaded and begins to + flow through your pipeline +- At t=2 the Upload to OM (OM == Ops Manager) job kicks off and does its thing +- At t=3 the artifact is used for some long running process like Apply Changes on OM. Concourse will hold on to that + downloaded data since its still running + +But wait, what’s that new Metrics 1.0 box in deep blue at t=3? Well, its not uncommon for the metadata of a release to +be modified just-after release. This could be a tweak to metadata (e.g. support dates, dependencies, supported +stemcells, etc.), which causes PivNet to report a new version. Semantically, its still reported as Metrics v1.0 but +Concourse will pick it up nonetheless. Because of this change we have effectively doubled the amount of storage used! + +**I think we learned a valuable lesson today…** + +The problem I described was a specific to the usage of the pivnet-resource, but there are a lot of common takeaways: + +- Spend some time to understand your resources! The specific implementation of check can drastically affect your + pipeline +- Be wary of using large files with long running jobs. I could see how someone could easily re-create a similar scenario + with other resources +- Consider separating out the act of downloading an artifact from the act of operating on the artifact. 
For example, I + found that other teams in Pivotal worked around this by landing their PivNet artifacts in an s3 bucket and picking it + up in the next job via the s3-resource +- Set up some monitoring! You can catch errors and creeping disk use this + in [metrics dashboards](https://metrics.concourse-ci.org/dashboard/db/concourse?refresh=1m&orgId=1) + +**No seriously, why does this happen?!** + +In this blog post I covered a lot of the symptoms of the problem and touched on some abstract reasoning on why this +happens. In the next blog post we (and by “we” I mostly mean [Topher Bullock](https://medium.com/u/58876cdc2180)) will +cover the specific technical details of Resource Caching on Concourse so you can have a better understanding of exactly +_why_ this happens. + diff --git a/docs/blog/posts/2018/2018-08-17-concourse-update--august-13-17-.md b/docs/blog/posts/2018/2018-08-17-concourse-update--august-13-17-.md new file mode 100644 index 00000000..04194096 --- /dev/null +++ b/docs/blog/posts/2018/2018-08-17-concourse-update--august-13-17-.md @@ -0,0 +1,46 @@ +--- +title: Concourse Update (August 13–17) +date: 2018-08-17 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--August-13-17-/1-tElpiP87T2Ee3rXKJP88QQ.gif" alt="Combining +repos for great justice" width="45%" >}} + + + +Going to switch things up this week and start with some interesting community news: + +- We’ve decided to restructure our repositories to make things more understandable and less scary for + contributors. 
[Alex Suraci](https://medium.com/u/263a63b2f209) has laid out a good explainer on why and how we’re + going to start in + our [PSA: the Great Code Restructing of 2018](https://discuss.concourse-ci.org/t/psa-the-great-code-restructing-of-2018/543) +- [Lindsay Auchinachie](https://medium.com/u/84b937bda3b6) wrote up a blog post describing some of the visual elements + of the Concourse pipeline view in a blog post + titled [Concourse Pipeline UI Explained](https://medium.com/concourse-ci/concourse-pipeline-ui-explained-87dfeea83553) +- marco-m has been updating a “concourse-in-a-box” formula that comes with a s3-compatible-store and a Vault. Check it + out here: [https://github.com/marco-m/concourse-ci-formula](https://github.com/marco-m/concourse-ci-formula) +- [concourse-up](https://github.com/EngineerBetter/concourse-up) is a Concourse quick-start tool created created by our + friends at EngineerBetter. The team there is looking for feedback on how to support the 4.0.0 authentication scheme + moving forwards. If you use their tool, please take some time to give them some love on their GitHub + issue [https://github.com/EngineerBetter/concourse-up/issues/62](https://github.com/EngineerBetter/concourse-up/issues/62) +- Is our efforts to have Concourse un-flaky a myth? Find out on this forum post + by [eedwards-sk](https://discuss.concourse-ci.org/u/eedwards-sk), you won’t believe + post [#4](https://discuss.concourse-ci.org/t/is-concourses-aim-to-eliminate-snowflaking-just-a-myth/444/4?u=jama)!! + +On to some development news: + +- We’re still hacking away at + issue [#2425 “Login session expired” error with multiple ATCs](https://github.com/concourse/concourse/issues/2425). 
+ Please check in on the story to follow along with our plans for a fix (it involves some migrations) +- Praise be, we fixed the UX regression on the resources page where new resources weren’t being + highlighted [#2423](https://github.com/concourse/concourse/issues/2423) +- Still refactoring away to make way for [#2386](https://github.com/concourse/concourse/issues/2386) in preparation for + spaces +- Finished [#2352](https://github.com/concourse/concourse/issues/2352)on Configurable timeout for resource checks. Turns + out that by fixing that issue we also fixed [#2431](https://github.com/concourse/concourse/issues/2431). We did end up + making [#2494](https://github.com/concourse/concourse/issues/2494) though in order to break out the specific ability + to configure a timeout for resource type checks. +- Paused work on [#2104](https://github.com/concourse/concourse/issues/2104) on emitting build logs to an external + system this week, hoping to pick it back up next week! diff --git a/docs/blog/posts/2018/2018-08-24-concourse-update--august-20-24-.md b/docs/blog/posts/2018/2018-08-24-concourse-update--august-20-24-.md new file mode 100644 index 00000000..331786ca --- /dev/null +++ b/docs/blog/posts/2018/2018-08-24-concourse-update--august-20-24-.md @@ -0,0 +1,68 @@ +--- +title: Concourse Update (August 20–24) +date: 2018-08-24 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--August-20-24-/0-9tKyl-ikt-ttbS_z.jpg" alt="Logs and +resources" width="25%" >}} + + + +### Kubernetes + +As we continue our sporadic work on Kubernetes and its Helm chart, we’re also starting to expand our thinking to cover +the runtime aspects of Concourse + Kubernetes. We’ve already prioritized the need to have Kubernetes as a supporting +backend in addition to Garden, but what about the spiffy new developments in the Kubernetes world? 
We’re hearing a lot +about [knative](https://github.com/knative/) and knative services like [build](https://github.com/knative/build) +and [eventing](https://github.com/knative/eventing). Are there any kubernetes users who’d like to weigh in on the topic? +Let us know on our [forums!](https://discuss.concourse-ci.org/t/kubernetes-knative/573) + +### Request for Comment + +We’ve written a new RFC +titled: [Merge](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md)[resource](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md)[and](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md)[resource\_type](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md) +s. This RFC came about as a result of the work in pinning resources +and [#2386](https://github.com/concourse/concourse/issues/2386). You can comment on the RFC +PR [here](https://github.com/concourse/rfcs/pull/8) + +We are also stuck on [#2104](https://github.com/concourse/concourse/issues/2104) **Streaming of Build Logs to Additional +Target.** We’re specifically looking for feedback on the following questions before moving forward: + +- Our test for syslog is flakey: failing/hanging sometimes +- Need to backfill a test for updating the drained column +- What metadata do we want to send with the build log? team/pipeline/job/build names? +- Is there a possibility for the build logs to be reaped before they are drained? +- What kind of database locks do we need for the operation? 
+ + +If you have an insight to shed on these questions, please hop on over to the +issue [#2104](https://github.com/concourse/concourse/issues/2104) + +Here are the rest of the updates for this week: + +**UX** + +- Long pipeline names in the dashboards will now have a tooltip to let you read the full pipeline + name [#2411](https://github.com/concourse/concourse/issues/2411) +- Login button alignment on mobile views is pushed up [#2433](https://github.com/concourse/concourse/issues/2433) + +**API** + +- Finished up the work on the multiple ATC login issue [#2425](https://github.com/concourse/concourse/issues/2425). The + fix for this will require a db migration in the next version of Concourse! +- Added the ability to do a fly check-resource-type [#2507](https://github.com/concourse/concourse/issues/2507) in order + to support [#2494](https://github.com/concourse/concourse/issues/2494) resource type check\_timeout + +**Core** + +- Continued work on issue [#2386](https://github.com/concourse/concourse/issues/2386) Equivalent resources defined + across pipelines and teams should only correspond to a single version history. The work here has led to the creation + of the new RFC mentioned + above: [Merge](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md)[resource](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md)[and](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md)[resource\_types](https://github.com/clarafu/rfcs/blob/master/05-recursive-resources/proposal.md) + +**Operations** + +- We’ve picked up an issue that aims to be better at inferring defaults for peer/external + URLs [#2519](https://github.com/concourse/concourse/issues/2519). This should help with some of the 4.0.0 upgrade and + installation issues. 
diff --git a/docs/blog/posts/2018/2018-08-30-concourse-update--aug-27-31-.md b/docs/blog/posts/2018/2018-08-30-concourse-update--aug-27-31-.md new file mode 100644 index 00000000..86e12b3f --- /dev/null +++ b/docs/blog/posts/2018/2018-08-30-concourse-update--aug-27-31-.md @@ -0,0 +1,45 @@ +--- +title: Concourse Update (Aug 27–31) +date: 2018-08-30 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Aug-27-31-/0-39sBwa4rlBwJYlH4.jpg" alt="Photo courtesy of the +CNE" width="100%" >}} + + + +Apologies for the break from the usual update schedule; I wanted to get one last update out before I take some personal +time, starting Fri. Aug 31 and coming back Sept 10. In my absence [Scott Foerster](https://medium.com/u/86d0fa097bb9) +and [Alex Suraci](https://medium.com/u/263a63b2f209) will be writing the product update next week. The Concourse team +will also be taking Monday, Sept 3rd off in observance of Labour day as well. + +On to the updates: + +- Concourse 4.1.0 will be out…soon! We’ve begun the process of accepting all stories and deploying our pre-release + version onto the internal test environments. If you’re curious as to what new features/bug fixes are coming out in + this release, you can get an at-a-glace view in our [Milestones page](https://project.concourse-ci.org/milestones).You + can expect the official release to come out very soon :D +- [Lindsay Auchinachie](https://medium.com/u/84b937bda3b6) wrote another entry in her Concourse UI Explained series; + this time covering the [Concourse Build page](https://medium.com/@lauchinachie_78613/4f92824c98f1). +- The Concourse mono-repo is coming! You can read more about the change in + issue [#2534](https://github.com/concourse/concourse/issues/2534). 
Work on this will continue the moment we release + 4.1.0 + +**UX** + +- Worked on some UI improvements to help users distinguish between teams they belong to vs exposed + pipelines [#2427](https://github.com/concourse/concourse/issues/2427) + +**Core** + +- Continued refactoring work on [#2386](https://github.com/concourse/concourse/issues/2386) +- Worked on discussion regarding a PR from GitHub user [edtan](https://github.com/edtan) to resolve + issue [#2511](https://github.com/concourse/concourse/issues/2511) + +**Runtime** + +- Since there didn’t seem to be any strong opinions on how we managed log outputting + in [#2104](https://github.com/concourse/concourse/issues/2104), we’ve decided to move forward with some reasonable + assumptions. diff --git a/docs/blog/posts/2018/2018-09-14-concourse-update--sept-10---sept-14-.md b/docs/blog/posts/2018/2018-09-14-concourse-update--sept-10---sept-14-.md new file mode 100644 index 00000000..70b7b73a --- /dev/null +++ b/docs/blog/posts/2018/2018-09-14-concourse-update--sept-10---sept-14-.md @@ -0,0 +1,53 @@ +--- +title: Concourse Update (Sept 10 — Sept 14) +date: 2018-09-14 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Sept-10---Sept-14-/1-oJ9JKLsPYwX6YtLNEibR0w.png" alt="Let us +know if you’d be interested in Concourse swag" width="100%" >}} + + + +Following up from a discussion on our forums [Scott Foerster](https://medium.com/u/86d0fa097bb9) has been looking at +different options for selling Concourse swag online. Do you want Concourse leggings? or maybe a limited edition @vito +pls pillow! Let us know in the +thread [Concourse merchandising](https://discuss.concourse-ci.org/t/concourse-merchandising-t-shirts-and-similar/599/4). + +**Please also take some time to fill out our** [**2018 Concourse Community survey +**](https://docs.google.com/forms/u/1/d/e/1FAIpQLScWHuP130rJAcqBJhQtyIUCqbMcY4Qj0beHtfOnWEQugWSuUw/viewform). 
Your +feedback is really valuable to us and the information you provide will help us plan the future of Concourse. We only +have a handful of responses so far and we’d like to get more before we publish the results! + +On to the update: + +**API** + +- As a welcome back to [Joshua Winters](https://medium.com/u/d6d52be6c4b0), we took a look + at [#2463](https://github.com/concourse/concourse/issues/2463) and the possibility of doing an internal redirect for + all auth components. Unfortunately, that didn’t work quite well. Check out the full issue thread for details +- Remember that [RBAC RFC](https://github.com/concourse/rfcs/pull/6)? Well, we’re going to buckle down and start working + on that now + +**UX** + +- Following up on issue [#2427](https://github.com/concourse/concourse/issues/2427), we’re applying the same labelling + principles to the HD dashboard view in [#2572](https://github.com/concourse/concourse/issues/2572) + +**Core** + +- Kept hacking away on good ol’ [#2386](https://github.com/concourse/concourse/issues/2386) + +**Runtime** + +- Spiking on [#2581](https://github.com/concourse/concourse/issues/2581), where we ask ourselves “Can we determine when + a build step fails because the worker is unusable?” + +**Operations** + +- Continuing on [#2312](https://github.com/concourse/concourse/issues/2312). This issue has exploded a bit to lots of + edge cases and race conditions; but our determination to finish this issue is strong +- Looked into why + our [k8s-testflight](https://ci.concourse-ci.org/teams/main/pipelines/main/jobs/k8s-testflight/builds/114) job keeps + breaking. 
diff --git a/docs/blog/posts/2018/2018-09-21-concourse-update--sept-17-21-.md b/docs/blog/posts/2018/2018-09-21-concourse-update--sept-17-21-.md new file mode 100644 index 00000000..63a6f4fe --- /dev/null +++ b/docs/blog/posts/2018/2018-09-21-concourse-update--sept-17-21-.md @@ -0,0 +1,60 @@ +--- +title: Concourse Update (Sept 17–21) +date: 2018-09-21 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Sept-17-21-/1-7EmboSXNrRcSwtYmV9WSLQ.jpeg" alt=" +wiltshirespotter" width="50%" >}} + + + +[Concourse 4.2.0](https://concourse-ci.org/download.html#v420) +and [Concourse 4.2.1](https://concourse-ci.org/download.html#v421) were released earlier this week. There’s a lot of +great fixes and features in this new release, so please upgrade now! + +Reminder that [The Great Project Restructuring of 2018](https://github.com/concourse/concourse/issues/2534) is now +underway. You’ll notice that all our submodules (e.g. ATC, TSA fly)are now all under the root level of +the [concourse/concourse](https://github.com/concourse/concourse) repo. Its cleaner. + +You’ll also notice that the BOSH spec has moved from its usual place. We’ve separated out the BOSH release code into its +own repo under [concourse-bosh-release](https://github.com/concourse/concourse-bosh-release). As always, you can find +examples of how to use the BOSH release +under [concourse-bosh-deployment](https://github.com/concourse/concourse-bosh-deployment). + +_Edit:_ I forgot to mention that Concourse user [danhigham](https://github.com/danhigham) wrote an awesome Atom plugin +for Concourse. Give the [concourse-vis](https://github.com/danhigham) plugin a spin and show him some love! + +Finally, please take some time to fill out +the [2018 Concourse community survey](https://docs.google.com/forms/d/e/1FAIpQLScWHuP130rJAcqBJhQtyIUCqbMcY4Qj0beHtfOnWEQugWSuUw/viewform). 
+We’re at 80 responses right now and hoping to hit 100 before we publish the results! + +On to the update: + +**API** + +- RBAC IS COMING! Team is working away at implementing our first iteration of fine grained role based access control. + You can read the details about this work in the + RFC [here](https://github.com/pivotal-jwinters/rfcs/blob/proposal/rbac/03-rbac/proposal.md). + +**UX** + +- More UX polish and refactoring, specifically we’re trying to merge the HD dashboard logic with the normal dashboard + logic. A lot of that work is hidden in [#2572](https://github.com/concourse/concourse/issues/2572) + +**Core** + +- [#2386](https://github.com/concourse/concourse/issues/2386)is close to completion! Hurray. Applying some final + polishes before shipping it. You’re gonna love it. + +**Runtime** + +- Finished [#2586](https://github.com/concourse/concourse/issues/2586), which should make things more efficient +- Made progress on [#2588](https://github.com/concourse/concourse/issues/2588). Completed the GC container portion and + will re-apply the same logic on the volumes portion + +**Operations** + +- Issued PR [#7804](https://github.com/helm/charts/pull/7804) against the Concourse Helm Chart, which refactors the + values.yml to better map Concourse binary commands in the Helm Chart diff --git a/docs/blog/posts/2018/2018-09-28-concourse-update--sept-24-28-.md b/docs/blog/posts/2018/2018-09-28-concourse-update--sept-24-28-.md new file mode 100644 index 00000000..f36b44b8 --- /dev/null +++ b/docs/blog/posts/2018/2018-09-28-concourse-update--sept-24-28-.md @@ -0,0 +1,65 @@ +--- +title: Concourse Update (Sept 24–28) +date: 2018-09-28 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Sept-24-28-/1-hrYofU4YBkZ9SWkt4fUPZA.jpeg" alt="Reppin’ +Concourse at Spring One Platform" width="60%" >}} + + + +The Concourse team went out to Washington D.C. 
this week to attend Spring One Platform 2018. Thanks to all the Concourse +fans who stopped by to say hi, we really enjoyed meeting ya’ll. All of the talks were recorded and should be uploaded to +the [SpringDeveloper](https://www.youtube.com/user/SpringSourceDev/videos) YouTube channel in the coming weeks. Some of +the interesting talks to check out are: + +- [Extreme Pipelines](https://springoneplatform.io/2018/sessions/extreme-pipelines) +- [Zero to Multicloud](https://springoneplatform.io/2018/sessions/zero-to-multi-cloud) + and [Spinnaker and the Distributed Monorepo](https://springoneplatform.io/2018/sessions/spinnaker-and-the-distributed-monorepo) +- ...and of + course [Draupnir: A story about Managing Concourse in the Enterprise](https://springoneplatform.io/2018/sessions/draupnir-a-story-about-managing-concourse-in-the-enterprise) + {{< image src="/images/downloaded_images/Concourse-Update--Sept-24-28-/1-syqGOwSEdWFE5CvrkZT-Kg.jpeg" alt="Concourse ❤ + Spring & PCF" width="60%" >}} + +And now, on to the update: + +{{< image src="/images/downloaded_images/Concourse-Update--Sept-24-28-/1-kTNsddsROpolUBj1oiJ6Mg.png" alt="" width=" +50%" >}} + +You’ll notice that our main pipelines are paused. This is because [Alex Suraci](https://medium.com/u/263a63b2f209) is +working away on [#2534](https://github.com/concourse/concourse/issues/2534), refactoring our main pipeline to support +our new mono-repo structure. This new pipeline is simply called +the [concourse](https://ci.concourse-ci.org/teams/main/pipelines/concourse)[pipeline](https://ci.concourse-ci.org/teams/main/pipelines/concourse). + +In addition to refactoring the pipeline, [Alex Suraci](https://medium.com/u/263a63b2f209) has been fleshing out the new +developer/contributor workflows under our new mono-repo. You can find the new updated information +in [CONTRIBUTING.md](https://github.com/concourse/concourse/blob/master/.github/CONTRIBUTING.md). 
+ +{{< image src="/images/downloaded_images/Concourse-Update--Sept-24-28-/1-f2DIMOJRMC4Cm8YG-iWGXw.png" alt="Bugs…or +features?!" width="100%" >}} + +You’ll also notice that we ask whether you are reporting a bug or a new feature when creating issues. This will ( +hopefully) help get our backlog more organized and reduce the up-front triaging! + +**Fly** + +- Completed [#2221](https://github.com/concourse/concourse/issues/2221) “Add fly command to land worker” +- Added new fly flag to “Support manual token entry during login when running `fly` from a remote shell” + in [#2464](https://github.com/concourse/concourse/issues/2464) +- Fixed [#2539](https://github.com/concourse/concourse/issues/2539), where a login through fly may be “successful” if + you do not belong to a specific team +- Fixed [#2598](https://github.com/concourse/concourse/issues/2598) + +**Core** + +- [#2386](https://github.com/concourse/concourse/issues/2386) is done! + +**Runtime** + +- Continuing on [#2588](https://github.com/concourse/concourse/issues/2588) + +**Operations** + +- Tackling [#2312](https://github.com/concourse/concourse/issues/2312), which is still giving us a run for our money diff --git a/docs/blog/posts/2018/2018-10-05-concourse-update--oct-1-5-.md b/docs/blog/posts/2018/2018-10-05-concourse-update--oct-1-5-.md new file mode 100644 index 00000000..c01a781a --- /dev/null +++ b/docs/blog/posts/2018/2018-10-05-concourse-update--oct-1-5-.md @@ -0,0 +1,67 @@ +--- +title: Concourse Update (Oct 1–5) +date: 2018-10-05 +categories: +- product-update +--- + +[Alex Suraci](https://medium.com/u/263a63b2f209) is still tackling the chores on our One Big Repo +issue [#2534](https://github.com/concourse/concourse/issues/2534). 
Specifically, Alex is re-writing a new pipeline ( +aka [concourse](https://ci.concourse-ci.org/teams/main/pipelines/concourse)) for our mono-repo structure so we can +unblock ourselves from releasing updates. + + + +In other news, Concourse engineer [Saman Alvi](https://medium.com/u/d40e22ec1cfa) wrote up a short article on her +experience pairing with a product designer during a discovery into +the [PivNet resource](https://github.com/pivotal-cf/pivnet-resource); check it +out: [Design & Dev Pairing: What we learned during a one week technical discovery](https://medium.com/concourse-ci/design-dev-pairing-what-we-learned-during-a-one-week-technical-discovery-f9dfb4c35cd5). + +Finally, the Concourse team will be taking Monday, Oct 5 off to +celebrate [Thanksgiving](https://en.wikipedia.org/wiki/Thanksgiving_%28Canada%29). We’ll see you all next week! + +On to the update: + +**RBAC** + +We continue to work on the proposal for [Role Based Access Control (RBAC)](https://github.com/concourse/rfcs/pull/6). In +the past few weeks we’ve been focusing more on the _experience_ of assigning roles to new users. Our early attempts at +this was to require operators to supply those changes through the fly CLI: + +```shell-session +fly -t mytarget set-team -n myteam --role viewer --allow-all-users + +fly -t mytarget set-team -n myteam --role member --github-user pivotal-jwinters --github-team myorg:myteam +``` + +This raises some questions though: how do you go about removing a role from a user on a team? should the role parameters +be additive, or overriding like the other flags? Also, that’s a lot of flags to supply through the set-team command, +maybe this belongs in a configuration file. + +So with that we decided to move all of the user role configurations into a config file. We think that’ll be much +cleaner. Hop on over to +the [updated RFC](https://github.com/pivotal-jwinters/rfcs/blob/proposal/rbac/03-rbac/proposal.md)for the update +details. 
+ +**UX** + +- We’ve been doing some much needed refactoring on the Elm frontend code. That’s also let us pick up some design polish + stories like [#2434](https://github.com/concourse/concourse/issues/2434) + and [#2430](https://github.com/concourse/concourse/issues/2430) +- The team has also had the opportunity to pick up a lot of issues around the fly + CLI: [#2532](https://github.com/concourse/concourse/issues/2532), [#963](https://github.com/concourse/concourse/issues/2430), [#1062](https://github.com/concourse/concourse/issues/2430) + +**Core** + +- Space is back…but really it never left! With the hard work of resource pinning and global caching, we’re now ready to + resume the work around Spatial resources [#2651](https://github.com/concourse/concourse/issues/2651) + +**Runtime** + +- Finished [#1799](https://github.com/concourse/concourse/issues/1799) “Permit overlapping inputs, outputs and task + caches + +**Operations** + +- We finished [#2312](https://github.com/concourse/concourse/issues/2312)!!! ….except we DO need to do some + acceptance testing to make sure we’ve covered all our bases. diff --git a/docs/blog/posts/2018/2018-10-12-concourse-update--oct-9-12-.md b/docs/blog/posts/2018/2018-10-12-concourse-update--oct-9-12-.md new file mode 100644 index 00000000..abb1550a --- /dev/null +++ b/docs/blog/posts/2018/2018-10-12-concourse-update--oct-9-12-.md @@ -0,0 +1,43 @@ +--- +title: Concourse Update (Oct 9–12) +date: 2018-10-12 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Oct-9-12-/1-beC36nbzVbF57aHcM2jEyQ.jpeg" alt="From the +Smithsonian National Air and Space Museum in Washington D.C." width="100%" >}} + + + +The results of the [Concourse 2018 Community survey](https://medium.com/concourse-ci/2018-community-survey-ddff90bdc35b) +is out! 
Thanks to everyone who took the time to fill it out; and to [Scott Foerster](https://medium.com/u/86d0fa097bb9) +and [Lindsay Auchinachie](https://medium.com/u/84b937bda3b6) for sifting through the data. + +It was a relatively short week for us due to Thanksgiving celebrations, but here’s our update: + +**UX** + +- Continued our rampage in fixing fly + issues: [#259](https://github.com/concourse/fly/issues/259), [#267](https://github.com/concourse/fly/issues/267), [#1038](https://github.com/concourse/concourse/issues/1083), [#1062](https://github.com/concourse/concourse/issues/1062), [#248](https://github.com/concourse/fly/issues/248) + +I also wanted to add that we’re trying to keep all issues +under [concourse/concourse](https://github.com/concourse/concourse/issues). We’re planning on migrating the issues +under [concourse/fly](https://github.com/concourse/fly/issues) and closing off that repo in order to centralize +everything under [concourse/concourse](https://github.com/concourse/concourse/issues). 
+ +**Core** + +- SPATIAL RESOURCES ARE BACK [#2651](https://github.com/concourse/concourse/issues/2651) + +**Runtime** + +- Picked up [#1954](https://github.com/concourse/concourse/issues/1954)(The ATC holds a lock on a resource type scan) + and [#1796](https://github.com/concourse/concourse/issues/1796) (Task fails with “config file not found” after + restarting Docker service) +- Finished [#1799](https://github.com/concourse/concourse/issues/1799) Permit overlapping inputs, outputs, and task + caches + +**Operations** + +- Picked up [#2674](https://github.com/concourse/concourse/issues/2674) Emit metrics for locks held in the DB diff --git a/docs/blog/posts/2018/2018-10-19-concourse-update--oct-15-19-.md b/docs/blog/posts/2018/2018-10-19-concourse-update--oct-15-19-.md new file mode 100644 index 00000000..603cbaba --- /dev/null +++ b/docs/blog/posts/2018/2018-10-19-concourse-update--oct-15-19-.md @@ -0,0 +1,66 @@ +--- +title: Concourse Update (Oct 15–19) +date: 2018-10-19 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Oct-15-19-/1-Y05yilBhjLQKwCftw39ZVw.jpeg" alt="Torontonians +typically overreact when they get their first snowfall of the year. Its just a bit of frost ya’ll" width="50%" >}} + + + +We finished our first implementation of Role Based Access Control (RBAC) this week! You can look forward to this change +in our next release of Concourse. + +Speaking of which, the next release of Concourse is currently blocked while we try to re-build our +new [release pipelines](https://ci.concourse-ci.org/teams/main/pipelines/concourse). Along with our move to the +mono-repo, we’re focusing even more on making the binary distribution of Concourse the first-class distribution of +Concourse. This means that you’ll get everything you need for Concourse packaged into one nifty tgz! 
We’re still working +on finalizing the pipelines, so look forward to hearing more details about these changes in the coming weeks. + +This week, I’ve also been doing some analysis on our internal Concourse instance Wings. Wings currently runs on GCP and +has + +- 4 web instances +- 31 workers @ 4 vCPUs, 16 GB memory, 1000 GB SSD +- Google CloudSQL as the db +- 99 internal teams + +Since inception last year, we’ve processed **238957900.6 build seconds, or 7 years of build activities** for Pivotal. +Our peak month was in July, 2018, where we processed **48978695.88 build seconds, or 1.5 build years.** + +Neat. + +On to the update: + +**API** + +{{< image src="/images/downloaded_images/Concourse-Update--Oct-15-19-/1-I0qcGZPL9DOugmQ6eC_xVQ.png" alt="" width=" +40%" >}} + +- We finished RBAC! +- Fixed an issue where Users who are not assigned to teams aren’t able to + login [#2670](https://github.com/concourse/concourse/issues/2670) + +**UX** + +- Working on finalizing the fix to [#2414](https://github.com/concourse/concourse/issues/2414), which we thought was + implemented but found that it didn’t work on Linux and Windows machines +- Continuing our UI cleanup work + with [#2434](https://github.com/concourse/concourse/issues/2434), [#2430](https://github.com/concourse/concourse/issues/2670), [#2435](https://github.com/concourse/concourse/issues/2435) +- Picked up the corresponding UI story for pinning resources in the Web + UI [#2508](https://github.com/concourse/concourse/issues/2508) + +**Core** + +- SPACE ([#1202](https://github.com/concourse/concourse/issues/1202) + and [#2651](https://github.com/concourse/concourse/issues/2651)) + +**Runtime** + +- Picked up some work on improving volume streaming [#2676](https://github.com/concourse/concourse/issues/2676) + +**Operations** + +- Working on emitting more metrics for locks held in DB 
[#2674](https://github.com/concourse/concourse/issues/2674) diff --git a/docs/blog/posts/2018/2018-10-26-concourse-update--oct-22-26-.md b/docs/blog/posts/2018/2018-10-26-concourse-update--oct-22-26-.md new file mode 100644 index 00000000..31546111 --- /dev/null +++ b/docs/blog/posts/2018/2018-10-26-concourse-update--oct-22-26-.md @@ -0,0 +1,47 @@ +--- +title: Concourse Update (Oct 22–26) +date: 2018-10-26 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Oct-22-26-/1-MnGUtwM_fDCZBeKhYrlwqA.jpeg" alt="MGI +Construction Corp" width="50%" >}} + + + +This week the team got together to discuss the initial groundwork and investigations required to publish and maintain a +supported API. If you’ve built any tools against our API and have feedback for us, please let us know by commenting on +the original [issue #1122](https://github.com/concourse/concourse/issues/1122). + +In another interesting update, the [PivNet](https://network.pivotal.io/) team has published an update to +the [pivnet-resource](https://github.com/pivotal-cf/pivnet-resource) so “you **no longer need to specify the access key, +secret access key, bucket and region** for creating releases.” If you use that resource, you should definitely check it +out! 
+ +On to the update: + +**UX** + +- Picked up the story for pinning versions of resources in the web + UI [#2508](https://github.com/concourse/concourse/issues/2508) + +**Core** + +- Continued our work on resources v2 and spatial resources + with [#2651](https://github.com/concourse/concourse/issues/2651) + +**Runtime** + +- Picked up failing tests in Testflight/Watsjs [#2719](https://github.com/concourse/concourse/issues/2719) +- Started work on retry / read deadline for Volume Streaming [#2676](https://github.com/concourse/concourse/issues/2676) + +**Operations** + +{{< image src="/images/downloaded_images/Concourse-Update--Oct-22-26-/1-yxHddOEl3sz5TqCy7M0q_A.png" width="50%" >}} + +We’ve added descriptions to our metrics graphs! You can check out the descriptions on our prod metrics +here: [https://metrics.concourse-ci.org/dashboard/db/concourse?refresh=1m&orgId=1](https://metrics.concourse-ci.org/dashboard/db/concourse?refresh=1m&orgId=1) + +In other news we’re also working on [#2674](https://github.com/concourse/concourse/issues/2674), emit metrics for locks +held in the database diff --git a/docs/blog/posts/2018/2018-11-02-concourse-update--oct-29--nov-2-.md b/docs/blog/posts/2018/2018-11-02-concourse-update--oct-29--nov-2-.md new file mode 100644 index 00000000..e2598c70 --- /dev/null +++ b/docs/blog/posts/2018/2018-11-02-concourse-update--oct-29--nov-2-.md @@ -0,0 +1,44 @@ +--- +title: Concourse Update (Oct 29— Nov 2) +date: 2018-11-02 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Oct-29--Nov-2-/1-7C_nS91OafAnN5DWBtN4SA.jpeg" alt="The +Concourse team’s big yoga ball has returned to its rightful home" width="50%" >}} + + + +As a part of our refactor of the prod pipeline, [Alex Suraci](https://medium.com/u/263a63b2f209) cleaned up and +refactored parts of the TSA to better support draining and +rebalancing 
[#2748](https://github.com/concourse/concourse/pull/2748). The numbers are looking really good! + +On to the update: + +### API + +- We’re deep into investigations around our API documentation and management strategy. Our current investigation work is + captured in [#2739](https://github.com/concourse/concourse/issues/2739) but the original request comes + from [#1122](https://github.com/concourse/concourse/issues/1122) + +### Core + +- SPACCEEEE [#2651](https://github.com/concourse/concourse/pull/2651) + +### UX + +- Continuing our work on supporting pinning of versions on resources from the UI. You can see some of our progress + on [#2508](https://github.com/concourse/concourse/pull/2508) + +### Runtime + +- Continuing our work on [#2676](https://github.com/concourse/concourse/pull/2676) + and [#1266](https://github.com/concourse/concourse/pull/1266) + +### Operations + +- Adding jobs to our pipeline to better support the Concourse Helm + Chart [#2743](https://github.com/concourse/concourse/pull/2743) +- And in general [Topher Bullock](https://medium.com/u/58876cdc2180) has been helping out with PRs and issues on the + Concourse Helm Chart. diff --git a/docs/blog/posts/2018/2018-11-09-concourse-update--nov-5-9-.md b/docs/blog/posts/2018/2018-11-09-concourse-update--nov-5-9-.md new file mode 100644 index 00000000..e9877365 --- /dev/null +++ b/docs/blog/posts/2018/2018-11-09-concourse-update--nov-5-9-.md @@ -0,0 +1,58 @@ +--- +title: Concourse Update (Nov 5–9) +date: 2018-11-09 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Nov-5-9-/1-JcXgBDqfq8Lwc4WNwyJgQg.jpeg" alt="" width=" +50%" >}} + + + +Right off the bat I’d like to give a shoutout to [Jamie Klassen](https://medium.com/u/f0f4a8a2fbb8) and his new post +about the upcoming feature for pinning resources. 
You can check out the new post
+here: [https://medium.com/concourse-ci/resource-page-explained-eb99cf256fb5](https://medium.com/concourse-ci/resource-page-explained-eb99cf256fb5)
+
+I also wanted to mention that the Github Pull Request resource that was maintained by JT
+Archie ([https://github.com/jtarchie/github-pullrequest-resource](https://github.com/jtarchie/github-pullrequest-resource))
+has been officially deprecated.
+
+1. The official docs for the resource types no longer point to jtarchie/pr for the PR resource. They are pointing
+   to [https://github.com/telia-oss/github-pr-resource](https://github.com/telia-oss/github-pr-resource) now.
+2. There will no longer be any maintenance, issues or PRs accepted on the resource.
+
+We also spent some time this week finalizing our plans for the Concourse 2019 roadmap. We’ll be writing it up in a wiki
+to share with everyone next week, so keep an eye out for another followup announcement!
+
+On to the update:
+
+**Pipeline**
+
+- We finally got a deploy going onto our [prod environment](https://ci.concourse-ci.org/). Everything broke but hey, it’s
+  the attempt that matters
+
+**API**
+
+- We’re still investigating various options for refactoring and documenting our
+  API. [Joshua Winters](https://medium.com/u/d6d52be6c4b0) is on it!
+
+**UX**
+
+- Pinning versions on resources. Make sure you read our write-up on
+  it [here](https://medium.com/concourse-ci/resource-page-explained-eb99cf256fb5)!
+
+**Core**
+
+- Resource v2 and Spatial resource design! Most of that work is currently being done in
+  a [feature branch](https://github.com/concourse/concourse/tree/spaces). 
+ +**Runtime** + +- Picked up [#2529](https://github.com/concourse/concourse/issues/2529) + +**Operations / K8s** + +- In addition to picking up some issues reported by users around helm-deployed concourse in + 4.2.1, [Topher Bullock](https://medium.com/u/58876cdc2180) is going to try spending his Fridays making / looking at + Concourse Helm chart PRs. diff --git a/docs/blog/posts/2018/2018-11-23-concourse-rbac-preview.md b/docs/blog/posts/2018/2018-11-23-concourse-rbac-preview.md new file mode 100644 index 00000000..5510bcd1 --- /dev/null +++ b/docs/blog/posts/2018/2018-11-23-concourse-rbac-preview.md @@ -0,0 +1,125 @@ +--- +title: Concourse RBAC Preview +date: 2018-11-23 +--- + +{{< image src="/images/downloaded_images/Concourse-RBAC-Preview/0-oy1M_w9ksoAje2LR.jpg" width="50%" >}} + + + +One of the big themes for Concourse in 2018 has +been [Users](https://concourse-ci.org/download.html#v400), [multiple auth connectors](https://concourse-ci.org/install.html#auth-config), +and role-based access control ( +aka [RBAC](https://github.com/pivotal-jwinters/rfcs/blob/proposal/rbac/03-rbac/proposal.md)). With RBAC in the final +phases of development, I wanted to give you a preview of some of the functionality that you can expect in our upcoming +release; Concourse 5.0 + +## Admins, Owners, Members and Viewers + +Concourse 5.0 will come with 4 roles: Concourse Admin, Team Owner, Team Member, and Team Viewer. + +### Concourse Admin + +A Concourse Admin is the same as today’s [admin user](https://concourse-ci.org/main-team.html). Members of main team +will automatically be Concourse Admins* and have the ability to administrate teams with fly: set-team, destroy-team, +rename-team, and teams. Given that all Concourse Admins must be a member of the main team, all Concourse Admins must +have at least one other role; and that should typically be the Team Owner role. 
+ +_* There’s an open issue to restrict this grant to Team Owners on main +in [#2846](https://github.com/concourse/concourse/issues/2846)_ + +### Team Owner + +Team Owners have read, write and auth management capabilities within the scope of their team. For those familiar with +Concourse today, the scope of allowed actions for a Team Owner is very closely aligned to today’s Concourse team member. +The new change is that you can no longer rename your own team or destroy your own team as an owner. + +### Team Member + +Team Member is a new role that lets users operate within their teams in a read & write fashion; but prevents them from +changing the auth configurations of their team. + +### Team Viewer + +Team Viewer is also a new role that gives users “read-only” access to a team. This locks everything down, preventing +users from doing a set-pipeline or hijack. + +### Other Roles + +We considered other role types while developing this feature; including roles that would specifically prevent intercept +and abort. We ultimately decided that our current configuration made more sense for the first release of RBAC. +Ultimately every organization will have different needs for their access control, so we are also planning for a future +where users can supply their own customized roles & permissions matrix. + +### Full Roles Breakdown + +For a full list of each role’s allowed actions you can reference our handy permission matrix on Google +Sheets [here](https://docs.google.com/spreadsheets/d/1np3hyJy3mVRfB2gcgKykz3QTQg5qEj28QgK523SEmao/edit#gid=1437859537). + +## Configuring Roles with fly + +Now that we’ve gone over the new roles, we can do a quick overview of how we can go about setting users & roles on +teams. 
+ +### Default Behaviour + +By default, if no configuration is provided the user is given theTeam Owner role: + +```shell-session +fly -t dev set-team -n PowerRangers --local-user=Zordon +``` + +This behaviour also applies to groups as well, so be careful! + +```shell-session +fly -t dev set-team -n A-Team \ + --github-team=MightyMorphin:PowerRangers +``` + +### Specifying Roles with `-c` + +Roles must be specified in a separate configuration file using the -c + +```shell-session + fly -t dev set-team -n PowerTeam -c ./team.yml +``` + +`team.yml`: + +```yaml +roles: + - name: owner + local: + users: [ "Zordon" ] + - name: member + local: + users: [ "RedRanger", "BlueRanger", "GreenRanger" ] + - name: viewer + local: + users: [ "Alpha" ] +``` + +### Inspecting Roles Configuration + +Once you’ve set the team configuration you can verify it using the details flag on fly teams: + +```shell-session +fly -t dev teams -d + +name users groups +A-Team/member local:RedRanger, BlueRanger, GreenRanger none +A-Team/owner local:Zordon none +A-Team/viewer local:Alpha none +``` + +...where you’ll find the output is now updated to list each team/role combination and its associated users/groups. + +## What’s left? + +And that’s RBAC in a nutshell! We’re really excited to get this in your hands in our upcoming release of Concourse. +There’s only a few more issues that we want to finish off before releasing this feature, specifically: + +- [#2846](https://github.com/concourse/concourse/issues/2846) Admin users should be restricted to members of the main + team with the owner role. This is so you don’t get weird cases of a Team Viewer on main getting Admin access +- [#2843](https://github.com/concourse/concourse/issues/2843) Dashboard team labels updated to display User Role. 
We + need this otherwise users on the Web UI have no idea what they can / can’t do diff --git a/docs/blog/posts/2018/2018-11-23-concourse-update--nov-19-23-.md b/docs/blog/posts/2018/2018-11-23-concourse-update--nov-19-23-.md new file mode 100644 index 00000000..fe7eb156 --- /dev/null +++ b/docs/blog/posts/2018/2018-11-23-concourse-update--nov-19-23-.md @@ -0,0 +1,38 @@ +--- +title: Concourse Update (Nov 19–23) +date: 2018-11-23 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Nov-19-23-/1-fBOnArsQyRfYMLYGB4Uk_w.jpeg" width="50%" >}} + + + +It was a relatively light week this week due to some vacations. I did, however, get a chance to do some acceptance work +on our upcoming feature for role-based access control in Concourse. You can read more about how that’ll work in +our [feature preview post](https://medium.com/concourse-ci/concourse-rbac-preview-8e07616ddc47). + +On to the update: + +**API** + +- Our investigation into the API continues and branches out into more areas of the codebase. If you haven’t already, + make sure to check out the two related + RFCS: [https://github.com/concourse/rfcs/pull/14](https://github.com/concourse/rfcs/pull/14) + and [https://github.com/concourse/rfcs/pull/15](https://github.com/concourse/rfcs/pull/15) + +**UX** + +- We’ve decided to commit to completing our refactor the Web NavBar before picking up new stories. This’ll hopefully + prevent regressions when we pick up new stories down the road. We now have over 300 unit tests for our web-ui! + +**Runtime** + +- Picked up [#2577](https://github.com/concourse/concourse/issues/2577). We’re having conversations internally around + specific strategies that would help with this. On the one hand, we could try computing resource utilization on the + first run to inform our future allocations; or we could go with naive container/volume balancing. 
+ +**Core** + +- Continuing our planning for Spatial resources diff --git a/docs/blog/posts/2018/2018-11-30-concourse-updates--nov-26-30-.md b/docs/blog/posts/2018/2018-11-30-concourse-updates--nov-26-30-.md new file mode 100644 index 00000000..a8727602 --- /dev/null +++ b/docs/blog/posts/2018/2018-11-30-concourse-updates--nov-26-30-.md @@ -0,0 +1,61 @@ +--- +title: Concourse Updates (Nov 26–30) +date: 2018-11-30 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Updates--Nov-26-30-/1-BFgbR-J5U389pc0g9nDi_Q.jpeg" width="100%" >}} + + + +As I mentioned last week I’ve been doing story acceptance in our dev environments for the +upcoming [RBAC](https://medium.com/concourse-ci/concourse-rbac-preview-8e07616ddc47) feature. The team’s been working +through some of the new issues that come out of that to give some final polish on to the release. + +Something that I haven’t talked too much about in the past weeks is our work on +the [Concourse k8s Helm chart](https://github.com/helm/charts/tree/master/stable/concourse). If you pull up some of the +PRs +under [stable/concourse](https://github.com/helm/charts/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+%5Bstable%2Fconcourse%5D+), +you’ll see that we’ve been proposing some changes to the chart. This all falls under our goals for helping the community +stabilize the Concourse Helm Chart and to increase the scope of automated tests using the Helm chart. You can follow +along some of our work in GH issues [#2753](https://github.com/concourse/concourse/issues/2753) +and [#2876](https://github.com/concourse/concourse/issues/2876). 
+ +On to the update + +**API** + +- Removed “allow all users” in [#2721](https://github.com/concourse/concourse/issues/2721) +- Added the restriction that only owners of main can be + admins [#2846](https://github.com/concourse/concourse/issues/2846) + +**Fly** + +- Fixed [#2780](https://github.com/concourse/concourse/issues/2780) +- Fixed [#2414](https://github.com/concourse/concourse/issues/2414) +- Fixed [#2819](https://github.com/concourse/concourse/issues/2819) + +**UX** + +- Implemented [#2843](https://github.com/concourse/concourse/issues/2843) to help users understand what roles they have + on each team +- Finished [#2795](https://github.com/concourse/concourse/issues/2795), which added the “pin” colors to the legend +- Completed an issue that lets users unpin from the top bar [#2870](https://github.com/concourse/concourse/issues/2870) +- Moved the Exposed state on a pipeline off the team and onto the + pipeline [#2844](https://github.com/concourse/concourse/issues/2844) +- Fixed an old issue where users of new teams can’t un-pause their first + pipelines [#2882](https://github.com/concourse/concourse/issues/2882) + +**Core** + +- Began building up large scale test environments for Global Resource + caching [#2874](https://github.com/concourse/concourse/issues/2874) + +**Runtime** + +Continued with [#2577](https://github.com/concourse/concourse/issues/2577): + +> “..as a first effort solution, we have decided to go with using the existing number of active containers on the +> workers to determine container placement. 
This means that we are adding a placement strategy that adds the new task on +> to a worker with the least existing active containers.” diff --git a/docs/blog/posts/2018/2018-12-07-concourse-update--dec-3-7-.md b/docs/blog/posts/2018/2018-12-07-concourse-update--dec-3-7-.md new file mode 100644 index 00000000..7c2c0061 --- /dev/null +++ b/docs/blog/posts/2018/2018-12-07-concourse-update--dec-3-7-.md @@ -0,0 +1,50 @@ +--- +title: Concourse Update (Dec 3–7) +date: 2018-12-07 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Dec-3-7-/1-9QGCZafW4o8rGIVxN4QvYA.jpeg" alt="Stephen A. +Wolfe" width="75%" >}} + + + +We’re nearing the end on some UX refactoring work and finished off the issue regarding container scheduling. Between +those improvements and the global resource caching, we’re hoping to see a lot of efficiency improvements in 5.0 + +That said, we’ve decided that we need to perform some additional performance and load testing on Concourse 5.0 before we +cut the release. And with the holidays coming up, its increasingly unlikely that we’ll be able to push Concourse 5.0 +before the end of this year. In the meantime, we’re planning to make a big update post describing the new deployment +pipeline, contribution structure, major features in Concourse 5.0, and much more; so keep an eye out for that in the +coming days! + +If you’re attending [KubeCon](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-north-america-2018/) next +week I’d encourage you to check out the talk +on [Using Concourse as a CI/CD Tool for Your Knative Ap](https://cfdayna18.sched.com/event/I7YM/using-concourse-as-a-cicd-tool-for-your-knative-app-dale-wick-divya-dadlani-pivotal#) +p. Concourse engineer Divya Dadlani will be co-speaker on this talk and if you ask nicely; she might give you one of our +fancy Concourse stickers. 
You should also check out Fairfax Media’s talk +on [Cloud Native Transformation](https://kccna18.sched.com/event/Grb7/cloud-native-transformation-pavel-nikolov-matt-hope-fairfax-media) +too, I hear they use a lot of Concourse! + +And finally, I’ll be taking some time off for the holidays starting Dec 13, and won’t be returning to work until the new +year. I’ve got a few posts scheduled to come out until then, but for now happy holidays, happy new year, and thanks for +another awesome year of Concourse. + +On to the update: + +**API** + +- Resolved [#2887](https://github.com/concourse/concourse/issues/2887) + +**UX** + +- Fixed a bug that happens when you try to log out from + Concourse [#2884](https://github.com/concourse/concourse/issues/2884) +- Fixed an issue with Fly where using -c on set-team with RBAC will fail silently if you use a badly-formed + file[#2904](https://github.com/concourse/concourse/issues/2904) +- Fixed an issue regarding the output of fly teams -d [#2880](https://github.com/concourse/concourse/issues/2880) + +**Runtime** + +- Slightly better scheduling [#2577](https://github.com/concourse/concourse/issues/2577) diff --git a/docs/blog/posts/2018/2018-12-12-concourse-2018-year-in-review.md b/docs/blog/posts/2018/2018-12-12-concourse-2018-year-in-review.md new file mode 100644 index 00000000..e3b08475 --- /dev/null +++ b/docs/blog/posts/2018/2018-12-12-concourse-2018-year-in-review.md @@ -0,0 +1,80 @@ +--- +title: Concourse 2018 Year in Review +date: 2018-12-12 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-2018-Year-in-Review/1-pzx9yhOYi-XyEFUgqnxBfw.jpeg" alt="Bernal +Saborio" width="100%" >}} + + + +2018 has been an action-packed year for us. 
We saw a major
+release ([Concourse 4.0.0](https://concourse-ci.org/download.html#v400)) with a lot of new
+features: [new auth connectors and users](https://medium.com/concourse-ci/oh-auth-f4fe68438171), [dashboard](https://medium.com/concourse-ci/designing-a-dashboard-for-concourse-fe2e03248751), [distributed GC](https://medium.com/concourse-ci/distributed-garbage-collection-ae3867ab5438)
+and other runtime improvements. At the same time our team grew from 3 engineering pairs at the start of 2018 to 8
+engineering pairs and an additional PM (👋 [Scott Foerster](https://medium.com/u/86d0fa097bb9)) working on Concourse OSS
+and supporting Concourse for PCF.
+
+## By the Numbers
+
+- 13 releases of Concourse
+- 1 TLD snafu leading to [our domain change](https://medium.com/concourse-ci/were-switchin-domains-5597dcd0b48b)
+- 1 new website design
+- 2 new ways to contact us ([Discord](https://discord.gg/MeRxXKW), [Discuss](https://discuss.concourse-ci.org/))
+- 1,070 members on Concourse Discord, 45 members in #contributors
+- 937 new GitHub issues created in 2018. Given that only 1,694 issues were opened prior to 2018, I’d say that’s a
+  pretty big jump in activity!
+- 417 PRs, up by 15% from last year
+- 3,300 stars, up by 38% from last year
+
+## Most Popular Posts
+
+We started this blog in Sept of last year. Since then we’ve had thousands of readers over 70 posts. 
The top 5 most +popular posts are: + +- [Designing a Dashboard for Concourse](https://medium.com/concourse-ci/designing-a-dashboard-for-concourse-fe2e03248751) +- [Getting Started with Concourse on macOS](https://medium.com/me/stats/post/fb3a49a8e6b4) +- [Earning our Wings](https://medium.com/me/stats/post/a0c307fa73e6) +- [Concourse Pipeline UI Explained](https://medium.com/concourse-ci/concourse-pipeline-ui-explained-87dfeea83553) +- [Sneak Peek: Spatial Resources](https://medium.com/concourse-ci/sneak-peek-spatial-resources-d0eed9bb3fa) + +## Thanks to our Contributors + +Finally, Thanks to all our contributors across all our repos: + +[**concourse/concourse +**](https://github.com/concourse/concourse/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) **: +** [edtan](https://github.com/edtan), [ralekseenkov](https://github.com/ralekseenkov), [SHyz0rmZ](https://github.com/SHyx0rmZ), [databus23](https://github.com/databus23), [pivotal-kahin-ng](https://github.com/pivotal-kahin-ng), [jmcarp](https://github.com/jmcarp), [tkellen](https://github.com/tkellen), [aeijdenberg](https://github.com/aeijdenberg), [rosenhouse](https://github.com/rosenhouse), [andrewedstrom](https://github.com/andrewedstrom), [baptiste-bonnaudet](https://github.com/baptiste-bonnaudet), [PavelTishkin](http://PavelTishkin), [timchavez](https://github.com/timrchavez), [JamesClonk](https://github.com/JamesClonk), [rfliam](https://github.com/rfliam), [ArthurHlt](https://github.com/ArthurHlt), [christophermancini](https://github.com/christophermancini) + +[**concourse/concourse-docker: +**](https://github.com/concourse/concourse-docker/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) [danielrs](https://github.com/danielrs), [scottbri](https://github.com/scottbri), 
[dbbaskette](https://github.com/dbbaskette), [ElfoLiNk](https://github.com/ElfoLiNk), [jmcduffie32](https://github.com/jmcduffie32),
+[SergueiFedorov](https://github.com/SergueiFedorov)
+
+[**concourse/concourse-bosh-release:
+**](https://github.com/concourse/concourse-bosh-release/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) [JamesClonk](https://github.com/JamesClonk), [ramonskie](https://github.com/ramonskie), [avanier](https://github.com/avanier), [rkoster](https://github.com/rkoster), [SHyx0rmZ](https://github.com/SHyx0rmZ), [aeijdenberg](https://github.com/aeijdenberg), [jmcarp](https://github.com/jmcarp), [ArthurHlt](https://github.com/ArthurHlt)
+
+[**concourse/docs:
+**](https://github.com/concourse/docs/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) [vlad-ro](https://github.com/vlad-ro), [charlieoleary](https://github.com/charlieoleary), [AnianZ](https://github.com/AnianZ), [berlin-ab](https://github.com/berlin-ab), [a114m](https://github.com/a114m), [ukabu](https://github.com/ukabu), [arbourd](https://github.com/arbourd), [baptiste-bonnaudet](https://github.com/baptiste-bonnaudet), [marco-m](https://github.com/marco-m), [rosenhouse](https://github.com/rosenhouse), [patrickcrocker](https://github.com/patrickcrocker), [crstamps2](https://github.com/crstamps2), [JohannesRudolph](https://github.com/JohannesRudolph), [dpb587](https://github.com/dpb587), [headcr4sh](https://github.com/headcr4sh), [aequitas](https://github.com/aequitas)
+
+[**concourse/docker-image-resource:
+**](https://github.com/concourse/docker-image-resource/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) [ghostsquad](https://github.com/ghostsquad), 
[dhinus](https://github.com/dhinus), [norbertbuchmueller](https://github.com/norbertbuchmueller), [hephex](https://github.com/hephex), [chrishiestand](https://github.com/chrishiestand), [kmacoskey](https://github.com/kmacoskey), [irfanhabib](https://github.com/irfanhabib), [simonjohansson](https://github.com/simonjohansson), [et7peho](https://github.com/et7peho), [itsdalmo](https://github.com/itsdalmo), [krishicks](https://github.com/krishicks), [mook-as](https://github.com/mook-as) + +[**concourse/git-resource: +**](https://github.com/concourse/git-resource/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) [norbertbuchmueller](https://github.com/norbertbuchmueller), [talset](https://github.com/talset), [ppaulweber](https://github.com/ppaulweber), [elgohr](https://github.com/elgohr), [alucillo](https://github.com/alucillo), [goddenrich](https://github.com/goddenrich), [njbennett](https://github.com/njbennett), [timchavez](https://github.com/timrchavez), [suda](https://github.com/suda), [jamesjoshuahill](https://github.com/jamesjoshuahill), [gcapizzi](https://github.com/gcapizzi), [mdomke](https://github.com/mdomke), [benmoss](https://github.com/benmoss), [ljfranklin](https://github.com/ljfranklin), [oliveralberini](https://github.com/oliverralbertini), [krishicks](https://github.com/krishicks) + +[**concourse/s3-resource: +**](https://github.com/concourse/s3-resource/graphs/contributors?from=2018-01-01&to=2018-12-12&type=c) [ghostsquad](https://github.com/ghostsquad), [talset](https://github.com/talset), [46bit](https://github.com/46bit), [bandesz](https://github.com/bandesz), [ruurdk](https://github.com/ruurdk) + +…and many more. 
Special thanks to all the contributors who’ve built new resources for Concourse in 2018, contributed to +the health of resources and also took over old resources. + +## See you in 2019! + +I hope y’all get to enjoy some time off this holiday season. We’ve got a lot of updates planned for 2019, like our new +contributor workflow, Concourse 5.0 and spatial resources! Look forward to an in-depth post +from [Alex Suraci](https://medium.com/u/263a63b2f209) in the next few days. + +Thanks again for all the support, and we’ll see you in 2019! + diff --git a/docs/blog/posts/2018/2018-12-20-the-great-process-update-of-2018.md b/docs/blog/posts/2018/2018-12-20-the-great-process-update-of-2018.md new file mode 100644 index 00000000..43d7ea22 --- /dev/null +++ b/docs/blog/posts/2018/2018-12-20-the-great-process-update-of-2018.md @@ -0,0 +1,107 @@ +--- +title: The Great Process Update of 2018 +date: 2018-12-20 +--- + +### Or, “why we haven’t shipped any new features since September.” + +{{< image src="/images/downloaded_images/The-Great-Process-Update-of-2018/1-Fdk1aihMwmllUR7HOBp2kg.jpeg" width="80%" >}} + + + +You may have noticed that our release cadence has slowed down significantly in the past few months. The _bad news_ is we +probably won’t get a release out this year (mainly due to end-of-year vacations and slowing down in general), but the +_good news_ is the next release is huge — big enough to bump us to v5.0 — and it’s just about ready. I’ll have more +information on the next release in an upcoming post. + +This post will go over all the changes we’ve made to our project structure and processes surrounding contribution. These +changes aren’t very visible to end-users, but they set the stage for the community growth and collaboration that will +make our future releases even better and bring more depth to our culture and ecosystem. + +## A newly minted process for RFCs + +We’ve finally established a process for submitting and accepting RFCs! 
Head over to
+the [concourse/rfcs](https://github.com/concourse/rfcs) [repo](https://github.com/concourse/rfcs) if you want to check it
+out.
+
+This new process enables anyone in the community to have a big impact on Concourse’s direction. I’m really looking
+forward to seeing where this goes. We’ll be posting status updates for RFCs on this blog to notify the community of RFCs
+that are newly opened or near acceptance.
+
+We've already started submitting RFCs for substantial features
+like [Resources V2](https://github.com/concourse/rfcs/pull/1) and [RBAC](https://github.com/concourse/rfcs/pull/6),
+though we jumped the gun a bit on implementation as we hadn’t yet figured out what we wanted from the RFC process (we
+just needed a better way to plan things in the open). There are a few loose ends to tidy up with existing RFCs now that
+we have a full process in place.
+
+Credit where it’s due: this process is based pretty heavily on [Rust’s](https://github.com/rust-lang/rfcs). Just about
+every detail seemed to apply just as appropriately to Concourse, and we’re just as cautious about far-reaching changes,
+so it was a great match.
+
+## Switching from CLA to DCO
+
+Up until now, all pull request authors have had to sign off on the Pivotal CLA in order for their pull request to be
+accepted (unless it was an “obvious fix”).
+
+On occasion contributors would get caught in a corporate quagmire when trying to get their company to sign off on the
+CLA, and it was also kind of jarring for individuals. The need for something like the CLA hasn’t gone away, but we felt
+it may have been hindering more than helping.
+
+So, we’re abandoning the CLA process and instead adopting
+the [Developer Certificate of Origin (“DCO”)](https://developercertificate.org) process. This process is much more
+lightweight, only requiring pull request authors to include a “Signed-off-by:” line in each commit, which can be done
+via git commit -s. 
More information on this is available
+in [CONTRIBUTING.md](https://github.com/concourse/concourse/blob/master/CONTRIBUTING.md#signing-your-work).
+
+## Completing the Great Project Restructuring of 2018
+
+The single biggest cause of the release slowdown has
+been [The Great Project Restructuring of 2018](https://github.com/concourse/concourse/issues/2534), which was a massive
+revamp of how we develop, build, test, and ship Concourse. We knew this would be a “stop-the-world” transition that
+would prevent us from shipping for a while, but we really had to bite the bullet at some point.
+
+The focal point of this restructuring: almost all of Concourse’s code now lives in one
+big [concourse](https://github.com/concourse/concourse) [monorepo](https://github.com/concourse/concourse), using the
+new [Go 1.11 module system](https://github.com/golang/go/wiki/Modules) to track dependencies. We’ve replaced our
+BOSH-centric development and pipeline workflow with a Docker-based workflow which is more intuitive and has a much
+faster feedback cycle.
+
+This means you can now git clone the [Concourse repo](https://github.com/concourse/concourse) and get a cluster built
+from source and running in a single command: docker-compose up. It’s never been easier to make changes and test them out
+locally. Check out the new [CONTRIBUTING.md](https://github.com/concourse/concourse/blob/master/CONTRIBUTING.md) for
+more information!
+
+This change kicked off a ripple effect that improved a ton of things about the developer, contributor, and operator
+experience:
+
+- Now that all the code is together in one repo, cross-cutting changes can now be submitted as a single pull request! 🎊
+  Pull requests now trigger acceptance tests too, which is something we couldn’t really do easily before.
+- Resources are now versioned and shipped independently from Concourse versions. 
Each resource is published as + concourse/\-resource with appropriate tags (e.g. 1.2.3, 1.2, 1, latest, dev). This means you can refer to + specific versions when necessary by using resource\_types: in your pipeline. A core set of resource types will still + be shipped with Concourse, at whichever version they were when the release was frozen. +- The concourse repo is no longer a BOSH release; we’ve split it out + into [its own repository](https://github.com/concourse/concourse-bosh-release) instead. The new BOSH release simply + wraps the binary distribution, rather than building from source. This reduces the surface area for support and removes + any discrepancies between the platforms — everything just uses the binary now! This also makes deploying the BOSH + release faster because there’s not much to compile. +- We’ve changed how the concourse executable is packaged. We’re switching to a .tgz format containing the binary and its + dependencies, rather than a self-extracting “all-in-one” binary. This results in way fewer moving parts and + dramatically reduces concourse worker start-up time. + +## Where are we now? + +Overall, I think these recent changes may be the most important thing we’ve done for the project since its inception, +even if it meant not shipping for a while. + +The RFC process will make it easier to collaborate, switching to the DCO removes a hurdle for new contributors, and the +the new project structure should dramatically improve the developer experience. + +I’d like to give special thanks to everyone that has tried out and given feedback on this new development process, and +all the users that have waited patiently for the next release. 😅 + +## What’s next? + +Well, now that the dust is settling it’s time to actually start shipping software again. The next post will go over +what’s in store for 5.0 and peek ahead into what we’re planning for 2019. See you then! 
+ diff --git a/docs/blog/posts/2019/2019-01-11-concourse-update--jan-7-11-.md b/docs/blog/posts/2019/2019-01-11-concourse-update--jan-7-11-.md new file mode 100644 index 00000000..2b2bdded --- /dev/null +++ b/docs/blog/posts/2019/2019-01-11-concourse-update--jan-7-11-.md @@ -0,0 +1,48 @@ +--- +title: Concourse Update (Jan 7–11) +date: 2019-01-11 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jan-7-11-/1-xc1d4AtxzZanyNVDiEPrvg.jpeg" width="50%" >}} + + + +…and we’re back! The team’s been pretty quiet over the past few weeks due to vacations and holidays. This was our first +week back at full strength so we’ve got some interesting updates for ya’ll + +## How are issues managed? + +This is an issue that comes up a lot in our open source community, and [Alex Suraci](https://medium.com/u/8a9db60441c) +has taken some time to clean up our issues backlog and add in some bots. You can read the full details +here: [How Issues are Managed](https://github.com/concourse/concourse/wiki/How-Issues-are-Managed) + +In addition to the changes to how issues are labeled, we’ve also changed how we used projects and milestones under +concourse/concourse. Epics are now organized +under [projects in concourse/concourse](https://github.com/concourse/concourse/projects), and release markers are +managed under [milestones in concourse/concourse](https://github.com/concourse/concourse/milestones). And as always, our +“tracks of work” can be found at the [org-level project page](https://github.com/orgs/concourse/projects). + +## Updates + +**UX** + +Thanks to the hard work of the UX team, they were able to crank through a lot of nice UI issues over the past few weeks. +This includes [#2405](https://github.com/concourse/concourse/issues/2405) +and [#2881](https://github.com/concourse/concourse/issues/2881). We will also be scheduling a big track of work for +transitioning to Elm 0.19. 
+ +**Core** + +We’re picking up from the global resource cache work from last year and picking off the remaining blockers to release. +Specifically [#2908](https://github.com/concourse/concourse/issues/2908) needs to be addressed otherwise everyone’s +time-resource will kick off at the same time; which may be very bad news for shared environments. In order to keep the +release process on track we will be parallelizing [#2874](https://github.com/concourse/concourse/issues/2874) +performance testing to another pair. + +**Runtime** + +Having completed the placement strategy and testing it in prod, we’re proceeding to do some refactoring +in [#2926](https://github.com/concourse/concourse/issues/2926) + diff --git a/docs/blog/posts/2019/2019-01-18-concourse-update--jan-14-18-.md b/docs/blog/posts/2019/2019-01-18-concourse-update--jan-14-18-.md new file mode 100644 index 00000000..843ba43d --- /dev/null +++ b/docs/blog/posts/2019/2019-01-18-concourse-update--jan-14-18-.md @@ -0,0 +1,50 @@ +--- +title: Concourse Update (Jan 14–18) +date: 2019-01-18 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jan-14-18-/1-6MKC6FrHvi5u_9yZCklyeA.png" alt="for context... +or don’t 🤷" width="100%" >}} + + + +Some updates worth bringing up this week. As I had mentioned last week we began to do a re-organization of projects and +issues in our concourse/concourse repo; you can read more about it on +our [wiki page here](https://github.com/concourse/concourse/wiki/How-Issues-are-Managed). With that said, you can find +the issues and PRs that are slated for Concourse 5.0.0’s release in +our [5.0.0 Milestones](https://github.com/concourse/concourse/milestone/33). If you’d like to help us with +documentation, we’ve started a new branch in the docs repo under [v5.0](https://github.com/concourse/docs/tree/v5.0). 
+ +One of the items we want to resolve before release is issue [#3003](https://github.com/concourse/concourse/issues/3003) +“Determine full set of core resources that we should bundle with Concourse”. In this discussion we’re going over the +idea of removing the pre-baked resources in favour of slimming down the Concourse footprint and only shipping what is +absolutely needed. We want to hear how this may impact you and your Concourse experience. We’d like to wrap this up +soon, so please drop in a comment at your earliest convenience! + +In other big news, the Concourse core engineering team has officially switched to a PR based workflow. That means we are +no longer allowing direct commits to master and all issue “acceptance” will be conducted via the merging of pull +requests. We hope this will make our development process even _more_ transparent and further involve the community in +day-to-day work! + +On to the update: + +**UX** + +- Added a comments bar to indicate paused resources are now pinned ( + PR[#3064](https://github.com/concourse/concourse/pull/3064)) +- You can now force check a resource from the web UI (PR [#3051](https://github.com/concourse/concourse/pull/3051)) + +**Core** + +- Completed [#2908](https://github.com/concourse/concourse/issues/2908). 
This is one of the key blocking issues + preventing us from releasing Concourse 5.0.0 +- Picked up [#3013](https://github.com/concourse/concourse/issues/3013) as a way to address the two very clear use cases + where you might not want it: lots of time resources and resources that use IAM roles +- Picked up the performance test work [#2874](https://github.com/concourse/concourse/issues/2874) + +**Runtime** + +- In the first issue of many around runtime refactoring, we picked + up [#3502](https://github.com/concourse/concourse/issues/3052) to break up the responsibilities of containerProvider diff --git a/docs/blog/posts/2019/2019-01-25-concourse-update--jan-21-25-.md b/docs/blog/posts/2019/2019-01-25-concourse-update--jan-21-25-.md new file mode 100644 index 00000000..156bf162 --- /dev/null +++ b/docs/blog/posts/2019/2019-01-25-concourse-update--jan-21-25-.md @@ -0,0 +1,41 @@ +--- +layout: post +title: Concourse Update (Jan 21–25) +date: 2019-01-25 +categories: + - product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--Jan-21-25-/1-t_CkNyt-IVxZrxMiARCJLQ.jpeg" alt="Dennis Jarvis" +width="50%" >}} + + + +Its been a week since we switched over to the PR workflow and so far its been great! We’re still working through some of +the kinks with this process so please bear with us while we continue to burn down through the list of open PRs! + +And now…on to the update! I might have missed a few issues while I’m still getting used to our new workflow. Completed +issues now appear as +c[losed PRs in concourse/concourse](https://github.com/concourse/concourse/pulls?q=is%3Apr+is%3Aclosed) + +**Docs** + +- Started to burn down our list of todos for Concourse docs pre-release. You can follow along + in [#143](https://github.com/concourse/docs/issues/143) +- Proposed a new structure for our docs in [#136](https://github.com/concourse/docs/issues/136). Brace yourselves for + broken links. 
+ +**UX** + +- Added a “new version” tooltip to versions in Resource page [#3136](https://github.com/concourse/concourse/pull/3136) +- Fixed a whole bunch of UX quirks in preparation for v5.0.0 release +- Beginning our Elm 0.19 refactor and upgrade + +**Core** + +- Upgrade and performance testing for Concourse 5.0 [#2874](https://github.com/concourse/concourse/issues/2874) + +**Runtime** + +- Decoupling container and volume creation in + FindOrCreateContainer [#3052](https://github.com/concourse/concourse/issues/3052) diff --git a/docs/blog/posts/2019/2019-03-08-an-overview-of-authorization-in-concourse-3--4-and-5.md b/docs/blog/posts/2019/2019-03-08-an-overview-of-authorization-in-concourse-3--4-and-5.md new file mode 100644 index 00000000..fab82330 --- /dev/null +++ b/docs/blog/posts/2019/2019-03-08-an-overview-of-authorization-in-concourse-3--4-and-5.md @@ -0,0 +1,272 @@ +--- +title: An Overview of Authorization in Concourse 3, 4 and 5 +date: 2019-03-08 +categories: +- product-update +--- + +{{< image src=" +/images/downloaded_images/An-Overview-of-Authorization-in-Concourse-3--4-and-5/1-sh1rcJO5eSRDQrmxIF8qTA.jpeg" alt="NASA +HQ Photo" width="60%" >}} + + + +With the release of [Concourse 5.0.0](https://concourse-ci.org/download.html#v500) this week I thought it would be a +good time to review the evolving implementation of authorization in Concourse. I’ll also be covering some helpful +debugging information for you to consider when configuring authorization in your own Concourse instance. + +## Read the Docs + +The revised [Concourse Auth & Teams docs](https://concourse-ci.org/auth.html)is a great place to start when diving into +Concourse 5.0.0. The docs will cover important steps around provider configuration and team configuration for your +cluster. If you’re more interested in how things used to work compared to how they now work; then read on! 
+ +## How Authorization Works in 3.x + +_This section will only be useful to operators who are migrating into 4.x and beyond. Feel free to skip ahead if this +does not apply to you._ + +{{< image src=" +/images/downloaded_images/An-Overview-of-Authorization-in-Concourse-3--4-and-5/1-cNIh0ygLLcNnPbGEDOhcig.png" alt="" +width="100%" >}} + +Every Concourse instance starts with a main team that must be configured against an Authentication Provider on start-up. +The main team is an admin team, meaning it can create teams, update other teams and view system-scoped details on +workers, containers, etc. + +One of the tasks that only a main user can do is to create new teams via set-team. When creating the team, the operator +must specify: + +- An Authentication Provider e.g. Basic Auth, GitHub, OAuth +- The relevant configuration of the Authentication Provider e.g. secrets, tokens +- The user/group to authorize (if applicable) + +Some important notes to keep in mind: + +- Authentication Provider configurations are attached to an individual team, and not shared across teams. As an + operator, you will have to repeat/resupply the Authentication Provider configuration for each team. If Team 1 wanted + to change their own auth to add a member or group they would have to ask the Operator for the github API token or + bring their own. +- Since Authentication Provider details are provided per-team, operators can set unique provders for each. A common + use-case is to provision Team 1 to authenticate against the USA-East-1 OAuth server and Team2 to authentiate against + the EMEA OAuth server. +- You can _stack_ Authentication Providers by supplying multiple parameters when applying set-team; e.g. a team can have + both GitHub _and_ Basic Auth configured to authenticate users. +- Users who are authorized to access more than one team can only see one team at a time. 
+ +## Concourse 4.0 — Users + +Concourse 4 introduced Users and totally revamped the authorization flow: + +- Identity providers must be specified when Concourse first starts up (this includes local users as well!) +- Identity providers are shared across teams and can no longer be customized per-team +- Adding/removing Identity Providers require a restart to the web node +- When specifying groups in provider configuration, administrators must use : as the separator instead of / +- Users logging into Concourse are whitelisted into all teams that match thier provider membership. More on this later + +### Overview of Authorization Flow + +1. Operator determines the Identity Providers they will allow in Concourse and configures their Concourse startup + scripts (Docker, BOSH, Helm, etc.) with the necessary parameters as described + in [Configuring Auth](https://concourse-ci.org/install.html#quickstart). +2. If there are any local users that have Basic Auth (username/password) identities, the operator will add them to the + startup scripts as outlined in [Local Auth](https://concourse-ci.org/install.html#local-auth-config) +3. The Operator will start Concourse and begin creating teams using the fly set-team command. Keeping in mind the auth + providers that were added in step (1) the Operator can specify the allowed users/groups/teams from that provider. + See [Configuring Team Auth](https://concourse-ci.org/authentication.html) for more details. +4. When a User logs into Concourse, they are asked to login using one of the configured providers from (1). +5. Once the User selects a provider, Concourse will redirect the User to the identity provider’s authentication + mechanism and wait for a successful login response +6. When a login success response is recieved, Concourse will examine all of the teams/orgs the User belongs to under + that provider. 
Concourse will then match the user’s information against the internal list of Concourse teams and + their list of whitelisted users/teams/orgs. The resulting list will be the teams that the User can access +7. The User is logged into Concourse and can access the teams they were whitelisted into + +### Identity Providers + +An Identity Provider is any external entity that creates, manages and maintains identity information to auth. Concourse +4 uses the OSS [dex library](https://github.com/dexidp/dex) to do most of the heavy lifting. + +**Specifying Identity Providers** + +You will need to provide the connection details for all the auth connectors you plan to use for teams up front. The full +list of supported providers and their require parameters can be found on the Concourse docs site +under [Configuring Auth](https://concourse-ci.org/install.html#quickstart). + +**Local Users, The Special Case** + +Local Auth users are a bit of a special case because there’s no external auth provider for them, and you can no longer +“create” them on set-team. + +To add a local user you will need to add that user to the Concourse startup parameter list as described in +the [Local Auth](https://concourse-ci.org/install.html#local-auth-config)docs. + +**Whitelisting Users with** **set-team** + +Once you have configured the providers you can freely add users/teams/orgs/groups/whatever to a team. This is as simple +as using the parameters described in the fly set-team docs +for [Configuring Team Auth](https://concourse-ci.org/authentication.html). + +As with most fly commands, you can actually attach multiple users/teams across providers to a team. For example: if you +have GitHub and OAuth providers set up, a team owner could attach two teams (one from GitHub, one from OAuth) to the +team. 
+ +### Examples + +{{< image src=" +/images/downloaded_images/An-Overview-of-Authorization-in-Concourse-3--4-and-5/1-c4yd3A2DIIrRYF8uqh9_fw.png" width=" +100%" >}} + +In this example we have a simple Concourse installation with two identity providers: GitHub and a single Local User. + +On the left we have two simple GitHub orgs: Pivotal and Concourse. Pivotal has three teams: cloud, billing and admin. +Concourse has one team. Each team has a single user attached to them. + +On the right we have a map of the Concourse teams and their allowed users/groups. + +Let’s go through a few scenarios to get a good understanding of how auth works in Concourse 4. + +**Local User Logs In** + +A Concourse user uses the local user provider to login with username:password and only sees Team Local. + +**Alice Logs In** + +Alice logs into Concourse using the GitHub auth scheme. She finishes the flow and sees..two teams! Because she is a +member of the Pivotal GitHub org she sees Team All, which is configured to allow all users under the pivotal org on +GitHub. She also sees Team 1 because it allows all users who are also memebers of pivotal:cloud on GitHub. + +**Operator Logs In** + +The Operator logs in using GitHub auth and…can see everything! Because the Operator is part of the main team, they can +see all teams. However, that does _not_ mean the Operator can see all the team pipelines. In this scenario, the Operator +can only see the Main and Team All team pipelines. + +**A non-member logs in** + +Jama finds out about this cool Concourse thing and logs into Concourse using the GitHub auth provider. Since he has a +GitHub account he is able to login successfully. However, once the login flow is completed he is returned to Concourse +and a blank screen…nothing is available to him! Jama is not a member of a GitHub team/organization that was specified in +the Concourse team configurations. + +## Debugging Login Problems + +### What are the auth settings for [insert team name]? 
+ +If you are an operator and you need to figure out what the exact auth settings are, you can use the new fly teams +-dcommand. This will list the teams with details, including the users and groups whitelisted into that team + +### Help, I logged in but I can’t see my team + +1. Try using the search function on the dashboard. This is silly but for large Concourse clusters there are a LOT of + teams with exposed pipelines and it can be hard to find the team you need +2. Logout and Log back in. Due to the implementation of the auth scheme, Users who are already logged into Concourse and + are added into a new team must refresh their token by logging out and logging in. + Yes, [we know it sucks](https://github.com/concourse/concourse/issues/2441). +3. Is the user a member of the org that was specified in set-team? For example, if GitHub team pivotal:foo was used, + make sure to ask if the user is a member of that team on GitHub! +4. Was there a typo? Use fly set-team -d to look for the team in question and triple-check the spelling of usernames and + teams +5. Did you use the correct separator? Concourse requires all group separators to use : and not /: + +- pivotal:foo is OK +- pivotal/foo will fail silently on set-team + +### I have two Identity servers, how do I add them both? + +Unfortunately, that is not possible in Concourse 4. You’ll notice that you can only supply one set of credentials when +providing auth providers. The side-effect limitation is that a single Concourse installation can’t be connected to more +than one of the same provider. The operator will have to set up another Concourse if they absolutely must be able to +connect to two differet identity providers of the same type. + +## Concourse 5.0 — RBAC + +Concourse 5.0 comes with 4 roles: Concourse Admin, Team Owner, Team Member, and Team Viewer. + +**Concourse Admin** + +A Concourse Admin is the same as today’s [admin user](https://concourse-ci.org/main-team.html). 
Members of main team +will automatically be Concourse Admins\* and have the ability to administrate teams with fly: set-team, destroy-team, +rename-team, and teams. Given that all Concourse Admins must be a member of the main team, all Concourse Admins must +have at least one other role; and that should typically be the Team Owner role. + +**Team Owner** + +Team Owners have read, write and auth management capabilities within the scope of their team. For those familiar with +Concourse today, the scope of allowed actions for a Team Owner is very closely aligned to today’s Concourse team member. +The new change is that you can no longer rename your own team or destroy your own team as an owner. + +**Team Member** + +Team Member is a new role that lets users operate within their teams in a read & write fashion; but prevents them from +changing the auth configurations of their team. + +**Team Viewer** + +Team Viewer is also a new role that gives users “read-only” access to a team. This locks everything down, preventing +users from doing a set-pipeline or intercept. + +**Full Roles Breakdown** + +For a full list of each role’s allowed actions you can reference our handy permission matrix on Google +Sheets [here](https://docs.google.com/spreadsheets/d/1np3hyJy3mVRfB2gcgKykz3QTQg5qEj28QgK523SEmao/edit#gid=1437859537). + +### Configuring Roles with fly + +Now that we’ve gone over the new roles, we can do a quick overview of how we can go about setting users & roles on +teams. + +### Default Behaviour + +By default, if no configuration is provided the user is given the Team Owner role: + +```bash +fly -t dev set-team -n PowerRangers --local-user=Zordon + +#This behaviour also applies to groups as well, so be careful! 
+fly -t dev set-team -n A-Team \ + --github-team=MightyMorphin:PowerRangers +``` + +### Specifying Roles with `-c` + +Roles must be specified in a separate configuration file using the -c + +```bash +fly -t dev set-team -n A-Team -c ./team.yml +``` + +`team.yml` + +```yaml +roles: +- name: owner + local: + users: ["Zordon"] +- name: member + local: + users: ["RedRanger", "BlueRanger", "GreenRanger"] +- name: viewer + local: + users: ["Alpha"] +``` + +## Inspecting Roles Configuration + +Once you’ve set the team configuration you can verify it using the details flag on fly teams: + +```shell-session +fly -t dev teams -d +name users groups +A-Team/member local:RedRanger, BlueRanger, GreenRanger none +A-Team/owner local:Zordon none +A-Team/viewer local:Alpha none +``` + +..where you’ll find the output is now updated to list each team/role combination and its associated users/groups. + +## Further Reading + +- [Oh, Auth by Josh Winters](https://medium.com/concourse-ci/oh-auth-f4fe68438171) +- [Concourse RBAC Preview](https://medium.com/concourse-ci/concourse-rbac-preview-8e07616ddc47). +- [Concourse Auth & Teams docs](https://concourse-ci.org/auth.html) diff --git a/docs/blog/posts/2019/2019-04-01-concourse-update-----april-1--2019-.md b/docs/blog/posts/2019/2019-04-01-concourse-update-----april-1--2019-.md new file mode 100644 index 00000000..4c806c4c --- /dev/null +++ b/docs/blog/posts/2019/2019-04-01-concourse-update-----april-1--2019-.md @@ -0,0 +1,133 @@ +--- +title: "Concourse Update (\U0001F937-April 1, 2019)" +date: 2019-04-01 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update-----April-1--2019-/1-Z49uzJr_wqYlpCGLBpnoXQ.jpeg" alt="Some +airport somewhere... waiting" width="50%" >}} + + + +Phew, it’s been a while since I last wrote an update. 
For some background behind why I slowed down, hop on over to this +thread on our +forms: [“What would you like to see on our blog”](https://discuss.concourse-ci.org/t/what-kind-of-topics-would-you-like-to-see-on-our-blog/1222/8). + +That said, I _do_ have a lot of interesting updates to share, so let’s get started + +## Concourse 5.0.0 + +In case you missed it, [Concourse 5.0.0](https://concourse-ci.org/download.html#v500) +and [5.0.1](https://concourse-ci.org/download.html#v501) came out a few weeks ago in March. This is a major version +release with tons of new features, including: + +- Role Based Access Control +- Global Resource Cache +- fewest-build-containers placement strategy +- Resource pinning +- Inputs on the put step of a pipeline +- UI tweaks +- and [much much more](https://concourse-ci.org/download.html#v500)! + +Be warned, there are some breaking changes in this release as well; so make sure you +read [all](https://concourse-ci.org/download.html#v500-note-1) [of](https://concourse-ci.org/download.html#v500-note-2)[the](https://concourse-ci.org/download.html#v500-note-3) [release](https://concourse-ci.org/download.html#v500-note-4) [notes](https://concourse-ci.org/download.html#v500-note-5) [before](https://concourse-ci.org/download.html#v500-note-6) +you upgrade! + +You’ll also notice that we recently gave the [Concourse homepage](https://concourse-ci.org/)a small makeover as well. 
+We’ve tightened up the navigation and expanded some sections of of our docs, check it out: + +- Expanded docs on[Credential Management](https://concourse-ci.org/creds.html) with Vault and AWS SSM +- More info on the new [Container Placement](https://concourse-ci.org/container-placement.html) strategies +- A primer on the new [Global Resources](https://concourse-ci.org/global-resources.html) feature +- Our spiffy new [Examples](https://concourse-ci.org/learning.html#examples) section, which gives you a side-by-side + comparison of a pipeline and the yml that made it + +## Interesting Blog Posts + +There’s also been some interesting blog posts about Concourse from around the interwebs…and not all of them were written +by me! + +- [An Overview of Authorization in Concourse 3, 4 and 5](https://medium.com/concourse-ci/an-overview-of-authorization-in-concourse-3-4-and-5-7128cca36194) + is a useful overview of auth across 3 major versions of Concourse +- [Installing Concourse 5.0 on Kubernets using Helm](https://medium.com/concourse-ci/installing-concourse-5-0-on-pivotal-container-service-using-helm-9f20e4e1b8bf) + is a great two-part overview of getting PKS installed and using the Concourse helm chart +- [Building Go code, with and without Go modules, with Concourse](https://www.orsolabs.com/post/building-go-code-with-concourse/) +- Aptomi described how to + do [CI/CD for Knative serverless apps on Kubernetes with Concourse](https://medium.com/aptomi/ci-cd-for-knative-serverless-apps-on-kubernetes-with-concourse-54bafef51767) +- Concourse-Up is now renamed + to [“Control Tower”](http://www.engineerbetter.com/blog/concourse-up-renamed-to-control-tower/) +- Someone compared us to Drone.io + in [CI/CD tool showdown pits adoptability vs. 
adaptability](https://searchsoftwarequality.techtarget.com/tip/CI-CD-tool-showdown-pits-adoptability-vs-adaptability) +- We got a mention + on [PorscheDev’s Technology Radar vol 2](https://medium.com/porschedev/technology-radar-vol-2-4833fb31e2fd) (I think + they like us :D) + +## Concourse Swag + +
+ +We have swag! With the help of the team at Pivotal we’ve listed our first Concourse-branded sweater under +the [official Pivotal apparel store](https://store.pivotal.io/collections/all-products/products/pivotal-unisex-crewneck-sweatshirt). +A few notes: + +- The sweaters themselves are listed at-cost, so we’re not making any profit off of them +- Apologies to anyone who’s not in the United States because international shipping through this store is _atrocious_. + We’re going to be working with our partners to see if we can find a better shipping solution. +- At the time of this writing we’re relatively low on M and L sweaters, there’s a new shipment of those sizes coming in + soon so the store should be updated in a week or so +- Once this batch of sweaters sell out we’ll be planning on doing new designs to keep things fresh! + +## Concourse IRL + +The Concourse team will be attending CF Summit NA 2019 this week in Philadelphia, so come by the Pivotal booth and say +hi to the team! + +I’ll also be attending a the ConcourseCI Bay Area User Group meetup on April 11th in Palo Alto. The title of the meetup +is [“Kubernetes Deployments with Concourse CI and Spinnaker”](https://www.meetup.com/concourse/events/259904171/). Come +check it out if you’re in the bay area! + +## Milestones and Interesting RFCs + +[Alex Suraci](https://medium.com/u/263a63b2f209) has been experimenting with re-organizing our backlog of epics by using +the GitHub Projects feature. You can see our current list of epics in the +concourse/concourse[project list](https://github.com/concourse/concourse/projects). 
The big things we’re working on are: + +- Spatial Resource +- API refactoring +- Ephemeral check containers (Runtime) +- and Concourse + K8s runtime + +On the topic of k8s runtime situation, please take a second to +review [Topher Bullock](https://medium.com/u/58876cdc2180)’s +new [RFC #22 How Do We Best Leverage K8s as Runtime?](https://github.com/topherbullock/rfcs/blob/e4a80f902bc835b2d528a7550b427bfa83a5660d/008-k8s-runtime/proposal.md). +The team is evaluating Concourse + [Tekton CD](https://github.com/tektoncd/pipeline) vs Concourse + K8s our own way. + +## Thanks to our Community 🙏 + +Finally, I wanted to give shout outs to our growing community of Concourse fans and followers. In early 2019 the +Concourse team made two changes to our contributor workflow: we switched over to a looser Contributors License +Agreement (CLA) and the core team moved towards a PR-based workflow. Since then we’ve seen a lot more engagement on the +work that we’ve doing, and we’ve also started to see a lot of new PRs coming in! + +
Number of PRs opened over time against concourse/concourse and other key resources
+ +In 2018, we saw 263 PRs opened against concourse/concourse and its core resources. As of today we already have more than +160 PRs opened by non-Pivots! Some notable PRs that I wanted to highlight: + +- [#3580 Add parallel Step](https://github.com/concourse/concourse/pull/3580) +- [#3163 [POC] Super nasty rendering of jobs that needs manual triggering](https://github.com/concourse/concourse/pull/3163) +- [#3560 Time based retention for build log collector](https://github.com/concourse/concourse/pull/3560) +- [#3430 Default the target if there is exactly one](https://github.com/concourse/concourse/pull/3430) +- [#3577 Auditor](https://github.com/concourse/concourse/pull/3577) +- [#3398 Make values starts with https or http clickable in build](https://github.com/concourse/concourse/pull/3398) +- [#3579 Display Task Duration on Finished Tasks](https://github.com/concourse/concourse/pull/3579) +- [#3475 web: add pause button to top bar of pipeline view](https://github.com/concourse/concourse/pull/3475) +- [#3248 Add option to prune all stalled workers instead of just one at a time](https://github.com/concourse/concourse/pull/3248) + +## The Future of Weekly Updates + +I’ll do my best to resume the weekly cadence of the project updates.
In the meantime, if you have any specific opinions +on what kind of blog posts we should right, I’d suggest you check out this thread on our +forums: [“What would you like to see on our blog”](https://discuss.concourse-ci.org/t/what-kind-of-topics-would-you-like-to-see-on-our-blog/1222/8) + diff --git a/docs/blog/posts/2019/2019-04-05-concourse-update--april-1-5-.md b/docs/blog/posts/2019/2019-04-05-concourse-update--april-1-5-.md new file mode 100644 index 00000000..b5854887 --- /dev/null +++ b/docs/blog/posts/2019/2019-04-05-concourse-update--april-1-5-.md @@ -0,0 +1,45 @@ +--- +title: Concourse Update (April 1–5) +date: 2019-04-05 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--April-1-5-/1-nZQ4xbWXguDpNQzLEkcJcA.jpeg" alt="Concourse in +action at the CF Summit 2019 Grape Up booth" width="50%" >}} + + + +Greetings from sunny Philadelphia! The team was there for +the [Cloud Foundry 2019 NA Summit](https://www.cloudfoundry.org/event/nasummit2019/) for a few days; talking to +Concourse users and attending talks. Recorded videos of the talks should be uploaded soon; so I’ll point you to the +interesting Concourse-related ones next week. + +On to the update. + +## For Active Discussion + +- Please take some time to review and comment on the + latest [Concourse + k8s Runtime RFC](https://github.com/topherbullock/rfcs/blob/e4a80f902bc835b2d528a7550b427bfa83a5660d/008-k8s-runtime/proposal.md) +- Regarding the runtime, there’s been an active conversation around better build scheduling and load distribution. You + can catch up on the thread [here](https://github.com/concourse/concourse/issues/2928). 
We’d love for you to tell us + about your own experience in our meta-issue [#3695](https://github.com/concourse/concourse/issues/3695) + +## Coming Soon: Concourse 5.1.0 + +{{< image src="/images/downloaded_images/Concourse-Update--April-1-5-/1-dEssJTEo9_VnNszNDUj6gQ.png" alt="Icons on +resources" width="100%" >}} + +We’re in the process of polishing up some items that we weren’t quite able to fit into the 5.0.0 release. There have +also been some interesting new features and new PRs that you can look forward to in 5.1.0 as well: + +- Resource icons [#3581](https://github.com/concourse/concourse/pull/3581) thanks to efforts of + contributor [mockersf](https://github.com/mockersf) +- You can now pause the pipeline from the pipeline view [#3475](https://github.com/concourse/concourse/pull/3475) thanks + to the efforts of contributor [robwhitby](https://github.com/robwhitby) +- There’s been a great de-coupling of the API from the runtime & + scheduler [#3307](https://github.com/concourse/concourse/pull/3307). This is a refactor and cleanup that brings us + closer to an API that we’d be happy to publish and support.
+- Introduced an on\_error option to allow outside sources to be notified of CI + failure [#3652](https://github.com/concourse/concourse/pull/3652) thanks to + contributor [amanw](https://github.com/amanw) diff --git a/docs/blog/posts/2019/2019-04-18-concourse-update--april-8-18-.md b/docs/blog/posts/2019/2019-04-18-concourse-update--april-8-18-.md new file mode 100644 index 00000000..dabcb78c --- /dev/null +++ b/docs/blog/posts/2019/2019-04-18-concourse-update--april-8-18-.md @@ -0,0 +1,35 @@ +--- +title: Concourse Update (April 8–18) +date: 2019-04-18 +categories: +- product-update +--- + +{{< image src="/images/downloaded_images/Concourse-Update--April-8-18-/1-gTTAFBV8KHzEL0CV-I-_kA.jpeg" alt="Roman +Alekseenkov from Aptomi giving a talk on Concourse at the Bay Area User Group" width="50%" >}} + + + +Sorry for missing the update last week. I was travelling out to the Bay area to attend the ConcourseCI Bay Area User +Group. For those who missed it, you can find a recording of the +event [here](https://www.youtube.com/watch?v=1RRZHPlTkXs). On to the update. + +{{< image src="/images/downloaded_images/Concourse-Update--April-8-18-/1-QqwW-_RArz5a_sprZC7PZw.png" width="100%" >}} + +In case you missed it, Concourse 5.1.0 is out! It’s got icons on resources, better garbage collection, `on_error` on +pipelines, and much more! As usual, you can read the full list of new +features [here](https://concourse-ci.org/download.html#v510). + +Other interesting developments: + +- The runtime team has been looking into the administrative overhead of running tasks on workers. The results are pretty + sobering. More to come next week! +- We’re still looking into the k8s Tekton integration. We expect things to pick up in pace starting next week, where + we’ll have a few more Pivots lending a helping hand. 
Again, you can find our + RFC [here](https://github.com/concourse/rfcs/pull/22) +- The sidebar is coming [back](https://github.com/concourse/concourse/issues/2440), and we’re exploring how we can + extend the [search and filtration capabilities](https://github.com/concourse/concourse/issues/3630) across Concourse +- I added a section called “Concourse Users” on + our [Community page](https://concourse-ci.org/community.html#concourse-users). This is just some of the companies and + folks that have spoken about their Concourse usage in the past. If you’d like to add to that list feel free to make + a [PR here](https://github.com/concourse/docs/blob/master/lit/concourse-users.lit) diff --git a/docs/blog/posts/2019/2019-05-03-concourse-update-april-29-3.md b/docs/blog/posts/2019/2019-05-03-concourse-update-april-29-3.md new file mode 100644 index 00000000..3670bb4b --- /dev/null +++ b/docs/blog/posts/2019/2019-05-03-concourse-update-april-29-3.md @@ -0,0 +1,57 @@ +--- +title: Concourse Update April 29–3 +date: 2019-05-03 +categories: +- product-update +--- + +In case you missed it, we’ve made some tweaks to the structure of the website. I’m happy to report +that [Alex Suraci](https://medium.com/u/263a63b2f209) drastically improved our site-wide search. This +resolves [#181](https://github.com/concourse/docs/issues/181) and we’re all the better for it! + + + +Second, you’ll notice that a lot of the community related content that was on our homepage has now been moved to +our [Concourse GitHub Wiki](https://github.com/concourse/concourse/wiki). We hope this change will make contributor and +community specific content more discoverable and more maintainable over time. + +{{< image src="/images/downloaded_images/Concourse-Update-April-29-3/1-08IsVksi-Nc9O0BnmW5MiA.png" alt="Concourse Wiki +with more Contributor things!" 
width="100%" >}} + +Notably, +the [Resource Types](https://github.com/concourse/concourse/wiki/Resource-Types), [Tutorials](https://github.com/concourse/concourse/wiki/Tutorials), +and [Tools](https://github.com/concourse/concourse/wiki/Tools) pages have moved over to the wiki. Content that may be new +to some of you includes the Project Management +section: [How Issues are Managed](https://github.com/concourse/concourse/wiki/How-Issues-are-Managed), [How to Process PRs](https://github.com/concourse/concourse/wiki/How-to-Process-PRs), +and [Release Process](https://github.com/concourse/concourse/wiki/Release-Process). + +On to the update. + +### K8s Runtime + +Bohen and Sameer have been doing some great write-ups on their research. You can get caught up with their latest +research in two GitHub issues: [What does k8s offer as a runtime](https://github.com/concourse/concourse/issues/3798) +and [What does Tekton offer as a runtime](https://github.com/concourse/concourse/issues/3797). If you’d like to track +along with this project’s movements you can bookmark the K8s Runtime project board +here: [https://github.com/concourse/concourse/projects/14](https://github.com/concourse/concourse/projects/14) + +### UX + +The Sidebar is coming back! Check out our latest designs +in [#2440](https://github.com/concourse/concourse/issues/2440#issuecomment-482133483). + +We’ve been looking into a few UI regressions in the web frontend as well. 
+Specifically, [#3745](https://github.com/concourse/concourse/issues/3745) +and [#3748](https://github.com/concourse/concourse/issues/3748) have been moved to the top of the backlog + +### Runtime + +We’ve been working on [#3607](https://github.com/concourse/concourse/issues/3607) +and [#3810](https://github.com/concourse/concourse/issues/3810) as sub-stories to help with Ephemeral Check +Containers [#3424](https://github.com/concourse/concourse/issues/3424) + +[Divya Dadlani](https://medium.com/u/521c9107181d) has also been thinking a lot more about “Performance benchmarking for +Concourse releases” [#3816](https://github.com/concourse/concourse/issues/3816). The idea is that we should be a bit +more rigorous in tracking how Concourse improves with some of the runtime performance changes. Jump on over to the issue +and drop a line if you have any ideas/opinions on this subject. + diff --git a/docs/blog/posts/2019/2019-06-07-concourse-update-june-7.md b/docs/blog/posts/2019/2019-06-07-concourse-update-june-7.md new file mode 100644 index 00000000..cf82caea --- /dev/null +++ b/docs/blog/posts/2019/2019-06-07-concourse-update-june-7.md @@ -0,0 +1,72 @@ +--- +title: Concourse Update June 7 +date: 2019-06-07 +categories: +- product-update +--- + +...and we’re back! Apologies for the lack of updates lately. I’ve just come back from some time off and work travel has +taken up a lot of my time. I’m back in Toronto now so let’s get back into it. + + + +## Release Engineering & Concourse 5.3.0 + +In the past, we relied _a lot_ on [Alex Suraci](https://medium.com/u/263a63b2f209) to handle a lot of our release +engineering work. Release Engineering is incredibly important and valuable work for the Concourse team, but it can also +be very time consuming. 
Thankfully, the UX track has volunteered some of their time to spin up our +new [Release Engineering track](https://github.com/orgs/concourse/projects/36) of work to help alleviate Alex from some +of his responsibilities. This means a short-term slowdown in the throughput of the UX team, but we think it’s well worth +the tradeoff. + +On that note, you can now follow along with our release plans for Concourse 5.3.0 by tracking +our [project note](https://github.com/orgs/concourse/projects/36#card-22467664). Unfortunately, we were mostly blocked +on some metrics instabilities in our [production instance](https://ci.concourse-ci.org/) this week. Those issues have +been mostly cleared up and we hope to be able to continue with our production and wings tests next week + +## Core/API + +The team’s been making a lot of progress on two key issues: + +- [The Algorithm™](https://github.com/concourse/concourse/issues/3602) +- [Resource check queue](https://github.com/concourse/concourse/issues/3788) + +The team has been doing some preliminary performance tests with the new Algorithm and the results so far have been very +promising. We’ll be reporting more details on the performance improvements in the coming weeks; so keep an eye out for +that! + +## Runtime + +[Ephemeral check containers](https://github.com/concourse/concourse/issues/3424) is back! We’ve deployed our changes +as-is on our test environment and are monitoring it for lower container counts in our environments + +[Parallel Input Streaming](https://github.com/concourse/concourse/issues/3992) was picked up by Krishna today, it’s +amazing and there’s totally lots of detail to be found on the linked issue. + +## K8s + Concourse == Koncourse? 
+ +We have two RFCs in flight, please take some time to read through the changes: + +- Exploring [Initial Run/Store interface](https://github.com/concourse/architecture-rfcs/pull/1): In order to port + Concourse on to non Garden/Baggageclaim runtimes (including Kubernetes!) we need to separate the two concepts of + containers as the unit of execution and volumes as the unit of storage. We’re fleshing out the interface that these + components can implement in this RFC. +- [Extract Core Resource Types](https://github.com/concourse/rfcs/pull/30): a proposal to not ship bundled base resource + types with concourse. The change would require Concourse to pull the base resource types on-demand at runtime. This is + required for establishing a more generic storage interface. + +You’ll also note that we’ve created an architecture-rfcs repo. This repository is reserved for internal RFCs that should +not directly impact a Concourse user. + +## Duty Free + +The proposal for a Concourse “Duty Free” was first reported +in [issue #191](https://github.com/concourse/concourse/issues/191); it’s the idea of creating a separate site to +highlight community resources and other re-usable Concourse artifacts for our community. Today, we advertise Concourse +resources through the [Resources page in our wiki](https://github.com/concourse/concourse/wiki/Resource-Types), but a +dedicated Concourse Duty Free site would have a lot more pizzaz. + +We’ve always wanted to build Duty Free but we were never able to figure out how to slot it into our work schedule. +Thankfully, the Pivotal team out in Dublin had some time and offered to help kick-start the project for us. 
We’re still +in the very early stages of development and design, but you can follow along the project on their GitHub repo +here: [concourse/dutyfree](https://github.com/concourse/dutyfree) diff --git a/docs/blog/posts/2019/2019-06-21-concourse-update--july-21-2019-.md b/docs/blog/posts/2019/2019-06-21-concourse-update--july-21-2019-.md new file mode 100644 index 00000000..d2db6542 --- /dev/null +++ b/docs/blog/posts/2019/2019-06-21-concourse-update--july-21-2019-.md @@ -0,0 +1,41 @@ +--- +layout: post +title: Concourse Update (July 21 2019) +date: 2019-06-21 +categories: +- product-update +--- + +The Concourse team had the opportunity to visit some Concourse users out in Montreal last week. We had a blast meeting +everyone, including some folks from the Concourse OSS community. Thanks again for hosting us! + + + +I’ll also be in Kansas City for two days next week to meet some other Concourse users as well, so give me a tap +on [Twitter](http://twitter.com/pioverpi) or Discord (username jama) if you wanna meet up. + +## Parallel Input Streaming + +{{< image src="/images/downloaded_images/Concourse-Update--July-21-2019-/0-ywZaAHKMEtZGTx5c.png" alt="initialization +dropped from 1 hour 22 min to just over 4 min" width="60%" >}} + +In addition to the work on [Algorithm](https://github.com/concourse/concourse/issues/3602) improvements from the Core +track, the Runtime track tested out their new work +on [Parallel Input Streaming](https://github.com/concourse/concourse/issues/3992). By parallelizing the input streams we +saw a _massive_ improvement on the initialization of tasks in our test pipelines. In our test we +saw [Dwayne Forde](https://medium.com/u/225055297bdc)’s Strabo pipeline (which has over 100 input resources on a job) go +from a 1 hour, 22 min initialization to just over 4 min. We were able to observe these results on both the BOSH and k8s +deployment of Concourse. Exciting work! 
+ +## Runtime Interface Track + +For those who are interested, you can follow along our swappable runtimes (including k8s) work in +the [Runtime Interface track](https://github.com/concourse/concourse/projects/16). We’ve been doing a lot of planning +and research, but it’s all come down to “let’s just give it a shot”. We’ll probably have more to say on this next update. + +## Release Engineering + +One of the big changes that has come out of our Release Engineering track is extracting our ci automation into its own +repository. This was done to make our project more resilient and reusable. You can now track those changes +under [concourse/ci](http://github.com/concourse/ci) + diff --git a/docs/blog/posts/2019/2019-07-02-designing-a-dashboard-for-concourse.md b/docs/blog/posts/2019/2019-07-02-designing-a-dashboard-for-concourse.md new file mode 100644 index 00000000..304bba17 --- /dev/null +++ b/docs/blog/posts/2019/2019-07-02-designing-a-dashboard-for-concourse.md @@ -0,0 +1,100 @@ +--- +title: Designing a Dashboard for Concourse +date: 2019-07-02 +--- + +## How does the Concourse team go about solving the problem of pipeline monitoring? + +With the growing popularity of Concourse, we noticed that our development teams wanted to observe and monitor multiple +pipelines simultaneously. This behaviour wasn’t limited to just Pivotal engineering teams; in fact, it was even more +prevalent amongst our Open Source Community. Our users currently solve this by cramming multiple browser windows into +their TV monitor view or they use the [Concourse Summary](https://github.com/dgodd/concourse-summary) (aka Crystal) by +David Goddard of the Pivotal Buildpacks team. 
+ + + +{{< image src="/images/downloaded_images/1_nU107xCbOq-21YkWl2OBXQ.png" alt="Concourse pipelines (left) Datadog and +Concourse Summary (right)" width="100%" >}} +[https://github.com/dgodd/concourse-summary](https://github.com/dgodd/concourse-summary) + +So, we embarked on a deeper Discovery effort with the goal of understanding and evaluating our assumptions around how +Concourse users were solving this problem today. + +At Pivotal, we believe that products’ solutions need to be designed with the user in mind and we practice a style of +user-centered design that progresses in four phases: + +1. **Learning:** How are users solving this problem today? +2. **Framing:** Formulate a hypothesis based on your learnings. Create a prototype or experiment that is based on the + exploratory research. +3. **Assessing:** Put your experiment in front of users to see if your hypothesis is right. +4. **Iterating:** Repeat steps 1–3 to iterate on the solution as you get feedback. + +#### Learning: Understanding the Problem space + +We began this process by thinking about the assumptions that were made about this feature and what we needed to validate +in our interviews. + +- Users are not satisfied with the current single pipeline view — especially on a monitor vs. a dedicated CI display + like a TV +- Only seeing red or green pipeline status is all that is important for pipeline summary +- Users want to understand the state of all their teams +- Users recognize their pipelines by their shape in the UI + +{{< image src="/images/downloaded_images/1_fnSsJkhigoNgfuURiOPCpg.png" class="Miro board" width="100%" >}} + +After we went out in the field to talk to users, we came back to synthesize our findings using this virtual whiteboard +tool called [Realtimeboard](https://realtimeboard.com/app/board/o9J_k0EAilo=/). One of our team members was remote, so +this tool allowed us to easily collaborate on our research. 
+ +> _“Did it just turn red 10 seconds ago, or one week ago? I have no idea.” — a Pipeline Engineer_ + +From our research, we found that users only care about failed jobs and the amount of time that their pipeline has been +failing. This information is crucial for engineering teams as it is used to triage their pipeline errors and influences +the prioritization of work. Many development teams we talked to are using a micro-service based architecture and +therefore most of their pipelines are composed of four jobs (build, test, deploy and health check). While we assumed +that the shape of a pipeline would be identifying, it was more important for the user to see the status. + +Based on the feedback we collected we began to prioritize our insights and frame our solution. We proceeded to +brainstorm and sketch ideas for a prototype experiment. + +{{< image src="/images/downloaded_images/0_Iz72Bh80-LNbQc-I.png" alt="Early Dashboard prototype" width="50%" >}} + +Our first InVision prototype represented each team’s pipeline as a series of thumbnails. We believed this approach would +help users identify their pipelines, and at the same time have an at-a-glance view of the pipeline status. Our first +round of feedback from users revealed that the thumbnail was not as useful as we had thought, and our approach made it +more difficult to understand what the pipeline status was. + +So, we pivoted and started to explore the idea of a pipeline thumbnail that abstracts the current pipeline +representation into a more substantial information radiator. + +Alex Suraci, co-creator of Concourse, had been working on a UI experiment, based on a treemap chart (below), that looked +like something we could expand upon. I hypothesized that by removing the resources from this view and stripping down the +thumbnail to just jobs we could provide the user with just enough information for ‘at a glance’ triaging. 
+ +{{< image src="/images/downloaded_images/0_VKfa4IVa51zOWoiU.png" width="50%" >}} +{{< image src="/images/downloaded_images/0_V2tAM1z62u0gd-Sx.png" alt="Alex’s pipeline treemap algorithm experiment ( +left). Thumbnail compression of the pipeline for the Concourse Dashboard (right)." width="25%" >}} + +This was a radical idea with significant departures from the current visual style of Concourse. We didn’t want to just +“do it” and release it to our community of users without some kind of feedback first. As a product designer, my first +inclination was to start drawing up thumbnail variations that we could test with our users. However, there was no clear +taxonomy of pipelines because every team within Pivotal has a drastically different pipeline configuration. We needed a +quick way to test this design with “realistic” pipeline configurations at scale. Luckily for us, the Concourse team runs +an internally managed multi-tenant instance of +Concourse [called Wings](https://medium.com/concourse-ci/earning-our-wings-a0c307fa73e6). We use Wings as a sandbox for +new features, so I paired with an engineer to do a lightweight implementation for Wings. + +{{< image src="/images/downloaded_images/0_bJ8wRINc9fo3aZ5L.png" width="100%" >}} + +Since our initial rollout of the dashboard on Wings in September 2017, we have undergone at least 3 major revisions of +the dashboard based on the feedback we had received from teams within Pivotal. Our next step was to incorporate this +dashboard into the core product as a beta feature without disrupting users who are looking for a more stable Concourse +experience. + +As of Concourse 3.5.0 you can find the dashboard under `/dashboard/` and as of Concourse 3.6.0 you can find the +dashboard under `/beta/dashboard` . We hope you like this feature and are actively looking for feedback from the +community. 
+ +If you have a comment and want to participate in the conversation for the dashboard UI, please visit the issue in +GitHub: [https://github.com/concourse/concourse/issues/1829](https://github.com/concourse/concourse/issues/1829) . + diff --git a/docs/blog/posts/2019/2019-07-17-core-roadmap-towards-v10.md b/docs/blog/posts/2019/2019-07-17-core-roadmap-towards-v10.md new file mode 100644 index 00000000..fbad197c --- /dev/null +++ b/docs/blog/posts/2019/2019-07-17-core-roadmap-towards-v10.md @@ -0,0 +1,511 @@ +--- +title: 'Core roadmap: towards v10' +date: 2019-07-17 +categories: +- roadmap +--- + +_A long-term roadmap for the core design of [Concourse](../../../index.md), a general-purpose CI/CD tool._ + + + +_[Accompanying slides](https://vito.github.io/slides/v10.html). Recommended +viewing: [episode 1 of Yu-Gi-Oh](https://www.youtube.com/watch?v=2ot9eV9DybI)._ + +Concourse's design philosophy is to be expressive, versatile, and safe while limited to a handful of simple, proven +concepts. The design of these concepts should make good practices feel intuitive and bad practices feel uncomfortable. + +Coming up with these designs can be very challenging. There are many different workflows and patterns across the +software industry, and they each have to be deeply understood in order to know what the good and bad practices are. + +This post provides a bit of insight into what we've been up to with Concourse's core design - chiefly regarding ' +spaces', which has become a bit of a white whale on our roadmap. + +There are a lot of words here - sorry! If you just want to skim, I've added a single-paragraph summary under each +roadmap entry. + +Each roadmap entry corresponds to an RFC or an issue, linked in their header. If you want to get involved in our design +process or just provide feedback, please check them out and submit a PR review! (Thanks!) + +## Table of contents + +1. [Where is 'spaces'?](#where-is-spaces) +2. 
[Where are we now?](#where-are-we-now) + +- [Issue #3602: a new algorithm](#issue-3602-a-new-algorithm) +- [Issue #413: build re-triggering](#issue-413-build-re-triggering) + +3. [Where are we going?](#where-are-we-going) + +- [RFC #24: resources v2](#rfc-24-resources-v2) +- [RFC #26: artifact resources](#rfc-26-artifact-resources) +- [RFC #31: `set_pipeline` step](#rfc-31-set_pipeline-step) +- [RFC #32: projects](#rfc-32-projects) +- [RFC #33: archiving pipelines](#rfc-33-archiving-pipelines) +- [RFC #34: instanced pipelines](#rfc-34-instanced-pipelines) +- [RFC #29: spatial resources](#rfc-29-spatial-resources) +- [RFC #27: trigger resources](#rfc-27-trigger-resources) +- [RFC #28: notification resources](#rfc-28-notification-resources) + +4. [What comes after all this?](#what-comes-after-all-this) +5. [Thanks!](#thanks) + +# Where is 'spaces'? + +For those of you not familiar with [spaces](https://github.com/concourse/concourse/issues/1707), it was a big ol' +feature that enabled the following workflows: + +- Dynamically running against things like branches/pull requests, which change over time (i.e. commits to a branch) + _and_ space (i.e. the set of branches themselves). Hence the name 'spaces.' +- Fanning in using `passed` constraints across spaces. This is currently impossible to do with separate pipelines, + because pipelines can't reference each other's resources. +- Automatically cleaning up spaces for closed PRs, etc. This is annoying to automate and requires keeping track of + state. + +These workflows still make sense, so why is 'spaces' dead? + +Well, I approached it the wrong way. To me, the idea of resources tracking change over time and space felt pretty solid +from a theoretical standpoint. In hindsight, maybe it just sounded cool. + +I had no reservations baking 'spaces' in to every layer of the stack - it would add more depth to all the existing +ideas. Everything was going to change: the resource interface, the web UI, how jobs work... 
It was all so exciting! + +But as time went on it became terrifying. It was a double-or-nothing bet. Either 'spaces' made sense everywhere, or ' +spaces' didn't make sense at all. I tried to carve out work that could be done before fully committing to spaces, but it +didn't make the monolithic feature any less monolithic. + +{{< image src="/images/2019/07/scaredy-cat-2.gif" alt="me vs the space dragon" width="25%" >}} + +# Where are we now? + +First off, I want to give a quick update on a couple of big things that you can expect in v6.0: + +## [Issue #3602](https://github.com/concourse/concourse/issues/3602): a new algorithm + +_We are re-designing the algorithm used for determining the candidate input versions for a job. The new approach will +rely less on brute force and will perform better with large installations._ + +This new algorithm fixes long-standing architectural issues with the old one, which loaded each pipeline's entire build +and resource version history into memory and determined valid candidates using brute force. + +The key difference between the old and new algorithm is how `passed` constraints are implemented, specifically when +multiple inputs depend on the same job: + +```yaml +plan: +- get: foo + passed: [foo-unit, integration] +- get: bar + passed: [bar-unit, integration] +- get: baz + passed: [integration] +``` + +In Concourse, this means "give me versions of `foo`, `bar`, and `baz` that have passed through `integration` _together +in the same build_, with the same version of `foo` having passed `foo-unit` and the same version of `bar` having passed +`bar-unit`." + +How does this work? Well, it's hard to describe either algorithm succinctly, but I'll try: + +- The old algorithm goes through resource versions, newest first, and checks whether each version satisfies the input's + own `passed` constraints. 
Next it checks that any other already-chosen input versions which mention the same job in + _their_ `passed` constraints also came from the same build, recursing and walking through versions until everything is + satisfied. This process is brute-force, and uses a lot of CPU. +- The new algorithm instead loops over _build output version sets_ via the jobs listed in each `passed` constraint, + assigning all the relevant versions for a given build at once as long as the versions match the other already-chosen + versions assigned via builds of prior jobs in the `passed` constraint. + +This new approach really simplifies things because the versions are _inherently_ coming from the same build. Now that we +don't have to do the extra cross-referencing, the new flow can just make a handful of cheap database queries instead of +having to load the whole pipeline's dataset into memory. + +We've been testing the old and new algorithm in two separate environments, each at the scale of 1,000 jobs with varying +`passed` constraints and a sprinkle of `version: every` across four `web` nodes. + +- The old algorithm starts off very fast but grows slower and slower as the pipeline dataset grows, eventually + exhausting the `web` nodes of RAM and swap. +- The new algorithm starts off slightly slower than the old one - it's hard to beat an in-memory dataset - but it stays + stable, uses less CPU, and does not leak memory. + +We're making a few final touches as we try to get as much performance out of the new algorithm as possible, since we don't +tend to touch it often. 
Once we're finished, we'll jump straight to...: + +## [Issue #413](https://github.com/concourse/concourse/issues/413): build re-triggering + +_The new algorithm changes the behavior for today's pinning-based flow for re-triggering a build, so we're going to +implement proper support for build re-triggering and ship these two features together in v6.0._ + +Right now the only way to "re-trigger" a build is to pin each of the job's upstream resources to the version from the +build, trigger a new build, and go back and un-pin them all. It's pretty cumbersome and error-prone. + +It also kind of breaks with the new algorithm. Now that the new algorithm is based on _build output sets_ and not +version history, once the new build succeeds its older versions will end up being the first set attempted for that job, +potentially propagating them to downstream jobs. + +That's not what I would expect from a _re-trigger_. I would expect a re-trigger to act "in-place," while preserving the +logs of the original failure for posterity. + +To avoid this surprising change in behaviour, we're going to implement build re-triggering properly and stop abusing the +version pinning feature, which was originally designed for temporarily pinning a broken upstream dependency to a "known +good" one. + +Build re-triggering will be implemented in a way that preserves the order of the builds that the algorithm will go over. +If the re-triggered build succeeds, its set of outputs will be available to downstream jobs based on the original +build's order. + +Another benefit to implementing re-triggering soon is that folks using a pull request resource will have a much easier +time re-triggering failed pull requests, without having to wait on the rest of the roadmap (i.e. 'spaces'). + +# Where are we going? + +So, going back to the 'spaces' initiative. 
The pieces really started to fall into place over the past few months, and I +think I've arrived at a roadmap that accomplishes all of the goals of 'spaces' but in a significantly more Concourse-y +way. + +Instead of one monolithic feature, I have a bunch of smaller features to propose that are independently valuable and can +be delivered in any order. As we complete them, a bigger picture will start to take shape. + +Let's jump right in! + +## [RFC #24](https://github.com/concourse/rfcs/pull/24): resources v2 + +_Resources v2 is the first major revision of the resource interface since Concourse's inception. It's a step to take +very carefully. I think we're finally ready to go._ + +**UPDATE: Just kidding! This proposal has been superseded by something even more +general: [Prototypes](2019-10-15-reinventing-resource-types.md)! ([RFC #37](https://github.com/concourse/rfcs/blob/master/037-prototypes/proposal.md)) +** + +The v2 interface brings long-awaited polish to the interface: it renames `in` and `out` to `get` and `put` to match +their step names, introduces a `delete` action, standardises TLS configuration, and revises terminology so as to not be +coupled to the 'versioned artifacts' use case. + +The latest proposal for Resources v2, [RFC #24](https://github.com/concourse/rfcs/pull/24), is a lot +like [RFC #1](https://github.com/concourse/rfcs/pull/1) but with one big difference: 'spaces' is no longer a +foundational piece of the interface. Instead, RFC #24 proposes that we generalize and simplify the interface to an +extent that it can be used for various pipeline workflows, not just versioning artifacts. + +The new direction is to leverage composition between resources and pipelines via _config fragments_, which can be passed +from one resource to another or used for `((vars))` in a pipeline template. 
'Config fragments' replace 'versions' in the +interface, and are used as versions for the 'versioned artifacts' flow (today's primary use of resources). + +By generalizing the resource concept we set the stage for proper pipeline-level support for notifications (e.g. Slack +alerts, GitHub commit status), trigger-only resources (e.g. `time`), and spatial resources (e.g. branches, pull +requests) without tying each use case into the interface itself. + +Now that 'spaces' is gone from the interface, the actual change in the interface protocol is somewhat cosmetic. As a +result, Concourse pipelines will be able to use v1 and v2 resources side-by-side for all the same functionality. This +way we can move forward with pipeline-level resource features without fragmenting the resource ecosystem! + +## [RFC #26](https://github.com/concourse/rfcs/pull/26): artifact resources + +_Artifact resources are an interpretation of the generic resource interface that maps to today's usage of the resource +interface._ + +**UPDATE: this is now [RFC #38](https://github.com/concourse/rfcs/blob/master/038-resource-prototypes/proposal.md), " +Resource Prototypes"** + +Artifact resources use config fragments as _versions_, modeling change to an external entity over time. This should +sound familiar to anyone using Concourse today: they're the sole use case that Concourse resources were originally +designed around. + +The 'artifact resources' proposal clarifies that this is now just _one_ use case for the general resource interface, and +outlines a few long-awaited features: + +- Versions can be deleted using the `delete` action in the resource interface. +- The `put` action can emit multiple versions. Each will be recorded as an output of the build. +- The automatic `get` after the `put` step will be made opt-in. (Huzzah!) 
+ +The automatic `get` after each `put` is something that has confused and occasionally frustrated users, but we didn't +want to break backwards compatibility and we didn't want users to have to 'opt out' (that's too +many [knobs](https://github.com/concourse/concourse/wiki/Anti-Patterns#knobs) to turn). + +This RFC will provide a backwards-compatible transition path to artifact resources. Check +out [RFC #26](https://github.com/concourse/rfcs/pull/26) for more details! + +## [RFC #31](https://github.com/concourse/rfcs/pull/31): `set_pipeline` step + +_The first step on our journey towards 'spaces' is to introduce a simple, but critical piece of the puzzle: +a `set_pipeline` step._ + +The `set_pipeline` step is used like so: + +```yaml +jobs: +- name: bootstrap + plan: + - get: ci + trigger: true + - set_pipeline: concourse + file: ci/pipelines/concourse.yml +``` + +This job will configure a `concourse` pipeline within the job's team. The pipeline will be automatically unpaused, and +no authentication is required. + +The first thing this lets us do is deprecate the `concourse-pipeline` resource, which has two pretty fundamental +problems: + +- Having to configure auth is really awkward - you have to set up a local user and give the resource the keys to the + kingdom. +- Keeping the version of `fly` within the resource in sync with your own Concourse's version is a bit clunky. + +With the `set_pipeline` step, both of these problems immediately go away and pipelines start to feel more first-class +rather than just being the tip of the abstraction iceberg. + +## [RFC #32](https://github.com/concourse/rfcs/pull/32): projects + +Ok, I promised to provide a tl;dr for each roadmap entry, but projects can't really be summed up that easily. This is +the most impactful feature on this roadmap. 
+ +- _A "project" is a new concept bootstrapped by two existing ones: a [resource](https://concourse-ci.org/resources.html) + from which to continuously load the project's config, which specifies + a [build plan](https://concourse-ci.org/steps.html) to execute whenever the project resource changes._ +- _Projects act as a namespace for pipelines, and provide a long-requested workflow for automating their configuration. + As the roadmap goes on, this workflow becomes more and more powerful._ +- _Projects allow you to define project-wide resources which let you clean up duplicate definitions across your + pipelines and support cross-pipeline `passed` constraints._ +- _Projects also define project-wide tasks, which remove the need to thread a resource through all your jobs just to + have the task configs to execute, and finally gives meaning to task names (the `x` in `task: x`)._ + +A project's build plan can be used for anything you want. Small projects could use the build plan to run tests and/or +perform various steps in a single build - a workflow more familiar to users of other CI systems: + +```yaml +name: ci +plan: +- get: booklit + trigger: true +- task: unit +``` + +Larger projects could use the build plan to execute `set_pipeline` steps. Concourse has long encouraged users to keep +their pipelines under source control, but it never enforced it: `fly set-pipeline` was still a manual operation, and +users would often forget to check in their changes. Projects will fix that: + +```yaml +name: ci +plan: +- set_pipeline: booklit +``` + +Small projects may start without pipelines and start using pipelines as they grow. Our original slogan, 'CI that scales +with your project,' is now pretty literal! The hope is that by introducing build plans without requiring knowledge of +pipelines and jobs, we'll have made Concourse's learning curve more gradual and made Concourse feel less overkill for +side-projects. 
+ +This feature will have far-reaching implications for Concourse, so it won't be sneaking in quietly. I've +opened [RFC #32](https://github.com/concourse/rfcs/pull/32) and would really appreciate feedback! + +## [RFC #33](https://github.com/concourse/rfcs/pull/33): archiving pipelines + +_Archiving pipelines is a way to soft-delete a pipeline while still being able to peruse the build history for a +pipeline you no longer want._ + +Well, after that bombshell this one's pretty easy to explain. Let's take a look at our own Concourse team's pipelines: + +{{< image src="/images/2019/07/Screenshot-from-2019-07-16-11-49-33.png" width="100%" >}} + +Look at all that cruft! So many old, paused or bit-rotting pipelines which I really don't care about anymore but don't +really have the heart to delete. That `old-concourse` pipeline served us well for years - it has sentimental value. In +some cases you may also want to keep the history around for auditing purposes. + +Archiving pipelines will allow you to humanely retire a pipeline in a way that gets it out of your way while still +allowing you to peruse the build history should you ever need to. Archived pipelines are no longer active and will allow +you to re-use their name without bringing the old pipeline back. + +There's already an open pull request for this: [#2518](https://github.com/concourse/concourse/issues/2518) - shout-out +to [@tkellen](https://github.com/tkellen)! The ball has been in our court for a while to figure out the UI/UX, so we're +just going to submit a new RFC and work out all the details. + +## [RFC #34](https://github.com/concourse/rfcs/pull/34): instanced pipelines + +_Instanced pipelines group together pipelines which share a common template configured with different `((vars))`. 
They +provide a simple two-level hierarchy and automatic archiving of instances which are no longer needed._ + +Instanced pipelines are an important piece of the 'spaces' puzzle: it's how users will navigate through their spatial +pipelines, and it's what keeps no-longer-relevant spaces for e.g. merged PRs and deleted branches from piling up +forever. + +Pipeline instances are created using the `set_pipeline` step like so: + +```yaml +plan: +- set_pipeline: branch + instance_vars: + branch: feature/projects +- set_pipeline: branch + instance_vars: + branch: feature/new-algorithm +``` + +At the end of a build which uses `set_pipeline`, all instances of the named pipelines which were not configured by the +build will be automatically archived. + +Check out [RFC #34](https://github.com/concourse/rfcs/pull/34) for more details! + +## [RFC #29](https://github.com/concourse/rfcs/pull/29): spatial resources + +_Spatial resources are resources whose `check` monitors spatial change, not change over time. Two common examples are +the set of branches or open pull requests for a repo. The `across` step allows a build to process each 'space' and +trigger on changes to the set._ + +**UPDATE: the syntax for this step has since been tweaked so that multi-var matrices don't require nesting `across` +steps.** + +When used with the new `across` step, the  `set_pipeline` step, and instanced pipelines, this enables dynamic +pipeline configuration across spatial change. + +The final piece of the puzzle for 'spaces' is the addition of an `across` step. This step points to a resource and has a +plan which will execute for _every config fragment_ returned by the resource's `check`, all within one build. + +Let's first look at a simple use case, which is to execute a task across many variants: + +```yaml +plan: +# ... 
+- across: supported-go-versions + as: go + do: + - task: unit + image: go +``` + +In this case, imagine we have a `supported-go-versions` resource whose `check` returns a config fragment for each tag +and digest based on a pre-configured list of supported tags (e.g. `1.10`, `1.11`, `1.12`), and whose `in`/`get` fetches +the image. + +When nested, the `across` step enables dynamic build matrices: + +```yaml +plan: +# ... +- across: supported-go-versions + as: go + do: # needed so we can define another 'across' + - across: other-things + as: some-input + task: unit + image: go +``` + +When used with `set_pipeline` and instanced pipelines, it enables dynamic _pipeline_ matrices: + +```yaml +plan: +- across: repo-branches + as: repo-branch + set_pipeline: branch + instance_vars: + branch_name: ((repo-branch.name)) +``` + +(Assuming we provide the ability to access fields of an artifact with `((vars))`.) + +## [RFC #27](https://github.com/concourse/rfcs/pull/27): trigger resources + +_Trigger resources allow jobs to specify parameters that can trigger new builds but don't have anything to fetch - they +just propagate config fragments to the build._ + +**UPDATE: this has turned into a `get_var` step, rather than a `param` step** + +This is also a relatively simple feature, but it will improve today's usage of the `time` resource by having per-job +trigger semantics rather than having all jobs downstream of one `time` resource leading to a thundering herd of builds +hitting your workers all at once. + +Rough sketch: + +```yaml +jobs: +- name: smoke-test + plan: + - param: 10m + trigger: true +``` + +Semantically, `param` is similar to `get` but with one key difference: there is no central version history. 
Rather than
+being used as an _artifact_, the resource is used solely for its _config fragments._ Concourse will `check` against the
+job's last used config fragment for the trigger resource, `10m`, and if a different fragment is returned the job will
+trigger with the new one.
+
+This skips the `get`, eliminates the thundering herd issue (because all jobs have their own interval), and could enable
+an interesting pattern for manually-parameterized builds: just write a resource type that can fetch user-provided config
+fragments from some external source (i.e. a repo).
+
+Here's one idea of what that may look like, where the config fragments returned by param are somehow usable with
+`((vars))` syntax in subsequent steps:
+
+```yaml
+plan:
+- param: environment
+- task: smoke-test
+  vars:
+    environment: ((environment.name))
+```
+
+Another interesting use case would be to use it as an `instance_fragment` with the `set_pipeline` step.
+
+This idea is pretty half-baked - I've been mainly focusing on the 'spatial resources' idea. Follow along in
+the [RFC #27](https://github.com/concourse/rfcs/pull/27) and help the idea develop!
+
+## [RFC #28](https://github.com/concourse/rfcs/pull/28): notification resources
+
+_Notification resources will allow you to monitor the flow of a resource through your pipeline and emit build status
+notifications (e.g. Slack alerts, GitHub commit status) without having to sprinkle `put` steps all over your pipeline._
+
+Have you ever wanted to reflect your CI build status on your GitHub commits? Or send a Slack notification whenever the
+build is fixed or broken?
+
+If so, you're probably aware of how ugly it can make your pipelines, both in YAML and in the UI. 
+ +A simple pipeline quickly turns into a mess of boxes and lines: + +{{< image src="/images/2019/07/before-notifications-1.png" width="100%" >}} +{{< image src="/images/2019/07/after-notifications.png" alt="a simple pipeline before and after notifications were +added" width="100%" >}} + +Not only is it a lot of manual work to copy-paste those `on_success` and `on_failure` hooks, when you finally configure +it it really ruins the signal-to-noise ratio of the pipeline UI. + +So, the plan for notification resources is to leverage _composition_, a pattern set forth in +the [Resources v2 RFC (#24)](https://github.com/concourse/rfcs/pull/24). Instead of annotating every single job, you +annotate a resource, and any time that resource is used in a build a notification will be fired, by executing the +notification resource's `put` step with the config fragment of the original resource (e.g. `ref: abcdef`) and the status +of the build. + +This way you don't have to update all of your jobs, and notifications don't clutter up the pipeline UI. Neato! + +This idea is _also_ a bit half-baked - follow along in [RFC #28](https://github.com/concourse/rfcs/pull/28) when you +have time! + +# What comes after all this? + +I dunno. + +I have a lot of respect for software that is eventually considered 'done.' I would really like Concourse's core design +to achieve that someday. + +We'll always have things to improve, whether it's through better efficiency, better UX, support for new underlying +technologies (Kubernetes, Nomad), or just making our codebase more accessible for contributors. But from a core design +standpoint, I think the most important thing is stability. + +The software industry changes quickly. Hot new tools show up all the time and get replaced by newer and better tools. I +don't want our users to have to keep re-doing their CI stack just to keep up. 
+
+Concourse should insulate projects from the constant churn in the industry by providing a solid set of principles and
+abstractions that hold true regardless of the underlying technology.
+
+We will continue to listen to user feedback and improve Concourse. Our goal is for it to support good patterns and
+prevent anti-patterns that we can identify in workflows across the industry. Thankfully patterns don't change as
+frequently as tools do.
+
+## Thanks!
+
+Everything I've outlined here comes from years of feedback through all of your GitHub issues, forum posts, and
+conversations in Discord (or Slack for you OGs). I'm very thankful for those of you that have stuck around and helped us
+understand your workflows, and I'm especially grateful for your patience.
+
+For those of you who couldn't wait and ultimately had to switch tools, I hope we
+accomplished [one of our original goals](https://github.com/concourse/docs/blob/dbf2dc1299be7c579012c2a8e8c21933890d21bb/what-and-why.scrbl#L14-L16),
+and I hope to see you back in the future!
+
diff --git a/docs/blog/posts/2019/2019-08-30-concourse-mid-year-update.md b/docs/blog/posts/2019/2019-08-30-concourse-mid-year-update.md
new file mode 100644
index 00000000..880f9ede
--- /dev/null
+++ b/docs/blog/posts/2019/2019-08-30-concourse-mid-year-update.md
@@ -0,0 +1,72 @@
+---
+title: Concourse Mid-year Update
+date: 2019-08-30
+---
+
+Phew, it's been a while. I've got lots of info to cover, so let's just get right into it.
+
+
+
+## Concourse OSS Growth
+
+As some of you may know, the Concourse team switched over to a PR-based workflow at the beginning of the year. This
+change is in line with our objectives of being open and transparent with our community of contributors. Plus, it's just
+a good thing to do because that's what most OSS projects do. 
Since then we've seen a noticeable uptick in PRs opened by
+non-Concourse core contributors across our repos:
+
+{{< image src="/images/2019/08/Screen-Shot-2019-08-30-at-10.12.17-AM-2.png" width="100%" >}}
+
+And while our peak period seemed to be concentrated at the beginning of the year, we're still seeing steady
+contributions through the summer months.
+
+{{< image src="/images/2019/08/Screen-Shot-2019-08-30-at-10.13.09-AM.png" width="100%" >}}
+
+The Concourse project also hit another big milestone: we now have over **4000 GitHub stars**! As of today we're sitting
+at 4213 stars, a 52% increase in popularity from this time last year.
+
+## Concourse v10
+
+In case you missed it, Alex wrote out a great blog post that outlines our long term vision for Concourse. It's got a
+breakdown of some exciting new features, from references to Spaces, the new Algorithm, Concourse Projects, etc. You can
+read more about it here: [2019-07-17-core-roadmap-towards-v10.md](2019-07-17-core-roadmap-towards-v10.md)
+
+## Concourse Project Planning
+
+{{< image src="/images/2019/08/Screen-Shot-2019-08-30-at-10.27.27-AM.png" width="100%" >}}
+
+There have been some big changes to how we organize and visualize each track of work now. If you take a peek in the project
+board [https://project.concourse-ci.org](https://project.concourse-ci.org/) you'll see now that each of our tracks is
+clearly labeled as a swimlane, with each prioritized Epic as cards under each swimlane. We hope you'll find this new
+format easier to consume.
+
+## Increased Transparency on Team Process
+
+We recently held our first Concourse team offsite. We discussed topics such as:
+
+- How can we tighten up our PR workflow?
+- When do we release, and how often?
+- Let's tackle the issue of Tech Quality
+- Concourse Principles
+
+We've got a lot of action items and takeaways from that meeting, so look forward to updates from the team once they
+begin to formalize! 
+
+## Concourse Teams in the Wild
+
+Members from the Concourse core team will be making a few more conference appearances before the end of the year.
+
+### **CF Summit EU 2019**
+
+Taylor Silva and Scott Foerster will be giving
+a [Concourse CI 102](https://cfeu19.sched.com/event/RBoh/concourse-ci-102-denise-yu-pivotal) talk at CF Summit EU 2019.
+I got a preview of the talk recently and it's super informative for folks who are interested in learning more about the
+internals of the `web` node and how jobs/resources are scheduled.
+
+### **SpringOne 2019**
+
+The Concourse team will be hosting a 2-hour workshop in Austin during the workshop days before SpringOne 2019. The
+registration list is already full (sorry!) but if you're lucky you might get a spot if someone drops out. Or, you know,
+write a Concourse pipeline to watch the website and register yourself if the state changes! You can read more about the
+event
+here: [https://springoneplatform.io/2019/workshops/concourse](https://springoneplatform.io/2019/workshops/concourse)
+
diff --git a/docs/blog/posts/2019/2019-10-15-reinventing-resource-types.md b/docs/blog/posts/2019/2019-10-15-reinventing-resource-types.md
new file mode 100644
index 00000000..7307731d
--- /dev/null
+++ b/docs/blog/posts/2019/2019-10-15-reinventing-resource-types.md
@@ -0,0 +1,375 @@
+---
+title: Re-inventing resource types
+date: 2019-10-15
+categories:
+- roadmap
+---
+
+Before the paint completely dries on the [v10 roadmap](2019-07-17-core-roadmap-towards-v10.md), there is one last
+big unknown I want to explore in case it brings more clarity to our direction: generic tasks.
+
+
+
+Resource types are a great way to share tools and integrations for others to use in their pipelines. Unfortunately,
+they're basically the _only_ way, and because resources are a very opinionated concept, the resource type interface is
+not always a good fit.
+
+Concurrent to this problem, there's been a lot of talk about generic re-usable tasks. 
The idea is to make tasks just as +easy to share and use as resource types. This would be a great alternative to resource types for workflows that don't +really fit the resource model! + +I finally found the time to dig in to these problems, and I have two new RFCs that I'm excited to propose: + +- [RFC #37: Prototypes](https://github.com/concourse/rfcs/pull/37) +- [RFC #38: Resource Prototypes](https://github.com/concourse/rfcs/pull/38) + +These proposals will have a lasting impact so I wanted to share some of my thought process here. + +## What makes a resource? + +If you'll humor me for a moment, I want to pin down what makes a resource a resource. + +Resources are the _continuous_ part of Concourse. They represent inputs changing over time, passing different versions +through jobs to form a pipeline. Resources are how the continuous thing-doer knows that there are things to do: +pipelines converge on the latest available versions for each job's inputs, running builds until everything stabilizes. + +A resource is a **single object** with a linear version sequence. This assumption allows Concourse pipelines to skip +ahead to the latest version by default instead of having to process every single version. + +Resources have an **external source of truth** ; the same resource definition will always yield the same versions, in +the same order, in any pipeline, in any Concourse installation. This makes Concourse pipelines portable and +self-contained, which is critical for disaster recovery. + +Resources are **immutable** ; fetching the same version will always give you the same bits. This allows `get` steps to +be cached so that they don't have to be downloaded all the time. + +Resources are **idempotent** ; outputs will always result in the same external effect when given the same configuration +and bits. This allows for builds to be safely re-run even if some of its `put` steps already ran. 
+ +A resource definition looks something like this: + +```yaml +resources: +- name: booklit + type: git + source: + uri: https://github.com/vito/booklit + branch: master +``` + +Every resource definition has a `type` and a `source`. The _type_ denotes the resource type - i.e. the implementation of +the [Concourse resource interface](https://concourse-ci.org/implementing-resource-types.html) to use. The _source_ +represents the location of the resource, i.e. the source of versions. This configuration is interpreted by the resource +type, and is a black box to Concourse. + +## How do resource types work? + +A resource type is packaged as a container image with 3 executables living under `/opt/resource`: `check`, used for +finding versions, `in`, used for fetching versions, and `out`, used for writing versions. Each command reads a JSON +request on `stdin` and emits a JSON response on `stdout`. These actions are run by Concourse during pipeline scheduling +and build execution. + +Concourse comes with a few "core" resource types. Some are necessary for bootstrapping, like the `registry-image` or +`docker-image` resource types. Some are included just to support common use cases, like `git` and `time`. We plan to +remove most of them though; it's making the download size pretty +big. ([#4586](https://github.com/concourse/concourse/issues/4586)) + +All other resource types must be configured in your pipeline under `resource_types:`. This makes the pipeline more +self-contained, decoupling it from the resource types the Concourse installation happens to have installed. + +Pipelines define their own resource types by configuring a resource for the type's container image: + +```yaml +resource_types: +- name: git + type: registry-image + source: + repository: concourse/git-resource + tag: 1 +``` + +Technically, resource types work by using _another_ resource type to fetch their container image. It's turtles all the +way down! 
+
+A resource type that fits the original design of resources implements the following semantics:
+
+- `check` queries the external source of truth to find new versions of the object.
+- `in` reads from the external source of truth and always produces the same bits for the same version and `params`.
+- `out` writes to the external source of truth if necessary based on the given bits and `params`. Any version emitted by
+  `out` can also be found by `check`.
+
+The easiest example of a 'proper' resource type is `git`. The `check` action consults `git log --first-parent` to return
+ordered commits for a single branch. The `in` action does a `git clone` to fetch the repo and check out the given
+commit; this is easily cached. The `out` action does a `git push`, optionally rebasing and returning a new version in
+the event of a conflict.
+
+## When is a resource type not a _resource_ type?
+
+{{< image src="/images/2019/10/image-3.png" alt="the treachery of container images" width="40%" >}}
+
+Resource types should always implement `check` and `in`. Being able to find and fetch versions is what makes a resource
+a [resource](https://www.merriam-webster.com/dictionary/resource). Some resource types, however, only implement `out`.
+These resource types exist solely to be run as a `put` step - a form of "generic tasks" limited by the fact that it
+can't produce any outputs local to the build.
+
+Resource types should always represent a single object. This is pretty foundational to Concourse pipeline semantics.
+Some resource types, however, try to represent sets of objects. The easiest example is pull request resource types,
+which represent each pull request as a version so that you can use Concourse to run tests for all your PRs.
+
+This is fraught with peril:
+
+- If you don't set `version: every` your builds will skip pull requests because all Concourse cares about is converging
+  on the latest version of each object. 
If each version is actually a different object, this assumption breaks.
+- Because `version: every` [allows versions to be skipped](https://github.com/concourse/concourse/issues/736) when used
+  with `passed:` constraints, now you have to cram everything into one monolithic job. You can try to work around this
+  by splitting it up and setting `serial: true` everywhere, but now you can't run PRs in parallel.
+- Pull requests can be skipped if the version history shifts around in a certain way. It's fundamentally impossible to try
+  to represent changes to all pull requests as one version stream with a stable order, so the order jumps around all the
+  time. If someone leaves a comment on a PR or pushes a new commit, it can get bumped to "latest" - and if a build has
+  already run for it, the other ("older") PRs won't run. Even with `version: every`, Concourse won't go _back in time_
+  to run old versions.
+- Navigation is awkward. The pipeline UI is pretty meaningless since all the jobs just reflect the status of the most
+  recent PR that ran, and going through the build history of a job is pretty confusing because each build may be a
+  different pull request.
+- Re-running builds for a pull request is annoying. You have to go to the PR resource, find the version for your PR, pin
+  it, trigger all the builds, wait for them all to start, and _then_ you can unpin the resource, lest you forget and
+  your pipeline never runs another PR again. This will get slightly better in v6.0 as we've finally implemented build
+  re-triggering ([#413](http://github.com/concourse/concourse/issues/413)), but that won't help with triggering builds
+  for an "older" PR that hasn't run yet. 
+ +This pain is the main source of motivation for the [v10 roadmap](2019-07-17-core-roadmap-towards-v10.md), which +introduces all the required components to dynamically set a pipeline for each pull request instead - each with a +resource representing only one pull request, as Concourse intended. + +In short, we have an interface being used for things beyond its original design. This results in surprising and unwanted +behavior because Concourse functionality that is sensible for _resources_ doesn't make sense for these other workflows. +This hurts everyone: users have to deal with surprises and terrible UX, resource type authors have to deal with these +limitations and workarounds, and the concept of 'resources' kind of erodes as these patterns spread. + +At this point it's pretty clear that there's a need to be able to share workflows and bespoke tools within the +community, but it's also clear that resources aren't the best pipeline-level representation for all of them. So if +resources aren't a good fit, what about tasks? + +## Usability of generic tasks + +Tasks can _almost_ be packaged up and re-used as easily as resource types. I've been experimenting with this idea by +writing [a generic task for building OCI images](https://github.com/vito/oci-build-task). It works by configuring +`vito/oci-build-task` as the task's image and configuring the rest of the task according to the README in the repo. + +So far, this UX doesn't sound that far off from using a resource type; you configure a resource type's image in +`resource_types:` and figure out how to configure the rest using its README, too. On paper, the only difference is that +a task's image is configured in `resources:` or with `image_resource:` instead. + +Let's compare what it looks like to take a `git` repo, build an OCI image from its `Dockerfile`, and push the image to a +registry, using a generic task vs. using a resource type. 
+
+We'll begin with two resources: one for my image source code, and one for the image repository on the registry:
+
+```yaml
+resources:
+- name: my-image-src
+  type: git
+  source:
+    uri: # ...
+
+- name: my-image
+  type: registry-image
+  source:
+    repository: # ...
+    tag: latest
+```
+
+Next we'll add a job that does the build-and-push.
+
+Let's see how it looks to use a generic task:
+
+```yaml
+jobs:
+- name: build-and-push
+  plan:
+  # fetch repository source (containing Dockerfile)
+  - get: my-image-src
+
+  # build using `oci-build` task
+  - task: build
+    image: oci-build-task
+    config:
+      platform: linux
+
+      image_resource:
+        type: registry-image
+        source:
+          repository: vito/oci-build-task
+
+      params:
+        CONTEXT: my-image-src
+
+      inputs:
+      - name: my-image-src
+
+      outputs:
+      - name: image
+
+      run:
+        path: build
+
+  # push using `registry-image` resource
+  - put: my-image
+    params: {image: image/image.tar}
+```
+
+Now let's see how it feels to use a resource type instead. If we switch the `my-image` resource from `registry-image` to
+`docker-image`, we can leverage its ([quite contentious](https://github.com/concourse/docker-image-resource/issues/190))
+build-and-push behavior:
+
+```yaml
+jobs:
+- name: build-and-push
+  plan:
+  # fetch repository source (containing Dockerfile)
+  - get: my-image-src
+
+  # build + push using `docker-image` resource
+  - put: my-image
+    params:
+      build: my-image-src
+```
+
+Resources clearly take a _lot_ less effort to use in a pipeline. No wonder they're being used for everything!
+
+Providing a full task config is a lot of work. It allows for a lot of flexibility, but it feels verbose. Verbosity means
+wasting time on typos and forgotten boilerplate.
+
+Verbosity aside, tasks are also strictly worse at parameterization. Task `params` are really environment variables, so
+every value has to be a string. This is OK for simple values, but anything more complicated will need to be marshalled
+and unmarshalled. 
This is really crummy compared to resource types, which support complex YAML/JSON config structures +like lists and objects. + +It seems like we need something in between tasks and resource types. We need something as versatile as tasks and as easy +to use as resource types. + +## Bridging the gap + +{{< image src="/images/2019/10/image-1.png" width="100%" >}} + +Let's hone in on the reason why resource types don't work for every use case: they have a particular set of actions +which have particular semantics because they're built for a particular Concourse use case: resources. + +The [v10 roadmap](2019-07-17-core-roadmap-towards-v10.md#rfc-24-resources-v2) +introduced [RFC #24](https://github.com/concourse/rfcs/pull/24), a "generalized resource" interface which supports +`check`, `get`, `put`, and `delete` actions while avoiding resource terminology like "version" and "source" so that it +can be used for other workflows. It's kind of a strange middle ground: it's limited to resource-y actions while avoiding +resource-y semantics. + +Aside from the resource-y actions, RFC #24 was pretty darn close to what I wanted out of generic tasks, so I decided to +just fork it as [RFC #37](https://github.com/concourse/rfcs/pull/37) and make one key change: instead of supporting +`check`, `get`, `put`, and `delete`, support arbitrary actions instead. + +With `check` and `get` removed, the interface was definitely not a _resource_ type interface anymore. And with its +support of multiple actions, it definitely wasn't a task interface either, so I needed a new name for it. + +After much deliberation, I decided to call these things **prototypes**. This name is inspired by prototype-based +object-oriented languages like JavaScript, [Self](http://www.selflanguage.org/), and [Io](https://iolanguage.org/). +Conveniently enough, it still has "type" in the name, so all those `type:` fields on resources still make sense! 
+ +{{< image src="/images/2019/10/image-2.png" width="100%" >}} + +The next change in my fork of RFC #24 was to adjust the terminology. Now that the interface was so open-ended, I wanted +to build a solid mental model so that prototype authors would have an idea of how prototypes are meant to be designed. I +did this by stealing more terminology from prototype-based OOP. + +Here's where I landed: prototypes handle messages (previously 'actions') being sent to objects (previously 'config'). In +response to a message, a prototype may emit more objects (previously 'config fragments'). + +Thinking about Concourse as "object-oriented CI/CD" feels pretty compelling. This mental model can be easily used to +describe how resource types work: + +- The `check` message is sent to the `source` object to list `version` objects. +- The `get` message is sent to a `version` object (a [clone](https://en.wikipedia.org/wiki/Prototype-based_programming) + of the `source` object) to fetch its bits. +- The `put` message is sent to the `source` object to create `version` objects. + +Prototype implementations have full control over their domain of objects and the messages supported by those objects. +For example, a `git` prototype could support multiple types of objects: + +- a **repo** object, `{"uri":"..."}`, could support `branches` to find branch objects and `check` to find commit objects + in the "default" branch +- a **branch** object, `{"uri":"...","branch":"..."}`, could support `check` to find commit objects on the branch or + `delete` to delete the branch +- a **commit** object, `{"uri":"...","branch":"...","sha":"..."}`, could support `get` to clone the repo and checkout + the commit + +Over time, we can start to identify patterns and implement pipeline semantics for certain interfaces, just like we have +with `check`, `get`, and `put`. For example, when a build status changes, Concourse could run the `notify` message +handler for any objects in the build which support it. 
A `git` prototype could implement this to automatically update +commit status on GitHub. This would eliminate a whole class of `put`-only resource types and de-clutter everyone's +pipelines. + +## Prototypes as 'generic tasks' + +Whereas a task is built around a single action, a prototype is built around objects which can handle messages. As such, +the `oci-build` task would instead be an `oci-image` prototype supporting a `build` message. + +Here's how it could look to use a prototype for building an OCI image (note the use of `prototypes:` instead of +`resource_types:`): + +```yaml +prototypes: +- name: oci-image + type: registry-image + source: + repository: vito/oci-image-prototype + +jobs: +- name: build-and-push + plan: + # fetch repository source (containing Dockerfile) + - get: my-image-src + + # build using `oci-image` prototype + - run: build + type: oci-image + inputs: [my-image-src] + params: {context: my-image-src} + outputs: [image] + + # push using `registry-image` resource + - put: my-image + params: {image: image/image.tar} +``` + +Here we use a new `run` step to run the `oci-image` prototype and send the `build` message to an object, given as +`params`. With the `run` step, `inputs` and `outputs` must be explicitly provided, though `inputs` can be automated in +the future with [#2692](https://github.com/concourse/concourse/issues/2692). + +All in all, this feels a whole lot better than the generic tasks of old. It's way less verbose, and feels a lot like +using a `put` step, with no abstractions being abused and no surprising behavior. Mission accomplished? + +## How does this impact the roadmap? + +Through all of this, the only thing I've really added to the roadmap is the `run` step. Everything else is a lateral +move; instead of using 'generalized resources' for spatial resources, notifications, and triggers, we would use ' +prototypes' instead. + +I think the larger impact will be on future roadmaps. 
With a more flexible model at our disposal we can shorten the path
+from identifying a common workflow to implementing richer pipeline semantics for it. Concourse becomes a "language of
+CI/CD," where the objects are provided at runtime and can be shared with the community.
+
+## How to get involved
+
+I'm still getting a grip on this idea myself but I'm excited to see the places we can go with it. If you'd like to get
+involved, I could use some feedback on the RFCs!
+
+- [RFC #37: Prototypes](https://github.com/concourse/rfcs/pull/37) is based
+  on [RFC #24](https://github.com/concourse/rfcs/pull/24), allowing implementations to support arbitrary messages and
+  switching everything over to prototype-based terminology. It also introduces the above `run` step for executing
+  arbitrary message handlers.
+- [RFC #38: Resource Prototypes](https://github.com/concourse/rfcs/pull/38) shows that prototypes which implement
+  `check` and `get` messages can be used as resources in a pipeline, while maintaining backwards-compatibility for a
+  smooth migration to prototype-based resources over time.
+
+If everything goes well I plan to close RFC #24 and the other 'generalized resources' based RFCs in favor of these new
+prototype-based RFCs. (I still need to write up new prototype-based RFCs for the rest though: spatial resources,
+notification resources, trigger-only resources.)
+
+Special thanks to everyone that has helped me talk through ideas in Discord, on GitHub, and in person!
diff --git a/docs/blog/posts/2020/2020-01-24-a-new-hangar-for-resource-types-2.md b/docs/blog/posts/2020/2020-01-24-a-new-hangar-for-resource-types-2.md new file mode 100644 index 00000000..b1961888 --- /dev/null +++ b/docs/blog/posts/2020/2020-01-24-a-new-hangar-for-resource-types-2.md @@ -0,0 +1,62 @@ +--- +layout: post +title: A New Hangar For Resource Types +date: 2020-01-24 +--- + +{{< image src="/images/downloaded_images/Strategic-Plan_Page_13_Image_0001-750.jpg" alt="The inside of an airplane +hangar" width="100%" >}} + +_Photo:_ [_National Parks +Service_](https://www.nps.gov/subjects/aviation/images/Strategic-Plan_Page_13_Image_0001-750.jpg) + +The [idea to build a dedicated resource types catalog](https://github.com/concourse/concourse/issues/191) has been +long-discussed. We’d like to announce that the idea has come to fruition: the new +Concourse [resource types catalog](https://resource-types.concourse-ci.org/) is wheels up! + + + +The catalog lists Concourse [resource types](https://concourse-ci.org/resource-types.html) that have recently been +submitted to the [resource types GitHub repo](https://github.com/concourse/resource-types). Originally, resource types +were listed on a [GitHub wiki](https://github.com/concourse/concourse/wiki/Resource-Types) page. While the wiki page +listed resource types, it didn’t provide much information about each resource. The resource types catalog will provide +more information about each resource and enhanced search, both of which will make it easier to compare and find resource +types. + +The addition of the resource types catalog means that the original resource types wiki page will be deprecated. If you +have a resource listed on the wiki page, please migrate it over to +the [GitHub repo](https://github.com/concourse/resource-types). 
+ +## Contribution + +As part of the effort to move resource types to a new home, we’ve also spent some time thinking through the resource +type submission process. This new process should make it easier for members of the community to contribute new resource +types. + +The updated process consists of forking the existing resource types repository, adding your YAML file and submitting a +pull request. After a quick review by community members, the resource type will be added to the repository and will be +available on [resource-types.concourse-ci.org.](https://resource-types.concourse-ci.org) The process is described in +more detail [here](https://github.com/concourse/resource-types/blob/master/README.md). We’re also working on automating +some of this process using Concourse! + +If you’ve gotten this far, have taken a quick look at the catalog, and are wondering why there is no “_resource type for +x_”, it’s a great opportunity to add your own! There are already some helpful walkthroughs from other community members +on writing resource +types ([Implementing a Resource Type](https://concourse-ci.org/implementing-resource-types.html), [Developing a Custom Concourse Resource](https://content.pivotal.io/blog/developing-a-custom-concourse-resource), [How to Make a Concourse Resource Type](http://www.mikeball.info/blog/how-to-make-a-concourse-resource-type/)) +which are a great place to start. + +## What’s Next? + +We’ve come a long way with Concourse resource types and are excited about the new catalog. We now have our sights set on +adding more functionality on the page (check out +the [backlog](https://github.com/concourse/resource-types-website/projects/1)). This includes displaying more +information about each resource type on the cards (including GitHub stars and resource type actions), as well as +improved search and sorting. 
+ +We also have an eye on the V10 roadmap and can see [prototypes](../2019/2019-10-15-reinventing-resource-types.md) on the +horizon. + +In the spirit of the open-source project that it is, we’d also love feedback to inform our roadmap. So if you have +feedback, we’d love to hear it. The best way to reach us is to either drop us a line in #resource-types +on [Discord](https://discord.gg/cShhjvr) or submit an issue against +the [GitHub repository](https://github.com/concourse/resource-types-website/issues/new). diff --git a/docs/blog/posts/2020/2020-03-17-2020-survey.md b/docs/blog/posts/2020/2020-03-17-2020-survey.md new file mode 100644 index 00000000..698cf4e9 --- /dev/null +++ b/docs/blog/posts/2020/2020-03-17-2020-survey.md @@ -0,0 +1,45 @@ +--- +title: Concourse 2020 Community Survey +date: 2020-03-17 +categories: +- product-update +- roadmap +--- + +{{< image src="/images/2020/03/whyconcourse2.png" width="100%" >}} + + + +## Help shape the future of Concourse + +Since Concourse CI was created, thousands of users worldwide have helped the project by opening issues, committing code, +and providing feedback to the team that develops the product. This community involvement is priceless - thank you, +Concourse community! 👏 + +One of the ways the Concourse team collects feedback is through our annual Community Survey. This lets us gather crucial +information about how users deploy Concourse, how different use cases scale, and various configuration patterns. We can +also directly collect requests for new features and bug fixes. Past surveys have led us toward crucial features like the +Dashboard UI, RBAC, new container placement strategies, and dozens of performance fixes. + +Each year’s results are published in [a blog post](https://medium.com/concourse-ci/2018-community-survey-ddff90bdc35b) +to help share out our most interesting findings. + +## The state of Concourse in 2020 + +Today we are launching the Concourse 2020 Community Survey. 
This survey is open to everyone who uses Concourse, whether +it’s your daily CI/CD tool, one of many automation tools your company uses, something you’re experimenting with on the +side, or something you’re hoping to learn more about in 2020. The answers you provide will help ensure the product, +community, and ecosystem fit the needs of the people closest to it. + +Please help us by participating in this 5-minute survey: + +**Edit: Survey closed! Thank you to everyone who +participated. [Check out the results in the 2020 Community Report.](2020-05-14-community-survey-2020-results.md)** + +#### **Spread the word!** + +We need as many users as possible to participate in this survey to help us better understand our global user base. We'd +be grateful if you would spread the word by sharing this post on your social network feeds, around the office, at +meet-ups, and in other communities. + +Thank you! diff --git a/docs/blog/posts/2020/2020-03-25-concourse-wfh.md b/docs/blog/posts/2020/2020-03-25-concourse-wfh.md new file mode 100644 index 00000000..e7dfe010 --- /dev/null +++ b/docs/blog/posts/2020/2020-03-25-concourse-wfh.md @@ -0,0 +1,160 @@ +--- +title: "Developing Concourse (from home \U0001F3E1)" +date: 2020-03-25 +--- + +In March 2020, countless companies made a shift to have their employees work from home. For remote staff getting work +done can be challenging enough, but staying connected to your team and company culture can be even more challenging. On +the Concourse team, we’re working hard to keep our product development running smoothly with some additions to our tech +stack and day to day workflow. + + + +## Concourse team composition + +At VMware (formerly Pivotal Software), there are fourteen engineers and five product folks working on +the[Concourse CI project](https://github.com/concourse/concourse/) full time. 
We all care deeply about the software +we're building, and put a great deal of effort and consideration into _how we build software_ in order to be as +effective as possible. + +Most of our team lives in Toronto, but in order to maintain velocity as the team expands to include engineering and +product talent from other countries, and as we travel to meet with customers and talk about Concourse at conferences and +meetups, we've refined our process to make ourselves resilient to occasions when a few employees might need to work from +home or abroad. + +Then 2019's novel coronavirus arrived, and the ensuing pandemic in late February and early March 2020 forced us +_provided the right opportunity_ to test a fully-remote team experience. + +We thought we'd put together a quick post here to share how we're making it work. 😄 + +## Pre-COVID19 + +The majority of the Concourse team is located in the greater Toronto area, with satellite members in a few cities in the +United States. The Pivotal Software office in downtown Toronto is our home throughout the week. The team is set up with +rows of computer workstations on the East side of the office, and gathers in various meeting rooms around the office for +standups, prioritization meetings, discussions, and presentations. + +If you're a Pivotal/VMware customer, a Concourse contributor, or a user interview collaborator who's chatted with the +team in the past, then you're probably familiar with a few of the meeting rooms from which we conduct Zoom meetings. + +The Toronto office is a great work environment. We're lucky to have the opportunity to get up to play a game of ping +pong between engineering stories or meetings, and it's great to be in the same room to collaborate around a whiteboard +or break down a problem with post-it notes. + +But what happens when your government enforces non-essential businesses to close their offices and asks employees to +work from home instead? 
+
+## Post-COVID19
+
+When the news and scale of the outbreak arrived in early March, we made the shift to collectively working from home as
+quickly as possible. Since the week of March 8th, all of us have been dutifully self-isolating, working out of our
+respective homes each day.
+
+**Most of the solutions we've come up with are focused on maintaining our existing process** (especially our use of
+IPMs, pairing, retrospectives, and a high degree of collaboration on every possible front) **while making life remote
+friendly at the same time.**
+
+With those goals in mind, our team has pulled from different experiences and techniques learned from past projects as
+well as new ideas that we're still iterating on daily. This effort has made for a novel mix of video, audio, and text
+applications that help us work in a comfortable, fun, efficient manner.
+
+### Zoom
+
+First and foremost, we have a perpetual group [Zoom](https://zoom.us/) meeting set up that everybody on the team hangs
+out in throughout the workday.
+
+{{< image src="/images/2020/03/Screen-Shot-2020-03-20-at-12.08.28-PM.png" alt="A typical workday in the Concourse
+hangar." width="100%" >}}
+
+Having one single room might be unconventional, but negates the need to constantly jump in and out of different Zoom
+meetings, and all of the confusion that can create. It's tied to an easy-to-remember URL that makes it painless to join
+each day - no more memorizing meeting IDs!
+
+Additionally, having everyone in the same place makes it feel more like we're all in the office together. Seeing each
+other's faces throughout the day makes working at home feel a lot more friendly and less isolated.
+
+### Discord
+
+The other major difference to Zoom meetings we held in the past is that now we remain fully muted. Zoom takes care of
+video communication, and we rely on [Discord](https://discordapp.com/) for audio and individual screen sharing.
+
+We were already using Discord for our open source community's text-based chat, but it also excels at fast, simple, and
+effective voice communications. The Concourse team uses a series of private voice channels (named after famous aircraft,
+of course ✈️) that we can join and depart with a single click.
+
+{{< image src="/images/2020/03/Screen-Shot-2020-03-23-at-1.56.04-PM.png" alt="Pairs of engineers working in audio
+channels on Discord" width="20%" >}}
+
+This makes it easy to navigate for pair programming, impromptu meetings, or general chat and attention-getting. There’s
+even a #water-cooler channel that acts as the de facto hangout spot! With this system you can see who is paired up in the
+respective rooms at a glance, adding a level of transparency and organization that isn't possible when everyone is
+pairing through separate Zoom meetings or other telecommunication products.
+
+### Slack
+
+The Concourse team has always used Slack to communicate internally, and Slack still plays a big role in organizing our
+work. Since moving to work from home and using Zoom and Discord as described above, however, we're using it less - the
+number of Slack messages simply doesn't need to be as high. If nothing else, it's great to have Slack as a backup option
+for screen sharing, especially since we can use it to pull in non-team members from the company as well.
+
+It still plays a large role in communicating with other product, engineering, and customer-facing teams in the company,
+and can't be beat for asynchronous messaging. However, when we need to chat within our team throughout the workday, we
+can just grab someone's attention and start talking immediately instead.
+
+## Remote Pairing
+
+Remote pairing is one area of our work process that is still up in the air. The Concourse team
+practices [pair programming](https://en.wikipedia.org/wiki/Pair_programming) every day, and trying to do that remotely
+can be challenging at times.
We’ve tried the following methods of pair programming, with each having different strengths +and weaknesses for different situations. + +### Toronto workstations + +The pairing workstations in the Toronto office are set up so we can use OSX's screen sharing tool to securely connect to +them over our company’s VPN. This allows us to share everything (browser, IDE, terminal, and more) as we normally would +if we were sitting side by side in the office to pair program. + +### Discord Go Live + +In a pinch we’ve been able to share screens +using [Discord’s Go Live](https://support.discordapp.com/hc/en-us/articles/360040816151-Share-your-screen-with-Go-Live-Screen-Share) +feature. In mid-March Discord raised the viewing limit on Go Live and Go Live - Screen Share streams from 10 people at a +time to 50 people, making this work well for mobbing as a group around one person's screen as well. + +One of the drawbacks is that it only streams in 720p resolution, making it hard to read text on the screen unless the +text is enlarged. But there are a lot of times when a modest screen resolution is all you need, and we can switch to +Slack's screen sharing in the rare case that we need higher fidelity. + +### SSH + tmux + +Sometimes you just need a command line. In these situations, the team can also ssh onto any of the Toronto workstations, +or any of the team's various VMs running linux on the cloud to share a [tmux](https://github.com/tmux/tmux/wiki) +session. + +## Attitude + +This article has been heavy on technological solutions and workflow, but another thing worth mentioning is how positive +and encouraging the team's attitude has been during the pandemic. + +{{< image src="/images/2020/03/Screen-Shot-2020-03-19-at-10.41.42-AM-1.png" alt="Concourse engineers, best engineers." +width="100%" >}} + +Even isolated to our respective homes, we've seen everyone step up to keep each other happy and healthy, and to keep +work moving at a sustainable pace. 
I won't go into detail about all the memes, inside jokes, guitar solos, Zoom virtual +backgrounds, and pet cameos that have been shared among the team in the past couple weeks - you can use your +imagination. 😂 + +Don't underestimate the importance of taking breaks and having fun with your work! + +## What next? + +Nobody knows for certain how long we'll be working in full isolation, but since our goal is +to [flatten the curve of the outbreak](https://www.livescience.com/coronavirus-flatten-the-curve.html), it's in our best +interests to be prepared for a long wait. + +Our team is built around iterating on process and practices, and we plan to continue working on how we can collaborate +to make sure we continue to deliver as much product value as possible. + +[Join us on Discord](https://discord.gg/MeRxXKW) to learn about new work in progress, report on bugs, collaborate on the +codebase, or just keep us company! 😁 + diff --git a/docs/blog/posts/2020/2020-04-16-nobody-wants-a-stale-bot.md b/docs/blog/posts/2020/2020-04-16-nobody-wants-a-stale-bot.md new file mode 100644 index 00000000..ba1c8ecd --- /dev/null +++ b/docs/blog/posts/2020/2020-04-16-nobody-wants-a-stale-bot.md @@ -0,0 +1,99 @@ +--- +layout: post +title: "Community update: enter Discussions! \U0001F389" +date: 2020-04-16 +--- + +Hasta la vista, stale bot. + + + +{{< youtube src="https://www.youtube.com/embed/0Kug8mJ8WiM?start=110&feature=oembed" >}} + +## tl;dr: + +- We've been granted access to GitHub's beta Discussions feature! 🎉 +- [Discussions on the `concourse` repo](https://github.com/concourse/concourse/discussions) will be used for **questions + and technical support**. +- [Discussions on the `rfcs` repo](https://github.com/concourse/rfcs/discussions) will be for **incubating ideas for new + workflows** , which eventually turn into Pull Requests (also on the `rfcs` repo). 
+- From here on, Issues on the `concourse` repo are exclusively for **project backlog** and **bug reports** - i.e. + planned or emergent work. +- [Creating an Issue](https://github.com/concourse/concourse/issues/new/choose) directs you to these options, so there's + no need to change your muscle memory. +- [ + `CONTRIBUTING.md` now covers this workflow](https://github.com/concourse/concourse/blob/fab3de1722a2ce998d3710bd066453594f24ec57/CONTRIBUTING.md#from-ideas-to-implementation) + in addition to the more technical content. +- With these changes in place, the stale bot we all know and hate has been terminated. +- All Pull Requests will be assigned to someone as part of our daily process, and we will begin dedicating half of each + day to PR review. +- I am going to shift my focus from planning/prioritizing to shepherding RFCs and writing code. Expect more blog posts + in the future! + +## An update on triage + +With Concourse, there is _always_ a lot of work to do. I personally would love to see some of +the [larger issues](https://github.com/concourse/concourse/issues/324) worked on today, but we (the Concourse team) have +to choose our battles. A good chunk of our time is spent on upkeep, architectural improvements, and trying to identify +the underlying needs across many feature requests so that we can make a lower volume of high-impact changes. + +The long and short of it is that the amount of work to do – both in code and in the community – greatly exceeds the +number of people available to do it. Concourse is a product that has the _entire software industry_ as its customer – +including video game devs, mobile app devs, DevOps, and people who just want CI for personal side-projects. It's a lot +to stay on top of, but it's something to embrace: it forces us to think in the abstract. It just takes time. 
+
+The main goal of these changes is to promote healthier discourse by setting expectations about the status of an
+engagement more clearly. Issues are concrete; they will be prioritized and finished at some point, by the core team or –
+in a perfect world – by a volunteer from the community. Discussions on the other hand are at an earlier stage in the
+process.
+
+Discussions on the `concourse` repo will be used for questions and support. These can be more open-ended than bug
+reports – there may indeed be a bug, but there might also just be an answer or a better approach. The outcome of these
+discussions may be a bug report, an improvement to the docs, an answer to the question, or perhaps a new Discussion on
+the `rfcs` repo.
+
+Discussions on the `rfcs` repo will be used for incubating new ideas. By eliminating the "solution-first" framing of
+feature request issues, we can begin to focus on the _problems_ instead. The hope is that we can all more easily
+identify underlying patterns and try to form broader solutions – whether they're ones we need to plan, whether they're
+already on the roadmap, or whether there's simply an existing solution that needs to be easier to discover.
+
+With these changes, we no longer have any need for the 'stale bot' as Discussions can just keep trucking along at their
+own pace. The bot has been terminated. Unfortunately, I removed its configuration before uninstalling it, causing it to
+assume the default settings and unleash its annoying comments across a slew of issues and pull requests, going out in
+one last blaze of glory. Sorry about that.
+
+## Improving RFC engagement
+
+Some of you have submitted RFCs and haven't received much feedback yet. I'm really sorry about that.
+
+With v6.0 out and with the dust settling on the ["v10" roadmap](../2019/2019-07-17-core-roadmap-towards-v10.md), I am going
+to shift my role towards shepherding RFCs and getting back to writing code rather than endlessly planning and
+prioritizing. It's been a long time! This will also eliminate the conflict-of-interest where I author RFCs and then
+prioritize them while neglecting others. Definitely not a trend that I want to continue.
+
+Expect more RFC update blog posts soon!
+
+## Improving Pull Request engagement
+
+Another area we're always trying to improve on is Pull Request engagement. We've tried a lot of things, but in the
+end it's been hard to integrate into our day-to-day pairing process and escape the single-point-of-failure (_cough_ me).
+
+We're going to start assigning each and every PR to someone on the team and dedicate half of each day to PR review. Our
+goal is to dramatically shorten the feedback cycle time and not leave anyone hanging.
+
+## What about discuss.concourse-ci.org?
+
+These changes make [our forums](https://discuss.concourse-ci.org) a little (ok a lot) redundant. Once the Discussions
+feature feels solid I plan to shut the forums down and centralize our community in GitHub (in addition to Discord
+for real-time chat).
+
+## What's happening with VMware?
+
+Some of you may be wondering what the future holds for Concourse through VMware's acquisition of Pivotal, the company
+that has supported Concourse's development since 2015.
+
+VMware is heavily invested in Concourse – in fact some of our recent significant contributions originated from VMware
+pre-acquisition. Concourse is already being used internally, and there is work underway planning Concourse's integration
+into VMware's product ecosystem. We ain't going anywhere!
+
+Thanks and stay safe everyone!
diff --git a/docs/blog/posts/2020/2020-05-06-rfc-roundup-may-6th-2020.md b/docs/blog/posts/2020/2020-05-06-rfc-roundup-may-6th-2020.md new file mode 100644 index 00000000..98d342da --- /dev/null +++ b/docs/blog/posts/2020/2020-05-06-rfc-roundup-may-6th-2020.md @@ -0,0 +1,76 @@ +--- +layout: post +title: 'RFC round-up: May 6th, 2020' +date: 2020-05-06 +categories: + - rfcs +--- + +Howdy, and welcome to our first RFC round-up! 🤠 + + + +{{< image src="/images/2020/05/image.png" width="40%" >}} + +For those unaware, [Concourse RFCs](https://github.com/concourse/rfcs#concourse-rfcs) are a process for proposing and +collaborating on improvements to core Concourse functionality, including pipeline behavior, new step types, new operator +capabilities, etc. + +In short, RFCs are where all the _cool new stuff_ is planned. 😎 + +My goal is to provide an update at least every few weeks on the status of RFCs and shepherd them through the process via +blog posts like this one. Each post will be limited to a handful of RFCs in order to focus our energy and not overwhelm +readers. + +## RFCs ready to merge + +The following RFCs have been given the `resolution/merge` label: + +- [RFC #33: archiving pipelines](https://github.com/concourse/rfcs/pull/33) proposes that pipelines can be "archived" - + effectively a soft-delete, or perhaps a long-pause. This RFC is ready to go, and in fact we've already started to + implement it. It will be an experimental opt-in feature until this RFC is merged. +- [RFC #34: pipeline instances](https://github.com/concourse/rfcs/pull/34) proposes a mechanism for grouping related + pipelines together under a single identifier, further breaking down each instance by a set of associated vars. + +Both of these RFCs are key components to our plan for Git branch/PR pipeline automation, as described in +the [v10 blog post](../2019/2019-07-17-core-roadmap-towards-v10.md). 
+ +Per the [resolution process](https://github.com/concourse/rfcs/blob/master/README.md#resolution), if there are no +objections or significant changes in the 2 weeks after this post is published, they will be merged! 🚀 + +## RFCs in need of specific feedback + +These two RFCs are nearing completion, but have some outstanding questions: + +- [RFC #39: var sources](https://github.com/concourse/rfcs/pull/39) is the RFC behind the [experimental + `var_sources:` feature](https://concourse-ci.org/vars.html#var-sources) introduced in v5.8.0. The main question is + around whether and how it may be used to replace the cluster-wide credential manager configuration. +- [RFC #31: `set_pipeline` step](https://github.com/concourse/rfcs/pull/31) is mostly implemented already, + also [shipped experimentally](https://concourse-ci.org/jobs.html#schema.step.set-pipeline-step.set_pipeline) in + v5.8.0. The remaining question is around whether to support `set_pipeline: self` - this is a point of contention as + there may be a better pattern for that sort of thing in the + future ([hint](https://github.com/concourse/rfcs/pull/32)). + +Lend us your opinions! + +## RFCs in need of attention + +These ones just need more eyes on'em: + +- [RFC #43: task queue](https://github.com/concourse/rfcs/pull/43) proposes a "Resource Pool" mechanism with the end + goal of fixing the age-old problem of Concourse overloading workers. If you've run into this before and you'd like to + see it fixed, this is your chance to get involved! +- [RFC #41: OPA integration](https://github.com/concourse/rfcs/pull/41) proposes support for policy enforcement + through [Open Policy Agent](https://www.openpolicyagent.org/), which would allow access control to be delegated to an + external OPA endpoint. Neat! + +## Wrapping up... + +Thanks to everyone who has gotten involved already, and special thanks to the RFC authors for your patience! 
+ +Sorry if you had an RFC that didn't make the cut. 😕 We have +a [backlog of 23 RFCs](https://github.com/concourse/rfcs/pulls) at the moment, and I'll be going through all of them +through the next few posts. + +Happy trails! 🐎 + diff --git a/docs/blog/posts/2020/2020-05-14-community-survey-2020-results.md b/docs/blog/posts/2020/2020-05-14-community-survey-2020-results.md new file mode 100644 index 00000000..f0454aab --- /dev/null +++ b/docs/blog/posts/2020/2020-05-14-community-survey-2020-results.md @@ -0,0 +1,223 @@ +--- +title: Concourse 2020 Community Report +date: 2020-05-14 +categories: +- design +--- + +A little over a month ago, the Concourse team sent a survey out to the community. The purpose of this survey was to gain +insight into our users as well as measure our year-over-year growth. In the process of learning about how you all deploy +and manage Concourse, we also received tons of great feedback about what's working well and what needs work in order to +make Concourse even better. We’re excited to share our findings! + + + +A huge thank you to everyone who responded. At the time of this writing, we’ve received over 100 responses and that +number is still climbing. Your contributions are valuable, and learning about how different segments of our user base +works with our product is going to help us make Concourse even better in 2020! + +## Feature requests, areas to improve + +### Configuration ⚙️ + +This is a big one. The community wants more control for administrators and operators, more options for integrations, and +more power over resource types configuration. We also learned a lot about the specific ways Concourse is making life +more difficult than it needs to be in terms of configuring tasks, pipelines, teams, and the product itself. 
+ +Code and configuration duplication is a serious issue, and our users want more powerful templating tools to help them +split pipeline configuration into more manageable chunks that will be easier to reason about and maintain. + +In addition, there's a lot of support for concepts covered by +our [Instanced Pipelines](https://github.com/concourse/rfcs/pull/34), [Spatial Resources](https://github.com/concourse/rfcs/pull/29), +and other major architectural ideas that we have prioritized for 2020. + +We're also paying particular attention to the number of responses that were focused on git integration and GitOps +workflows. If you have a way of using Concourse that you feel isn’t well represented by the current featureset or +CLI/UI, please [@mention us on Twitter](https://twitter.com/concourseci) +or [drop by Discord](https://discord.gg/MeRxXKW) and tell us about it. + +### Web UI 🖥 + +Concourse’s web UI is a hot topic! While most of the web UI feedback is positive, there are lots of suggestions on how +to improve it or what to add next. Feedback from the survey about the web UI could make for its own blog post, so in the +interest of being brief, I’m just going to touch lightly on the strongest signals/insights that were generated. + +A number of respondents called to attention the ease of use and clarity of information in the current UI. While we’ve +been continuously iterating to add text labels instead of just icons where possible, and to add clarifying tooltips +elsewhere, there’s clearly a need for more. In addition to several smaller tweaks, we have work underway around adding +the minimum viable [Favorite Pipelines](https://github.com/concourse/concourse/issues/5434) functionality that will be +built upon to extend the [Archiving Pipelines](https://github.com/concourse/concourse/issues/5434) functionality +introduced in v6.1.0 to the front end. 
Hopefully these fixes will make a big impact, decluttering Concourse dashboards +and making it a lot faster to find what you want at the same time. + +Another area where we can clearly improve is by adding more detail to the dashboard. Users are requesting more options +for adding notes, tracking an audit trail of actions in the UI, clearer and more detailed error messages, and more +statistical information like build duration and lead times. We’ll be looking at the possibilities in this space over the +coming months. If you have ideas, start a [Discussion on Github](https://github.com/concourse/concourse/discussions). + +### Runtime Improvements 📈 + +In addition to more stability and performance, the community puts a high level of importance and value on improving the +efficient use of check containers, global locks on resource checking, and the ability to clear cached resource versions +of a worker on demand with _fly_. + +### Docker Enhancements and Performance 💨 + +We hear you. 😀 + +Comments from the community emphasized optimizing docker-image resources, facilitating docker in worker containers, and +better reporting on docker image status. There are a number of different voices in this conversation all with very +different strategies for how they use Concourse, and we're sorting through feedback to help us prioritize low hanging +fruit and high value enhancements that the team can prioritize. + +Additionally, we're actively monitoring issues and continuously collecting data on Docker performance so that we can +make more improvements - we understand that every last bit of performance we can squeeze out of Docker interactions +results in a huge benefit to many of our users. + +### Stability, Kubernetes, Documentation ⚖️ 🚢 📚 + +These issues have remained top of mind in the community for the past few years, and this year's survey is no exception. 
+
From a stability perspective, the team has made great strides with the release of the new algorithm
+in [version 6.0.0](https://github.com/concourse/concourse/releases/tag/v6.0.0). The team has also taken further steps
+into being more k8s native by beginning an ongoing track of work dedicated to running K8s workloads. And lastly, our
+documentation work is ongoing - we hope to prioritize more ‘getting started’ materials for beginners in order to
+enable new users to climb the learning curve faster than before. For more advanced users, we also plan more
+documentation around topics like autoscaling, tracing, and build statistics, among others.
+
+## Demographic Data
+
+### How long have you used Concourse?
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.24.35-AM-1.png" width="100%" >}}
+
+Most of the people who responded indicated they had been using Concourse for one year or less. It's great to see that
+more people are picking up and experimenting with Concourse with each new release, and it's just as exciting to see that
+people stick around: more than 45% of respondents said they have been using Concourse for **2+ years**. Whenever we
+interpret feedback from the community, we want to make sure we're taking into account the experiences of newcomers,
+established users, and very experienced power users. Each segment experiences different challenges, and prioritizes
+different parts of the product.
+
+### Other CI/CD tools used
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.24.03-AM-1.png" width="100%" >}}
+
+Another dimension that's helpful to understand is the related experiences that each survey respondent is equipped with.
+When looking at other CI/CD tools that our community employs, **Jenkins** is still the top dog, accounting for nearly
+30% of the tools mentioned. 
**Github Actions** has seen a rise in adoption since its initial release, and Travis, +Gitlab, Bitbucket, and CircleCI are all fairly common options as well. + +### How did you find out about Concourse? + +{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.22.53-AM.png" width="100%" >}} + +Pivotal Software (now VMware) has been Concourse's largest supporter since the project's inception. In previous years, +it was common to see more than half of respondents say they were introduced to Concourse CI through a Pivotal Labs +engagement, or through Concourse’s role in automation of the Pivotal Platform, +Pivotal [Cloud Foundry](https://www.cloudfoundry.org/). Now the community has started to branch out, with only 22% of +people reporting that they learned about the product through Pivotal. + +The majority of users seem to have found Concourse organically, through search engines or social media. We're hoping to +expand the use of our blog this year to help support the number of people hunting for Concourse content. Be on the +lookout for more tutorials, advanced operations articles, and general updates about the Concourse product development +and roadmap. + +We'd love to grow that _Conference or Meetup_ section in 2020 - who's up for a remote meetup over Zoom? 🙌 + +### Why use Concourse? + +{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.24.16-AM.png" width="100%" >}} + +When asked about the very important _ **why** _ behind their Concourse usage, concerns about **Open Source** tooling and +**flexibility** were top of mind. The special emphasis that Concourse put on **reproducibility** and **user interface** +also ranked highly, along with Concourse's **scalability** and overall feature set. Scalability is always a huge concern +for the team, as we see enterprise customers frequently testing the limits of their tooling (sometimes with hundreds of +Concourse clusters, many thousands of teams, and many _hundreds of thousands_ of pipelines). 
Likewise, reproducibility
+is a commitment we're not planning on straying from any time soon.
+
+### Concourse Versions
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.53.24-AM.png" width="100%" >}}
+
+We released the survey _just_ as v6.0.0 of Concourse was being finalized, so it was only close to the end that we
+started to see people upgrading to v6. We're thrilled, nonetheless, to see so many people had already upgraded to
+v5.8.x. Together, versions v5.8.x and v5.7.x represented the majority of survey respondents, with a low (\<10) rate of
+responses for any other version.
+
+To those 12 users who are still on v4.x.x and 7 users still on v3.x.x, feel free
+to [get in touch on the Concourse Discord](https://discord.gg/MeRxXKW) if you need any help upgrading! You can find all
+of the wonderful reasons to upgrade in the [release notes](https://github.com/concourse/concourse/releases), and we'll
+write blog articles in the coming months highlighting some of the latest and greatest new features and optimizations, as
+well as some upcoming enhancements on our roadmap.
+
+### Scale
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.23.29-AM.png" width="100%" >}}
+
+The data gathered shows that the majority of respondents are working with Concourses organized with fewer teams. And
+when it comes to users...
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.23.48-AM.png" width="100%" >}}
+
+... we see a lot of smaller Concourse instances of under 10 users. There are also a few examples of large, enterprise
+scale deployments of 100+ users over 50+ teams. On the Concourse team, we frequently reach out to enterprise customers
+for special feedback on more massive implementation concerns. We also survey and interview members of the open source
+community to make sure we're building solutions that scale _down_ to single users and small teams. 
+
+If you'd like to add your voice, feel free to join in on
+the [Concourse Discussions](https://github.com/concourse/concourse/discussions) board.
+
+### Deployment Method
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.22.23-AM.png" width="100%" >}}
+
+**Docker** remains the most frequently used deployment method, but the margins are slowly shrinking, and there's more
+even distribution across other popular options than we've seen in past years.
+
+Nearly identical numbers of responses came in citing **Kubernetes** (via the Helm chart), **BOSH**, and **VM**
+deployment strategies, reinforcing both our interest in facilitating K8s workflows and supporting our substantial BOSH
+user base.
+
+### Concourse Usage Style
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.24.53-AM.png" width="100%" >}}
+
+This year we asked about our users' usage style - specifically, what sort of development scenarios they were using
+Concourse to facilitate. Concourse remains an **Infrastructure Automation** powerhouse, and a similar number of users
+are using it to perform **CI for web development** and **deploying software** as part of their **path to production**.
+
+### Workloads
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-11.57.17-AM.png" width="100%" >}}
+
+**Linux workloads** represent the vast majority for the Concourse community. We're also paying attention to special
+concerns for those running **Windows** and **Darwin** workloads; however, this knowledge will help us prioritize fixes to
+help the largest group of users possible.
+
+### Preferred IAAS
+
+{{< image src="/images/2020/05/Screen-Shot-2020-05-14-at-1.10.07-PM.png" alt="" width="100%" >}}
+_Note: RMDH is remotely-managed dedicated hardware_
+
+Finally, when asked about their preferred IAAS, **AWS** takes the top position again for the third year in a row. 
We +consistently see a strong vSphere presence from enterprise customers, but it's really interesting to see the variety of +setups that the open source community as a whole employs when deploying Concourse. + +## Summary + +A recurring topic that comes up in conversations with customers and internal teams at VMware is the sheer variety of +ways that Concourse can be set up and put to work. Running this survey further reinforces that idea, giving us insight +into an even larger number of configurations and implementations than what we see during our day to day enterprise +development and support. + +It’s also interesting to reflect on how far the project has come since Concourse CI was first introduced. We’re nearing +22k commits from over 318 contributors adding up to 333 releases as of the typing of this sentence, and we’re looking +forward to speeding up even further in 2020. + +Of course, we want to make sure that we’re developing the right features, prioritizing the right fixes and enhancements, +and validating that each step we take has been made in the right direction. In the upcoming months we’ll be +consolidating all of these ideas into a new high level roadmap that sets out quarterly milestones for the team. + +Keep watch on the [Github discussions page](https://github.com/concourse/concourse/discussions), this blog, +and [the Concourse Twitter feed](https://twitter.com/concourseci) for more updates, and don’t forget +to[join the conversation on Discord](https://discord.gg/MeRxXKW). 
+ diff --git a/docs/blog/posts/2020/2020-05-25-introduction-to-task-inputs-and-outputs.md b/docs/blog/posts/2020/2020-05-25-introduction-to-task-inputs-and-outputs.md new file mode 100644 index 00000000..0ac0edf3 --- /dev/null +++ b/docs/blog/posts/2020/2020-05-25-introduction-to-task-inputs-and-outputs.md @@ -0,0 +1,515 @@ +--- +layout: post +title: Introduction to Task Inputs and Outputs +date: 2020-05-25 +categories: + - tutorials +--- + +Understanding how task inputs and outputs work in Concourse can be a little confusing initially. This post will walk you +through a few example pipelines to show you how inputs and outputs work within a single Concourse job. By the end you +should understand how inputs and outputs work within the context of a single job. + + + +Let's define some jargon first. + +- **step** : A [step](https://concourse-ci.org/jobs.html#steps) is a container running code within the context of a + Concourse job. A [step](https://concourse-ci.org/jobs.html#steps) may have inputs and/or outputs, or neither. +- **Job plan** : A list of [step](https://concourse-ci.org/jobs.html#steps)s that a job will execute when triggered. +- **Inputs and Outputs** : These are directories. Within Concourse they're generically referred to as **artifacts**. + These artifacts are mounted in a **step**'s container under a directory with _some-name_. You, as a writer of + Concourse pipelines, have control over what the name of your artifacts will be. If you're coming from the Docker + world, artifact is synonymous with [volumes](https://docs.docker.com/storage/volumes/). + +To run the pipelines in the following examples yourself you can get your own Concourse running locally by following +the [Quick Start guide](https://concourse-ci.org/quick-start.html). Then use [ +`fly set-pipeline`](https://concourse-ci.org/setting-pipelines.html) to see the pipelines in action. + +Concourse pipelines contain a lot of information. 
Within each pipeline YAML there are comments to help bring specific +lines to your attention. + +## Example One - Two Tasks + +This pipeline will show us how to create outputs and pass outputs as inputs to the +next [step](https://concourse-ci.org/jobs.html#steps)(s) in +a [job plan](https://concourse-ci.org/jobs.html#schema.job.plan). + +This pipeline has two tasks. The first task outputs a file with the date. The second task reads and prints the contents +of the file from the first task. + +```yaml +jobs: + - name: a-job + plan: + - task: create-one-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: alpine } + outputs: + # Concourse will make an empty dir with this name + # and save the contents for later steps + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file + - task: read-ouput-from-previous-step + config: + platform: linux + image_resource: + type: registry-image + source: { repository: alpine } + # You must explicitly name the inputs you expect + # this task to have. + # If you don't then outputs from previous steps + # will not appear in the step's container. + # The name must match the output from the previous step. + # Try removing or renaming the input to see what happens! + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./the-output/file +``` + +Here's a visual graphic of what happens when the above job is executed. + +{{< image src="/images/2020/05/example-one-10.gif" width="100%" >}} + +## Example Two - Two tasks with the same output, who wins? + +This example is to satisfy the curiosity cat inside all of us! Never do this in real life because you're definitely +going to hurt yourself! + +There are two jobs in this pipeline. The first job has two [step](https://concourse-ci.org/jobs.html#steps)s; both steps +will produce an artifact named `the-output` in parallel. 
If you run the `writing-to-the-same-output-in-parallel` job +multiple times you'll see the file in `the-output` folder changes depending on which of the parallel tasks finished +last. Here's a visualization of the first job. + +{{< image src="/images/2020/05/example-two-parallel.gif" width="100%" >}} + +The second job is a serial version of the first job. In this job the second task always wins because it's the last task +that outputs `the-output`, so only `file2` will be in `the-output` directory in the +last [step](https://concourse-ci.org/jobs.html#steps) in +the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan). + +{{< image src="/images/2020/05/example-two-serial.gif" width="100%" >}} + +This pipeline illustrates that you could accidentally overwrite the output from a +previous [step](https://concourse-ci.org/jobs.html#steps) if you're not careful with the names of your outputs. + +```yaml +jobs: + - name: writing-to-the-same-output-in-parallel + plan: + # running two tasks that output in parallel?!? + # who will win?? + - in_parallel: + - task: create-the-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file1 + - task: also-create-the-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file2 + # run this job multiple times to see which + # previous task wins each time + - task: read-ouput-from-previous-step + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./the-output + echo "Get ready to error!" 
+ cat ./the-output/file1 ./the-output/file2 + + - name: writing-to-the-same-output-serially + plan: + - task: create-one-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file1 + - task: create-another-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file2 + - task: read-ouput-from-previous-step + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./the-output + echo "Get ready to error!" + cat ./the-output/file1 ./the-output/file2 +``` + +## Example Three - Input/Output Name Mapping + +Sometimes the names of inputs and outputs don't match, or they do match and you don't want them overwriting each other, +like in the previous example. That's when [ +`input_mapping`](https://concourse-ci.org/jobs.html#schema.step.task-step.input_mapping) and [ +`output_mapping`](https://concourse-ci.org/jobs.html#schema.step.task-step.output_mapping) become helpful. Both of these +features map the inputs/outputs in the task's config to some artifact name in +the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan). + +This pipeline has one job with four tasks. + +The first task outputs a file with the date to the `the-output` directory. `the-output` is mapped to the new name +`demo-disk`.  The artifact `demo-disk` is now available in the rest of +the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) for +future [step](https://concourse-ci.org/jobs.html#steps)s to take as inputs. The remaining steps do this in various ways. 
+ +The second task reads and prints the contents of the file under the new name `demo-disk`. + +The third task reads and prints the contents of the file under another name, `generic-input`. The `demo-disk` artifact +in the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) is mapped to `generic-input`. + +The fourth task tries to use the artifact named `the-output` as its input. This task fails to even start because there +was no artifact with the name `the-output` available in +the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan); it was remapped to `demo-disk`. + +Here's a visualization of the job. + +{{< image src="/images/2020/05/example-three-1.gif" width="100%" >}} + +Here's the pipeline YAML for you to run on your local Concourse. + +```yaml +jobs: + - name: a-job + plan: + - task: create-one-output + # The task config has the artifact `the-output` + # output_mapping will rename `the-output` to `demo-disk` + # in the rest of the job's plan + output_mapping: + the-output: demo-disk + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file + # this task expects the artifact `demo-disk` so no mapping is needed + - task: read-ouput-from-previous-step + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + inputs: + - name: demo-disk + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./demo-disk/file + - task: rename-and-read-output + # This task expects the artifact `generic-input`. 
+ # input_mapping will map the tasks `generic-input` to + # the job plans `demo-disk` artifact + input_mapping: + generic-input: demo-disk + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + inputs: + - name: generic-input + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./generic-input/file + - task: try-and-read-the-output + input_mapping: + generic-input: demo-disk + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + # `the-output` is not available in the job plan + # so this task will error while initializing + # since there's no artiact named `the-output` in + # the job's plan + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./generic-input/file +``` + +## Example Four - Can you add files to an existing output artifact? + +This pipeline will also have two jobs in order to illustrate this point. What happens if we add a file to an output? If +you think back to example two you may already know the answer. + +The first task will create `the-output` with `file1`. The second task will add `file2` to the `the-output`. The last +task will read the contents of `file1` and `file2`. + +As long as you re-declare the input as an output in the second task you can modify any of your outputs. + +This means you can pass something between a bunch of tasks and have each task add or modify something in the artifact. 
+ +```yaml +jobs: + - name: add-file-to-output + plan: + - task: create-one-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file1 + - task: add-file-to-previous-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + # this task lists the same artifact as + # its input and output + inputs: + - name: the-output + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file2 + - task: read-ouput-from-previous-step + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./the-output + cat ./the-output/file1 ./the-output/file2 +``` + +Here's a visualization of the job. + +{{< image src="/images/2020/05/example-four.gif" width="100%" >}} + +## Example Five - Multiple Outputs + +What happens if you have a task that has multiple outputs and a second task that only lists one of the outputs? Does the +second task get the extra outputs from the first task? + +The answer is no. A task will only get the artifacts that match the name of the inputs listed in the task's config. 
+ +```yaml +jobs: + - name: multiple-outputs + plan: + - task: create-three-outputs + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + outputs: + - name: the-output-1 + - name: the-output-2 + - name: the-output-3 + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output-1/file + date > ./the-output-2/file + date > ./the-output-3/file + - task: take-one-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + # only one of the three outputs are + # listed as inputs + inputs: + - name: the-output-1 + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./ + cat ./the-output-1/file + - task: take-two-outputs + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + # this task pulls in the other + # two outputs, just for fun! + inputs: + - name: the-output-2 + - name: the-output-3 + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./ + cat ./the-output-2/file + cat ./the-output-3/file +``` + +Here's a visualization of the above job. + +{{< image src="/images/2020/05/example-five.gif" width="100%" >}} + +## Example Six - Get Steps + +The majority of Concourse pipelines have at least one resource, which means they have at least +one [get step](https://concourse-ci.org/jobs.html#get-step). Using a get step in a job makes an artifact with the name +of the get step available for later steps in the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) to +consume as inputs. 
+ +```yaml +resources: + - name: concourse-examples + type: git + source: { uri: "https://github.com/concourse/examples" } + +jobs: + - name: get-step + plan: + # there will be an artifact named + # "concourse-examples" available in the job plan + - get: concourse-examples + - task: take-one-output + config: + platform: linux + image_resource: + type: registry-image + source: { repository: busybox } + inputs: + - name: concourse-examples + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./ + cat ./concourse-examples/README.md +``` + +Here's a visualization for the above job. + +{{< image src="/images/2020/05/example-six.gif" width="100%" >}} + +I hope you found these example helpful with figuring out how inputs and outputs work within a single Concourse job. + diff --git a/docs/blog/posts/2020/2020-06-10-rfc-roundup-june-10th-2020.md b/docs/blog/posts/2020/2020-06-10-rfc-roundup-june-10th-2020.md new file mode 100644 index 00000000..1f231770 --- /dev/null +++ b/docs/blog/posts/2020/2020-06-10-rfc-roundup-june-10th-2020.md @@ -0,0 +1,78 @@ +--- +title: 'RFC round-up: June 10th, 2020' +date: 2020-06-10 +categories: + - rfcs +--- + +First off: sorry, I immediately failed to keep my target pace for these. 😓 I got wrapped up in a deadline, and since I +alternate weeks between engineering and community duties like this post, when I miss a week for RFC updates the 2-week +interval can quickly turn into 4 or 5. + + + +Owing to the missed round-up, and in hopes of burning through the backlog more quickly so that interested contributors +may volunteer for merged RFCs, I'm going to expand the scope of this post to include more RFCs than the last one - +primarily by proposing that we merge ones that are nearly certain for +the [v10 roadmap](../2019/2019-07-17-core-roadmap-towards-v10.md). 
+ +## Merged RFCs + +- [RFC #33](https://github.com/concourse/rfcs/pull/33) (pipeline archiving) + and [RFC #34](https://github.com/concourse/rfcs/pull/34) (pipeline instances) have both been merged! 🎉 + +## RFCs ready to merge + +The following RFCs have been given the `resolution/merge` label: + +- [RFC #31: `set_pipeline` step](https://github.com/concourse/rfcs/pull/31) is the RFC corresponding to the + `set_pipeline` step that was introduced experimentally in v5.8. Once this is merged, the step itself will no longer be + experimental, but there _are_ a couple of experimental features for the step that are now outlined in the RFC - `self` + and `team:`. These features will result in warnings when used. +- [RFC #40: valid identifiers](https://github.com/concourse/rfcs/pull/40) proposes that we restrict the set of allowed + characters in Concourse identifiers such as pipeline names, job names, and resource names. Existing pipelines and + objects will be grandfathered in to ease the transition. _Note: if you're worried about this change you may be + interested in [RFC #34](https://github.com/concourse/rfcs/pull/34)._ +- [RFC #39: var sources](https://github.com/concourse/rfcs/pull/39) is the RFC corresponding to the `var_sources` + feature, which was also introduced experimentally in v5.8. This feature is a key component to v10 - it unblocks + spatial pipelines, per-job timed triggers, and per-pipeline credential management configuration. +- [RFC #27: var steps](https://github.com/concourse/rfcs/pull/27) is behind the [ + `load_var` step](https://concourse-ci.org/jobs.html#load-var-step) (shipped experimentally in v6.0), and also + introduces a `get_var` step which can theoretically be used to implement per-job trigger intervals. This RFC builds on + the var sources concept described in RFC #39. 
+ +Per the [resolution process](https://github.com/concourse/rfcs/blob/master/README.md#resolution), if there are no +objections or significant changes in the 2 weeks after this post is published, they will be merged! 🚀 + +## RFCs in need of attention + +Quite a few RFCs have had some pretty interesting discussions or developments since the last round-up: + +- [RFC #36: manual step](https://github.com/concourse/rfcs/pull/36) has had some juicy conversation around how things + like approval and manual gating in a pipeline should be expressed in a Concoursey way - if you have thoughts on this, + please chime in! +- [RFC #37: prototypes](https://github.com/concourse/rfcs/pull/37) is the RFC for the "Prototypes" concept introduced in + the [Re-inventing resource types](../2019/2019-10-15-reinventing-resource-types.md) blog post. The latest revision + introduces encryption, which will enable Prototypes to implement credential managers. If you are a resource type + author or if you have a security background, please give it a look! +- [RFC #32: projects](https://github.com/concourse/rfcs/pull/32) now has a pretty radical new question: can Projects + replace Teams in order to provide more complete cluster config automation? If you've ever had a need for automating + team configuration, or if you have a thirst for GitOps, this should be a pretty interesting conversation! + +## New RFCs + +- [RFC #53: configurable build event stores](https://github.com/concourse/rfcs/pull/53) proposes a pluggable + architecture for build event storage as an alternative to storing them in the database. +- [RFC #59: static configuration](https://github.com/concourse/rfcs/pull/59) proposes a method for configuring Concourse + with a config file that prescribes the teams and projects, in addition to the regular config that would previously + have been set in flags or env vars. 
It also proposes disallowing the use of `fly set-team` at runtime so that the + config is the source of truth. + +## Thanks! + +Giving feedback on RFCs is critical to our ability to move forward more quickly and with higher confidence. Any and all +comments and questions we receive are deeply appreciated. Thanks to everyone who's been involved, and thanks in advance +to everyone else! 🙂 + +(Stay safe!) + diff --git a/docs/blog/posts/2020/2020-06-19-how-to-build-and-publish-a-container-image.md b/docs/blog/posts/2020/2020-06-19-how-to-build-and-publish-a-container-image.md new file mode 100644 index 00000000..51958bfb --- /dev/null +++ b/docs/blog/posts/2020/2020-06-19-how-to-build-and-publish-a-container-image.md @@ -0,0 +1,374 @@ +--- +layout: post +title: How To Build and Publish a Container Image +date: 2020-06-19 +categories: + - tutorials +--- + +In this blog post we are going to show how to build and publish container images using +the [oci-build task](https://github.com/vito/oci-build-task) +and [registry-image resource](https://github.com/concourse/registry-image-resource). This post assumes you understand +how to build container images with `Dockerfile`'s and publish to [Docker Hub](https://hub.docker.com/) or another image +registry using the [`docker` cli](https://docs.docker.com/engine/reference/commandline/cli/). + + + +_If you just want to see the pipeline, scroll to the bottom +or [click here](https://github.com/concourse/examples/blob/master/pipelines/build-and-push-simple-image.yml). What +follows is a detailed explanation of what each part of the pipeline does._ + +First we need a Dockerfile. You can store this in your own repo or reference +the [github.com/concourse/examples](https://github.com/concourse/examples) repo. The rest of this post assumes you use +the [examples](https://github.com/concourse/examples) repo. All files in this blog post can be found in the examples +repo. 
+ +## The Dockerfile + +We are going to use a very +basic [Dockerfile](https://github.com/concourse/examples/blob/master/Dockerfiles/simple/Dockerfile) so we can focus on +building the Concourse pipeline. + +```dockerfile +FROM busybox + +RUN echo "I'm simple!" +COPY ./stranger /stranger +RUN cat /stranger +``` + +## Defining Pipeline Resources + +Now we can start building out our pipeline. Let's declare our [resources](https://concourse-ci.org/resources.html) +first. We will need one resource to pull in the repo where our Dockerfile is located, and a second resource pointing to +where we want to push the built container image to. + +_There are some [variables](https://concourse-ci.org/pipeline-vars-example.html#variables) in this file that we will +fill out later._ + +```yaml +resources: + # The repo with our Dockerfile + - name: concourse-examples + type: git + icon: github + source: + uri: https://github.com/concourse/examples.git + branch: master + + # Where we will push the image to + - name: simple-image + type: registry-image + icon: docker + source: + repository: ((image-repo-name))/simple-image + username: ((registry-username)) + password: ((registry-password)) +``` + +## Create a Job + +Next we will create a [job](https://concourse-ci.org/jobs.html) that will build and push our container image. + +```yaml +jobs: + - name: build-and-push +``` + +### Retrieve the Dockerfile + +The first [step](https://concourse-ci.org/jobs.html#schema.step) in +the [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) will be to retrieve the repo where our Dockerfile is. + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples +``` + +### Build the Container Image + +The second step in our job will build the container image. + +To build the container image we are going to use the [oci-build-task](https://github.com/vito/oci-build-task). 
+The [oci-build-task](https://github.com/vito/oci-build-task) is a container image that is meant to be used in a +Concourse [task](https://concourse-ci.org/tasks.html) to build other container images. Check out the [ +`README`](https://github.com/vito/oci-build-task/blob/master/README.md) in the repo for more details on how to configure +and use the [oci-build-task](https://github.com/vito/oci-build-task) in more complex build scenarios. + +Let's add a [task](https://concourse-ci.org/tasks.html) to +our [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) and give it a name. + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image +``` + +All configuration of the `oci-build-task` is done through a [task config](https://concourse-ci.org/tasks.html). Viewing +the [`README`](https://github.com/vito/oci-build-task/blob/master/README.md) from the repo we can see that the task +needs to be run as a [privileged task](https://concourse-ci.org/jobs.html#schema.step.task-step.privileged) on a linux +worker. + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux +``` + +To use the `oci-build-task` container image we specify the [ +`image_resource`](https://concourse-ci.org/tasks.html#schema.task.image_resource) that the task should use. 
+ +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task +``` + +Next we will add [`concourse-examples`](https://github.com/concourse/examples) as +an [input](https://concourse-ci.org/tasks.html#schema.task.inputs) to the build task to ensure the artifact from +the [get step](https://concourse-ci.org/jobs.html#get-step) (where our `Dockerfile` is fetched) is mounted in our +`build-task-image` step. + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task + inputs: + - name: concourse-examples +``` + +The `oci-build-task` [outputs the built container image](https://github.com/vito/oci-build-task#outputs) in a directory +called `image`. Let's add `image` as an output artifact of our task so we can publish it in a later step. + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task + inputs: + - name: concourse-examples + outputs: + - name: image +``` + +Next we need to tell the `oci-build-task` what +the [build context](https://docs.docker.com/engine/reference/commandline/build/) of our Dockerfile is. The [ +`README`](https://github.com/vito/oci-build-task) goes over a few other methods of creating your build context. We are +going to use the simplest use-case. By specifying `CONTEXT` the `oci-build-task` assumes a `Dockerfile` and its build +context are in the same directory. 
+ +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task + inputs: + - name: concourse-examples + outputs: + - name: image + params: + CONTEXT: concourse-examples/Dockerfiles/simple +``` + +The last step is specifying what our `build-task-image` should execute. The `oci-build-task` container image has a +binary named [ +`build`](https://github.com/vito/oci-build-task/blob/230df3baa27fb389484ee0fb74355cd8b7977298/Dockerfile#L11) located in +its `PATH` in the [ +`/usr/bin` directory](https://github.com/vito/oci-build-task/blob/230df3baa27fb389484ee0fb74355cd8b7977298/Dockerfile#L15). +We'll tell our task to execute that binary, which will build our container image. + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task + inputs: + - name: concourse-examples + outputs: + - name: image + run: + path: build + params: + CONTEXT: concourse-examples/Dockerfiles/simple +``` + +At this point in our job the container image is built! The `oci-build-task` has saved the container image as a tarball +named `image.tar` in the `image` artifact specified in the task outputs. This tar file is the same output you would get +if you built the container image using Docker and then did [ +`docker save`](https://docs.docker.com/engine/reference/commandline/save/). + +## Publish the Container Image + +Now let's push the container image to an image registry! For this example we're pushing +to [Docker Hub](https://hub.docker.com/) using the [ +`registry-image` resource](https://github.com/concourse/registry-image-resource). 
You can use the `registry-image` +resource to push to any image registry, private or public. Check out the [ +`README.md`](https://github.com/concourse/registry-image-resource/blob/master/README.md) for more details on using the +resource. + +To push the container image add a [put step](https://concourse-ci.org/jobs.html#put-step) to our job plan and tell the +regstry-image resource where the tarball of the container image is. + +The put step will push the container image using the information defined in the +resource's [source](https://concourse-ci.org/resources.html#schema.resource.source), when we defined +the [pipeline's resources](https://concourse-ci.org/pipelines.html#schema.pipeline.resources). + +This is where you'll need to replace the three [variables](https://concourse-ci.org/vars.html) found under +`resource_types`. You can define them [statically](https://concourse-ci.org/vars.html#static-vars) using `fly`'s `--var` +flag when [setting](https://concourse-ci.org/setting-pipelines.html) the pipeline. _(In production make sure to use +a [credential management system](https://concourse-ci.org/creds.html) to store your secrets!)_ + +```yaml +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task + inputs: + - name: concourse-examples + outputs: + - name: image + params: + CONTEXT: concourse-examples/Dockerfiles/simple + run: + path: build + - put: simple-image + params: + image: image/image.tar +``` + +## The Entire Pipeline + +Putting all the pieces together, here is our pipeline that builds and pushes (publishes) a container image. 
+ +```yaml +resources: + # The repo with our Dockerfile + - name: concourse-examples + type: git + icon: github + source: + uri: https://github.com/concourse/examples.git + branch: master + + # Where we will push the image + - name: simple-image + type: registry-image + icon: docker + source: + repository: ((image-repo-name))/simple-image + username: ((registry-username)) + password: ((registry-password)) + +jobs: + - name: build-and-push + plan: + - get: concourse-examples + - task: build-task-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: vito/oci-build-task + inputs: + - name: concourse-examples + outputs: + - name: image + params: + CONTEXT: concourse-examples/Dockerfiles/simple + run: + path: build + - put: simple-image + params: + image: image/image.tar +``` + +You can set the pipeline with the following `fly` command, updating the variable values with real values the pipeline +can use. The behaviour is similar to [`docker push`](https://docs.docker.com/engine/reference/commandline/push/): + +```shell-session +fly -t set-pipeline -p build-and-push-image \ + -c ./examples/pipelines/build-and-push-simple-image.yml \ + --var image-repo-name= \ + --var registry-username= \ + --var registry-password= +``` + +{{< image src="/images/2020/06/build-and-publish-pipeline-3.png" alt="build-and-push-pipeline" >}} + +## Further Readings + +Understanding what the _build context_ is is important when building container images. You can +read [Dockerfile Best Practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#understand-build-context) +for more details about _build contexts_. + +The [inputs](https://github.com/vito/oci-build-task#inputs) section of the oci-build-task's `README` has examples on how +to create a build context with multiple inputs and other complex build scenarios. 
Read the `README`s in
+ +## Shiny new RFCs ✨ + +- [RFC #62: worker pools](https://github.com/concourse/rfcs/pull/62) introduces a "worker pool" which will allow a + many-to-many relationship between workers and teams. +- [RFC #63: API auth flow for applications](https://github.com/concourse/rfcs/pull/63) is a conversation-starter around + adding a token-based auth flow for read-only APIs. +- [RFC #61: add "watch" parameter for API endpoints](https://github.com/concourse/rfcs/pull/61) introduces a + long-polling approach to API requests to reduce the load from constant polling from the web UI. + +## Open call for contributors 📢 + +The following is a list of issues for each merged RFC that still has work to be done. + +- Pipeline instances: [#5808](https://github.com/concourse/concourse/issues/5808) +- Valid identifiers: [#5810](https://github.com/concourse/concourse/issues/5810) +- Finishing var sources: [#5813](https://github.com/concourse/concourse/issues/5813) +- Finishing the `set_pipeline` step: [#5814](https://github.com/concourse/concourse/issues/5814) +- Implementing the `get_var` step: [#5815](https://github.com/concourse/concourse/issues/5815) + +If anyone's interested in throwing their hat into the ring and getting involved with Concourse development, let us know +by replying to one of the issues linked above! Future RFC round-ups will include the same list, presumably with more +entries. + +The v10 roadmap is highly parallelizeable, so if more people get involved we can make it to v10 much more quickly and +with a healthier project that has more people able to contribute. We're super keen to give guidance and help out. 👌 + +Thanks! 
+ diff --git a/docs/blog/posts/2020/2020-07-10-rfc-round-up-july-10th-2020.md b/docs/blog/posts/2020/2020-07-10-rfc-round-up-july-10th-2020.md new file mode 100644 index 00000000..15c91597 --- /dev/null +++ b/docs/blog/posts/2020/2020-07-10-rfc-round-up-july-10th-2020.md @@ -0,0 +1,52 @@ +--- +layout: post +title: 'RFC round-up: July 10th, 2020' +date: 2020-07-10 +categories: + - rfcs +--- + +Happy Friday! This one's brief. + + + +## Merged RFCs + +- [RFC #37: prototypes](https://github.com/concourse/rfcs/pull/37) has landed! ...but it probably could have use more + detail regarding the `run` step, which is the only immediately actionable part of it. 🤔 I'll draft another RFC for + that; #37 mainly covered the protocol. +- [RFC #38: resource prototypes](https://github.com/concourse/rfcs/pull/38) is in! Its associated issue for + implementation is [#5870](https://github.com/concourse/concourse/issues/5870). + +## RFCs to merge + +- n/a - taking a breather for this round-up to focus on the below RFCs and "reset" the 2 week merge window so I can + start publishing these posts earlier in the week. 😅 + +## RFCs in need of feedback + +- [RFC #43: tasks queue](https://github.com/concourse/rfcs/pull/43) still needs some love! The goal is to introduce a + queuing mechanism to resolve the long-running issue of Concourse over-working workers. +- [RFC #29: `across` step](https://github.com/concourse/rfcs/pull/29) introduces the special sauce for build matrices + and branch/PR pipeline automation, and the proposal has been heavily revised. Check it out! + +## Open call for contributors + +Valid identifiers ([#5810](https://github.com/concourse/concourse/issues/5810)) is now spoken for - +thanks [@mouellet](https://github.com/mouellet)! 
🍻 + +The following issues are up for grabs: + +- Pipeline instances: [#5808](https://github.com/concourse/concourse/issues/5808) +- Finishing var sources: [#5813](https://github.com/concourse/concourse/issues/5813) +- Finishing the `set_pipeline` step: [#5814](https://github.com/concourse/concourse/issues/5814) +- Implementing the `get_var` step: [#5815](https://github.com/concourse/concourse/issues/5815) + +If anyone's interested in helping out, or just learning how, let us know by replying to any of the issues linked above +or asking in [Discord](https://discord.gg/MeRxXKW)! + +This section will be repeated in each RFC round-up - the goal is to get to the finish line on the v10 roadmap by +tackling items in parallel while improving project health by enabling more people to make significant contributions. + +Thanks everyone! + diff --git a/docs/blog/posts/2020/2020-08-24-gitops-for-pipelines.md b/docs/blog/posts/2020/2020-08-24-gitops-for-pipelines.md new file mode 100644 index 00000000..af784abf --- /dev/null +++ b/docs/blog/posts/2020/2020-08-24-gitops-for-pipelines.md @@ -0,0 +1,356 @@ +--- +layout: post +title: GitOps For Your Pipelines +date: 2020-08-24 +categories: + - tutorials +--- + +In this blog post we're going to cover how to use git and Concourse to automatically set, update, and archive your +pipelines using the `set_pipeline` step. No longer will you need to use `fly set-pipeline` to update any of your +pipelines! + + + +For consistency we will refer to the pipeline that contains all the `set_pipeline` steps as the **parent pipeline**. The +pipelines created by the `set_pipeline` steps will be called **child pipelines**. + +_Scroll to the bottom to see the final pipeline template +or [click here](https://github.com/concourse/examples/blob/master/pipelines/set-pipelines.yml). 
What follows is a +detailed explanation of how the parent pipeline works along with git and automatic archiving._ + +## Prerequisites + +To run the pipelines in this blog post for yourself you can get your own Concourse running locally by following +the [Quick Start guide](https://concourse-ci.org/quick-start.html). + +You will also need to fork the [github.com/concourse/examples](https://github.com/concourse/examples) repo and replace +`USERNAME` with your github username in the below examples. We will continue to refer to the repo as +`concourse/examples`. Once you have forked the repo clone it locally onto your machine and `cd` into the repo. + +```shell-session +$ git clone git@github.com:USERNAME/examples.git +$ cd examples +``` + +## Create the Parent Pipeline + +Inside your fork of `concourse/examples` that you have cloned locally, create a file named `reconfigure-pipelines.yml` +inside the `pipelines` folder. This is the pipeline that we are going to be building. We will refer to this pipeline as +the _parent pipeline_. + +```shell-session +$ touch ./pipelines/reconfigure-pipelines.yml +``` + +Like the `fly set-pipeline` command, the `set_pipeline` step needs a YAML file containing a pipeline configuration. We +will use the concourse/examples repo as the place to store our pipelines and thankfully it already contains many +pipelines! Let's add the repo as a resource to our parent pipeline. + +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: git@github.com:USERNAME/examples.git +``` + +Now we will add a job that will set our pipelines. The first step in the job will fetch the `concourse/examples` repo, +making it available to future steps as the `concourse-examples` artifact. We will also add the `trigger` parameter to +ensure that the job will run whenever a new commit is pushed to the `concourse/examples` repo. 
+ +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: git@github.com:USERNAME/examples.git + +jobs: + - name: configure-pipelines + public: true + plan: + - get: concourse-examples + trigger: true +``` + +Next we will add the `set_pipeline` step to set one of the pipelines in the `concourse/examples` repo. We will set the +`hello-world` pipeline first. + +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: git@github.com:USERNAME/examples.git + +jobs: + - name: configure-pipelines + public: true + plan: + - get: concourse-examples + trigger: true + - set_pipeline: hello-world + file: concourse-examples/pipelines/hello-world.yml +``` + +Let's commit what we have so far and push it to github. + +```shell-session +$ git add pipelines/reconfigure-pipelines.yml +$ git commit -m "add reconfigure-pipelines" +$ git push -u origin head +``` + +## Setting the Parent Pipeline + +Now we have a [chicken or the egg](https://en.wikipedia.org/wiki/Chicken_or_the_egg) problem, except in this case we +know our parent pipeline comes first! Let's set our pipeline with `fly` and execute the `configure-pipelines` job. + +```shell-session +$ fly -t local set-pipeline \ + -p reconfigure-pipelines \ + -c pipelines/reconfigure-pipelines.yaml + +... +apply configuration? [yN]: y + +$ fly -t local unpause-pipeline \ + -p reconfigure-pipelines + +unpaused 'reconfigure-pipelines' + +$ fly -t local trigger-job \ + -j reconfigure-pipelines/configure-pipelines \ + --watch +``` + +Once the job is done running you should see two pipelines, `reconfigure-pipelines` and `hello-world`. + +{{< image src="/images/2020/08/hello-world.png" alt="Concourse dashboard showing two pipelines" width="100%" >}} + +Now any changes you make to the `hello-world` pipeline will be updated automatically in Concourse once it picks up the +commit with your changes. 
+ +## Pipelines Setting Themselves + +Our parent pipeline is setting and updating one other pipeline now but it has one glaring limitation: it doesn't set +itself. We have to `fly set-pipeline` every time we want to add a new pipeline to the `configure-pipelines` job. + +To resolve this we can do the following to our parent pipeline: + +- Add a job **before** the `configure-pipelines` job that self-updates the parent pipeline. We'll name the job + `configure-self`. +- Add a `passed` constraint to the `configure-pipelines` job to only run once the `concourse-examples` resource has + passed the new `configure-self` job. + +By doing the above we will never have to use `fly` to update the parent pipline again. Every commit to the +`concourse/examples` repo will cause the parent pipeline to update itself and then all of its child pipelines. Now our +pipelines are following a GitOps type of workflow! + +Here is what the above changes look like when implemented: + +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: git@github.com:USERNAME/examples.git + +jobs: + - name: configure-self + plan: + - get: concourse-examples + trigger: true + - set_pipeline: reconfigure-pipelines + file: concourse-examples/pipelines/reconfigure-pipelines.yml + - name: configure-pipelines + plan: + - get: concourse-examples + trigger: true + passed: [ configure-self ] + - set_pipeline: hello-world + file: concourse-examples/pipelines/hello-world.yml +``` + +**Side-note** : for the `configure-self` job, you could also use the [ +`self` keyword](https://concourse-ci.org/jobs.html#schema.step.set-pipeline-step.set_pipeline), though this is labelled +as experimental and may disappear in the future. + +Lets set the parent pipeline one more time with `fly` and then we'll make commits to the repo to make all future +changes. + +```shell-session +$ fly -t local set-pipeline \ + -p reconfigure-pipelines \ + -c pipelines/reconfigure-pipelines.yaml + +... 
+apply configuration? [yN]: y +``` + +The parent pipeline should now look like this. Now the pipeline will first update itself and then update any existing +child pipelines. + +{{< image src="/images/2020/08/set-self.png" alt="parent pipeline with config-self job" width="100%" >}} + +Let's commit our changes, which will be a no-op since we've already updated the pipeline with the latest changes. + +```shell-session +$ git add pipelines/reconfigure-pipelines.yml +$ git commit -m "add configure-self job" +$ git push +``` + +Now comes the real fun! To add a pipeline to Concourse all we need to do is add a `set_pipeline` step to the parent +pipeline, commit it to the `concourse/examples` repo, and let the parent pipeline pick up the new commit and make the +changes for us. + +Lets add the `time-triggered` pipeline to our `reconfigure-pipelines.yml` file. + +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: git@github.com:USERNAME/examples.git + +jobs: + - name: configure-self + plan: + - get: concourse-examples + trigger: true + - set_pipeline: reconfigure-pipelines + file: concourse-examples/pipelines/reconfigure-pipelines.yml + - name: configure-pipelines + plan: + - get: concourse-examples + trigger: true + passed: [ configure-self ] + - set_pipeline: hello-world + file: concourse-examples/pipelines/hello-world.yml + - set_pipeline: time-triggered + file: concourse-examples/pipelines/time-triggered.yml +``` + +Commit and push the changes to github. + +```shell-session +$ git add pipelines/reconfigure-pipelines.yml +$ git commit -m "add time-triggered pipeline" +$ git push +``` + +Once Concourse picks up the commit (may take up to a minute by default) you should see three pipelines on the dashboard. +Now you never need to use `fly` to set pipelines! 
+ +{{< image src="/images/2020/08/three-pipelines.png" alt="parent and child pipelines" width="100%" >}} + +## Detour: A Future Alternative of Setting Pipelines + +In the future there will be a different solution to setting parent pipelines: no more parent pipelines! How will +Concourse eliminate the current need to start with a parent pipeline in order to set child pipelines? The answer +is [RFC 32: Projects](https://github.com/concourse/rfcs/pull/32/). + +If [RFC 32](https://github.com/concourse/rfcs/pull/32/) is implemented as currently described then you won't have to +ever use `fly set-pipeline` to create pipelines, you'll simply create a **Project** , which involves pointing Concourse +to a repo where you code lives. In the proposed `project.yml` you can then define all of your child pipelines with +`set_pipeline` steps. No need to create a parent pipeline; the `project.yml` replaces the parent pipeline and no longer +requires you to have a separate job that does `set_pipeline: self`. + +The [RFC is still open](https://github.com/concourse/rfcs/pull/32) and looking for feedback. Check out the PR and leave +your thoughts for the community to discuss! + +Now let's get back on track and talk about the last step in a pipeline's lifecycle: archiving. + +## Automatically Archiving Pipelines + +Having Concourse automatically set pipelines for you is great but that only covers half of the lifecycle that a pipeline +can go through. Some pipelines stay around forever and get continously updated. Other pipelines may only be around for a +small amount of time and then be deleted or archived. + +Thanks to [RFC #33](https://github.com/concourse/rfcs/pull/33) you can now archive pipelines and have Concourse * +*automatically archive** pipelines for you as well. You've been able to archive pipelines using `fly` since Concourse +6.1.0. Automatic archiving was added in 6.5.0. 
+ +A pipeline will only be considered for automatic archiving if it was previously set by a `set_pipeline` step. It will be +archived if one of the following is true: + +- the `set_pipeline` step is removed from the job +- the job that was setting the child pipeline is deleted +- the parent pipeline is deleted or archived + +We can test this out with the parent pipeline we were just using. Let's remove the `hello-world` pipeline. + +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: git@github.com:USERNAME/examples.git + +jobs: + - name: configure-self + plan: + - get: concourse-examples + trigger: true + - set_pipeline: reconfigure-pipelines + file: concourse-examples/pipelines/reconfigure-pipelines.yml + - name: configure-pipelines + plan: + - get: concourse-examples + trigger: true + passed: [ configure-self ] + - set_pipeline: time-triggered + file: concourse-examples/pipelines/time-triggered.yml +``` + +Commit and push the changes to github. + +```shell-session +$ git add pipelines/reconfigure-pipelines.yml +$ git commit -m "remove hello-world pipeline" +$ git push +``` + +After a few seconds the pipeline should disappear from the dashboard (unless you toggle "show archived" on). + +With automatic archiving the entire lifecycle of your pipelines can now be managed with a git repo and a few commits. + +I suggest checking out the documentation for [ +`set_pipeline`](https://concourse-ci.org/jobs.html#schema.step.set-pipeline-step.set_pipeline) to see all the other +fields available for the step, like `team` and `vars`! 
+ +## The Parent Pipeline Template (tl;dr) + +```yaml +resources: + - name: ci + type: git + icon: github + source: + uri: git@github.com:USERNAME/repo-where-pipelines-live.git + +jobs: + - name: configure-self + plan: + - get: ci + trigger: true + - set_pipeline: self + file: ci/path/to/parent-pipeline.yml + - name: configure-pipelines + plan: + - get: ci + trigger: true + passed: [ configure-self ] + - set_pipeline: some-pipeline + file: ci/path/to/some-pipeline.yml + - set_pipeline: another-pipeline + file: ci/path/to/another-pipeline.yml +``` diff --git a/docs/blog/posts/2020/2020-12-29-running-docker-in-concourse.md b/docs/blog/posts/2020/2020-12-29-running-docker-in-concourse.md new file mode 100644 index 00000000..bed11872 --- /dev/null +++ b/docs/blog/posts/2020/2020-12-29-running-docker-in-concourse.md @@ -0,0 +1,65 @@ +--- +layout: post +title: Running Docker in Concourse +date: 2020-12-29 +categories: + - tutorials +--- + +So you want to run Docker in Concourse? Well this is the guide for you! + + + +Let' clarify what it is we want to do. **We want to be able to run `docker-compose` inside a task in Concourse to bring +up our application along side some other services (i.e. Redis, Postgres, MySQL, etc.).** + +Thankfully this challenge has been solved by the community! There are a few "Docker-in-Docker" images designed to run in +Concourse that are maintained by the community. Here's a short list made from a cursory search, in no particular order: + +- [github.com/meAmidos/dcind](https://github.com/meAmidos/dcind) +- [github.com/karlkfi/concourse-dcind](https://github.com/karlkfi/concourse-dcind) +- [github.com/fhivemind/concourse-dind](https://github.com/fhivemind/concourse-dind) +- [github.com/taylorsilva/dcind](https://github.com/taylorsilva/dcind) + +You can also opt to build your own fork of the above images. 
+ +All of the above repositories have their own example pipelines that you can use to get started. What follows are some +bits of information that are useful to know when using these task images. + +## Privileged Tasks + +Running Docker inside Concourse requires the [task step](https://concourse-ci.org/jobs.html#schema.step.task-step.task) +to be [privileged](https://concourse-ci.org/jobs.html#schema.step.task-step.privileged) because Docker needs access to +the hosts cgroup filesystem in order to create containers. + +You can verify this by looking at the bash scripts for each of the above images which all take inspiration from +the [docker-image resource](https://github.com/concourse/docker-image-resource). Read the [ +`sanitize_cgroups` function](https://github.com/concourse/docker-image-resource/blob/babf5a7dc293102e34bd2bf93815ee3d35aac54e/assets/common.sh#L5-L48) +to see what exactly is being mounted from the host. (tldr: mount all cgroups as read-write) + +## Externalize All Images + +You should avoid having Docker fetch any images from inside your task step where you are running `docker-compose`. You +should externalize these as [image resources](https://github.com/concourse/registry-image-resource) if they're a +dependency of your application (e.g. Postgres, MySQL). + +For the container image that contains your application you should have that built in a +previous [step](https://concourse-ci.org/jobs.html#schema.step) +or [job](https://concourse-ci.org/pipelines.html#schema.pipeline.jobs). You +can [build and publish an image](2020-06-19-how-to-build-and-publish-a-container-image.md) using +the [oci-build task](https://github.com/vito/oci-build-task). 
+ +To ensure Docker doesn't try to fetch the images itself you can use [ +`docker load`](https://docs.docker.com/engine/reference/commandline/load/) and [ +`docker tag`](https://docs.docker.com/engine/reference/commandline/tag/) to load your externalized images into +Docker. [meAmidos's](https://github.com/meAmidos) has a +great [example pipeline](https://github.com/meAmidos/dcind/blob/master/example/pipe.yml) that does exactly that. + +meAmidos also makes two great points about why you should externalize your image: + +- If the image comes from a private repository, it is much easier to let Concourse pull it, and then pass it through to + the task. +- When the image is passed to the task, Concourse can often get the image from its cache. + +That's all you need to know to run Docker inside Concourse! + diff --git a/docs/blog/posts/2025/2025-01-06-concourse-2024-review.md b/docs/blog/posts/2025/2025-01-06-concourse-2024-review.md new file mode 100644 index 00000000..64b629fd --- /dev/null +++ b/docs/blog/posts/2025/2025-01-06-concourse-2024-review.md @@ -0,0 +1,126 @@ +--- +title: Concourse 2024 in Review +date: 2025-01-06 +authors: + - taylorsilva +--- + +Hey Concourse community, it’s been a while since a blog post was made. My name is Taylor Silva and I’m the current lead +maintainer of the project. I have fallen into this role for historical (was on the Concourse team at Pivotal starting in +2019) and personal reasons (still using Concourse at my day job). I really like using Concourse and I haven't found +another tool that works as well as Concourse does, that's why I've stuck around. + + + +This post isn't about me though, it's about the project and what's happening with it. I'm going to talk about the last +year of the project to recap where the project is at. Then I'll discuss where I see the project going over the next +year. + +## Concourse is 10 Years Old! 
+ +The [first commit](https://github.com/concourse/concourse/commit/e3cb2182bb1523718f65714d0c20e176572726a9#diff-c693279643b8cd5d248172d9c22cb7cf4ed163a3c98c8a3f69c2717edd3eacb7) +for Concourse was made April 13th, 2014. That's over 10 years ago! Not sure how all that time flew by, but I guess it +means Concourse is "legacy software", especially on the timescale of the internet. Concourse is well on its way to +getting its pilot license, only four more years to go ( +in [Canada](https://tc.canada.ca/en/aviation/licensing-pilots-personnel/flight-crew-licences-permits-ratings/general-information-pilot-licences-permits) +at least). + +Concourse has changed so much over the years. Overall I would say Concourse's development has been about slow and +thoughtful improvements. I think it's paid off well so far for the project and will continue to do so in the future. +There are a lot of exciting things we can still add onto Concourse, and I think there are a lot of existing things we +can continue to refine and improve. + +## v7.12.0 + +2024 was a very slow on the release front. We had one Minor +release, [v7.12.0](https://github.com/concourse/concourse/releases/tag/v7.12.0) and two earlier patch releases for +v7.11.0. + +There was quite a bit of turbulence this year in Concourse's development. Broadcom has unfortunately scaled down the +engineering time they dedicate towards Concourse. I picked up the slack mid-way through the year and got v7.12.0 pushed +out. I was able to get Broadcom to reaffirm their commitment to providing the infrastructure +behind [ci.concourse-ci.org](https://ci.concourse-ci.org/). + +There weren't many features added in v7.12.0. I think the biggest one that's worth shouting out is IPv6 support being +added by Qjammer in [#8801](https://github.com/concourse/concourse/pull/8801). This feature only works for the +Containerd runtime. 
This feature is a big push in making sure Concourse is future-proofed as more people build out +networks with IPv6 or dual-stack setups. + +## Project Leadership + +Overall it kinda sucked how little development happened this year. Lots of folks where posting messages on GitHub and +Discord, asking what was going on with the project. There's definitely been a leadership gap since +Alex ([vito](https://github.com/vito/)) left Pivotal/VMware, and therefore the project. (Please don't message him about +Concourse stuff, he's moved on to other things). + +Earlier this year I decided I would try to start filling that leadership gap. This was hard for me do while working a +full-time job, but I was able to push out v7.12.0 using my evenings and weekends. It was rewarding but also very +draining. I learned that I could not do my full-time job and properly steward Concourse at the same time. Concourse is +too large of a project to manage in one's spare time. + +My reward for pushing out v7.12.0 is that I got people seeing me as the leader of the project. Yay! Goal accomplished! +This lead to people in the community reaching out to me and one group came to me with an interesting offer, which I'll +talk about more in a bit. + +## Concourse in 2025 + +Where is Concourse going in 2025? + +Right now I'm planning to focus my efforts on refining what we have right now. There are some annoying bugs that I'd +like to get fixed, and PR's I want to merge in. I've updated the milestones to reflect what I'm +prioritizing: [https://github.com/concourse/concourse/milestones](https://github.com/concourse/concourse/milestones) + +There might also be a v8 release this year to bundle together some larger changes, like changing the default worker +runtime to containerd. There's also a milestone for this. + +I am also going to declare **Issue Bankruptcy**. 
There are over 700 issues in the +main [concourse/concourse](https://github.com/concourse/concourse/) repo which is completely unmanageable. I want to get +the number of open issues down to less than 100. I will keep more recent ones open and anything that's an obvious +feature request or bug that could reasonably be done open as well. If I can't grok the issue within a few seconds of +looking at it, it's getting closed. + +I also really want to get an ARM64 version of Concourse officially built. The work on this has already been started by +Rui, I just need to pick up the thread from where he left off. I am very confident that we will have official ARM64 +builds this year! + +Now you might be wondering: + +> * +*_Taylor, you just said you can't do your full-time job AND steward Concourse at the same time. How will you find time +for all of this?!_** + +My answer to that is: I'm not doing both. I quit my job. I'm doing Concourse full-time. + +Hooray, problem solved, everything is good now, Concourse has a full-time maintainer again! + +Okay, of course this problem is not fully solved. How am I going to afford to live?! + +## A "Concourse" Company + +Some folks in the community reached out to me offering to try starting a company centered around Concourse. I of course +said yes! Right now we are a small, dedicated team with a combined 10+ years of experience running Concourse clusters +both small and large. The goal of this commercial venture is to advance and sustain then open-source Concourse project. +We think Concourse is still the best CI/CD tool out there and that we can make a compelling commercial offering around +it. + +~~If your company may be interested in what a "Concourse" company has to offer then please share your email with us +here: LINK REMOVED~~ + +> **May 2025 Update** - If you're looking for someone to run a managed Concourse (SaaS) for you please reach out to the +> folks at [CentralCI](https://centralci.com/). 
If you're looking for commercial support for your on-premise Concourse +> please reach out to [Pixel Air](https://pixelair.io/). + +One last thing I want to mention about this company we're building, because it's important to us, is that we are not +taking any kind of VC funding. We are not creating a company that will be focused on "growth at all costs". Our focus +will be on developing the product (Concourse) and using the product to solve CI/CD problems for customers, and finally +catching up to the rest of the CI/CD world with a managed/SaaS version of Concourse. We want to build a sustainable +business. We are not building a business to eventually sell; we are building a business that will advance and sustain +Concourse and the entire community surrounding it. + +## 2025 + +So that's everything about the project right now. If you have any thoughts or comments you can leave them in the +discussion thread for this blog +post: [https://github.com/concourse/concourse/discussions/9048](https://github.com/concourse/concourse/discussions/9048) + +Here's to a better 2025 ✈️🥂 diff --git a/docs/blog/posts/2025/2025-04-03-7-13-release-and-project-update.md b/docs/blog/posts/2025/2025-04-03-7-13-release-and-project-update.md new file mode 100644 index 00000000..3f65e541 --- /dev/null +++ b/docs/blog/posts/2025/2025-04-03-7-13-release-and-project-update.md @@ -0,0 +1,174 @@ +--- +title: v7.13.0 Release and Project Update +date: 2025-04-03 +authors: + - taylorsilva +--- + +First minor release of 2025! This release is FULL of bug fixes, performance improvements, and even a few new features. +I'll start with a quick project update and then dive into the exciting things you can find in this release. + + + +## Concourse Joins the Cloud Foundry Foundation + +Concourse officially joined the [Cloud Foundry Foundation](https://www.cloudfoundry.org/) +back [in February](https://github.com/cloudfoundry/community/pull/1047). 
[Derek Richard](https://github.com/drich10) +from Broadcom deserves the credit for making this happen. This was one of those tasks that so many folks have wanted to +happen over the last few years, but through all the acquisitions (Pivotal -> VMware -> Broadcom) it kept getting started +and stopped over and over and over. A HUGE thanks to Derek for finally making this happen. + +There are still some small stuff transitioning behind the scenes that Derek is taking care of. You can watch +our [monthly working group meetings](https://www.youtube.com/watch?v=x2v9xFGH2Rg&list=PLhuMOCWn4P9ji8ZCY2a-FvMeT7S74-Hhm) +for more details or chat with us on [Discord](https://discord.gg/MeRxXKW) if you have any questions about this. + +Now let's get into the details about 7.13.0! + +## 🎉 v7.13.0 + +Like any release, I recommend reading the [release notes](https://github.com/concourse/concourse/releases/tag/v7.13.0) +for all the details. I'm going to call out some of the big ticket items here though. + +Overall this release is light on new features, but is packed full of bug fixes, optimizations, and upgrades. + +## Breaking Change for PgBouncer users + +We've migrated our database driver from [lib/pq](https://github.com/lib/pq) +to [jackc/pgx](https://github.com/jackc/pgx). `lib\pq` has been in maintenance mode for years now and the majority of +the Go community now uses `pgx` as the preferred Postgresql driver. + +This means we had to remove the `CONCOURSE_POSTGRES_BINARY_PARAMETERS` flag because it was exposing a feature specific +to `lib/pq`. As far as I know, this flag was introduced and used by PgBouncer users. 
There is a similar flag that pgx +exposes, but based on [recent pgx discussions](https://github.com/jackc/pgx/discussions/1784) and +the [PgBouncer release notes](https://www.pgbouncer.org/2025/01/pgbouncer-1.24.0), it seems there shouldn't be any +issues for PgBouncer users of Concourse as long as you're using PgBouncer >1.21.0. Concourse has never made any promises +about compatibility with PgBouncer, but if PgBouncer users have any issues I'll gladly review a PR to improve the +situation for you folks. + +Thankfully this is the only breaking change and I don't plan to make any new breaking changes outside of a Major version +bump. The scope of this one seemed quite small which is why I decided to push this out in a Minor version bump. + +## New Features + +* [@A1kmm](https://github.com/A1kmm) added `CONCOURSE_CONTAINERD_PRIVILEGED_MODE` to the `concourse worker` command. + This is useful for the security-conscious operator and limits the permissions that privileged containers can get while + still allowing tools like Podman and Buildah to work. You can also use this flag to disable the use of privileged + containers completely. [PR 9017](https://github.com/concourse/concourse/pull/9017) +* [@analytically](https://github.com/analytically) added a `background_filter` option to the `display` field, which + allows you to specify [CSS filters](https://developer.mozilla.org/en-US/docs/Web/CSS/filter) on your pipeline + background images. Useful if you're tired of grey backgrounds and want more colour in your pipelines + 🌈 [PR 9117](https://github.com/concourse/concourse/pull/9117) +* [@IvanChalukov](https://github.com/IvanChalukov) added the `--team` flag to the `containers` and + `clear-resource-cache` fly commands. + PRs [9106](https://github.com/concourse/concourse/pull/9106), [9107](https://github.com/concourse/concourse/pull/9107) + +Like I said, this release is light on new features. 
Most of the work went into fixing long-standing bugs and updating +the code base. + +## 💫 UUID Collisions?! + +If you search _"what are the chances of a UUID collision?"_ in your preferred search engine, you'll find many comments +stating _"You're more likely to be killed by a gamma ray than get a UUID collision"_, usually with a caveat that you're +generating v4 UUID's. Well, I have _another_ caveat to add to that statement: the library you're using to generate UUID' +s implements [RFC 9562](https://datatracker.ietf.org/doc/html/rfc9562) correctly! + +If you want one reason to upgrade to 7.13.0, I'd say this is why. All versions prior to this release will occasionally +have UUID collisions when creating containers and volumes. I'm not sure how often this happens, it could be every 1,000 +or 100,000,000, calls to `uuid.New()`, but it **definitely does happen**. On a low-usage cluster we only saw it once. +The error we saw was this one: + +``` +container "bba06975-46f6-4bbe-73f5-9e39a869719a": already exists +``` + +The UUID library we were using was the most popular library at the time. Concourse has been using this library from the +very beginning. I'm sure it's been the source of a handful of weird "ghost in the machine" type of errors over the +years. We can now lay this ghost to rest! We are now using the [github.com/google/uuid](https://github.com/google/uuid) +library instead. + +[PR 9083](https://github.com/concourse/concourse/pull/9083) for full details. + +## Fly Login With Chrome + +If you use a Chromium web browser and have tried to `fly login` you've definitely run into this bug where Chrome says " +your token could not be sent to fly", but saw that it _was_ sent to fly. What +gives? [Preflight requests](https://developer.chrome.com/blog/private-network-access-preflight/) is what gives! You +write your web app to send _one_ HTTP request and SURPRISE, Chrome sends a bonus second request first! 
+ +The `fly` CLI will now handle this ⭐special bonus⭐ preflight request from Chrome, give Chrome the thumbs up, and then +wait until the real request from Concourse actually comes through. You'll now see a "login successful!" message when you +`fly login` now. + +[PR 9051](https://github.com/concourse/concourse/pull/9051) for full details. + +There is of course another browser bug that's been annoying users for the last few years... + +## Preserve Existing Browser Session + +After you did that janky `fly login` with Chrome, you'd then run into another issue. You had a tab open and logged into +Concourse before logging in with `fly`. Now when you go back to that tab and click around in the UI you get told to +login AGAIN. What gives?! + +[@IvanChalukov](https://github.com/IvanChalukov) dove into this issue and fixed it. I can only imagine the amount of +hours he spent debugging this issue. The TL;DR is that a CSRF token got wrapped in quotes and the quotes were then +considered part of the token, resulting in an invalid CSRF check server-side. Thank you Ivan for fixing this mild but +annoying bug. Many browser sessions will now be saved! 🙏 + +[PR 9109](https://github.com/concourse/concourse/pull/9109) for full details. + +## Everything Else + +Those are the big bugs that I think most people will be excited to see fixed. There's also been a lot of chore-level +changes as well. Concourse is quite a big application and has a lot of dependencies. I think we've done a good job of +dusting things off and upgrading things. 
Here's a quick rundown of the other exciting things in this release: + +* This release is built with [Go 1.24](https://go.dev/blog/go1.24), gaining the performance improvements from that + release, which boasts a 2-3% decrease in CPU +* [@analytically](https://github.com/analytically) upgraded a bunch of stuff in the web frontend: + * Upgraded Graphviz from v2.47.1 to v12.2.1 + * Upgraded D3js from v3.5.5 to v7 + * Updated Material Design Icons from v5.0.45 to 7.4.47, adding 2,350 new icons to use in your pipelines! + * Reduced the size of all SVG's using [svgo](https://svgo.dev/) + * Replaced the unmaintained [NYTimes/gziphandler](https://github.com/NYTimes/gziphandler) + with [klauspost/compress](https://github.com/klauspost/compress/), giving a performance boost to all HTTP + endpoints. +* [@IvanChalukov](https://github.com/IvanChalukov) and [@Kump3r](https://github.com/Kump3r) upgraded our fork of Dex to + use the CF v3 API, ensuring users of CF Auth aren't locked out of their Concourse when the CF v2 API goes away. +* The experimental warning seen during the `set_pipeline` step is now gone. It's interface and current behaviour are + considered stable. +* Modernization of the Go codebase, mostly removing usage of deprecated Go functions and other similar improvements. +* +The [registry-image](https://github.com/concourse/registry-image-resource), [s3](https://github.com/concourse/s3-resource), +and [semver](https://github.com/concourse/semver-resource) resources have been updated to use v2 of the AWS Go SDK. +While making this change I've also changed the authentication behaviour when these resources are interacting with AWS. +They will now use the default authentication chain that the AWS SDK uses. This means these resource types can now use +the Concourse Worker's IAM role and static credentials are no longer required to use these resources. 
+* [@mariash](https://github.com/mariash) enabled cgroupv2 support for the Guardian runtime, enabling the use of the + Guardian runtime on newer kernel versions. +* I updated Concourse's seccomp profile to include newer syscalls, bringing it inline with the default seccomp profile + from Docker and Containerd. +* Shout-out to [@aliculPix4D](https://github.com/aliculPix4D) and the folks at Pix4D for testing and finding some last + minute critical bugs before the release went out. +* Shout-out to [@IvanChalukov](https://github.com/IvanChalukov) and [@Kump3r](https://github.com/Kump3r) from SAP for + their help testing Concourse on Bosh. + +Again, read the [release notes](https://github.com/concourse/concourse/releases/tag/v7.13.0) for everything that's been +fixed or improved. + +Thank you everyone that contributed to this release. If you find any bugs, open an issue +on [GitHub](https://github.com/concourse/concourse/). + +* [GitHub Release](https://github.com/concourse/concourse/releases/tag/v7.13.0) +* [Docker Hub Image](https://hub.docker.com/r/concourse/concourse) +* [Helm Chart](https://artifacthub.io/packages/helm/concourse/concourse) +* [Bosh Release](https://bosh.io/releases/github.com/concourse/concourse-bosh-release?all=1) + +## 🍎 One More Thing... + +Myself and my co-founders are happy to officially launch [CentralCI](https://centralci.com/), a company providing +managed Concourse clusters. We run Concourse for you, owning the operational overhead of running Concourse. We've put a +lot of effort into solving some common pains that operators of Concourse have, allowing you to focus on writing your +pipelines. + +You can learn more about CentralCI in +our [introduction blog post](https://centralci.com/blog/posts/introducing_centralci). 
diff --git a/docs/blog/posts/2025/2025-05-08-may-2025-update.md b/docs/blog/posts/2025/2025-05-08-may-2025-update.md new file mode 100644 index 00000000..78f86912 --- /dev/null +++ b/docs/blog/posts/2025/2025-05-08-may-2025-update.md @@ -0,0 +1,93 @@ +--- +title: May 2025 Project Update +date: 2025-05-08 +authors: + - taylorsilva +--- + +It's the May 2025 project update! I'm going to try and do this at a somewhat regular cadence. Once a month seems like a +good idea, though I may skip a month here and there if there's nothing interesting to write about. + + + +## Administrative + +I've opened up a [discussion thread](https://github.com/concourse/concourse/discussions/9184) about the breaking changes +currently planned to go into v8 of Concourse. Nothing is set in stone, all feedback is welcome! Likely the most +disruptive one so far is to finally enforce +the [valid identifiers](https://github.com/concourse/concourse/discussions/9184#discussioncomment-13077550) instead of +warning when they're detected. See the link for full details and reasoning. + +The [Concourse Working Group](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/concourse.md) has +been meeting once per month. You can view past meetings +on [YouTube](https://www.youtube.com/watch?v=X_XoNsUtX-Y&list=PLhuMOCWn4P9ji8ZCY2a-FvMeT7S74-Hhm) and +view [meeting notes here](https://docs.google.com/document/d/1KSp7npDEARGbsp5SmB2DHp4VJCEJ0GAqYwv9ixHH2DI/). + +The last thing I want to mention is that I decided to step away from [CentralCI](https://centralci.com/). They are still +operating and working at making a fantastic managed Concourse that you can go and buy right now!!! If you want Concourse +but don't want the overhead of running it, I highly recommend reaching out to them. I will be focusing my time on the +community and those currently running Concourse on-premise. 
+ +If you want to support my work stewarding Concourse you +can [sponsor me on GitHub](https://github.com/sponsors/taylorsilva). Thank you to those who already found the page and +are sponsoring me 💙 + +## Roadmap + +I have two project boards that I'm using to track work right now. + +- [Roadmap](https://github.com/orgs/concourse/projects/53) +- [Pull Requests](https://github.com/orgs/concourse/projects/54) + +The Roadmap board is tracking work that I am actively working on and planning to work on next. Right now the ARM build +stuff is taking up most of my time. I have to update some of the tooling we use to build images (the registry-image +resource and oci-build task) to make it easier to build and push multi-arch images. I'm also taking the time to clean up +a bunch of stuff in the [concourse/ci](https://github.com/concourse/ci) repo. It's a lot of work but it's slowly +happening. I see no major roadblocks with this work right now. + +The Pull Requests board is a place for me to see all open pull requests across all repositories. I've added most open +PR's to this board. If you see your PR on this board, that means I'm aware of it and I'll get to it eventually. + +## S3 Resource Type + +I made a few breaking changes with the [S3 resource type](https://github.com/concourse/s3-resource) that came out with +7.13. The [v2.0.0 release notes](https://github.com/concourse/s3-resource/releases/tag/v2.0.0) go over those. I did not +hit the mark with these changes sadly. Users found that they couldn't pull down items from public S3 buckets. Resources +pulling from public buckets were using no credentials previously, but with v2 they now tried to use any credentials the +default SDK authentication chain could find, which usually ended up being an EC2 instance profile. 
I then made +a [PR](https://github.com/concourse/s3-resource/pull/187) to fix this, but then that made it, again, impossible for +users to use the EC2 instance profile. Fritz then made a [PR](https://github.com/concourse/s3-resource/pull/189) that +adds a new flag to the S3 resource, `enable_aws_creds_provider`, which allows the behaviour introduced in v2 to happen +if you want it to. + +I published a new version of the resource type with this +PR, [v2.2.0](https://github.com/concourse/s3-resource/releases/tag/v2.2.0). If you want to use that in your pipelines +you can do so with this snippet: + +```yaml +resource_types: + - name: s3 + type: registry-image + source: + repository: docker.io/concourse/s3-resource + tag: 2.2.0 +``` + +That will override the S3 resource type found on the worker for pipelines that you add this to. Apologies again for the +blunder and thank you to those who helped get the resource back in a state that works for everyone. + +## VS Code Extension + +Shouting this out as it looks handy. A user made a VS code extension for previewing Concourse pipelines. You can see +their post where that shared that here: https://github.com/concourse/concourse/discussions/9193 + +## Cloud Foundry Day North America + +[Cloud Foundry Day](https://events.linuxfoundation.org/cloud-foundry-day-north-america/) is happening next week on May +14th. Derek (my co-lead of the Concourse working group) [has a talk](https://sched.co/1xAEo) about Concourse's journey +into the Cloud Foundry foundation. Make sure you check it out and say hi to Derek if you'll be there. + +## That's All Folks! + +And that's the update! If anyone wants to chat I'm always hanging around on [discord](https://discord.gg/MeRxXKW). You +can also reach me at `dev` at `taydev.net`. 
diff --git a/docs/blog/posts/2025/2025-08-12-new-release-7-14.md b/docs/blog/posts/2025/2025-08-12-new-release-7-14.md new file mode 100644 index 00000000..77720794 --- /dev/null +++ b/docs/blog/posts/2025/2025-08-12-new-release-7-14.md @@ -0,0 +1,131 @@ +--- +title: v7.14.0 - The First ARM Build of Concourse +date: 2025-08-11 +authors: + - taylorsilva +--- + +[v7.14.0](https://github.com/concourse/concourse/releases/tag/v7.14.0) is out +and with it also comes the first ARM build of Concourse! There were a lot of +behind the scenes changes that were required to make this happen, so let's dive +into everything that's in this release. + + + +## 🫆Pipeline Identity Tokens + +I figure we'll start with the new features, because that's always fun. +[#9035](https://github.com/concourse/concourse/pull/9035) added a whole new +`var_source` called "Identity Tokens". These are JWT's that Concourse can +generate for you and that you can then use to authenticate to third-party +systems that support "identity federation", such as Vault, AWS, and Azure. + +[@dbaumgarten](https://github.com/dbaumgarten) did everything here: wrote the +[RFC](https://github.com/concourse/rfcs/pull/139), made the +[PR](https://github.com/concourse/concourse/pull/9035), AND wrote a +comperhensive set of +[docs](https://concourse-ci.org/idtoken-credential-manager.html) with examples +on how to use it. Huge thank you to him for bringing this feature to the +community. I think a lot of users will find it useful and help them migrate away +from using static credentials. + +## 🦾 The Road to ARM + +This has been a longgggggg time coming. There was a community fork of Concourse +out there for a while, specifically for building an ARM version of Concourse. I +remember when Ciro, a co-worker from Pivotal, did a [little exploration running +Concourse on his Raspberry +Pi](https://ops.tips/blog/raspberry-pi-concourse-ci-worker/). 
He showed it was +possible, though the road wasn't completely smooth. + +Thankfully, the container ecosystem has continued to develop and mature these last +few years while Concourse was in limbo. More workloads are running on ARM now +and as a result more of our tools and libraries just work when trying to do +cross-compilation or ARM stuff. The big win for us was [Docker making +multi-platform +builds](https://www.docker.com/blog/faster-multi-platform-builds-dockerfile-cross-compilation-guide/) +a thing. This saved us from having to manually build an ARM Concourse worker, +like Ciro did, just so we could build ARM images of Concourse and all the +resource-types that we ship with Concourse. + +At this point, all we needed to do was update our pipelines to support building +and releasing ARM versions of _everything!_ + +The journey went like this: + +- Update all 12 base resource-types to use + [Wolfi](https://github.com/wolfi-dev/) as their base image because the + previous base image did not have an ARM variant. The `concourse/concourse` + image also uses Wolfi as its base image now. +- Update the [OCI Build task](https://github.com/concourse/oci-build-task/) and + [Registry Image + resource](https://github.com/concourse/registry-image-resource) to better + support multi-platform images and workflows. +- Update the pipeline used to build, test, and release the resource-types, and + release ARM versions of all the resource-types. +- Update the main Concourse pipeline and the release pipeline to build and test the ARM variant of Concourse. +- Added a [`/download-fly`](https://github.com/concourse/concourse/pull/9240) + page to support all variants of `fly`, replacing the three static download + links previously located in the footer + +Each of those steps took many hours to complete. I had to touch every repository +we own and got to do a little clean-up everywhere. 
+ +One nice side-effect of moving to Wolfi for the base image is that the size of +all the container images we produce dropped. Some by a lot, some by just a +little. For example, the Git resource went from +[218MB](https://hub.docker.com/layers/concourse/git-resource/1.18/images/sha256-743af77614876650e1842a95b00ba19a90553d97f5d6596fa1b80e3d68f4da44) +to +[37MB](https://hub.docker.com/layers/concourse/git-resource/1.19/images/sha256-b4eb2006bd23ca648e8ca98d42f0281fefb14d0ba392a22288ad7f95e1c73ebb), +a massive drop in size! Some other resources dropped by only a few MB's. The +size drops weren't 100% due to Wolfi. I took some extra time to ensure we were +only adding what was necessary for each resource-type to function. + +Collectively, this results in us shipping a much smaller `concourse/concourse` +image. v7.14.0 clocks in at [928MB (x86_64) and 883MB +(ARM)](https://hub.docker.com/layers/concourse/concourse/7.14.0/images/sha256-f68c1c39abbd2b3f8d948b0073f1334292e9eebc92f1ca1c1ab60a54870b989f), +down from [1.41GB +(x86_64)](https://hub.docker.com/layers/concourse/concourse/7.13/images/sha256-cc40ecf37fee9056dad7e0b0751d321e8791449e3fe56603995c2e56a80ad1c1). +That's a 34% drop in size for the x86_64 image and 37% for the ARM image. The +ARM-based images are also always smaller, so you save a bit more disk space if +you go for a fully ARM-based Concourse deployment. + +Now there is finally an ARM version of Concourse and fly that folks can run on +their Raspberry Pi's, M-Series macs, and ARM cloud servers. I'm excited to see +what Concourse will end up running on now 🎉 + +## ☁️ Cloud Foundry Foundation + +As a project member of the Cloud Foundry Foundation (CFF), Concourse has had two tasks +assigned to it. + +1. Reduce project cost: We've reduced [project costs by + 40%](https://github.com/cloudfoundry/community/issues/1190#issuecomment-3001014447), + leaving us in a decent state. 
There are some smaller things we can tackle, but
+   urgency from CFF is gone.
+2. Running a [shared Concourse
+   cluster](https://github.com/cloudfoundry/community/pull/1238) for CFF member
+   projects. This is related to the CFF's wider goal of reducing costs across
+   all projects. This is something Derek and I plan to work on over the
+   next few months.
+
+## 🧭 What's Next
+
+My goal with Concourse right now is to continue to refine and improve what we
+currently have. There are plenty of little bugs littered throughout the code
+base that I want to resolve to help make Concourse feel even more stable and
+reliable than it currently is.
+
+I try to keep this [GitHub project
+board](https://github.com/orgs/concourse/projects/53) up to date with what I
+plan to work on. Folks are free to look at the board, and if it isn't in the "In
+Progress" column, feel free to pick up the issue and work on it.
+
+I'll be putting a lot of my attention to the breaking changes planned for v8.
+See [this discussion post](https://github.com/orgs/concourse/discussions/9184)
+for details and leave any thoughts or comments there.
+
+That's all I have for everyone now. See everyone over on
+[Discord](https://discord.gg/MeRxXKW) or [GitHub
+Discussions](https://github.com/orgs/concourse/discussions). Enjoy the new
+release!
diff --git a/docker-compose.yml b/docs/docker-compose.yml
similarity index 100%
rename from docker-compose.yml
rename to docs/docker-compose.yml
diff --git a/docs/docs/auth-and-teams/caveats.md b/docs/docs/auth-and-teams/caveats.md
new file mode 100644
index 00000000..c3b326c5
--- /dev/null
+++ b/docs/docs/auth-and-teams/caveats.md
@@ -0,0 +1,24 @@
+---
+title: Security Caveats
+---
+
+At present, teams only provide trusted _multi-tenancy_. This means it should be used for cases where you know and trust
+who you're allowing access into your Concourse cluster. 
+
+There are a few reasons it'd be a bad idea to do otherwise:
+
+* Any team can run builds with [`task`
+  step **`privileged`**](https://concourse-ci.org/task-step.html#schema.task.privileged) tasks. A bad actor in the mix
+  could easily use this to harm your workers and your cluster. You
+  can [lock down privileged mode](https://concourse-ci.org/security-hardening.html#locking-down-privileged-mode) if you
+  use the containerd runtime and avoid this issue altogether.
+* There are no networking restrictions in place, and traffic to and from the worker's Garden and Baggageclaim endpoints
+  is currently unencrypted and unauthorized. Anyone could run a task that does horrible things to your worker's
+  containers, possibly stealing sensitive information.
+
+
+ This can be remedied with configuration specified on Garden to restrict access to the internal network, but this is + not detailed in our docs, and we'll probably want to find a better answer than configuration in the future. +
+
+ You could put firewall rules in place between workers to mitigate this issue as well. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/bitbucket-cloud.md b/docs/docs/auth-and-teams/configuring/bitbucket-cloud.md new file mode 100644 index 00000000..e930df83 --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/bitbucket-cloud.md @@ -0,0 +1,73 @@ +--- +title: BitBucket Cloud Auth +--- + +A Concourse server can authenticate against BitBucket Cloud to leverage its permission model. + +## Authentication + +First, you'll need +to [create an OAuth consumer on Bitbucket Cloud](https://confluence.atlassian.com/display/BITBUCKET/OAuth+on+Bitbucket+Cloud). + +The consumer will need the following permissions: + +* Account: + * Email + * Read +* Team membership: + * Read + +The "Callback URL" must be the URL of your Concourse server with `/sky/issuer/callback` appended. This address must be +reachable by BitBucket Cloud - it can't be `localhost`. + +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +You will be given a Client ID and a Client Secret for your new application. The client ID and secret must then be +configured on the [`web` node](../../install/running-web.md) by setting the following env: + +```properties +CONCOURSE_BITBUCKET_CLOUD_CLIENT_ID=myclientid +CONCOURSE_BITBUCKET_CLOUD_CLIENT_SECRET=myclientsecret +``` + +## Authorization + +BitBucket users and teams can be authorized for a team by passing the following flags to [ +`fly set-team`](../managing-teams.md#fly-set-team): + +* `--bitbucket-cloud-user=LOGIN` - Authorize an individual user. +* `--bitbucket-cloud-team=TEAM_NAME` - Authorize an entire organization's members. + +For example: + +```shell +fly set-team -n my-team \ + --bitbucket-cloud-user my-bitbucket-login \ + --bitbucket-cloud-team my-bitbucket-team +``` + +... 
or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + bitbucket-cloud: + users: [ "my-bitbucket-login" ] + teams: [ "my-bitbucket-team" ] +``` + +### Configuring main Team Authorization + +BitBucket users and teams can be added to the [`main` team](../main-team.md) authorization config by setting the +following env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_BITBUCKET_CLOUD_USER=my-bitbucket-login +CONCOURSE_MAIN_TEAM_BITBUCKET_CLOUD_TEAM=my-bitbucket-team +``` + +Multiple teams and users may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/cf-uaa.md b/docs/docs/auth-and-teams/configuring/cf-uaa.md new file mode 100644 index 00000000..a431d5fd --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/cf-uaa.md @@ -0,0 +1,101 @@ +--- +title: CF / UAA Auth +--- + +Cloud Foundry (CF) auth can be used for operators who wish to authenticate their users configured against their Cloud +Foundry instance via the UAA auth component. + +## Authentication + +You'll need to configure your UAA with a `concourse` client by setting the following under [ +`uaa.clients`](http://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release#p=uaa.clients): + +```yaml +concourse: + id: myclientid + secret: myclientsecret + scope: openid,cloud_controller.read + authorized-grant-types: "authorization_code,refresh_token" + access-token-validity: 3600 + refresh-token-validity: 3600 + redirect-uri: https://concourse.example.com/sky/issuer/callback +``` + +The value for `redirect-uri` must be the external URL of your Concourse server with `/sky/issuer/callback` appended. 
+ +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +Next, you'll need to take the same client ID and secret and configure it on the [ +`web` node](../../install/running-web.md) by setting the following env: + +```properties +CONCOURSE_CF_API_URL=http://mycf.example.com +CONCOURSE_CF_CLIENT_ID=myclientid +CONCOURSE_CF_CLIENT_SECRET=myclientsecret +``` + +Note: if you're integrating with Cloud Foundry, you're probably also deploying Concourse via BOSH - in which case you'll +want to set the [ +`cf_auth.*`](https://bosh.io/jobs/atc?source=github.com/concourse/concourse-bosh-release#p=cf_auth.client_id) properties +in your manifest instead of setting the above env. + +## Authorization + +CloudFoundry users and org/space members can be authorized for a team by passing the following flags to fly set-team: + +* `--cf-user=USERNAME` - Authorize an individual user. +* `--cf-org=ORG_NAME` - Authorize an entire organization's members. Members will need to be part of a Space inside the + organization. +* `--cf-space=ORG_NAME:SPACE_NAME` - Deprecated in favor of `--cf-space-with-developer-role`. Authorize the members with + `developer` role of a space within an organization. +* `--cf-space-with-any-role=ORG_NAME:SPACE_NAME` - Authorize the members with any role of a space within an + organization. +* `--cf-space-with-developer-role=ORG_NAME:SPACE_NAME` - Authorize the members with `developer` role of a space within + an organization. +* `--cf-space-with-auditor-role=ORG_NAME:SPACE_NAME` - Authorize the members with `auditor` role of a space within an + organization. +* `--cf-space-with-manager-role=ORG_NAME:SPACE_NAME` - Authorize the members with `manager` role of a space within an + organization. +* `--cf-space-guid=SPACE_GUID` - Authorize the members with any role of a space within an organization by space GUID. 
+ +For example: + +```shell +fly set-team -n my-team \ + --cf-user my-username \ + --cf-org my-org \ + --cf-space my-other-org:my-space +``` + +... or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + cf: + users: [ "my-username" ] + orgs: [ "my-org" ] + spaces: [ "my-other-org:my-space" ] +``` + +### Adding CF Users to the `main` Team + +CloudFoundry users and org/space members can be added to the [`main` team](../main-team.md) authorization config by +setting the following env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_CF_USER=username +CONCOURSE_MAIN_TEAM_CF_ORG=org-name +CONCOURSE_MAIN_TEAM_CF_SPACE=org-name:space-name +CONCOURSE_MAIN_TEAM_CF_SPACE_WITH_ANY_ROLE=org-name:space-name +CONCOURSE_MAIN_TEAM_CF_SPACE_WITH_DEVELOPER_ROLE=org-name:space-name +CONCOURSE_MAIN_TEAM_CF_SPACE_WITH_AUDITOR_ROLE=org-name:space-name +CONCOURSE_MAIN_TEAM_CF_SPACE_WITH_MANAGER_ROLE=org-name:space-name +CONCOURSE_MAIN_TEAM_CF_SPACE_GUID=SPACE_GUID +``` + +Multiple users, spaces, etc. may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/generic-oauth.md b/docs/docs/auth-and-teams/configuring/generic-oauth.md new file mode 100644 index 00000000..b08b28b9 --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/generic-oauth.md @@ -0,0 +1,77 @@ +--- +title: Generic OAuth Auth +--- + +A Concourse server can authenticate against any valid OAuth auth provider, though it's a bit "closer to the metal" as +you'll need to explicitly configure the auth, token, and user-info URLs. You may want to see if you can +use [Generic OIDC auth](generic-oidc.md) if your auth provider is compatible with OIDC. + +## Authentication + +First you'll need to create a client with your oAuth provider. + +The callback URL must be the URL of your Concourse server with `/sky/issuer/callback` appended. 
This address must be +reachable by your oAuth provider - it can't be `localhost`. + +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +The Generic oAuth provider has many values to set - for a full list consult `concourse web --help`. + +A typical [web node](../../install/running-web.md) env config may look something like this: + +```properties +CONCOURSE_OAUTH_DISPLAY_NAME=Acme +CONCOURSE_OAUTH_CLIENT_ID=myclientid +CONCOURSE_OAUTH_CLIENT_SECRET=myclientsecret +CONCOURSE_OAUTH_AUTH_URL=https://oauth.example.com/oauth2/auth +CONCOURSE_OAUTH_TOKEN_URL=https://oauth.example.com/oauth2/token +CONCOURSE_OAUTH_USERINFO_URL=https://oauth.example.com/oauth2/userinfo +``` + +Consult `concourse web --help` for a full list of flags with descriptions. + +## Authorization + +OAuth users and groups can be authorized for a team by passing the following flags to [ +`fly set-team`](../managing-teams.md#fly-set-team): + +* `--oauth-user=USERNAME` - Authorize an individual user. +* `--oauth-group=GROUP_NAME` - Authorize anyone from the group. + * You may only configure groups if the auth provider exposes this information in either the token itself, or in the + contents of the userinfo endpoint. + * You can configure which claim points to the groups information by specifying `CONCOURSE_OAUTH_GROUPS_KEY` on the [ + `web` node](../../install/running-web.md). + +For example: + +```shell +fly set-team -n my-team \ + --oauth-user my-username \ + --oauth-group my-group +``` + +... 
or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + oauth: + users: [ "my-username" ] + groups: [ "my-group" ] +``` + +### Configuring `main` Team Authorization + +OAuth users and groups can be added to the [`main` team](../main-team.md) authorization config by setting the following +env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_OAUTH_USER=my-user +CONCOURSE_MAIN_TEAM_OAUTH_GROUP=my-group +``` + +Multiple users and groups may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/generic-oidc.md b/docs/docs/auth-and-teams/configuring/generic-oidc.md new file mode 100644 index 00000000..518a020f --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/generic-oidc.md @@ -0,0 +1,113 @@ +--- +title: Generic OIDC Auth +--- + +A Concourse server can authenticate against any valid OIDC auth provider. This provider is similar +to [Generic oAuth](generic-oauth.md) except it only requires an issuer URL rather than auth/token/userinfo URLs. + +## Authentication + +First you'll need to create a client with your oAuth provider. + +The callback URL must be the URL of your Concourse server with `/sky/issuer/callback` appended. This address must be +reachable by your OIDC provider - it can't be `localhost`. + +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +A typical [`web` node](../../install/running-web.md) env config may look something like this: + +```properties +CONCOURSE_OIDC_DISPLAY_NAME=Acme +CONCOURSE_OIDC_CLIENT_ID=myclientid +CONCOURSE_OIDC_CLIENT_SECRET=myclientsecret +CONCOURSE_OIDC_ISSUER=https://oidc.example.com +``` + +Consult `concourse web --help` for a full list of flags with descriptions. 
+ +### A note about user lookup + +When determining the user identity, Concourse will first look at the `preferred_username` claim. If this claim is empty +or missing, it will then look at the claim specified by `CONCOURSE_OIDC_USER_NAME_KEY` (which defaults to `username`). + +Let's say that you want to tie each user to their email by using `CONCOURSE_OIDC_USER_NAME_KEY=email`. + +If your OIDC provider returns the following claims, Concourse will still resolve the user to `Jane Doe`: + +```json +{ + "sub": "248289761001", + "username": "j.doe", + "preferred_username": "Jane Doe", + "email": "janedoe@example.com" +} +``` + +However, if the `preferred_username` claim is empty or missing, Concourse will respect the key and resolve the user to +`janedoe@example.com`: + +```json +{ + "sub": "248289761001", + "username": "j.doe", + "preferred_username": "", + "email": "janedoe@example.com" +} +``` + +## Authorization + +!!! warning + + When authorizing individual users, it's up to you to ensure that the `preferred_username` claim and/or the claim + specified by `CONCOURSE_OIDC_USER_NAME_KEY` is unique. If they're not, then it's possible for users to impersonate + each other + +OIDC users and groups can be authorized for a team by passing one or more of the following flags +to [fly set-team](../managing-teams.md#fly-set-team): + +* `--oidc-user=USERNAME` - Authorize an individual user. +* `--oidc-group=GROUP_NAME` - Authorize anyone from the group. + * You may only configure groups if the auth provider exposes this information in either the token itself, or in the + contents of the userinfo endpoint. + * You can configure which claim points to the groups information by specifying `CONCOURSE_OIDC_GROUPS_KEY` on the [ + `web` node](../../install/running-web.md). 
+ +For example: + +```shell +fly set-team -n my-team \ + --oidc-user my-username \ + --oidc-user another-username \ + --oidc-group my-group \ + --oidc-group my-other-group +``` + +...or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + oidc: + users: [ "my-username", "another-username" ] + groups: [ "my-group", "my-other-group" ] +``` + +Both users and groups are optional. You may opt to only provide privileges based on membership to a group and not to any +user explicitly and vice versa. + +### Configuring `main` Team Authorization + +OIDC users and groups can be added to the [`main` team](../main-team.md) authorization config by setting the following +env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_OIDC_USER=my-user +CONCOURSE_MAIN_TEAM_OIDC_GROUP=my-group +``` + +Multiple users and groups may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/generic-saml.md b/docs/docs/auth-and-teams/configuring/generic-saml.md new file mode 100644 index 00000000..9bf973ab --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/generic-saml.md @@ -0,0 +1,85 @@ +--- +title: Generic SAML Auth +--- + +A Concourse server can authenticate against any valid SAML auth provider. + +## Authentication + +First you'll need to create an application with your SAML provider. Note that the terminology used for configuring an +application may vary between SAML providers - this document uses Okta's terminology. + +SAML Assertion Consumer Service (ACS) URL must be the URL of your Concourse server with `/sky/issuer/callback` appended. 
+
+For example, Concourse's own CI server's callback URL would be:
+
+```
+https://ci.concourse-ci.org/sky/issuer/callback
+```
+
+Audience URI (SP Entity ID) must match `CONCOURSE_SAML_ENTITY_ISSUER`, which defaults to the URL of your Concourse
+server with `/sky/issuer/callback` appended.
+
+Attribute statements that you define in the SAML provider can be remapped in Concourse:
+
+```properties
+CONCOURSE_SAML_USERNAME_ATTR=name # default
+CONCOURSE_SAML_EMAIL_ATTR=email # default
+CONCOURSE_SAML_GROUPS_ATTR=groups # default
+```
+
+Finally, the SAML provider will generate an SSO URL, a CA certificate, and an Identity Provider Issuer. These values
+correspond with `CONCOURSE_SAML_SSO_URL`, `CONCOURSE_SAML_CA_CERT`, and `CONCOURSE_SAML_SSO_ISSUER` respectively.
+
+A typical [web node](../../install/running-web.md) env config may look something like this:
+
+```properties
+CONCOURSE_SAML_DISPLAY_NAME=Okta
+CONCOURSE_SAML_SSO_URL=https://acme.okta.com/app/Y/Z/sso/saml
+CONCOURSE_SAML_CA_CERT=/path/to/ca_cert
+CONCOURSE_SAML_SSO_ISSUER=http://www.okta.com/X
+```
+
+Consult `concourse web --help` for a full list of flags with descriptions.
+
+## Authorization
+
+SAML users and groups can be authorized for a team by passing the following flags to [
+`fly set-team`](../managing-teams.md#fly-set-team):
+
+* `--saml-user=USERNAME` - Authorize an individual user.
+* `--saml-group=GROUP_NAME` - Authorize anyone from the group.
+    * You may only configure groups if the auth provider exposes this information in either the token itself, or in the
+      contents of the userinfo endpoint.
+    * You can configure which claim points to the groups information by specifying `CONCOURSE_SAML_GROUPS_ATTR` on the [
+      `web` node](../../install/running-web.md).
+
+For example:
+
+```shell
+fly set-team -n my-team \
+    --saml-user my-username \
+    --saml-group my-group
+```
+
+... 
or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + saml: + users: [ "my-username" ] + groups: [ "my-groups" ] +``` + +### Configuring `main` Team Authorization + +SAML users and groups can be added to the [`main` team](../main-team.md) authorization config by setting the following +env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_SAML_USER=my-user +CONCOURSE_MAIN_TEAM_SAML_GROUP=my-group +``` + +Multiple users and groups may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/github.md b/docs/docs/auth-and-teams/configuring/github.md new file mode 100644 index 00000000..54c680f6 --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/github.md @@ -0,0 +1,80 @@ +--- +title: GitHub Auth +--- + +A Concourse server can authenticate against GitHub to leverage their permission model and other security improvements in +their infrastructure. + +## Authentication + +First, you'll need to [create an OAuth application on GitHub](https://github.com/settings/applications/new). + +The "Authorization callback URL" must be the URL of your Concourse server. This address must be reachable by GitHub - it +can't be `localhost`. + +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +You will be given a Client ID and a Client Secret for your new application. The client ID and secret must then be +configured on the [`web` node](../../install/running-web.md) by setting the following env: + +```properties +CONCOURSE_GITHUB_CLIENT_ID=myclientid +CONCOURSE_GITHUB_CLIENT_SECRET=myclientsecret +``` + +Note that the client must be created under an _organization_ if you want to authorize users based on organization/team +membership. In addition, the GitHub application must have at least read access on the organization's members. 
If the +client is created under a personal account, only individual users can be authorized. + +If you're configuring GitHub Enterprise, you'll also need to set the following env: + +```properties +CONCOURSE_GITHUB_HOST=github.example.com +CONCOURSE_GITHUB_CA_CERT=/path/to/ca_cert +``` + +The GitHub Enterprise host must not contain a scheme, or a trailing slash. + +## Authorization + +Users, teams, and entire organizations can be authorized for a team by passing the following flags to [ +`fly set-team`](../managing-teams.md#fly-set-team): + +* `--github-user=LOGIN` - Authorize an individual user. +* `--github-org=ORG_NAME` - Authorize an entire organization's members. +* `--github-team=ORG_NAME:TEAM_NAME` - Authorize a team's members within an organization. + +```shell +fly set-team -n my-team \ + --github-user my-github-login \ + --github-org my-org \ + --github-team my-other-org:my-team +``` + +... or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + github: + users: [ "my-github-login" ] + orgs: [ "my-org" ] + teams: [ "my-other-org:my-team" ] +``` + +### Configuring `main` Team Authorization + +GitHub users, teams, and organizations can be added to the [`main` team](../main-team.md) authorization config by +setting the following env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_GITHUB_ORG=org-name +CONCOURSE_MAIN_TEAM_GITHUB_TEAM=org-name:team-name +CONCOURSE_MAIN_TEAM_GITHUB_USER=some-user +``` + +Multiple orgs, teams, and users may be specified by comma-separating them. 
\ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/gitlab.md b/docs/docs/auth-and-teams/configuring/gitlab.md new file mode 100644 index 00000000..9894008d --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/gitlab.md @@ -0,0 +1,76 @@ +--- +title: GitLab Auth +--- + +A Concourse server can authenticate against GitLab to leverage their permission model. + +## Authentication + +First you need to [create an OAuth application on GitLab](https://gitlab.com/-/user_settings/applications) with the +following scopes: + +* read_user +* openid + +The "Authorization callback URL" must be the URL of your Concourse server with `/sky/issuer/callback` appended. This +address must be reachable by GitLab - it can't be `localhost`. + +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +You will be given a Client ID and a Client Secret for your new application. The client ID and secret must then be +configured on the [`web` node](../../install/running-web.md) by setting the following env: + +```properties +CONCOURSE_GITLAB_CLIENT_ID=myclientid +CONCOURSE_GITLAB_CLIENT_SECRET=myclientsecret +``` + +If you're configuring a self-hosted GitLab instance, you'll also need to set the following flag: + +```properties +CONCOURSE_GITLAB_HOST=https://gitlab.example.com +``` + +The GitLab host must contain a scheme and not a trailing slash. + +## Authorization + +Users and groups can be authorized for a team by passing the following flags +to [fly set-team](../managing-teams.md#fly-set-team): + +* `--gitlab-user=USERNAME` - Authorize an individual user. +* `--gitlab-group=GROUP_NAME` - Authorize an entire group's members. + +For example: + +```shell +fly set-team -n my-team \ + --gitlab-user my-gitlab-user \ + --gitlab-group my-group +``` + +... 
or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + gitlab: + users: [ "my-gitlab-login" ] + groups: [ "my-gitlab-group" ] +``` + +### Configuring `main` Team Authorization + +GitLab users and groups can be added to the [`main` team](../main-team.md) authorization config by setting the following +env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_GITLAB_GROUP=group-name +CONCOURSE_MAIN_TEAM_GITLAB_USER=some-user +``` + +Multiple groups and users may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/index.md b/docs/docs/auth-and-teams/configuring/index.md new file mode 100644 index 00000000..11e060c9 --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/index.md @@ -0,0 +1,78 @@ +--- +title: Configuring Auth +--- + +The very first thing to configure with Concourse is how users will log in, and what those users should be able to do. + +This is configured in two separate tiers: + +* **Authentication**, how users identify themselves, is configured on the [`web` node](../../install/running-web.md). +* **Authorization**, how user access is determined, is configured on each team. + +Concourse currently supports the following auth methods: + +
+ +- :material-lock: Local Auth + + --- + [:octicons-arrow-right-24: Configure](local-user.md) + +- :material-github: GitHub Auth + + --- + [:octicons-arrow-right-24: Configure](github.md) + +- :material-gitlab: GitLab Auth + + --- + [:octicons-arrow-right-24: Configure](gitlab.md) + +- :material-bitbucket: BitBucket Cloud Auth + + --- + [:octicons-arrow-right-24: Configure](bitbucket-cloud.md) + +- :simple-cloudfoundry: CF / UAA Auth + + --- + [:octicons-arrow-right-24: Configure](cf-uaa.md) + +- :material-database: LDAP Auth + + --- + [:octicons-arrow-right-24: Configure](ldap.md) + +- :material-microsoft-azure: Microsoft Auth + + --- + [:octicons-arrow-right-24: Configure](microsoft.md) + +- :material-openid: Generic OIDC Auth + + --- + [:octicons-arrow-right-24: Configure](generic-oidc.md) + +- :material-lock: Generic OAuth + + --- + [:octicons-arrow-right-24: Configure](generic-oauth.md) + +- :material-lock: Generic SAML Auth + + --- + [:octicons-arrow-right-24: Configure](generic-saml.md) + + +
+ +Any number of providers can be enabled at any one time. Users will be given a choice when logging in as to which one +they would like to use. + +Concourse uses a fork of [Dex](https://github.com/dexidp/dex) for its authentication. You can find additional +documentation on the supported auth providers in +the [Dex connectors documentation](https://github.com/dexidp/dex/tree/master/Documentation/connectors). + +Adding a new auth provider to Concourse is as simple as submitting a pull request to +our [fork of Dex](https://github.com/concourse/dex) and then adding a bit of configuration to the [ +`skymarshal` component](https://github.com/concourse/concourse/tree/master/skymarshal). \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/ldap.md b/docs/docs/auth-and-teams/configuring/ldap.md new file mode 100644 index 00000000..83ed6d0e --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/ldap.md @@ -0,0 +1,115 @@ +--- +title: LDAP Auth +--- + +The LDAP provider can be used for operators who wish to authenticate their users against an LDAP server. + +## Authentication + +The LDAP provider is configured by pointing it to an LDAP host with a read-only bind DN and password. This bind DN and +password is used for authenticating with the LDAP host and querying the users. + +Additionally, the base DN under which users are searched as well as the attribute of the users to associate to ' +usernames' must also be configured. 
+ +These can be specified via env to the [`web` node](../../install/running-web.md) like so: + +```properties +CONCOURSE_LDAP_DISPLAY_NAME=Acme # optional; default "LDAP" +CONCOURSE_LDAP_HOST=ldap.example.com # port defaults to 389 or 636 +CONCOURSE_LDAP_BIND_DN='cn=read-only-admin,dc=example,dc=com' +CONCOURSE_LDAP_BIND_PW=read-only-admin-password +CONCOURSE_LDAP_USER_SEARCH_BASE_DN='cn=users,dc=example,dc=com' +CONCOURSE_LDAP_USER_SEARCH_USERNAME=uid +``` + +To configure TLS, you may need to set a CA cert: + +```properties +CONCOURSE_LDAP_CA_CERT=/path/to/ca_cert +``` + +If your LDAP host does not use TLS, you must set: + +```properties +CONCOURSE_LDAP_INSECURE_NO_SSL=true +``` + +To fine-tune which users are queried, you can specify a user search filter like so: + +```properties +CONCOURSE_LDAP_USER_SEARCH_FILTER='(objectClass=person)' +``` + +To set which user attributes map to the token claims, you can set the following: + +```properties +CONCOURSE_LDAP_USER_SEARCH_ID_ATTR=uid # default +CONCOURSE_LDAP_USER_SEARCH_EMAIL_ATTR=mail # default +CONCOURSE_LDAP_USER_SEARCH_NAME_ATTR=some-attr # no default +``` + +### Configuring LDAP group search + +The LDAP provider can also be configured with group search configuration, so that users can be configured for team +authorization by their 'group' in LDAP. + +For example, to find groups and identify them by their `ou` attribute, you would configure: + +```properties +CONCOURSE_LDAP_GROUP_SEARCH_BASE_DN='cn=groups,dc=example,dc=com' +CONCOURSE_LDAP_GROUP_SEARCH_NAME_ATTR=ou +``` + +The attributes correlating a user to a group must be specified like so: + +```properties +CONCOURSE_LDAP_GROUP_SEARCH_USER_ATTR=uid +CONCOURSE_LDAP_GROUP_SEARCH_GROUP_ATTR=members +``` + +This specifies that the `uid` attribute of the user must be present in the `members` attribute of the group. 
+ +An additional filter may be specified, just like with users: + +```properties +CONCOURSE_LDAP_GROUP_SEARCH_FILTER='(objectClass=posixGroup)' +``` + +## Authorization + +LDAP users and groups can be authorized for a team by passing the following flags to [ +`fly set-team`](../managing-teams.md#fly-set-team): + +* `--ldap-user=USERNAME` - Authorize an individual user. +* `--ldap-group=GROUP_NAME` - Authorize anyone from the group. + +For example: + +```shell +fly set-team -n my-team \ + --ldap-user my-username \ + --ldap-group my-group +``` + +... or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + ldap: + users: [ "my-username" ] + groups: [ "my-groups" ] +``` + +### Configuring `main` Team Authorization + +LDAP users and groups can be added to the [`main` team](../main-team.md) authorization config by setting the following +env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_LDAP_USER=my-user +CONCOURSE_MAIN_TEAM_LDAP_GROUP=my-group +``` + +Multiple users and groups may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/local-user.md b/docs/docs/auth-and-teams/configuring/local-user.md new file mode 100644 index 00000000..fe1aa48a --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/local-user.md @@ -0,0 +1,86 @@ +--- +title: Local User Auth +--- + +Local User auth is a primitive username/password-based auth mechanism. All users and passwords are configured +statically. + +In general, we recommend configuring one of the other providers instead, but for small deployments with only a few +users, local user auth may be all you need. 
+ +## Authentication + +Local users are configured on the [`web` node](../../install/running-web.md) by setting the following env: + +```properties +CONCOURSE_ADD_LOCAL_USER=myuser:mypass,anotheruser:anotherpass +``` + +This configures two users, `myuser` and `anotheruser`, with their corresponding passwords. The literal password can be +provided, or a [bcrypt](https://en.wikipedia.org/wiki/Bcrypt) hash of the password. + +When local users are configured, the log-in page in the web UI will show a username/password prompt. + +Local users can also log in via [`fly login`](https://concourse-ci.org/fly.html#fly-login) with the `--username` and +`--password` flags. + +### Bcrypt Hashing Passwords + +Instead of passing in user passwords in plaintext, you can provide Concourse with a bcrypt hash of the passwords. + +There aren't any great CLI tools for quickly hashing passwords with bcrypt. Here's a simple Go program that can do the +hashing for you. + +```go +package main + +import ( + "fmt" + + "golang.org/x/crypto/bcrypt" +) + +func main() { + password := []byte("mypass") + hash, _ := bcrypt.GenerateFromPassword(password, 12) + fmt.Println(string(hash)) +} +``` + +Put that in a `main.go` then run `go run main.go` and it will output a hash for your password. You can run this program +in the [Go Playground](https://go.dev/play/p/Ucv-ADJ9M0J) if you want to avoid installing Go. 
+ +Hashing the passwords for the previous example, you would then set `CONCOURSE_ADD_LOCAL_USER` to the following: + +```properties +CONCOURSE_ADD_LOCAL_USER='myuser:$2a$12$L8Co5QYhD..S1l9mIIVHlucvRjfte4tuymMCk9quln0H/eol16d5W,anotheruser:$2a$12$VWSSfrsTIisf96q7UVsvyOBbrcP88kh5CLtuXYSXGwnSnM3ClKxXu' +``` + +## Authorization + +Local users are granted access to teams via [`fly set-team`](https://concourse-ci.org/managing-teams.html#fly-set-team), +using the `--local-user` flag: + +```shell +fly set-team -n my-team --local-user some_username +``` + +...or via `--config` for [setting user roles](https://concourse-ci.org/managing-teams.html#setting-roles): + +```yaml +roles: + - name: member + local: + users: [ "some_username" ] +``` + +### Configuring `main` Team Authorization + +Local users can be added to the [`main` team](../main-team.md) authorization config by setting the following env on +the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_LOCAL_USER=myuser +``` + +Multiple users may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/configuring/microsoft.md b/docs/docs/auth-and-teams/configuring/microsoft.md new file mode 100644 index 00000000..451e422a --- /dev/null +++ b/docs/docs/auth-and-teams/configuring/microsoft.md @@ -0,0 +1,66 @@ +--- +title: Microsoft Auth +--- + +A Concourse server can authenticate against Microsoft Azure AD to leverage its permission model. + +## Authentication + +You'll need +to [register a new application on Azure](https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade). + +The "Callback URL" must be the URL of your Concourse server with `/sky/issuer/callback` appended. This address must be +reachable by Microsoft - it can't be `localhost`. 
+ +For example, Concourse's own CI server's callback URL would be: + +``` +https://ci.concourse-ci.org/sky/issuer/callback +``` + +You will be given a Client ID and a Client Secret for your new application. The client ID and secret must then be +configured on the [`web` node](../../install/running-web.md) by setting the following env: + +```properties +CONCOURSE_MICROSOFT_CLIENT_ID=myclientid +CONCOURSE_MICROSOFT_CLIENT_SECRET=myclientsecret +``` + +Consult `concourse web --help` for a full list of flags with descriptions. + +## Authorization + +!!! warning + + Individual user auth is disabled due to a quirk with Microsoft returning unique IDs but non-unique usernames + +Groups can be authorized for a team by passing the following flags to [`fly set-team`](../managing-teams.md#fly-set-team): + +* `--microsoft-group=GROUP_NAME` - Authorize an entire group's members. + +For example: + +```shell +fly set-team -n my-team \ + --microsoft-group my-group +``` + +...or via `--config` for [setting user roles](../managing-teams.md#setting-user-roles): + +```yaml +roles: + - name: member + microsoft: + groups: [ "my-groups" ] +``` + +### Configuring `main` Team Authorization + +Microsoft groups can be added to the [`main` team](../main-team.md) authorization config by setting the following env on +the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_MAIN_TEAM_MICROSOFT_GROUP=my-group +``` + +Multiple groups may be specified by comma-separating them. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/exposing.md b/docs/docs/auth-and-teams/exposing.md new file mode 100644 index 00000000..613e85fc --- /dev/null +++ b/docs/docs/auth-and-teams/exposing.md @@ -0,0 +1,12 @@ +--- +title: Pipeline & Build Visibility +--- + +Every newly configured pipeline is hidden from anyone but the pipeline's team. 
To make a pipeline publicly viewable, both +by other teams and unauthenticated users, see [ +`fly expose-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-expose-pipeline). + +Even with a pipeline exposed, all build logs are hidden by default. This is because CI jobs are prone to leaking +credentials and other ... unsavory information. After you've determined that a job's builds should be safe for public +consumption, you can set [`public: true`](https://concourse-ci.org/jobs.html#schema.job.public) on the job in your +pipeline. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/index.md b/docs/docs/auth-and-teams/index.md new file mode 100644 index 00000000..274257df --- /dev/null +++ b/docs/docs/auth-and-teams/index.md @@ -0,0 +1,13 @@ +--- +title: Auth & Teams +--- + +A single Concourse installation can accommodate many projects and users. + +Pipelines, builds, and all other user data are owned by _teams_. A team is just a conceptual owner and a separate +namespace, tied to an authorization config. For example, a team may authorize all members of the `concourse` GitHub +organization to be a [member](https://concourse-ci.org/user-roles.html#team-member-role). + +When a user authenticates, each team's authorization config is checked against the user to determine +which [role](https://concourse-ci.org/user-roles.html), if any, to grant for the team. This information is then stored +in the user's token to determine access control for future requests. \ No newline at end of file diff --git a/docs/docs/auth-and-teams/main-team.md b/docs/docs/auth-and-teams/main-team.md new file mode 100644 index 00000000..d7f68891 --- /dev/null +++ b/docs/docs/auth-and-teams/main-team.md @@ -0,0 +1,19 @@ +--- +title: The main team +--- + +# The `main` team + +Out of the box, Concourse comes with a single team called `main`. 
+ +The `main` team is an _admin_ team, meaning members (specifically, users with +the [owner](https://concourse-ci.org/user-roles.html#team-owner-role) role) can create and update other teams. Currently +there is no way to promote a team to become an admin team, so `main` is a special-case. + +The `main` team is different in that all flags normally passed to [ +`fly set-team`](https://concourse-ci.org/managing-teams.html#fly-set-team) are instead passed to the `concourse web` +command, prefixed with `--main-team-`. The values set in these flags take effect whenever the `web` node starts up. This +is done so that you can't get locked out. + +To learn how to configure your `main` team, continue on to the appropriate section for your auth provider of choice +under [Configuring Auth](configuring/index.md). \ No newline at end of file diff --git a/docs/docs/auth-and-teams/managing-teams.md b/docs/docs/auth-and-teams/managing-teams.md new file mode 100644 index 00000000..1d2f8ad7 --- /dev/null +++ b/docs/docs/auth-and-teams/managing-teams.md @@ -0,0 +1,136 @@ +--- +title: Managing Teams +--- + +## `fly set-team` + +Once you've [logged in as the `main` team with `fly`](https://concourse-ci.org/fly.html#fly-login), you can run [ +`fly set-team`](#fly-set-team) to create or update other teams. Users with a [ +`owner` role](https://concourse-ci.org/user-roles.html#team-owner-role) can also update their own configuration with the +same command. + +For example, to create a new team that authorizes the local `foo` user, you would run: + +```shell +fly -t example set-team --team-name my-team --local-user foo +``` + +Note that each time `set-team` is run, the team's authorization config is set _as a whole_ - it is not a stateful +operation. + +There are many different ways to configure team auth; see [Configuring Auth](configuring/index.md) for more information. 
+ +Once the team has been created, you can use [`fly login`](https://concourse-ci.org/fly.html#fly-login) to log in: + +```shell +fly -t example login -n my-team +``` + +Any newly configured pipelines (via [ +`fly set-pipeline`](https://concourse-ci.org/setting-pipelines.html#fly-set-pipeline)) and one-off builds (via [ +`fly execute`](https://concourse-ci.org/tasks.html#running-tasks)) will be owned by the authorized team. Commands that +list content will be scoped to the current team by default, such as [ +`fly pipelines`](https://concourse-ci.org/managing-pipelines.html#fly-pipelines) and [ +`fly builds`](https://concourse-ci.org/builds.html#fly-builds). The web UI will reflect the same state. + +Newly configured pipelines are hidden by default, meaning other teams and unauthorized visitors cannot view them. To +make them publicly viewable, see [Pipeline & Build Visibility](exposing.md). + +### Setting User Roles + +By default, authorization config passed to `set-team` configures the [ +`owner` role](https://concourse-ci.org/user-roles.html#team-owner-role). + +More advanced [roles](https://concourse-ci.org/user-roles.html) configuration can be specified through the `--config` or +`-c` flag. + +The `-c` flag expects a `.yml` file with a single field, `roles:`, pointing to a list of role authorization configs. + +All the attributes in each config will vary by provider. Consult the appropriate section for your provider +under [Configuring Auth](configuring/index.md) for specifics. 
+ +For example, the following config sets three roles with different auth config for each role's provider: + +```yaml +roles: + - name: owner + github: + users: [ "admin" ] + - name: member + github: + teams: [ "org:team" ] + - name: viewer + github: + orgs: [ "org" ] + local: + users: [ "visitor" ] +``` + +## `fly active-users` + +To list all users that have logged into your instance in the last two months, run: + +```shell +fly -t example active-users +``` + +The output will include the username, connector (which method they used to authenticate) and the date of their last +login. + +You can list users whose last login was within a different range by using: + +```shell +fly -t example active-users --since yyyy-MM-dd +``` + +This can be helpful to get a sense of how active your cluster is. + +## `fly teams` + +To list all the teams, run: + +```shell +fly -t example teams +``` + +This can be useful if you've forgotten your team name. + +### `fly teams -d`: With Details + +To list all the teams with authentication details and members, run: + +```shell +fly -t example teams -d +``` + +This can be helpful when debugging OAuth, OIDC groups or listing all individual members. + +## `fly get-team` + +To show a team's configuration, run: + +```shell +fly -t example get-team -n some-team +``` + +## `fly rename-team` + +To rename a team, run: + +```shell +fly -t example rename-team --old-name my-team --new-name cool-team +``` + +This can only be run by the [`main` team](main-team.md). + +## `fly destroy-team` + +To remove a team, including all of its pipelines and one-off builds, first log in as the [`main` team](main-team.md), +and then run: + +```shell +fly -t example destroy-team --team-name my-team +``` + +Currently, if there were any workers assigned specifically to this team, they'll be orphaned, without having their +containers or volumes cleaned up. 
\ No newline at end of file diff --git a/docs/docs/auth-and-teams/user-roles.md b/docs/docs/auth-and-teams/user-roles.md new file mode 100644 index 00000000..7d1669f8 --- /dev/null +++ b/docs/docs/auth-and-teams/user-roles.md @@ -0,0 +1,306 @@ +--- +title: User Roles & Permissions +--- + +Concourse comes with five roles: + +1. Concourse Admin +2. Team Owner +3. Team Member +4. Pipeline Operator +5. Team Viewer + +These roles are strictly ordered, so that each role always has all the permissions of any other role lower on the list. +This means that a Pipeline Operator can always do anything a Team Viewer can, and so on. + +In this document we say an action is assigned to a role if that role is capable of performing the action, but any +less-privileged role is not. For example, the `SaveConfig` action is _assigned_ to the `member` role, so owners and +members can set a pipeline config, but pipeline operators and viewers cannot. + +## Concourse Admin + +`Admin` is a special user attribute granted only to [owners](#owner-role) of the [`main` team](main-team.md). + +Admins have the ability to administrate teams using [`fly set-team`](managing-teams.md#fly-set-team), [ +`fly destroy-team`](managing-teams.md#fly-destroy-team), [`fly rename-team`](managing-teams.md#fly-rename-team), etc. + +Admins always have permission to perform any action on any team. You cannot assign actions to the admin role using the +`--config-rbac` flag. + +The following actions are also assigned to admins, and cannot be reconfigured: + +```yaml +- GetLogLevel +- ListActiveUsersSince +- SetLogLevel +- GetInfoCreds +- SetWall +- ClearWall +``` + +## `owner` role + +Team owners have read, write and auth management capabilities within the scope of their team, and can rename or destroy +the team. 
+ +Actions assigned to the `owner` role by default: + +```yaml +owner: + - SetTeam + - RenameTeam + - DestroyTeam +``` + +## `member` role + +Team members can operate within their team in a read & write fashion, but they can not change the configuration of their +team. + +Actions assigned to the `member` role by default: + +```yaml +member: + - SaveConfig + - CreateBuild + - DeletePipeline + - OrderPipelines + - OrderPipelinesWithinGroup + - ExposePipeline + - HidePipeline + - RenamePipeline + - ArchivePipeline + - CreatePipelineBuild + - RegisterWorker + - LandWorker + - RetireWorker + - PruneWorker + - HeartbeatWorker + - DeleteWorker + - HijackContainer + - ReportWorkerContainers + - ReportWorkerVolumes + - CreateArtifact + - GetArtifact +``` + +## `pipeline-operator` role + +Team pipeline operators can perform pipeline operations such as triggering builds and pinning resources, however they +cannot update pipeline configurations. + +Actions assigned to the `pipeline-operator` role by default: + +```yaml +pipeline-operator: + - AbortBuild + - RerunJobBuild + - CreateJobBuild + - PauseJob + - UnpauseJob + - ClearTaskCache + - UnpinResource + - SetPinCommentOnResource + - CheckResource + - CheckResourceWebHook + - CheckResourceType + - EnableResourceVersion + - DisableResourceVersion + - PinResourceVersion + - PausePipeline + - UnpausePipeline + - ClearResourceCache +``` + +## `viewer` role + +Team viewers have "read-only" access to a team and its pipelines. This locks everything down, preventing users from +doing a [`fly set-pipeline`](https://concourse-ci.org/setting-pipelines.html#fly-set-pipeline) or [ +`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept). 
+ +Actions assigned to the `viewer` role by default: + +```yaml +viewer: + - GetConfig + - GetCC + - GetBuild + - GetCheck + - GetBuildPlan + - ListBuilds + - BuildEvents + - BuildResources + - GetBuildPreparation + - GetJob + - ListAllJobs + - ListJobs + - ListJobBuilds + - ListJobInputs + - GetJobBuild + - GetVersionsDB + - JobBadge + - MainJobBadge + - ListAllResources + - ListResources + - ListResourceTypes + - GetResource + - ListResourceVersions + - GetResourceVersion + - ListBuildsWithVersionAsInput + - ListBuildsWithVersionAsOutput + - GetResourceCausality + - ListAllPipelines + - ListPipelines + - GetPipeline + - ListPipelineBuilds + - PipelineBadge + - ListWorkers + - DownloadCLI + - GetInfo + - ListContainers + - GetContainer + - ListDestroyingContainers + - ListVolumes + - ListDestroyingVolumes + - ListTeams + - GetTeam + - ListTeamBuilds + - ListBuildArtifacts +``` + +## Action Matrix + +In this table, an action is marked as _customizable_ if it is possible to change its permissions by providing the +`--config-rbac` flag, documented below. Assigning an action to a role that is not customizable will have no effect on +its permissions. + +| Action | `fly` commands affected | UI actions affected | can be performed unauthenticated? 
| customizable | +|-------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|:---------------------------------:|:----------------:| +| GetBuild | n/a | view one-off build page | :material-check: | :material-check: | +| BuildResources | n/a | view build page | :material-check: | :material-check: | +| GetBuildPreparation | n/a | view build page | :material-check: | :material-check: | +| BuildEvents | [`fly watch`](https://concourse-ci.org/builds.html#fly-watch),[`fly execute`](https://concourse-ci.org/tasks.html#running-tasks) | view build page | :material-check: | :material-check: | +| GetBuildPlan | n/a | view build page | :material-check: | :material-check: | +| ListBuildArtifacts | n/a | n/a | :material-check: | :material-check: | +| AbortBuild | [`fly abort-build`](https://concourse-ci.org/builds.html#fly-abort-build) | abort button on build page | :material-close: | :material-check: | +| PruneWorker | [`fly prune-worker`](https://concourse-ci.org/administration.html#fly-prune-worker) | n/a | :material-close: | :material-check: | +| LandWorker | [`fly land-worker`](https://concourse-ci.org/administration.html#fly-land-worker) | n/a | :material-close: | :material-check: | +| RetireWorker | n/a | n/a | :material-close: | :material-close: | +| ListDestroyingVolumes | n/a | n/a | :material-close: | :material-close: | +| ListDestroyingContainers | n/a | n/a | :material-close: | :material-close: | +| ReportWorkerContainers | n/a | n/a | :material-close: | :material-close: | +| ReportWorkerVolumes | n/a | n/a | :material-close: | :material-close: | +| GetPipeline | n/a | view pipeline page | :material-check: | :material-check: | +| GetJobBuild | n/a | view build page | :material-check: | :material-check: | +| PipelineBadge | n/a | n/a | 
:material-check: | :material-check: | +| JobBadge | n/a | n/a | :material-check: | :material-check: | +| ListJobs | [`fly jobs`](https://concourse-ci.org/jobs.html#fly-jobs) | view pipeline page | :material-check: | :material-check: | +| GetJob | n/a | view job page | :material-check: | :material-check: | +| ListJobBuilds | [`fly builds`](https://concourse-ci.org/builds.html#fly-builds) | view job page | :material-check: | :material-check: | +| ListPipelineBuilds | [`fly builds`](https://concourse-ci.org/builds.html#fly-builds) | n/a | :material-check: | :material-check: | +| GetResource | n/a | view resource page | :material-check: | :material-check: | +| ListBuildsWithVersionAsInput | n/a | expand version on resource page | :material-check: | :material-check: | +| ListBuildsWithVersionAsOutput | n/a | expand version on resource page | :material-check: | :material-check: | +| GetResourceCausality | n/a | n/a | :material-check: | :material-check: | +| GetResourceVersion | n/a | n/a | :material-check: | :material-check: | +| ListResources | fly resources | view pipeline page | :material-check: | :material-check: | +| ListResourceTypes | n/a | n/a | :material-check: | :material-check: | +| ListResourceVersions | fly resource-versions,[`fly pin-resource`](https://concourse-ci.org/managing-resources.html#fly-pin-resource) | view resource page | :material-check: | :material-check: | +| CreateBuild | [`fly execute`](https://concourse-ci.org/tasks.html#running-tasks) | n/a | :material-close: | :material-check: | +| GetContainer | n/a | n/a | :material-close: | :material-check: | +| HijackContainer | [`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept) | n/a | :material-close: | :material-check: | +| ListContainers | [`fly containers`](https://concourse-ci.org/administration.html#fly-containers) | n/a | :material-close: | :material-check: | +| ListWorkers | [`fly workers`](https://concourse-ci.org/administration.html#fly-workers) | n/a | :material-close: | 
:material-check: | +| RegisterWorker | n/a | n/a | :material-close: | :material-close: | +| HeartbeatWorker | n/a | n/a | :material-close: | :material-close: | +| DeleteWorker | n/a | n/a | :material-close: | :material-close: | +| GetTeam | [`fly get-team`](https://concourse-ci.org/managing-teams.html#fly-get-team) | n/a | :material-close: | :material-check: | +| SetTeam | [`fly set-team`](https://concourse-ci.org/managing-teams.html#fly-set-team) | n/a | :material-close: | :material-check: | +| ListTeamBuilds | [`fly builds`](https://concourse-ci.org/builds.html#fly-builds) | n/a | :material-close: | :material-check: | +| RenameTeam | [`fly rename-team`](https://concourse-ci.org/managing-teams.html#fly-rename-team) | n/a | :material-close: | :material-check: | +| DestroyTeam | [`fly destroy-team`](https://concourse-ci.org/managing-teams.html#fly-destroy-team) | n/a | :material-close: | :material-check: | +| ListVolumes | [`fly volumes`](https://concourse-ci.org/administration.html#fly-volumes) | n/a | :material-close: | :material-check: | +| DownloadCLI | [`fly sync`](https://concourse-ci.org/fly.html#fly-sync) | icons on dashboard and pipeline pages | :material-check: | :material-close: | +| CheckResourceWebHook | n/a | n/a | :material-check: | :material-close: | +| GetInfo | n/a | n/a | :material-check: | :material-close: | +| GetCheck | [`fly check-resource`](https://concourse-ci.org/managing-resources.html#fly-check-resource),[`fly check-resource-type`](https://concourse-ci.org/managing-resource-types.html#fly-check-resource-type) | check button on resource page | :material-close: | :material-check: | +| ListTeams | [`fly teams`](https://concourse-ci.org/managing-teams.html#fly-teams) | view dashboard page | :material-check: | :material-close: | +| ListAllPipelines | n/a | view dashboard page | :material-check: | :material-close: | +| ListPipelines | [`fly pipelines`](https://concourse-ci.org/managing-pipelines.html#fly-pipelines) | n/a | :material-check: | 
:material-check: | +| ListAllJobs | [`fly teams`](https://concourse-ci.org/managing-teams.html#fly-teams) | view dashboard page | :material-check: | :material-close: | +| ListAllResources | n/a | view dashboard page | :material-check: | :material-close: | +| ListBuilds | [`fly builds`](https://concourse-ci.org/builds.html#fly-builds) | n/a | :material-check: | :material-close: | +| MainJobBadge | n/a | n/a | :material-check: | :material-close: | +| GetLogLevel | n/a | n/a | :material-close: | :material-close: | +| SetLogLevel | n/a | n/a | :material-close: | :material-close: | +| GetWall | n/a | n/a | :material-check: | :material-close: | +| SetWall | n/a | n/a | :material-close: | :material-close: | +| ClearWall | n/a | n/a | :material-close: | :material-close: | +| ListActiveUsersSince | [`fly active-users`](https://concourse-ci.org/managing-teams.html#fly-active-users) | n/a | :material-close: | :material-close: | +| GetInfoCreds | n/a | n/a | :material-close: | :material-close: | +| CheckResource | [`fly check-resource`](https://concourse-ci.org/managing-resources.html#fly-check-resource) | check button on resource page | :material-close: | :material-check: | +| CheckResourceType | [`fly check-resource-type`](https://concourse-ci.org/managing-resource-types.html#fly-check-resource-type) | n/a | :material-close: | :material-check: | +| CreateJobBuild | [`fly trigger-job`](https://concourse-ci.org/jobs.html#fly-trigger-job) | trigger button on job and build pages | :material-close: | :material-check: | +| RerunJobBuild | [`fly rerun-build`](https://concourse-ci.org/jobs.html#fly-rerun-build) | rerun button on build page | :material-close: | :material-check: | +| CreatePipelineBuild | [`fly execute`](https://concourse-ci.org/tasks.html#running-tasks) | n/a | :material-close: | :material-check: | +| DeletePipeline | [`fly destroy-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-destroy-pipeline) | n/a | :material-close: | :material-check: | +| 
DisableResourceVersion | [`fly disable-resource-version`](https://concourse-ci.org/managing-resources.html#fly-disable-resource-version) | version disable widget on resource page | :material-close: | :material-check: | +| EnableResourceVersion | [`fly enable-resource-version`](https://concourse-ci.org/managing-resources.html#fly-enable-resource-version) | version enable widget on resource page | :material-close: | :material-check: | +| PinResourceVersion | [`fly pin-resource`](https://concourse-ci.org/managing-resources.html#fly-pin-resource) | pin buttons on resource page | :material-close: | :material-check: | +| UnpinResource | fly unpin-resource | pin buttons on resource page | :material-close: | :material-check: | +| SetPinCommentOnResource | [`fly pin-resource`](https://concourse-ci.org/managing-resources.html#fly-pin-resource) | comment overlay on resource page | :material-close: | :material-check: | +| GetConfig | [`fly get-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-get-pipeline) | n/a | :material-close: | :material-check: | +| GetCC | n/a | n/a | :material-close: | :material-check: | +| GetVersionsDB | n/a | n/a | :material-close: | :material-check: | +| ListJobInputs | n/a | n/a | :material-close: | :material-check: | +| OrderPipelines | [`fly order-pipelines`](https://concourse-ci.org/managing-pipelines.html#fly-order-pipelines) | drag and drop on dashboard | :material-close: | :material-check: | +| OrderPipelinesWithinGroup | [`fly order-instanced-pipelines`](https://concourse-ci.org/instanced-pipelines.html#fly-order-instanced-pipelines) | drag and drop within instance group on dashboard | :material-close: | :material-check: | +| PauseJob | [`fly pause-job`](https://concourse-ci.org/jobs.html#fly-pause-job) | pause button on job page | :material-close: | :material-check: | +| PausePipeline | [`fly pause-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-pause-pipeline) | pause button on pipeline or dashboard | 
:material-close: | :material-check: | +| RenamePipeline | [`fly rename-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-rename-pipeline) | n/a | :material-close: | :material-check: | +| UnpauseJob | [`fly unpause-job`](https://concourse-ci.org/jobs.html#fly-unpause-job) | play button on job page | :material-close: | :material-check: | +| UnpausePipeline | [`fly unpause-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-unpause-pipeline) | play button on pipeline or dashboard | :material-close: | :material-check: | +| ExposePipeline | [`fly expose-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-expose-pipeline) | eyeball button on dashboard | :material-close: | :material-check: | +| HidePipeline | [`fly hide-pipeline`](https://concourse-ci.org/managing-pipelines.html#fly-hide-pipeline) | slashed eyeball button on dashboard | :material-close: | :material-check: | +| SaveConfig | [`fly set-pipeline`](https://concourse-ci.org/setting-pipelines.html#fly-set-pipeline) | n/a | :material-close: | :material-check: | +| ClearTaskCache | [`fly clear-task-cache`](https://concourse-ci.org/jobs.html#fly-clear-task-cache) | n/a | :material-close: | :material-check: | +| CreateArtifact | [`fly execute`](https://concourse-ci.org/tasks.html#running-tasks) | n/a | :material-close: | :material-check: | +| GetArtifact | [`fly execute`](https://concourse-ci.org/tasks.html#running-tasks) | n/a | :material-close: | :material-check: | +| ClearResourceCache | [`fly clear-resource-cache`](https://concourse-ci.org/managing-resources.html#fly-clear-resource-cache) | n/a | :material-close: | :material-check: | + +## Configuring RBAC + +!!! warning "Experimental Feature" + + Configuring RBAC is **experimental**, and this may change in the future. 
+ +It is possible to promote or demote the roles to which actions are assigned by passing the `--config-rbac` flag to the +`concourse web` command with a path to a `.yml` file, like the following: + +```shell +concourse web --config-rbac=/path/to/rbac/config.yml +``` + +This file should be a YAML map where the keys are role names (`owner`, `member`, `pipeline-operator`, and `viewer` are +valid). For each role, the value should be a list of actions. On startup, Concourse will assign each role to its +associated list of actions. + +For example, in the default configuration only pipeline-operators and above can abort builds. To restrict aborting +builds to only members and above, you could pass this as a `--config-rbac` file: + +```yaml +member: + - AbortBuild +``` + +On the other hand, only members and above can order pipelines by default. To extend this privilege down to +pipeline-operators, you can use a `--config-rbac` file like the following: + +```yaml +pipeline-operator: + - OrderPipelines +``` + +You do not need to specify a role for every possible action; if an action does not appear in the file, then the default +role (as described in the sections above) will be assigned to that action. Also, please avoid specifying the same action +under multiple roles in this file - it can have unpredictable results. 
\ No newline at end of file diff --git a/docs/docs/builds.md b/docs/docs/builds.md new file mode 100644 index 00000000..292eb325 --- /dev/null +++ b/docs/docs/builds.md @@ -0,0 +1,167 @@ +--- +title: Builds +--- + +A build is an execution of a _build plan_, which is either + +* configured as a sequence of [steps](steps/index.md) in a [job](jobs.md) +* generated by the [Resource Checker](internals/checker.md) to run + a [`check`](resource-types/implementing.md#check-check-for-new-versions) +* submitted directly to Concourse as a one-off build via [`fly execute`](tasks.md#running-tasks-with-fly-execute) + +Containers and volumes are created as [`get` steps](steps/get.md), [`put` steps](steps/put.md), +and [`task` steps](steps/task.md) run. When a build completes successfully, these containers go away. + +A failed build's containers and volumes are kept around so that you can debug the build +via [`fly intercept`](#fly-intercept). If the build belongs to a [job](jobs.md), the containers will go away when the +next build starts. If the build is a one-off, its containers will be removed immediately, so make sure you intercept +while it's running, if you want to debug. + +## Rerunning a Build + +Concourse supports build rerunning, which means to run a new build using the exact same set of input versions as the +original build. There are two ways to rerun a build: through the web UI on the builds page and through +the [`fly rerun-build`](jobs.md#fly-rerun-build). + +When a build is rerun, it will create a new build using the name of the original build with the rerun number appended to +it, e.g. `3.1` for the first rerun of build `3`. + +Rerun builds are ordered chronologically after the original build, rather than becoming a new "latest" build. Similarly, +when the scheduler is resolving [`passed`](steps/get.md) constraints that reference a job with rerun builds, those rerun +builds are processed in this same order. 
This ensures that the versions, which made it through a rerun build, do not +become the new "latest versions". Instead, they act as if the original build had succeeded at its point in the build +history. + +This may sound a little confusing, but the summary is that reruns should behave as if they replace the original failed +build. + +### Current caveats with rerunning + +The current implementation of rerunning is an early iteration with one key limitation: a rerun build will use the +**current state of the job config**, instead of running the exact build plan the original build ran with. + +This means that if the [`job.plan`](jobs.md#job-schema) has changed in a way that is backwards-incompatible, the rerun +build may error. For example, if a new input is added, its version will not be available as the original build did not +use it. + +## `fly builds` + +To list the most recent builds, run: + +```shell +fly -t example builds +``` + +To list the builds of a job, run: + +```shell +fly -t example builds -j pipeline-name/job-name +``` + +This can be useful for periodically monitoring the state of a job. The output also works well with tools like `awk` +and `grep`. + +By default, the most recent 50 builds are shown. To see more builds, use the `-c` flag, like so: + +```shell +fly -t example builds -c 100 +``` + +To see builds within a certain time range, you can use `--since` and `--until`. You can use one or both flags to filter +builds. The flags accept a time format of `yyyy-mm-dd HH:mm:ss`. + +```shell +fly -t example builds --since '2006-02-06 00:00:00' +``` + +## `fly intercept` + +Sometimes it's helpful to connect to the machine where tasks run. This way you can either profile or inspect tasks, or +see the state of the machine at the end of a run. Due to Concourse running tasks in containers on remote machines this +would typically be hard to access. 
+ +To this end, there is a `fly intercept` command that will give you an interactive shell inside the specified container. +Containers are identified by a few things, so you may need to specify a few flags to hone down the results. If there are +multiple containers that the flags could refer to, an interactive prompt will show up allowing you to disambiguate. + +For example, running the following will run a task and then enter the finished task's container: + +```shell +fly -t example execute +fly -t example intercept --step build +``` + +!!! tip "Windows Workers" + + When intercepting a task running on a Windows worker, you will need to specifically tell fly to run `powershell`: + + ```shell + fly -t example intercept powershell + ``` + +Containers are around for a short time after a build finishes in order to allow people to intercept them. + +You can also intercept builds that were run in your pipeline. By using `--job`, `--build`, and `--step` you can +intercept a specific step from a build of a job in your pipeline. These flags also have short forms, like so: + +```shell +fly -t example intercept -j some-pipeline/some-job -b some-build -s some-step +``` + +Note that `--build` can be omitted, and will default to the most recent build of the job. One-off builds can be reached +by passing in their build ID to `--build` which can be found on the build list page. + +The `--step` flag can also be omitted; this will let you pick the step interactively, if you don't know the exact name. + +Resource checking containers can also be intercepted with `--check` or `-c`: + +```shell +fly -t example intercept --check some-pipeline/some-resource +``` + +A specific command can also be given, e.g. `fly intercept ps auxf` or `fly intercept htop`. This allows for patterns +such as `watch fly intercept ps auxf`, which will continuously show the process tree of the current build's task, even +as the "current build" changes. 
+ +The working directory and any relevant environment variables (e.g. those having come +from [`task` step `params`](steps/task.md#task-step)) used by the original process will also be used for the process run +by intercept. + +## `fly abort-build` + +To abort a build of a job, run: + +```shell +fly -t example abort-build --job my-pipeline/my-job --build 3 +``` + +This will cancel build `3` of the `my-job` job in the `my-pipeline` pipeline. + +## `fly watch` + +Concourse emits streaming colored logs on the website, but it can be helpful to have the logs available to the command +line (e.g. so that they can be processed by other commands). + +The `watch` command can be used to do just this. You can also view builds that are running in your pipeline, or builds +that have already finished. + +Note that unlike [`fly execute`](tasks.md#running-tasks-with-fly-execute), killing `fly watch` via `SIGINT` or `SIGTERM` +will _not_ abort the build. + +To watch the most recent one-off build, just run `fly watch` with no arguments. To watch a specific build (one-off or +not), pass `--build` with the ID of the build to watch. This ID is available at the start +of [`fly execute`'s](tasks.md#running-tasks-with-fly-execute) output or by browsing to the builds list in the web UI. + +By using the `--job` and `--build` flags you can pick out a specific build of a job to watch. For example, the following +command will either show the archived logs for an old build, if it has finished running, or it will stream the current +logs, if the build is still in progress. + +```shell +fly -t example watch --job my-pipeline/tests --build 52 +``` + +If the `--job` flag is specified and `--build` is omitted, the most recent build of the specified job will be selected. + +If there is a mismatch between the `fly` and `web` versions, it is possible to run +into `failed to parse next event: unknown event type` error. The `--ignore-event-parsing-errors` flag can be passed to +ignore such errors. 
\ No newline at end of file diff --git a/docs/docs/config-basics.md b/docs/docs/config-basics.md new file mode 100644 index 00000000..973f36e9 --- /dev/null +++ b/docs/docs/config-basics.md @@ -0,0 +1,214 @@ +--- +title: Config Basics +--- + +Concourse configuration for things like [Pipelines](pipelines/index.md) and [Tasks](tasks.md) is done through +declarative [YAML](https://en.wikipedia.org/wiki/YAML) files. + +Concourse configuration supports basic variable substitution by way of `((vars))`. There is no built-in support for +fancier templating constructs, e.g. loops and conditionals; users are free to use whatever templating system they like. + +## Intro to YAML + +[YAML](https://yaml.org/) is a human-friendly syntax for writing structured documents. You can think of it as JSON +without the sharp edges. + +If you want a slightly more in-depth overview of YAML compared to what we provide below, we recommend +reading [Learn YAML in Y Minutes](https://learnxinyminutes.com/yaml/). + +Here's a quick example demonstrating common YAML syntax: + +```yaml +# commented lines are prefixed with the '#' character + +# strings +quoted_string: "bar" +unquoted_string: hello world! +multiline_string: | + hello, world! + this is one big string with a trailing linebreak! + +# arrays +array: [ hello, world ] +multiline_array: + - hello + - world + +# objects +object: { one: uno, two: dos } +multiline_object: + one: uno + two: dos + +# boolean values +booleans: [ true, false ] + +# numeric values +numeric: [ 1234, 12.34 ] +``` + +### YAML Tips & Tricks + +YAML anchor syntax can be used to avoid repetition within configuration. 
+ +For example, the following YAML document...: + +```yaml +large_value: &my_anchor + do_the_thing: true + with_these_values: [ 1, 2, 3 ] + +duplicate_value: *my_anchor +``` + +...is exactly equivalent to: + +```yaml +large_value: + do_the_thing: true + with_these_values: [ 1, 2, 3 ] + +duplicate_value: + do_the_thing: true + with_these_values: [ 1, 2, 3 ] +``` + +If you find yourself repeating configuration throughout your pipeline, it may be a sign that Concourse is missing some +kind of abstraction to make your pipeline less verbose. If you have the time and are interested in helping out with +Concourse's design, feedback of this sort is welcome +in [GitHub Discussions](https://github.com/concourse/concourse/discussions)! + +We do want to avoid implementing an entire YAML templating engine within Concourse. We encourage you to reach for your +favourite templating tool if you're eager about [DRYing](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) up your +pipelines as much as possible. + +### YAML Quirks + +YAML has some weird parts. For example, all the following terms are acceptable boolean values: `true`, `false`, `yes`, +`no`, `on`, `off`. + +YAML is also whitespace-sensitive. For the most part, this is really handy because it keeps you from having to count +curly-braces in deeply nested parts of configuration such as [ +`job.plan`](https://concourse-ci.org/jobs.html#schema.job.plan). Sometimes, though, it can be hard to keep track of the +correct indentation level. + +## Basic Schemas + +Throughout the Concourse documentation you will come across schema definitions for each API. + +The following are basic schema definitions that the other schemas refer to. You can probably skip past this and just +make assumptions along the way; this is just here for completeness! + +### `number` schema + +Any integer, i.e. `1000`. + +### `string` schema + +An arbitrary string value with no content restrictions. 
+ +### `config` schema + +An arbitrary object representing configuration that is not directly interpreted by Concourse - typically given to +a [resource type](resource-types/index.md). + +```yaml +uri: https://github.com/vito/booklit +branch: master +``` + +All object keys must be strings, preferably `snake_cased`. + +### `vars` schema + +An arbitrary object representing key-value definitions for `((vars))`. + +As with [`config` schema](#config-schema), all object keys must be strings, preferably `snake_cased`. + +### `env-vars` schema + +An object containing string keys and string values. Each pair represents an environment variable to set to the given +value. + +```yaml +SOME_SIMPLE_VAR: simple-var +SOME_LONG_VAR: | + This is an example of using YAML multi-line string syntax to set a very + long environment variable value. +SOME_NUMERIC_VALUE: "1" +``` + +Note that in the last example we took special care to quote the number. + +### `boolean` schema + +`true` or `false`. + +YAML also supports the aliases `yes`, `no`, `on`, or `off`, but ... please don't. + +### `value` schema + +An arbitrary YAML value. It may be a [`number` schema](#number-schema), [`string` schema](#string-schema), [
`boolean` schema](#boolean-schema), [`config` schema](#config-schema), or an array of [`value` schema](#value-schema) values. + +```yaml +values: + - 123 + - bar + - true + - key1: abc + key2: def + - [ hello, world ] +``` + +### `identifier` schema + +An _identifier_ is a string value. The following defines the allowed character set for an _identifier_: + +* Unicode lowercase letters, while still supporting languages that don't have any casing (e.g. Japanese). +* Decimal numbers. +* Hyphens `-` and underscores `_` as word separators. +* Periods `.` in order to support domain names and version numbers. 
+ +The validation rule is as follows: + +```regexp +^[\p{Ll}\p{Lt}\p{Lm}\p{Lo}\d][\p{Ll}\p{Lt}\p{Lm}\p{Lo}\d\-_.]*$ +``` + +Where all _identifier_ must start with a Unicode lowercase letter or number, followed by any number of allowed +characters. + +Currently, the validation will only show as warnings. For the sake of future-proofing, you may want to conform to it. + +### `dir-path` schema + +A string value specifying a (typically relative) path of a directory. + +### `file-path` schema + +A string value specifying a (typically relative) path of a file. + +### `duration` schema + +A string value in [Go `time.ParseDuration` format](https://golang.org/pkg/time/#ParseDuration). `1h` for one hour, `5m` +for 5 minutes. + +### `version` schema + +An object with string keys and string values. + +The following is an array of versions: + +```yaml +- { "ref": "33042e15e930b6786fc9b0a9ea5dec78689c5e4b" } +- ref: v1.2.0, + sha: 33042e15e930b6786fc9b0a9ea5dec78689c5e4b +- foo: "0" +``` + +Note that in the last example we took special care to quote the number. + +In many scenarios where a version can be specified, e.g. [`get` step +`version`](https://concourse-ci.org/get-step.html#schema.get.version), only a subset of the full set of fields is +necessary. The latest version matching the fields specified will be chosen. diff --git a/docs/docs/fly.md b/docs/docs/fly.md new file mode 100644 index 00000000..fb7c8999 --- /dev/null +++ b/docs/docs/fly.md @@ -0,0 +1,178 @@ +--- +title: The fly CLI +--- + +# The `fly` CLI + +The first step to getting started with Concourse is to install the `fly` CLI tool. You can download `fly` from any +Concourse installation by clicking the download link in the bottom-right corner of the web UI. + +Throughout the Concourse documentation we'll stick to the long-form name of every command and flag. Once you've learned +what the commands do, you may want to consult `fly -h` to learn the short forms. 
+ +## `fly login` + +The first thing you'll want to do is authenticate with your target. This is done with the `fly login` command. This is +also useful to save targets under a more convenient alias, so you don't have to type out the URL all the time: + +The `login` command serves double duty: it authenticates with a given endpoint, and saves it under a more convenient +name. The name and token are stored in `~/.flyrc` (though you shouldn't really edit the file manually). + +Concourse deployments can be occupied by multiple [teams](auth-and-teams/index.md). To specify the team to which to +log in, specify the `--team-name` or `-n` flag. If not specified, this defaults to the [ +`main` team](auth-and-teams/main-team.md). + +So, to log in to a team `my-team` an endpoint served at `https://ci.example.com` and save it as the more convenient name +`example`, you would run: + +```shell +fly --target example login --team-name my-team \ + --concourse-url https://ci.example.com +``` + +The `login` command will see which authentication methods are available for the specified team and prompt you to choose +one. For basic auth, it will ask your username and password and use them to acquire a token. For OAuth, it will give you +a link to click, and after you've gone through the OAuth flow it will print an OAuth token on the page that you can then +copy and paste into the prompt. + +Note that if no authentication methods are configured, `fly` will acquire a token without any prompting. You can then +use the alias like normal. + +In any case, a token is saved in your `~/.flyrc`, which will expire after one day. 
+ +If your Concourse uses SSL but does not have a certificate signed by a trusted CA, you can use the `--ca-cert` flag so +that `fly` can trust the connection, like so: + +```shell +fly -t example login -c https://ci.example.com --ca-cert ./ca.crt +``` + +This will read the value out of the file `./ca.crt` and save it into `~/.flyrc` so you don't have to pass it on every +`login` invocation. + +If your Concourse instance is protected by a proxy server requiring client certificates, you can use `--client-cert` and +`--client-key` to point to where your certificate is stored. These paths will be stored in `.flyrc` and the certificate +will be attached to every request made to that target. + +```shell +fly -t example login -c https://ci-example.com \ + --client-cert ./client.pem \ + --client-key ./client.key +``` + +After you've logged in you can use `--target example` (or `-t example` for short) to run a command against the saved +target `example`. For example, `fly -t example builds` will list the last few builds on the `example` Concourse +instance. + +The `-t` flag is intentionally stateless and must be explicitly added to each command. This reduces the risk of +accidentally running a command against the wrong environment when you have multiple targets defined. + +## `fly targets` + +To see what targets are currently known to `fly`, run: + +```shell +fly targets +``` + +This will show each target's name, URL, and when its token expires. + +## `fly status` + +To check your current authentication status with a given target, run: + +```shell +fly -t example status +``` + +This will let you know if the token has expired. 
+ +## `fly userinfo` + +To check what user you're logged in as, as well as which teams you are currently authenticated to and which roles within +each team you have, run: + +```shell +fly -t example userinfo +``` + +## `fly logout` + +To clear out your token for a given target, run: + +```shell +fly -t example logout +``` + +To clear out your token for all targets, run: + +```shell +fly logout -a +``` + +!!! note + + These two variations are mutually exclusive. If the target parameter `-t` and all parameter `-a` are both + specified, an error will occur. + +## `fly edit-target` + +To modify a target's name, team, or URL, run: + +```shell +fly -t example edit-target \ + --target-name new-name \ + --concourse-url https://ci.example.com \ + --team-name my-team +``` + +Each flag is optional - only the specified flags will be changed. + +## `fly delete-target` + +When logging out just isn't enough, a target can be completely removed from `~/.flyrc` by running: + +```shell +fly -t example delete-target +``` + +To delete _all_ targets, run: + +```shell +fly delete-target -a +``` + +!!! note + + These two variations are mutually exclusive. If the target parameter `-t` and all parameter `-a` are both + specified, an error will occur. + +## `fly sync` + +Occasionally we add additional features to `fly` or make changes to the communication between it and Concourse's API +server. To make sure you're running the latest and greatest version that works with the Concourse you are targeting we +provide a command called `sync` that will update your local `fly`. It can be used like so: + +```shell +fly -t example sync +``` + +The `fly` command will also warn you if it notices that your CLI version is out of sync with the server. + +## `fly completion` + +Fly can output autocomplete configuration for some shells. 
For example, you can add an entry to your `.bashrc` like +this: + +```shell +source <(fly completion --shell bash) +``` + +or, using the `/etc/bash_completion.d` directory: + +```shell +fly completion --shell bash > /etc/bash_completion.d/fly +``` + +Note that, unlike other fly commands, this command does not interact with a remote server so you do not need to provide +the `-t` or `--target` flag. diff --git a/docs/docs/getting-started/assets/github-release.png b/docs/docs/getting-started/assets/github-release.png new file mode 100644 index 00000000..89729778 Binary files /dev/null and b/docs/docs/getting-started/assets/github-release.png differ diff --git a/docs/docs/getting-started/assets/hello-world-first-build.gif b/docs/docs/getting-started/assets/hello-world-first-build.gif new file mode 100644 index 00000000..9955dc19 Binary files /dev/null and b/docs/docs/getting-started/assets/hello-world-first-build.gif differ diff --git a/docs/docs/getting-started/assets/hello-world-passing-artifacts.png b/docs/docs/getting-started/assets/hello-world-passing-artifacts.png new file mode 100644 index 00000000..cac46f9c Binary files /dev/null and b/docs/docs/getting-started/assets/hello-world-passing-artifacts.png differ diff --git a/docs/docs/getting-started/assets/resource-pipeline-done.png b/docs/docs/getting-started/assets/resource-pipeline-done.png new file mode 100644 index 00000000..e1a8e2ca Binary files /dev/null and b/docs/docs/getting-started/assets/resource-pipeline-done.png differ diff --git a/docs/docs/getting-started/assets/resource-pipeline-fix-tests.gif b/docs/docs/getting-started/assets/resource-pipeline-fix-tests.gif new file mode 100644 index 00000000..269f1d77 Binary files /dev/null and b/docs/docs/getting-started/assets/resource-pipeline-fix-tests.gif differ diff --git a/docs/docs/getting-started/assets/resource-pipeline-tests-trigger.gif b/docs/docs/getting-started/assets/resource-pipeline-tests-trigger.gif new file mode 100644 index 00000000..921d7a9d 
Binary files /dev/null and b/docs/docs/getting-started/assets/resource-pipeline-tests-trigger.gif differ diff --git a/docs/docs/getting-started/assets/resource-publish-job.png b/docs/docs/getting-started/assets/resource-publish-job.png new file mode 100644 index 00000000..e82947b0 Binary files /dev/null and b/docs/docs/getting-started/assets/resource-publish-job.png differ diff --git a/docs/docs/getting-started/assets/resource-publish-logs.png b/docs/docs/getting-started/assets/resource-publish-logs.png new file mode 100644 index 00000000..c3d98945 Binary files /dev/null and b/docs/docs/getting-started/assets/resource-publish-logs.png differ diff --git a/docs/docs/getting-started/assets/resource-version-web-ui.png b/docs/docs/getting-started/assets/resource-version-web-ui.png new file mode 100644 index 00000000..1497130a Binary files /dev/null and b/docs/docs/getting-started/assets/resource-version-web-ui.png differ diff --git a/docs/docs/getting-started/assets/resources-pipeline-tests-trigger.png b/docs/docs/getting-started/assets/resources-pipeline-tests-trigger.png new file mode 100644 index 00000000..39cc0546 Binary files /dev/null and b/docs/docs/getting-started/assets/resources-pipeline-tests-trigger.png differ diff --git a/docs/docs/getting-started/assets/resources-pipeline-tests.png b/docs/docs/getting-started/assets/resources-pipeline-tests.png new file mode 100644 index 00000000..816fb15b Binary files /dev/null and b/docs/docs/getting-started/assets/resources-pipeline-tests.png differ diff --git a/docs/docs/getting-started/assets/trigger-tests-job-get-repo.gif b/docs/docs/getting-started/assets/trigger-tests-job-get-repo.gif new file mode 100644 index 00000000..af943b99 Binary files /dev/null and b/docs/docs/getting-started/assets/trigger-tests-job-get-repo.gif differ diff --git a/docs/docs/getting-started/assets/trigger-tests-job-go-test.gif b/docs/docs/getting-started/assets/trigger-tests-job-go-test.gif new file mode 100644 index 00000000..d5bf2810 
Binary files /dev/null and b/docs/docs/getting-started/assets/trigger-tests-job-go-test.gif differ diff --git a/docs/docs/getting-started/assets/welcome-screen.png b/docs/docs/getting-started/assets/welcome-screen.png new file mode 100644 index 00000000..610362ff Binary files /dev/null and b/docs/docs/getting-started/assets/welcome-screen.png differ diff --git a/docs/docs/getting-started/hello-world.md b/docs/docs/getting-started/hello-world.md new file mode 100644 index 00000000..56c2b595 --- /dev/null +++ b/docs/docs/getting-started/hello-world.md @@ -0,0 +1,176 @@ +--- +title: Hello World Pipeline +--- + +## Creating a Pipeline + +Let's start where all tutorials start, with a `Hello World!` example! + +In this section you're going to create a pipeline that simply prints `Hello world!` to the console. While building up +the pipeline we will pause to explain the core pieces of the pipeline. + +Let's first answer: **what is a pipeline made up from?** + +The simplest Concourse [pipeline](https://concourse-ci.org/pipelines.html) is made of two objects: + +- An unordered list of Jobs which contains... +- An ordered list of Steps + +If you've used other pipeline building tools in the past, then what you think of as a pipeline is probably most similar +to a [job](https://concourse-ci.org/jobs.html) in Concourse. + +For our `Hello World!` pipeline we will need **one job** with **one step**. This is the smallest pipeline you can make +in Concourse. The single step is what will print `Hello World!` to the console. + +Create and open a new file called `hello-world.yml`. Inside that file let's add the first top-level +key, [jobs](https://concourse-ci.org/jobs.html). + +```yaml +jobs: +``` + +The jobs key is where we define the list of [jobs](https://concourse-ci.org/jobs.html) that should make up our pipeline. +The order of the jobs does not matter. 
**The order of jobs does not define the structure of the pipeline.** We'll get +into pipeline structure and job ordering later when we talk about [Resources](https://concourse-ci.org/resources.html) +and passed constraints. + +## Add a job + +We only need one job right now so let's add a single job named `hello-world-job`. + +```yaml +jobs: + - name: hello-world-job +``` + +Awesome, we have a [job](https://concourse-ci.org/jobs.html) named `hello-world-job`! Now we need to add +a [`step`](https://concourse-ci.org/steps.html) to our job. To define a list +of [steps](https://concourse-ci.org/steps.html) a job should execute, we need to add +the [plan](https://concourse-ci.org/jobs.html#schema.job.plan) key to our job. + +```yaml +jobs: + - name: hello-world-job + plan: +``` + +## Add a Step + +Unlike [jobs](https://concourse-ci.org/jobs.html), the order of [steps](https://concourse-ci.org/steps.html) **does +matter!** Concourse will run the [steps](https://concourse-ci.org/steps.html) in the order that they are listed. Let's +_carefully_ place a [task step](https://concourse-ci.org/task-step.html#schema.task.task) as the first (and only) step +in our job. + +```yaml +jobs: + - name: hello-world-job + plan: + - task: hello-world-task +``` + +Fantastic! Now we need to tell Concourse _how_ to run our task step. We do that by providing +a [task config](https://concourse-ci.org/tasks.html#schema.task-config). + +```yaml +jobs: + - name: hello-world-job + plan: + - task: hello-world-task + config: +``` + +At this point we are going to pause to explain steps a bit more. + +## What is a step? + +A step is a single container running on a [Concourse worker](../install/running-worker.md). +Each [step](https://concourse-ci.org/steps.html) in a [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) +runs in its own container. You can run anything you want inside the container (_i.e. run my tests, run this bash script, +build this image, etc._). 
+ +So if you have a job with five steps Concourse will create five containers, one for each step. Therefore, we need to +tell Concourse the following about each step: + +- What type of [worker](../install/running-worker.md) to run the task on (linux/windows/darwin) +- What container image to use (`Linux only`) +- What command to run inside the container + +!!! info + + Concourse currently only supports Linux containers. Concourse does not yet run Windows containers. + Darwin does not have native containers. + +## Fill in the Task Config + +Let's answer the previous three questions for our `hello-world-task`: + +- **What type of [worker](../install/running-worker.md) to run the task on (linux/windows/darwin)** + - Linux, because our docker-composed Concourse only has one linux worker. You can verify this by + running `fly -t tutorial workers` +- **What container image to use** (_Linux only_) + - We'll use the super small [busybox image](https://hub.docker.com/_/busybox) +- **What command to run inside the container** + - `echo "Hello world!"` + +You can view the [task documentation](https://concourse-ci.org/tasks.html) to see all configurable options for tasks. +For now, you can add the following [task config](https://concourse-ci.org/tasks.html#schema.task-config) to the step. + +```yaml +jobs: + - name: hello-world-job + plan: + - task: hello-world-task + config: + # Tells Concourse which type of worker this task should run on + platform: linux + # This is one way of telling Concourse which container image to use for a + # task. We'll explain this more when talking about resources + image_resource: + type: registry-image + source: + repository: busybox # images are pulled from docker hub by default + tag: latest + # The command Concourse will run inside the container + # echo "Hello world!" + run: + path: echo + args: [ "Hello world!" ] +``` + +## Run the pipeline + +That's the whole pipeline! 
You can now set it, unpause, and trigger it using +the [fly cli](https://concourse-ci.org/fly.html). You can then view it from +the [web ui](http://localhost:8080/teams/main/pipelines/hello-world/jobs/hello-world-job). + +```shell +fly -t tutorial set-pipeline -p hello-world -c hello-world.yml +# pipelines are paused when first created +fly -t tutorial unpause-pipeline -p hello-world +# trigger the job and watch it run to completion +fly -t tutorial trigger-job --job hello-world/hello-world-job --watch +``` + +You'll see extra output than what we're showing below (the busybox image being downloaded) but the last four lines will +be the task executing. + +``` +selected worker: 701785fa43a1 +running echo Hello world! +Hello world! +succeeded +``` + +You can also view the build from the web UI by clicking on the job and expanding the `hello-world-task` step. + +![](assets/hello-world-first-build.gif) + +Congratulations on building your first Concourse pipeline! + +In the next section we will build upon what we have learned about tasks and introduce inputs and outputs, which allow +you to pass files between tasks. + +!!! note + + If you have any feedback for this tutorial please share it in + [this GitHub discussion](https://github.com/concourse/concourse/discussions/7353). \ No newline at end of file diff --git a/docs/docs/getting-started/index.md b/docs/docs/getting-started/index.md new file mode 100644 index 00000000..04a42047 --- /dev/null +++ b/docs/docs/getting-started/index.md @@ -0,0 +1,28 @@ +--- +title: Getting Started +--- + +This tutorial will guide you through the basics of creating Concourse pipelines. You will use a local instance of +Concourse running on your machine to run pipelines. 
+ +Before getting started you should have the following installed: + +- [Docker](https://docs.docker.com/get-docker/) +- [Docker-compose](https://docs.docker.com/compose/install/) + +This tutorial assumes you understand what Linux containers are and how to work with them. If you know what a Dockerfile +is and how to make your own then you're probably good to jump into this tutorial. If you're not familiar with Linux +containers then you may want to [get started with Docker](https://docs.docker.com/get-started/) first before diving into +this tutorial. + +It will also help if you know how to read YAML. We have a +quick [Intro to YAML](https://concourse-ci.org/config-basics.html#intro-to-yaml) if you're not familiar with the syntax. + +!!! note + + If you have any feedback for this tutorial please share it in + [this GitHub discussion](https://github.com/concourse/concourse/discussions/7353). + + If you get stuck at any point, you can try asking for help on our [Discord server](https://discord.gg/MeRxXKW) + in the #need-help channel. + diff --git a/docs/docs/getting-started/inputs-outputs.md b/docs/docs/getting-started/inputs-outputs.md new file mode 100644 index 00000000..4da5f2b1 --- /dev/null +++ b/docs/docs/getting-started/inputs-outputs.md @@ -0,0 +1,170 @@ +--- +title: Inputs and Outputs +--- + +## Overview + +This section is going to go over how to pass data between different [steps](https://concourse-ci.org/steps.html) in +a [job](https://concourse-ci.org/jobs.html). We'll continue building on our `hello-world.yml` pipeline. + +In the previous section we learned that [steps](https://concourse-ci.org/steps.html) are where we tell Concourse what to +run (_i.e. run my tests, run this bash script, build this image, etc._). We are going to expand on the concept +of [steps](https://concourse-ci.org/steps.html) and show you how to pass artifacts/files +between [tasks](https://concourse-ci.org/tasks.html) using `inputs` and `outputs`. 
+ +## What are inputs and outputs + +The simple answer is that inputs and outputs are directories that get passed between steps. We'll refer to both inputs +and outputs as **artifacts**. + +Let's start exploring how artifacts work by adding +a [`task-config.outputs`](https://concourse-ci.org/tasks.html#schema.task-config.outputs) to our `hello-world-task`. + +```yaml +jobs: + - name: hello-world-job + plan: + - task: hello-world-task + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + tag: latest + # Adds a "the-artifact" directory to our task + outputs: + - name: the-artifact + run: + # Change the command to `ls -lF` to see + # what the task sees in its working directory + path: ls + args: [ "-lF" ] +``` + +Update the pipeline and trigger the job: + +```shell +$ fly -t tutorial set-pipeline -p hello-world -c hello-world.yml +$ fly -t tutorial trigger-job --job hello-world/hello-world-job --watch +... +selected worker: 57d7419112ca +running ls -lF +total 4 +drwxr-xr-x 2 root root 4096 Apr 8 16:42 the-artifact/ +succeeded +``` + +We can see that in the task's [current working directory](https://en.wikipedia.org/wiki/Working_directory) there is now +a folder called `the-artifact`. Concourse makes output directories for you and will pass any contents inside the folder +onto later steps. Let's see how that works next. + +## Passing outputs to another task + +To pass artifacts from one task to another, the first task must declare an output. The second task must then declare an +input with the exact same name. 
Let's update the pipeline to do the following: + +* Have the first task create a file inside `the-artifact` +* Create a second task to read the file inside `the-artifact` from the previous step + +```yaml +jobs: + - name: hello-world-job + plan: + - task: hello-world-task + config: + platform: linux + image_resource: &image # Declaring a YAML anchor + type: registry-image + source: + repository: busybox + tag: latest + outputs: + - name: the-artifact + run: + # This is a neat way of embedding a script into a task + path: sh + args: + - -cx + - | + ls -l . + echo "hello from another step!" > the-artifact/message + # Add a second task that reads the contents of the-artifact/message + - task: read-the-artifact + config: + platform: linux + image_resource: *image + # To receive "the-artifact", specify it as an input + inputs: + - name: the-artifact + run: + path: sh + args: + - -cx + - | + ls -l . + cat the-artifact/message +``` + +Update the pipeline and trigger the job: + +```shell +$ fly -t tutorial set-pipeline -p hello-world -c hello-world.yml +$ fly -t tutorial trigger-job --job hello-world/hello-world-job --watch + +initializing +selected worker: 57d7419112ca +running sh -cx ls -l . +echo "hello from another step!" > the-artifact/message + ++ ls -l . +total 4 +drwxr-xr-x 2 root root 4096 Feb 26 19:09 the-artifact ++ echo 'hello from another step!' +initializing +selected worker: 57d7419112ca +running sh -cx ls -l . +cat the-artifact/message + ++ ls -l . +total 4 +drwxr-xr-x 1 root root 4096 Feb 26 19:09 the-artifact ++ cat the-artifact/message +hello from another step! +succeeded +``` + +It's a bit hard to see when the first task stops and the second one starts in the terminal. Looking at the build from +the web UI makes this clearer: + +![Hello World Passing Artifacts](assets/hello-world-passing-artifacts.png) + +With the above pipeline we can see that the file made in the first step is made available in the second step via +the `the-artifact`. 
+ +## How does Concourse track artifacts? + +As Concourse is running the steps in your job, it is creating a list of **named artifacts**. Let's see what that looks +like for the pipeline we just ran. + +* Concourse runs the task step `hello-world-task` + with [output](https://concourse-ci.org/tasks.html#schema.task-config.outputs) `the-artifact` + +!!! note "" + + Concourse creates an empty artifact, assigns it the name `the-artifact`, and mounts it inside the task container. + +* Concourse runs the task step `read-the-artifact` + with [input](https://concourse-ci.org/tasks.html#schema.task-config.inputs) `the-artifact` + +!!! note "" + + Concourse looks up, in its list of artifacts for the build, for an artifact named `the-artifact`, and mounts + it inside the task container. If no input with that name is found then the build would fail. + +The next section will introduce you to the concept of [Resources](https://concourse-ci.org/resources.html). + +!!! note + + If you have any feedback for this tutorial please share it in + [this GitHub discussion](https://github.com/concourse/concourse/discussions/7353). \ No newline at end of file diff --git a/docs/docs/getting-started/quick-start.md b/docs/docs/getting-started/quick-start.md new file mode 100644 index 00000000..82e8369b --- /dev/null +++ b/docs/docs/getting-started/quick-start.md @@ -0,0 +1,85 @@ +--- +title: Quick Start +--- + +## Docker Compose Concourse + +Concourse is distributed as a single concourse binary, making it easy to run just about anywhere, especially with +Docker. + +If you'd like to get Concourse running somewhere quickly, so you can start to kick the tires, the easiest way is to use +our [docker-compose.yml](../../docker-compose.yml): + +``` +$ curl -O https://concourse-ci.org/docker-compose.yml +$ docker-compose up -d +Creating docs_concourse-db_1 ... +Creating docs_concourse-db_1 ... done +Creating docs_concourse_1 ... +Creating docs_concourse_1 ... 
done +``` + +Concourse will be running at [localhost:8080](http://localhost:8080/) on your machine. You can log in with the +username/password as `test`/`test`. + +![Concourse Landing Page](assets/welcome-screen.png) + +## Install Fly + +Next, install the [`fly` CLI](../fly.md) by downloading it from the web UI. If you're on +version >=v7.14.0 of Concourse, you can visit [http://localhost:8080/download-fly](http://localhost:8080/download-fly). + +Otherwise, you can follow these steps to install fly for your OS: + +=== "Linux" + + ``` sh linenums="1" + curl 'http://localhost:8080/api/v1/cli?arch=amd64&platform=linux' -o fly + chmod +x ./fly + mv ./fly /usr/local/bin/ + ``` + +=== "MacOS" + + ``` sh linenums="1" + curl 'http://localhost:8080/api/v1/cli?arch=amd64&platform=darwin' -o fly + chmod +x ./fly + mv ./fly /usr/local/bin/ + ``` + +=== "Windows (Powershell)" + + ``` ps1 linenums="1" + $concoursePath = 'C:\concourse\' + mkdir $concoursePath + [Environment]::SetEnvironmentVariable('PATH', "$ENV:PATH;${concoursePath}", 'USER') + $concourseURL = 'http://localhost:8080/api/v1/cli?arch=amd64&platform=windows' + Invoke-WebRequest $concourseURL -OutFile "${concoursePath}\fly.exe" + ``` + +Use [`fly login`](../fly.md#fly-login) to log into your local Concourse as the `test` user: + +``` +fly -t tutorial login -c http://localhost:8080 -u test -p test +``` + +You've successfully logged in if you see the following output: + +``` +logging in to team 'main' + +target saved +``` + +You'll notice that every fly command in this tutorial has to have +the [target (`-t tutorial`)](../fly.md#fly-targets) specified. This is annoying when you only have +one Concourse to target, but it helps ensure you don't trigger a job on the wrong Concourse instance. It will save you +from hurting yourself! + +Once you've confirmed everything is up and running by logging in through fly and the [web UI](http://localhost:8080/), +you can move onto the next section. + +!!! 
note + + If you have any feedback for this tutorial please share it in this + [GitHub discussion](https://github.com/concourse/concourse/discussions/7353) \ No newline at end of file diff --git a/docs/docs/getting-started/resources.md b/docs/docs/getting-started/resources.md new file mode 100644 index 00000000..ca07b5af --- /dev/null +++ b/docs/docs/getting-started/resources.md @@ -0,0 +1,584 @@ +--- +title: Resources +--- + +## The Heart of Concourse + +Resources are the heart of Concourse. Resources make Concourse tick and are the source of automation within all +Concourse pipelines. Resources are how Concourse interacts with the outside world. Here's a short list of things that +resources can do: + +* Run a job every five minutes: [Time resource](https://github.com/concourse/time-resource/). +* Run tests on new commits to the main branch: [Git resource](https://github.com/concourse/git-resource). +* Publish a new release of your app on + Github: [GitHub Release resource](https://github.com/concourse/github-release-resource/). +* Pull or push the latest image of your + app: [Registry-image resource](https://github.com/concourse/registry-image-resource/) + +Resources can do a lot of things! The main goal of resources is to represent some external system or object in your +pipeline. That external thing can then be used as a trigger for your Jobs or your Jobs can push back and modify the +external system or object. It all depends on the resource you use and what features its author has implemented. + +Resources are also how Concourse tries to stay as technology agnostic as possible. For example: Concourse doesn't care +what [version control system](https://en.wikipedia.org/wiki/Version_control) you store your code in, if you deploy apps +with [Helm](https://helm.sh/) or [Terraform](https://www.terraform.io/), or what language your apps are built in. 
If you +can put your latest and hottest tech behind the resource interface then Concourse can understand your workflow. + +The Concourse team bundles a few basic resource types that come with the Linux release that you +can [download from GitHub](https://github.com/concourse/concourse/releases). You'll notice that the Linux tarball is +much larger than the macOS or Windows tarball because of all the bundled resources. + +!!! note + + You can find out which resources a worker has by running: + + ```shell + fly -t tutorial workers --details + ``` + +Resources only run on Linux workers because resources are distributed as Linux container images. There are currently no +resources for macOS or Windows. Only [task steps](https://concourse-ci.org/task-step.html) can run on macOS or Windows +workers. + +## Versions + +Resources represent the external system or object to Concourse by +emitting [versions](https://concourse-ci.org/config-basics.html#schema.version). When a new version is emitted by a +resource, that is how Concourse knows to start jobs connected to the resource. + +A version is a map of key-value strings that a resource generates to uniquely identify the state of the external system +or object. + +For example, the git resource emits versions based on the SHA of new commits it finds. A single version from the git +resource will look like this to Concourse. + +```json +{ + "ref": "ce63af135a85029153ebd0f5dfe42c5481641b74" +} +``` + +Which looks like this in the web UI: + +![Resource Version Web UI](assets/resource-version-web-ui.png) + +Let's start digging into resources a bit more by going over the resource interface. + +## Resource Interface + +Resources are [container images](https://en.wikipedia.org/wiki/Open_Container_Initiative) that contain three +executables. 
Each executable is run by a different type of [step](https://concourse-ci.org/steps.html) within a
pipeline:

* `/opt/resource/check` - implicitly run when a [job](https://concourse-ci.org/jobs.html) contains
  a [get step](https://concourse-ci.org/get-step.html). Should return the latest version from the external system or
  object. Its responsibility is to find new versions. Is never part of a Job's build plan.
* `/opt/resource/in` - run in a [get step](https://concourse-ci.org/get-step.html). `in` is given a specific version (
  generated by a `check` or [`put` step](https://concourse-ci.org/put-step.html)) and retrieves the files and
  information representing that version from the external system or object.
* `/opt/resource/out` - run in a [put step](https://concourse-ci.org/put-step.html). Generates a new version, usually
  based on some input generated by another step in the job. Depending on the resource, this may mean sending something
  to the external system. For the git resource, this means pushing commits to the external git repository.

That's a high-level overview of the resource interface, which should help you understand what's going on with resources
when we start using them in the next section.

## Automatically Triggering Jobs With Get Steps

We're going to create a new pipeline now. This pipeline is going to:

* Fetch commits from a Git repository that contains an app
* Run some unit tests for that app
* Publish our app to GitHub

We have a very basic app in our [github.com/concourse/examples](https://github.com/concourse/examples) repo that we'll
use in your pipeline. You should fork this repository so you can continue following along. We're going to build the Go
app under the `apps/golang/` directory. 
+

When creating a new [Job](https://concourse-ci.org/jobs.html) or [Pipeline](https://concourse-ci.org/pipelines.html), it
can be helpful to think of all the external things the job will need in order to run, and declare them
as [Resources](https://concourse-ci.org/resources.html) in our pipeline. We know we'll need our Git repository with our
app's code, so we'll declare that as a resource first.

We will use the [git resource](https://github.com/concourse/git-resource/) to represent our Git repository.
The `README.md` contains the documentation for how to use the resource.

```yaml
resources:
  - name: repo
    type: git
    source:
      uri: https://github.com/concourse/examples.git
      username: 
      password: github_pat_...
```

!!! note

    You'll need to generate a Personal Access Token for GitHub in order to complete this tutorial. You can make a
    fine-grained access token on [this page](https://github.com/settings/personal-access-tokens). The only permission
    required is `read-write` access to `content`. You may scope the token to your fork of the `examples` repository.

We've added a top-level key to our pipeline called [resources](https://concourse-ci.org/resources.html), which takes an
unordered list of resources that can be referenced by jobs in our pipeline.

When declaring a resource, Concourse only requires you to declare
the [`name`](https://concourse-ci.org/resources.html#schema.resource.name)
and [`type`](https://concourse-ci.org/resources.html#schema.resource.type). The `name` is how jobs will reference the
resource.

Depending on the resource type you're using, the `source` will likely have one or more required fields. This is specific
to each resource type, so refer to the documentation of the resource type to find out what fields are required.

Next, we can add a job that references our resource. Let's add a job to our pipeline that, for now, downloads our git
repo. 
We'll use the [`get` step](https://concourse-ci.org/get-step.html) to do this. + +```yaml +resources: + - name: repo + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + +jobs: + - name: tests + plan: + - get: repo +``` + +Let's set and run this pipeline. + +```shell +fly -t tutorial set-pipeline --pipeline go-app --config pipeline.yml +fly -t tutorial unpause-pipeline --pipeline go-app +``` + +To change things up, let's trigger our `tests` job from the web UI. From the dashboard you can click on the pipeline +header to see the entire pipeline. You'll see two boxes, the left box representing our repo resource, and the right box +representing our `tests` job. + +Clicking on the `tests` job will take you to the [Builds](https://concourse-ci.org/builds.html) page of the job. In the +top-right corner there will be a plus-sign button you can click to trigger the job. Click that button and wait for the +job to start and complete. You can click the [`get` step](https://concourse-ci.org/get-step.html) to expand it and see +the metadata fetched by the `git` resource. + +![](assets/trigger-tests-job-get-repo.gif) + +```yaml +resources: + - name: repo + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + +jobs: + - name: tests + plan: + - get: repo + - task: tests + config: + platform: linux + image_resource: + type: registry-image + source: # Use the Golang image from Docker Hub + repository: golang + tag: latest + inputs: + - name: repo + run: + path: sh + args: + - -c + - | + cd repo/apps/golang + go test -v . +``` + +There are a few things happening in this [`task` step](https://concourse-ci.org/task-step.html) we added. + +First, we had to choose a container image to use to run our tests in. To run our Go tests we need an image with the `go` +binary. 
The [golang image from Docker Hub](https://hub.docker.com/_/golang) is an easy solution to reach for here, so we +tell Concourse to use that image to run this task. Concourse uses +the [registry-image resource](https://github.com/concourse/registry-image-resource) to download the Golang image. + +Next, we needed to provide our task with a copy of our git repo. The `get: repo` step added an artifact named `repo` +that our task can reference as an [input](https://concourse-ci.org/tasks.html#schema.task-config.inputs). Concourse will +then take care of mounting a copy of our repo from the [`get` step](https://concourse-ci.org/get-step.html) into our +task's container. + +The last part is us writing a small shell script that changes to the directory of our Go tests and finally +runs `go test -v .`. We added `-v` to get a little more output from the tests. + +Let's update our pipeline and trigger the job from the web UI again. + +```shell +fly -t tutorial set-pipeline --pipeline go-app --config pipeline.yml +``` + +![](assets/trigger-tests-job-go-test.gif) + +Our tests run successfully! Let's update the job so it will run on every new commit instead of waiting for us to +manually start the job. We do this by adding `trigger: true` to the `get` step. + +```yaml +resources: + - name: repo + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + +jobs: + - name: tests + plan: + - get: repo + # Cause job to run on new commits + trigger: true + - task: tests + config: + platform: linux + image_resource: + type: registry-image + source: # Use the Golang image from Docker Hub + repository: golang + tag: latest + inputs: + - name: repo + run: + path: sh + args: + - -c + - | + cd repo/apps/golang + go test -v . +``` + +```shell +fly -t tutorial set-pipeline --pipeline go-app --config pipeline.yml +``` + +This change is also visually represented in the web view of the pipeline. 
The line connecting the resource to the job +changes from a dashed line: + +![Resource Pipeline Test - Dashed Line](assets/resources-pipeline-tests.png) + +To a solid line: + +![Resource Pipeline Test - Solid Line](assets/resources-pipeline-tests-trigger.png) + +Let's test this out by making a commit. In the `examples` repo, open `apps/golang/main.go`. Let's break the tests by +changing the `AddSix()` function to add seven instead of six. + +```go +func AddSix(i float64) float64 { + return i + 7 +} +``` + +Commit the change and then wait for the pipeline to pick it up and run the `tests` job. You can click on the resource to +see when Concourse finds the commit. You can then back out to the overview of the pipeline and see the job start on its +own. + +![](assets/resource-pipeline-tests-trigger.gif) + +!!! note + + By default, Concourse runs the `check` script of a resource every ~1 minute. You can click on the resource to see when + it was last checked. You can also press the refresh button to force a check to run. + +Let's fix the tests by undoing our change. + +```go +func AddSix(i float64) float64 { + return i + 6 +} +``` + +![](assets/resource-pipeline-fix-tests.gif) + +In the next section we'll add a job to publish our app as a release on GitHub. + +## Publishing with Put Steps + +We're going to add another job now that will publish our little Go app as +a [GitHub release](https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases) using +the [GitHub release resource](https://github.com/concourse/github-release-resource/). + +This job will need two pieces of information in order to create a GitHub release: + +* The git commit to publish as the release +* What we want the tag to be (e.g. v0.0.1) + +The git commit is already represented by the `repo` resource defined in our pipeline. We need to add another resource to +represent the tag we want to publish. 
+ +There are many different ways one could represent the tag value, but since we've already got a git repository setup, +we'll continue to leverage that. + +Let's create a new branch in our `examples` repository called `version`. We'll create a file in there that will contain +the name of the next tag we want to publish our app under. You can run the following commands to do this: + +```shell linenums="1" +# Make a new, empty branch called "version" +git switch --orphan version +# You can make the initial version whatever you want +echo "v0.0.1" > next-version +git add next-version +git commit -m "initial version" +git push -u origin version +``` + +Now we can add this as a resource in our pipeline, so our `resources` look like this: + +```yaml linenums="1" +resources: + - name: repo + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + + - name: version + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + branch: version +``` + +Publishing to GitHub is also going to need another resource because it represents something external to Concourse. Let's +add a third resource that will represent our release on GitHub. + +```yaml linenums="1" +resources: + - name: repo + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + + - name: version + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + branch: version + + - name: release + type: github-release + source: + owner: concourse + repository: examples + access_token: github_pat_... +``` + +Now we can tie these resources together in a job. We'll create a job called `publish` that will: + +* Get the last commit that passed the `tests` job. +* Get the `next-version` file from our `version` branch. 
+* Compile our Go app into a binary to publish alongside our release. We'll write a [ + `task` step](https://concourse-ci.org/task-step.html) to do this. +* Publish a new GitHub release, uploading the binary, and tagging the commit that last passed `tests`. + +Add this job under the `jobs` key in your pipeline: + +```yaml linenums="1" +- name: publish + plan: + - get: repo + passed: [ tests ] # Only use commits that passed the 'tests' job + - get: version + - task: build-binary + config: + platform: linux + image_resource: + type: registry-image + source: + repository: golang + tag: latest + inputs: + - name: repo + outputs: # Declare an output so the put step can upload our binary + - name: final-build + run: + path: sh + args: + - -c + - | + output="$(pwd)/final-build" + cd repo/apps/golang + go build -o "${output}/addsix" . + - put: release + params: + # Comes from the 'get: version' step + name: version/next-version + tag: version/next-version + # Comes from the 'get: repo' step + commitish: repo/.git/ref #refer to the git-resource README + # Comes from the output of our 'task: build-binary' step + globs: final-build/addsix +``` + +Here's the entire pipeline put together: + +```yaml linenums="1" +resources: + - name: repo + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + + - name: version + type: git + source: + uri: https://github.com/concourse/examples.git + username: + password: github_pat_... + branch: version + + - name: release + type: github-release + source: + owner: concourse + repository: examples + access_token: github_pat_... + +jobs: + - name: tests + plan: + - get: repo + trigger: true + - task: tests + config: + platform: linux + image_resource: &image #YAML anchor, kind of like a variable + type: registry-image + source: + repository: golang + tag: latest + inputs: + - name: repo + run: + path: sh + args: + - -c + - | + cd repo/apps/golang + go test -v . 
+ + - name: publish + plan: + - get: repo + passed: [ tests ] + - get: version + - task: build-binary + config: + platform: linux + image_resource: *image + inputs: + - name: repo + outputs: + - name: final-build + run: + path: sh + args: + - -cx + - | + output="$(pwd)/final-build" + cd repo/apps/golang + go build -o "${output}/addsix" . + - put: release + params: + name: version/next-version + tag: version/next-version + commitish: repo/.git/ref + globs: [ final-build/addsix ] +``` + +Let's update our pipeline: + +```shell +fly -t tutorial set-pipeline --pipeline go-app --config pipeline.yml +``` + +The pipeline should look like this in the web UI: + +![Resource Publish Job](assets/resource-publish-job.png) + +Go ahead and manually trigger the publish job. It should complete successfully and the logs should look similar to this: + +![Resource Publish Job Logs](assets/resource-publish-logs.png) + +!!! note + + You'll notice that an extra [`get` step](https://concourse-ci.org/get-step.html) snuck in there after + the [`put` step](https://concourse-ci.org/put-step.html) at the end. Concourse does this automatically after + every `put` step because a `put` step has no outputs. So if you ran a `put` step in the middle of your job and + wanted to use whatever you just published/uploaded in a later step, you wouldn't have access to it. Concourse + resolves this by automatically adding and running a `get` step. + + This extra `get` is not always necessary of course. If you want to have Concourse skip adding this `get` step, + you can set [`no_get`](https://concourse-ci.org/put-step.html#schema.put.no_get) to `true` in the `put` step. + This will save a few seconds off of your builds. + +On GitHub, you should see your release published, along with the binary `addsix` attached to the release: + +![Resources GitHub Release](assets/github-release.png) + +That's the whole pipeline! 
Congratulations on testing, building, and publishing a silly little Go app with a Concourse +pipeline 🎉 + +![Resources Pipeline Done](assets/resource-pipeline-done.png) + +## Using External Resource Types + +Concourse comes bundled with a lot of resources that are enough for most people to start using Concourse with. However, +users will want to extend Concourse to work with all sorts of systems and that means bringing your +own [Resource Types](https://concourse-ci.org/resource-types.html). + +Adding a [resource type](https://concourse-ci.org/resource-types.html) to your pipeline looks very similar to adding a +resource. You can even override the bundled resource types by re-declaring them in your pipeline. + +Remember, a resource is a container image. So to pull in a new resource type you need to tell Concourse where to pull +the image from. This is done by using the +built-in [registry-image resource](https://github.com/concourse/registry-image-resource/). The process of adding a +resource type is just like adding a regular resource, just under the top-level `resource_types` key instead. + +If you're looking for more resource types, there's a catalog of them +at [resource-types.concourse-ci.org](https://resource-types.concourse-ci.org/). + +## Time For Takeoff ✈️ + +This brings us to the end of the tutorial. You should have a basic understanding about how to read Concourse pipelines +and start creating your own. Here are some other parts of the site to help you take off with Concourse: + +* [How-To Guides](https://concourse-ci.org/how-to-guides.html) - Contains practical guides + for [working with pipelines](https://concourse-ci.org/pipeline-guides.html) and examples of common pipeline workflows, + such as [git](https://concourse-ci.org/git-guides.html) + and [container](https://concourse-ci.org/container-image-guides.html) workflows. 
+* Check out all the reference documentation: + * [Jobs](https://concourse-ci.org/jobs.html) + * [Tasks](https://concourse-ci.org/tasks.html) + * [Resources](https://concourse-ci.org/resources.html) + * [Resource Types](https://concourse-ci.org/resource-types.html) +* [Implement your own resource type](https://concourse-ci.org/implementing-resource-types.html) +* Find other resources at [resource-types.concourse-ci.org](https://resource-types.concourse-ci.org/) or put + `_something_ concourse resource` into your favorite search engine. + +Best of luck on your automation journey! + +!!! note + + If you have any feedback for this tutorial please share it in this + [GitHub discussion](https://github.com/concourse/concourse/discussions/7353) diff --git a/docs/docs/how-to/container-image-guides/build-push.md b/docs/docs/how-to/container-image-guides/build-push.md new file mode 100644 index 00000000..48d344f1 --- /dev/null +++ b/docs/docs/how-to/container-image-guides/build-push.md @@ -0,0 +1,162 @@ +--- +title: Building and Pushing an Image +--- + +In this guide we are going to show how to build and publish container images using +the [oci-build task](https://github.com/concourse/oci-build-task) +and [registry-image resource](https://github.com/concourse/registry-image-resource). This guide assumes you understand +how to build container images with [Dockerfile's](https://docs.docker.com/engine/reference/builder/) and publish +to [Docker Hub](https://hub.docker.com/) or another image registry using the docker cli. + +!!! note + + This is one way of building and pushing images. There are many other ways to accomplish this same task in Concourse. + +First we need a Dockerfile. You can store this in your own repo or reference +the [github.com/concourse/examples](https://github.com/concourse/examples) repo. The rest of this post assumes you use +the examples repo. All files in this blog post can be found in the examples repo. 
+

## The Dockerfile

The `Dockerfile`:

```dockerfile linenums="1" title="Dockerfile"
--8<-- "libs/examples/Dockerfiles/simple/Dockerfile"
```

The `stranger` text file:

```text linenums="1" title="stranger"
--8<-- "libs/examples/Dockerfiles/simple/stranger"
```

## Defining Pipeline Resources

Now we can start building out our pipeline. Let's declare our [Resources](../../resources/index.md) first. We will need
one resource to pull in the repo where our Dockerfile is located, and a second resource pointing to where we want to
push the built container image to.

There are some [Variables](../../../examples/pipeline-vars.md#variables) in this file that we will fill out when setting
the pipeline.

```yaml linenums="1" title="build-push.yml"
--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml::20"
```

## Create the Job

Next we will create a [job](../../jobs.md) that will build and push our container image.

To build the job we will need to pull in the repo where the `Dockerfile` is.

[//]: # (@formatter:off)
```yaml linenums="1" title="build-push.yml"
resources: ... # omitting resource section from above

--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml:22:25"
```
[//]: # (@formatter:on)

## Build the Image

The second step in our job will build the container image.

To build the container image we are going to use the [oci-build-task](https://github.com/concourse/oci-build-task). The
oci-build-task is a container image that is meant to be used in a Concourse [task](../../tasks.md) to build other
container images. Check out the [`README.md`](https://github.com/concourse/oci-build-task/blob/master/README.md) in the
repo for more details on how to configure and use the oci-build-task in more complex build scenarios.

[//]: # (@formatter:off)
```yaml linenums="1" title="build-push.yml"
resources: ... 
# omitting resource section from above + +--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml:22:35" +``` +[//]: # (@formatter:on) + +Next we will add [concourse-examples](https://github.com/concourse/examples) as an [ +`input`](../../tasks.md#task-config-schema) to the build task to ensure the artifact from the [ +`get` step](../../steps/get.md) (where our `Dockerfile` is fetched) is mounted in our `build-image` step. + +[//]: # (@formatter:off) +```yaml linenums="1" title="build-push.yml" +resources: ... # omitting resource section from above + +--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml:22:37" +``` +[//]: # (@formatter:on) + +The oci-build-task [outputs the built container image](https://github.com/concourse/oci-build-task#outputs) in a +directory called `image`. Let's add image as an output of our task so we can publish it in a later step. + +[//]: # (@formatter:off) +```yaml linenums="1" title="build-push.yml" +resources: ... # omitting resource section from above + +--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml:22:39" +``` +[//]: # (@formatter:on) + +## Defining the Build Context + +Next we need to tell the `oci-build-task` what +the [build context](https://docs.docker.com/engine/reference/commandline/build/) of our `Dockerfile` is. +The [README](https://github.com/concourse/oci-build-task) goes over a few other methods of creating your build context. +We are going to use the simplest use-case. By specifying `CONTEXT` the `oci-build-task` assumes a `Dockerfile` and its +build context are in the same directory. + +[//]: # (@formatter:off) +```yaml linenums="1" title="build-push.yml" +resources: ... 
# omitting resource section from above

--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml:22:44"
```
[//]: # (@formatter:on)

## Publish the Container Image

To push the container image add a [`put` step](../../steps/put.md) to our job plan and tell the registry-image resource
where the tarball of the container image is.

The `put` step will push the container image using the information defined previously in the
resource's [source](../../resources/index.md#resource-schema).

[//]: # (@formatter:off)
```yaml linenums="1" title="build-push.yml"
resources: ... # omitting resource section from above

--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml:22:47"
```
[//]: # (@formatter:on)

## The Entire Pipeline

Putting all the pieces together, here is our pipeline that builds and pushes a container image.

```yaml linenums="1" title="build-push.yml"
--8<-- "libs/examples/pipelines/build-and-push-simple-image.yml"
```

You can set the pipeline with the following fly command, updating the variable values with real values the pipeline can
use to run.

```shell
fly -t set-pipeline -p build-and-push-image \
  -c ./examples/pipelines/build-and-push-simple-image.yml \
  --var image-repo-name= \
  --var registry-username= \
  --var registry-password=
```

## Further Readings

Understanding the build context is important when building container images. You can
read [Dockerfile Best Practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#understand-build-context)
for more details about build contexts.

The [inputs section](https://github.com/concourse/oci-build-task#inputs) of the oci-build-task's `README` has examples
on how to create a build context with multiple inputs and other complex build scenarios. 
+ +Read the `README`s in the [oci-build-task](https://github.com/concourse/oci-build-task) +and [registry-image resource](https://github.com/concourse/registry-image-resource/) to learn more about their other +configuration options. \ No newline at end of file diff --git a/docs/docs/how-to/container-image-guides/build-use.md b/docs/docs/how-to/container-image-guides/build-use.md new file mode 100644 index 00000000..bd3583c3 --- /dev/null +++ b/docs/docs/how-to/container-image-guides/build-use.md @@ -0,0 +1,46 @@ +--- +title: Building an Image and Using it in a Task +--- + +This guide will show you how to build and use an image within one [job](../../jobs.md) without pushing the image to an +external image registry like Docker Hub. + +## Build The Image + +To avoid repeating ourselves we're going to use the pipeline made in the other +guide [Building and Pushing an Image](build-push.md). We will start with the pipeline from +the [Defining the Build Context](build-push.md#defining-the-build-context) section. + +We will add the `UNPACK_ROOTFS` parameter to the task. This parameter tells +the [oci-build-task](https://github.com/concourse/oci-build-task) to include the image in a special format that +Concourse's container runtime uses. + +!!! note + + In the future this may not be necessary if Concourse starts using the OCI image format. + +```yaml linenums="1" title="build-and-use-image.yml" +--8<-- "libs/examples/pipelines/build-and-use-image.yml::33" +``` + +The above pipeline will build a container image and also output it in Concourse's rootfs image format. + +## Use the Image + +Next we want to add a second task to this job which will use the image generated from the first task as its container +image. To use the image from the previous step add the top-level `image` key to the [`task` step](../../steps/task.md). + +[//]: # (@formatter:off) +```yaml linenums="1" title="build-and-use-image.yml" +resources: ...
# omitting resource section from above + +--8<-- "libs/examples/pipelines/build-and-use-image.yml:11:" +``` +[//]: # (@formatter:on) + +You can set the pipeline with the following fly command. + +```shell +fly -t tutorial set-pipeline -p build-and-use-image \ + -c ./build-and-use-image.yml +``` \ No newline at end of file diff --git a/docs/docs/how-to/container-image-guides/index.md b/docs/docs/how-to/container-image-guides/index.md new file mode 100644 index 00000000..d243af8b --- /dev/null +++ b/docs/docs/how-to/container-image-guides/index.md @@ -0,0 +1,3 @@ +--- +title: Container Image Guides +--- diff --git a/docs/docs/how-to/git-guides/basic.md b/docs/docs/how-to/git-guides/basic.md new file mode 100644 index 00000000..ab63a640 --- /dev/null +++ b/docs/docs/how-to/git-guides/basic.md @@ -0,0 +1,124 @@ +--- +title: Basic Git Operations +--- + +All of these examples use the [concourse/git-resource](https://github.com/concourse/git-resource) image. That image is +probably the most popular git resource for Concourse since it is shipped in +the [concourse/concourse](https://hub.docker.com/r/concourse/concourse) image and in the tarball on +the [GitHub release page](https://github.com/concourse/concourse/releases). It is not the only resource available for +working with git-related resources. If you don't see your use-case on this page then there is probably another resource +that you can use. For example, Pull Request workflows can be accomplished with +the [teliaoss/github-pr-resource](https://github.com/telia-oss/github-pr-resource). + +Check out the [docs](https://github.com/concourse/git-resource) for the git resource for all configuration options. + +## Fetching a Repository + +Here is how you fetch the contents of a git repository and use it in a task.
+ +```yaml +resources: + - name: concourse-examples + type: git + icon: github + source: + uri: https://github.com/concourse/examples + +jobs: + - name: read-the-readme + plan: + - get: concourse-examples + - task: cat-readme + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + inputs: # pass concourse-examples into this task step + - name: concourse-examples + run: + path: cat + args: [ "concourse-examples/README.md" ] +``` + +## Creating Commits and Tags + +Here's a simple way to create a commit using a bash script. + +```yaml +resources: + - name: repo-main + type: git + icon: github + source: + uri: https://github.com/user/my-repo + branch: main + +jobs: + - name: create-a-commit + plan: + - get: repo-main + - task: commit-and-tag + config: + platform: linux + image_resource: + type: registry-image + source: + repository: gitea/gitea # use any image that has the git cli + inputs: + - name: repo-main + outputs: + # to pass the commit to the following steps specify + # the "repo-main" as an output as well + - name: repo-main + run: + path: sh + args: + - -cx + # this is just a bash script + - | + cd repo-main + # edit a file / make a change + date +%Y-%m-%d > todays-date + git add ./todays-date + git commit -m "Add todays date" + git tag v0.1.6 + # push commit and tag + - put: repo-main + params: + # specify the "repo-main" artifact as the location + repository: repo-main +``` + +## Merging Branches + +Here is how you can merge two branches. Common if you are +using [gitflow](https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow) and need to merge a `dev` +branch into `main` every so often. 
+ +```yaml +resources: + - name: repo-main + type: git + icon: github + source: + uri: https://github.com/user/my-repo + branch: main + + - name: repo-dev + type: git + icon: github + source: + uri: https://github.com/user/my-repo + branch: dev + +jobs: + - name: merge-dev-into-main + plan: + - get: repo-dev + - put: repo-main + params: + repository: repo-dev + merge: true +``` \ No newline at end of file diff --git a/docs/docs/how-to/git-guides/index.md b/docs/docs/how-to/git-guides/index.md new file mode 100644 index 00000000..e321d8d3 --- /dev/null +++ b/docs/docs/how-to/git-guides/index.md @@ -0,0 +1,3 @@ +--- +title: Git Guides +--- diff --git a/docs/docs/how-to/git-guides/multi-branch.md b/docs/docs/how-to/git-guides/multi-branch.md new file mode 100644 index 00000000..cc987f8a --- /dev/null +++ b/docs/docs/how-to/git-guides/multi-branch.md @@ -0,0 +1,124 @@ +--- +title: Multi-Branch Workflows +--- + +Teams may make use of multiple branches for their development. For instance, some teams create feature branches while +working on new functionality - once this functionality is ready, the branch will be merged into the main branch and the +feature branch will be deleted. + +While a feature is under development, you'll often want to run tests against the feature branch and possibly deploy to a +staging environment. To model this in Concourse, you'll need to have a pipeline for each active feature branch. Manually +setting (and eventually archiving) a pipeline for each feature branch would be quite a burden. For this type of +workflow, Concourse has a few important tools to help you out: the [`set_pipeline` step](../../steps/set-pipeline.md), [ +`across`](../../steps/modifier-and-hooks/across.md), and [instanced pipelines](../../pipelines/grouping-pipelines.md). + +!!! 
warning "Experimental Feature" + + [`across`](../../steps/modifier-and-hooks/across.md) and [instanced + pipelines](../../pipelines/grouping-pipelines.md) are both experimental features, and must be enabled with the + feature flags `CONCOURSE_ENABLE_ACROSS_STEP` and `CONCOURSE_ENABLE_PIPELINE_INSTANCES`, respectively. + +In this guide, we'll cover: + +1. Writing a pipeline to [Test, Build & Deploy](#test-build-deploy) a branch to a staging environment. We'll + use [Terraform](https://www.terraform.io/) for our deployment +2. [Tracking Branches](#tracking-branches) in a repository; for each branch, we'll set a pipeline (using the [ + `set_pipeline` step](../../steps/set-pipeline.md) and [across](../../steps/modifier-and-hooks/across.md)) +3. Automatically [Cleaning Up Old Workspaces](#cleaning-up-old-workspaces) after branches get merged or deleted + +## Test, Build & Deploy + +We'll start out by defining the pipeline that should run for each active branch. For this example, we'll be working with +the following [sample Go application](https://github.com/concourse/examples/tree/master/apps/golang). + +Our pipeline will have three stages: + +1. Run unit tests +2. Build and upload a binary to a blobstore (in our case, we'll + use [Google Cloud Storage](https://cloud.google.com/storage)) +3. Trigger a `terraform apply` to deploy our app to a staging environment. + The [Terraform module](https://github.com/concourse/examples/blob/master/terraform/staging/main.tf) we'll use here + doesn't actually provision any infrastructure, and is just used as an example + +Since the pipeline config is intended to be used as a template for multiple different branches, we can +use [Vars](../../vars.md) to parameterize the config. In particular, we'll use the vars `((feature))` and `((branch))`, +which represent the name of the feature and the name of the branch, respectively. 
+ +Below is the full pipeline config from +the [Examples Repo](https://github.com/concourse/examples/blob/master/pipelines/multi-branch/template.yml): + +```yaml linenums="1" title="template.yml" +--8<-- "libs/examples/pipelines/multi-branch/template.yml" +``` + +## Tracking Branches + +In addition to the branch pipeline template, we'll also need a pipeline to track the list of branches and set a pipeline +for each one. + +To track the list of branches in a repository, we can use [ +`aoldershaw/git-branches-resource`](https://github.com/aoldershaw/git-branches-resource). This [ +`resource_type`](../../resource-types/index.md) emits a new [resource version](../../resources/resource-versions.md) +whenever a branch is created or deleted. It also lets us filter the list of branches by a regular expression. In this +case, let's assume our feature branches match the regular expression `feature/.*`. + +Below is the current pipeline config for this tracker pipeline: + +```yaml linenums="1" title="tracker.yml" +resource_types: + - name: git-branches + type: registry-image + source: + repository: aoldershaw/git-branches-resource + +resources: + - name: feature-branches + type: git-branches + source: + uri: https://github.com/concourse/examples + # The "(?P<name>pattern)" syntax defines a named capture group. + # aoldershaw/git-branches-resource emits the value of each named capture + # group under the `groups` key. + # + # e.g.
feature/some-feature ==> {"groups": {"feature": "some-feature"}} + branch_regex: 'feature/(?P<feature>.*)' + + - name: examples + type: git + source: + uri: https://github.com/concourse/examples + +jobs: + - name: set-feature-pipelines + plan: + - in_parallel: + - get: feature-branches + trigger: true + - get: examples + - load_var: branches + file: feature-branches/branches.json + - across: + - var: branch + values: ((.:branches)) + set_pipeline: dev + file: examples/pipelines/multi-branch/template.yml + instance_vars: { feature: ((.:branch.groups.feature)) } + vars: { branch: ((.:branch.name)) } +``` + +We set each pipeline as an [instanced pipeline](../../pipelines/grouping-pipelines.md) - this will result in Concourse +grouping all the related `dev` pipelines in the UI. + +## Cleaning Up Old Workspaces + +With the setup described in [Tracking Branches](#tracking-branches), Concourse will automatically archive any pipelines +for branches that get removed. However, Concourse doesn't know that it should destroy Terraform workspaces when a branch +is removed. To accomplish this, we can yet again make use of +the [Terraform resource](https://github.com/ljfranklin/terraform-resource) to destroy these workspaces. We'll add +another job to +the [tracker pipeline](https://github.com/concourse/examples/blob/master/pipelines/multi-branch/tracker.yml) that +figures out which workspaces don't belong to an active branch and destroys them. + +```yaml linenums="1" title="tracker.yml" +--8<-- "libs/examples/pipelines/multi-branch/tracker.yml" +``` \ No newline at end of file diff --git a/docs/docs/how-to/index.md b/docs/docs/how-to/index.md new file mode 100644 index 00000000..e4e84a65 --- /dev/null +++ b/docs/docs/how-to/index.md @@ -0,0 +1,8 @@ +--- +title: How-To Guides +--- + +The following pages are guides that show how to accomplish certain workflows within Concourse.
Most of the guides will +use specific images, but you are in no way limited to or forced to use these images to accomplish the same task. There +are many ways to accomplish the same thing in Concourse, so don't let these guides limit you in what you think is +possible with Concourse. \ No newline at end of file diff --git a/images/how-to-guides/pipelines/simple-gated-pipeline.png b/docs/docs/how-to/pipeline-guides/assets/gated-pipelines-01.png similarity index 100% rename from images/how-to-guides/pipelines/simple-gated-pipeline.png rename to docs/docs/how-to/pipeline-guides/assets/gated-pipelines-01.png diff --git a/images/how-to-guides/pipelines/simple-gated-fan-in-fan-out.png b/docs/docs/how-to/pipeline-guides/assets/gated-pipelines-02.png similarity index 100% rename from images/how-to-guides/pipelines/simple-gated-fan-in-fan-out.png rename to docs/docs/how-to/pipeline-guides/assets/gated-pipelines-02.png diff --git a/images/how-to-guides/pipelines/gated-pipeline-with-notification.png b/docs/docs/how-to/pipeline-guides/assets/gated-pipelines-03.png similarity index 100% rename from images/how-to-guides/pipelines/gated-pipeline-with-notification.png rename to docs/docs/how-to/pipeline-guides/assets/gated-pipelines-03.png diff --git a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-one-10.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-01.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-one-10.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-01.gif diff --git a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-two-parallel.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-02a.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-two-parallel.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-02a.gif diff --git 
a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-two-serial.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-02b.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-two-serial.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-02b.gif diff --git a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-three-1.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-03.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-three-1.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-03.gif diff --git a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-four.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-04.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-four.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-04.gif diff --git a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-five.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-05.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-five.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-05.gif diff --git a/images/how-to-guides/pipelines/explore-task-inputs-outs-example-six.gif b/docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-06.gif similarity index 100% rename from images/how-to-guides/pipelines/explore-task-inputs-outs-example-six.gif rename to docs/docs/how-to/pipeline-guides/assets/task-inputs-outputs-06.gif diff --git a/images/how-to-guides/pipelines/time-triggered-01.png b/docs/docs/how-to/pipeline-guides/assets/time-triggered-pipelines-01.png similarity index 100% rename from images/how-to-guides/pipelines/time-triggered-01.png rename to 
docs/docs/how-to/pipeline-guides/assets/time-triggered-pipelines-01.png diff --git a/images/how-to-guides/pipelines/time-triggered-02.png b/docs/docs/how-to/pipeline-guides/assets/time-triggered-pipelines-02.png similarity index 100% rename from images/how-to-guides/pipelines/time-triggered-02.png rename to docs/docs/how-to/pipeline-guides/assets/time-triggered-pipelines-02.png diff --git a/docs/docs/how-to/pipeline-guides/common-pipeline.md b/docs/docs/how-to/pipeline-guides/common-pipeline.md new file mode 100644 index 00000000..512a05b1 --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/common-pipeline.md @@ -0,0 +1,168 @@ +--- +title: Common Pipeline Practices +--- + +The following are practices that we see a lot of people use in their pipelines. These are by no means "Best" practices, +they are simply common and may or may not work for you and your team. + +## Parallelizing Get Steps in Jobs + +All jobs usually have [`get` steps](../../steps/get.md) as their first set of steps. + +```yaml +jobs: + - name: awesome-job + plan: + - get: cool-code + - get: funny-binary + - get: the-weather + - task: business-stuff +``` + +To reduce the waiting time to the length of the longest running get step, put all `get` steps under an [ +`in_parallel` step](../../steps/in-parallel.md). + +```yaml +jobs: + - name: awesome-job + plan: + - in_parallel: + - get: cool-code + - get: funny-binary + - get: the-weather + - task: business-stuff +``` + +## Specify Inputs for Put Steps + +By default, [`put` step's](../../steps/put.md) have all artifacts from a job mounted in their resource container. This +can result in long initialization times for put steps. It's likely that a [`put` step](../../steps/put.md) only needs a +subset of all available artifacts generated throughout the job. + +There are two ways to specify which artifacts to send to a [`put` step](../../steps/put.md). 
You can specify `detect` as +the [`put` step `inputs`](../../steps/put.md), or you can pass in an exact list of all artifacts the [ +`put` step](../../steps/put.md) needs. + +Using `detect`: + +```yaml +jobs: + - name: the-job + plan: # Get some artifacts + - in_parallel: + - get: apples + - get: apple-basket + - get: monkeys + # using detect will result in "apples-location" and "basket" being passed in + # "monkeys" will not be passed in + - put: apples-in-basket + inputs: detect + params: + apples-location: apples/location # matches the first get step + basket: apple-basket # matches the second get step +``` + +Specifying the exact inputs needed for the [`put` step](../../steps/put.md): + +```yaml +jobs: + - name: the-job + plan: # Get some artifacts + - in_parallel: + - get: apples + - get: apple-basket + - get: monkeys + - put: apples-in-basket + inputs: [ apples, apple-basket ] # specify the exact inputs needed + params: + apples-location: apples/location + basket: apple-basket +``` + +## Putting Task Configs in Files + +A lot of the pipeline examples that you will find on this site and in resource repos will embed a [`task` step +`config`](../../steps/task.md) directly in the pipeline. This is a nice way of clearly seeing what inputs/outputs the +task uses. Tasks are usually designed to be used in multiple places, maybe with slightly different configuration. To +support this scenario, most users store task configs in files instead of embedding the config directly in the pipeline. 
+ +Here's what this looks like in practice: + +```yaml title="print-date.yml, stored in some git repo under a folder named tasks" +platform: linux + +image_resource: # define a default image for the task to use + type: registry-image + source: + repository: busybox + +run: + path: date + args: [ "+%Y-%m-%d" ] +``` + +Using the task in a pipeline: + +```yaml title="pipeline.yml" +resources: + - name: ci + type: git + source: + uri: https://github.com/concourse/examples.git + +jobs: + - name: the-best-job + plan: + - get: ci + - task: today + file: ci/tasks/print-date.yml +``` + +## `Get` Images for Tasks Instead of using Anonymous Image Resources + +It is easy to let Concourse fetch images for tasks right when they are needed by using the `task-config.image_resource` +field in a [task config](../../steps/task.md). It's the easy out-of-the-box solution. Another way is to pass the image +for a task as an input to the job by setting the [`task` step `image`](../../steps/task.md) field. This also allows you +to track the version of the image being used by the task and also avoid getting rate-limited by configuring the resource +with credentials. + +=== "Before" + + ```yaml title="Anonymous Image Fetching for Tasks" + jobs: + - name: job + plan: + - task: simple-task + config: + platform: linux + image_resource: # anonymous image resource + type: registry-image + source: + repository: busybox + run: + path: echo + args: [ "Hello world!" ] + ``` +=== "After" + + ```yaml title="Passing Task Image as Job Inputs" + resources: + - name: busybox + type: registry-image + source: + repository: busybox + username: ((docker.user)) + password: ((docker.password)) + + jobs: + - name: job + plan: + - get: busybox # pass image into job + - task: simple-task + image: busybox # use image for task. Overrides anonymous image + config: + platform: linux + run: + path: echo + args: [ "Hello world!" 
] + ``` \ No newline at end of file diff --git a/docs/docs/how-to/pipeline-guides/gated-pipelines.md b/docs/docs/how-to/pipeline-guides/gated-pipelines.md new file mode 100644 index 00000000..b71866af --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/gated-pipelines.md @@ -0,0 +1,161 @@ +--- +title: Gated Pipeline Patterns +--- + +Gated pipelines provide control for administrators and release managers on when a given software release is deployed to +a tightly protected environment (e.g. production). + +The execution of jobs that perform certain tasks (e.g. deployment) targeting the downstream environment beyond the " +gate" step is done only upon either an approval coming from an external Change Control system or an explicit manual +trigger of such step. + +## 1) - A Simple Gated Pipeline + +By default, all [Jobs](../../jobs.md) only run when manually triggered. That means a user has to run [ +`fly trigger-job`](../../jobs.md#fly-trigger-job) or click the plus button in the web interface for a job to run. A job +only runs automatically if one of its resources has the `trigger: true` parameter set. + +Therefore, in order to create a gated job in a pipeline you simply need to create a job that can only be manually +triggered. That means not setting `trigger: true` for any of the jobs' [`get` steps](../../steps/get.md). 
+ +```yaml +jobs: + - name: run-automatically + plan: + - get: my-repo + trigger: true # has trigger:true so automatically triggers + # can include more steps to run other things before hitting the gate + + - name: the-gate # manually trigger this job + plan: + - get: my-repo + trigger: false # redundant but guarantees the job won't run automatically + passed: + - run-automatically + + # runs immediately after the gate is triggered + - name: do-more-stuff-after-the-gate + plan: + - get: my-repo + passed: + - the-gate + trigger: true + # can include more steps to run other things + +resources: + - name: my-repo + type: git + source: + uri: https://github.com/concourse/examples.git +``` + +![](assets/gated-pipelines-01.png) + +## 2) - Gated Pipeline Fanning In and Out + +You can also use a gate as way to fan-in from multiple jobs and/or fan-out to multiple jobs as well. + +```yaml linenums="1" +jobs: + # three pre-gate jobs + - name: job-a + plan: + - get: my-repo + trigger: true + - name: job-b + plan: + - get: my-repo + trigger: true + - name: job-c + plan: + - get: my-repo + trigger: true + + - name: the-gate # manually trigger this job + plan: + - get: my-repo + trigger: false + passed: # fan-in from the three pre-gate jobs + - job-a + - job-b + - job-c + + # fan-out to three post-gate jobs + - name: post-gate-job-a + plan: + - get: my-repo + trigger: true + passed: [ the-gate ] + - name: post-gate-job-b + plan: + - get: my-repo + trigger: true + passed: [ the-gate ] + - name: post-gate-job-c + plan: + - get: my-repo + trigger: true + passed: [ the-gate ] + +resources: + - name: my-repo + type: git + source: + uri: https://github.com/concourse/examples.git +``` + +![](assets/gated-pipelines-02.png) + +## 3) - A Gated Pipeline With Notifications + +This pipeline shows you how you can send a notification, like an email, to notify someone that a new build of your +application is ready to be shipped. 
+ +```yaml linenums="1" +jobs: + - name: build-it + plan: + - get: my-repo + trigger: true + # can add steps to build your app + + - name: test-it + plan: + - get: my-repo + trigger: true + passed: [ build-it ] + # can add steps to run tests + - put: email-release-manager + params: + subject: "Ready to ship" + body_text: | + A build is ready to be shipped! + Build to be shipped: ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME} + Link to pipeline: ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME} + + - name: ship-it + plan: + - get: my-repo + trigger: false + passed: [ test-it ] + +resources: + - name: my-repo + type: git + source: + uri: https://github.com/concourse/examples.git + - name: email-release-manager + type: email + source: + # other required fields for this resource have been omitted + from: pipeline@example.com + to: release-manager@example.com + +resource_types: + - name: email + type: registry-image + source: + repository: pcfseceng/email-resource +``` + +![](assets/gated-pipelines-03.png) \ No newline at end of file diff --git a/docs/docs/how-to/pipeline-guides/index.md b/docs/docs/how-to/pipeline-guides/index.md new file mode 100644 index 00000000..d594118b --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/index.md @@ -0,0 +1,3 @@ +--- +title: Pipeline Guides +--- diff --git a/docs/docs/how-to/pipeline-guides/managing-pipeline-configs.md b/docs/docs/how-to/pipeline-guides/managing-pipeline-configs.md new file mode 100644 index 00000000..4b9be2b4 --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/managing-pipeline-configs.md @@ -0,0 +1,64 @@ +--- +title: Managing Pipeline Configurations +--- + +When first starting with Concourse, it is common to write the pipeline and set it with `fly set-pipeline`. This works +fine when initially building a pipeline or testing out some new changes. 
Once you're past the initial building phase +though, you probably want to store your pipeline YAML files somewhere that you and others on your team can access and +update the pipeline without worrying about remembering to do `fly set-pipeline` every time. A Git repository is a good +place to store your pipeline YAML files, so our pipeline management conventions start with a Git repository. + +Most other CI/CD tools will tell you to store your "pipeline" or "workflow" configuration files in a `.something` +directory in your app's git repository. You could do that with Concourse too and create a `.concourse` directory, but +Concourse does not force any convention onto you. There are _so many ways_ that you could store and manage your pipeline +configuration files. The most common way is to use a Git repository, but there's nothing stopping you from using a +versioned S3 bucket either. + +Let's cover the two most common conventions for storing pipeline configurations. + +## 1) In Your App's Git Repository + +This is what most users will be familiar with coming from other CI/CD tools. Simply make a folder in the same repository +as your code. Name the folder whatever you want. Some possible names if you need inspiration: + +* `ci` +* `concourse` +* `pipelines` + +Then store your pipeline YAML files in that directory. Again, there's no "special way" that Concourse expects you to +store your pipeline YAML files. Do whatever makes sense to you! + +To automatically update your pipeline in Concourse with what's stored in your Git repository, use the [ +`set_pipeline` step](../../steps/set-pipeline.md) in a job. You can view an example of a pipeline updating +itself [in the examples section](../../../examples/set-pipeline.md). There are also examples on the [ +`set_pipeline` step](../../steps/set-pipeline.md) page. 
+ +## 2) In a Different Git Repository + +This is the most common convention that Concourse users follow, especially if your pipelines interact with multiple Git +repositories. The Concourse project does this; we store all of our pipeline and task YAML files in [ +`github.com/concourse/ci`](https://github.com/concourse/ci), completely separate from the main repository at [ +`github.com/concourse/concourse`](https://github.com/concourse/concourse) and all +the [resource type repositories](https://github.com/concourse/?q=resource). + +### 2.a) Parent-Child Pipeline Relationships + +!!! tip + + The following is also described on the [`set_pipeline` step](../../steps/set-pipeline.md) page. + +If you are setting multiple pipelines, or multiple instances of the same pipeline, it can be helpful to manage them from +one place. Concourse allows you to use the `set_pipeline` step to create other pipelines. +The [set_pipeline step](../../steps/set-pipeline.md) is not limited to updating the current pipeline. + +When you use one pipeline to create other pipelines, this creates a parent-child relationship that Concourse tracks. You +can see an example of this [here in `set-pipelines.yml`](../../../examples/set-pipeline.md). + +As long as the parent pipeline continues to set/update the child pipeline(s), the child pipeline(s) will remain active. +If the parent pipeline stops updating the child pipeline(s) (e.g. you updated the parent pipeline to not set/update the +child pipeline(s) anymore), Concourse +will [archive the pipeline](../../pipelines/managing-pipelines.md#fly-archive-pipeline). This pauses the child pipeline( +s) and hides them from the web UI. The child pipeline configuration is deleted, but its build logs are retained. + +If you want to fully delete a pipeline, use [ +`fly destroy-pipeline`](../../pipelines/managing-pipelines.md#fly-destroy-pipeline). 
\ No newline at end of file diff --git a/docs/docs/how-to/pipeline-guides/manual-approval.md b/docs/docs/how-to/pipeline-guides/manual-approval.md new file mode 100644 index 00000000..5a7866e7 --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/manual-approval.md @@ -0,0 +1,92 @@ +--- +title: Manual Approval Step +--- + +This is an example of a [`task` step](../../tasks.md) you can add to your [Jobs](../../jobs.md) that requires a human to +approve or reject the job from running. This is probably the most minimal version of a manual approval step you can have +in Concourse that doesn't require pulling in a bunch of other tech into your stack. It's definitely not the best UX +since you need to use the [`fly` CLI](../../fly.md) to approve the step. + +Task configuration, `config.yml`: + +```yaml +platform: linux + +inputs: + - name: repo + +params: + APPROVAL_TIMEOUT: 600 #default of 10mins + +run: + path: repo/tasks/manual-approval/run.sh +``` + +Task script, `run.sh`: + +```shell +#!/usr/bin/env bash + +set -euo pipefail + +timeout=$((EPOCHSECONDS+APPROVAL_TIMEOUT)) +echo -n "Waiting for manual approval..." +until [[ -f /tmp/approved || $EPOCHSECONDS -gt $timeout ]]; do + sleep 5 + echo -n "." +done + +if [[ -f /tmp/approved ]]; then + echo "Step approved!" +else + echo "Approval timeout reached. Aborting job." + exit 1 +fi +``` + +To approve the job when it gets to this step you have to create `/tmp/approved` on the step's container. You can do that +using `fly`'s `intercept` command, like so (replace `PIPELINE/JOB` with the name of your pipeline and job that the step +resides in): + +```shell +fly -t ci intercept --job PIPELINE/JOB --step manual-approval touch /tmp/approved +``` + +Here's the step added in-line to a pipeline so you can see how it works on its own.
+ +```yaml +jobs: + - name: approval + plan: + - task: manual-approval + params: + APPROVAL_TIMEOUT: 600 #10mins + config: + platform: linux + image_resource: + type: mock + source: + mirror_self: true + run: + path: bash + args: + - -c + - | + #!/usr/bin/env bash + + set -euo pipefail + + timeout=$((EPOCHSECONDS+APPROVAL_TIMEOUT)) + echo -n "Waiting for manual approval..." + until [[ -f /tmp/approved || $EPOCHSECONDS -gt $timeout ]]; do + sleep 5 + echo -n "." + done + + if [[ -f /tmp/approved ]]; then + echo "Step approved!" + else + echo "Approval timeout reached. Aborting job." + exit 1 + fi +``` \ No newline at end of file diff --git a/docs/docs/how-to/pipeline-guides/task-inputs-outputs.md b/docs/docs/how-to/pipeline-guides/task-inputs-outputs.md new file mode 100644 index 00000000..c26844ee --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/task-inputs-outputs.md @@ -0,0 +1,533 @@ +--- +title: Exploring Task Input and Output Scenarios +--- + +Understanding how task inputs and outputs work in Concourse can be a little confusing initially. This guide will walk +you through a few example pipelines to show you how inputs and outputs work within a single Concourse job. By the end +you should understand how inputs and outputs work within the context of a single job. + +To run the pipelines in the following examples yourself you can get your own Concourse running locally by following +the [Quick Start](../../getting-started/quick-start.md) guide. Then use [ +`fly set-pipeline`](../../pipelines/setting-pipelines.md#fly-set-pipeline) to see the pipelines in action. + +## 1) - Passing Inputs Between Tasks + +This pipeline will show us how to create outputs and pass outputs as inputs to the next [step](../../steps/index.md) in +a [job plan](../../jobs.md). + +This pipeline has two tasks. The first task outputs a file with the date. The second task reads and prints the contents +of the file from the first task. + +Here's a visualization of the job. 
+ +![](assets/task-inputs-outputs-01.gif) + +```yaml title="passing-artifacts.yml" linenums="1" +busybox: &busybox #YAML anchor + type: registry-image + source: + repository: busybox + +jobs: + - name: the-job + plan: + - task: create-one-output + config: + platform: linux + image_resource: *busybox + outputs: + # Concourse will make an empty dir with this name + # and save the contents for later steps + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file + - task: read-output-from-previous-step + config: + platform: linux + image_resource: *busybox + # You must explicitly name the inputs you expect + # this task to have. + # If you don't then outputs from previous steps + # will not appear in the step's container. + # The name must match the output from the previous step. + # Try removing or renaming the input to see what happens! + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./the-output/file +``` + +Set and run this pipeline to see the results yourself. Save the pipeline in a file called `passing-artifacts.yml`. + +```shell +fly -t tutorial set-pipeline -p passing-artifacts -c passing-artifacts.yml +fly -t tutorial unpause-pipeline -p passing-artifacts +fly -t tutorial trigger-job --job passing-artifacts/the-job --watch +``` + +## 2) - Two tasks with the same output, who wins? + +This scenario is to satisfy the curiosity cat inside all of us. Never do this in real life because you're definitely +going to hurt yourself! + +There are two [Jobs](../../jobs.md) in this pipeline. The first job, `writing-in-parallel`, has +two [Steps](../../steps/index.md); both steps will produce an artifact named `the-output` in parallel. If you run the +`writing-to-the-same-output-in-parallel` job multiple times you'll see the file in `the-output` folder changes depending +on which of the parallel tasks finished last. Here's a visualization of the first job. 
+ +![](assets/task-inputs-outputs-02a.gif) + +The second job, `writing-to-the-same-output-serially`, is a serial version of the first job. In this job the second task +always wins because it's the last task that outputs `the-output`, so only `file2` will be in `the-output` directory in +the last step in the job plan. + +![](assets/task-inputs-outputs-02b.gif) + +The lesson to take away from this example is that **last to write wins** when it comes to the state of any particular +artifact in your job. + +```yaml title="parallel-artifacts.yml" linenums="1" +busybox: &busybox #YAML anchor + type: registry-image + source: + repository: busybox + +jobs: + - name: writing-to-the-same-output-in-parallel + plan: + # running two tasks that output in parallel?!? + # who will win?? + - in_parallel: + - task: create-the-output + config: + platform: linux + image_resource: *busybox + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file1 + - task: also-create-the-output + config: + platform: linux + image_resource: *busybox + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file2 + # run this job multiple times to see which + # previous task wins each time + - task: read-output-from-previous-step + config: + platform: linux + image_resource: *busybox + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./the-output + echo "Get ready to error!" 
+ cat ./the-output/file1 ./the-output/file2 + + - name: writing-to-the-same-output-serially + plan: + - task: create-the-output + config: + platform: linux + image_resource: *busybox + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file1 + - task: also-create-the-output + config: + platform: linux + image_resource: *busybox + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file2 + - task: read-output-from-previous-step + config: + platform: linux + image_resource: *busybox + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./the-output + echo "Get ready to error! file1 will never exist" + cat ./the-output/file1 ./the-output/file2 +``` + +Set and run this pipeline to see the results yourself. Save the pipeline in a file called `parallel-artifacts.yml`. + +```shell +fly -t tutorial set-pipeline -p parallel-artifacts -c parallel-artifacts.yml +fly -t tutorial unpause-pipeline -p parallel-artifacts +fly -t tutorial trigger-job --job parallel-artifacts/writing-to-the-same-output-in-parallel --watch +fly -t tutorial trigger-job --job parallel-artifacts/writing-to-the-same-output-serially --watch +``` + +## 3) - Mapping the Names of Inputs and Outputs + +Sometimes the names of inputs and outputs don't match between multiple [task configs](../../steps/task.md), or they do +match, and you don't want them overwriting each other, like in the previous example. That's when `input_mapping` and +`output_mapping` become helpful. Both of these features rename the inputs/outputs in the task's config to some other +name in the job plan. + +This pipeline has one job with four tasks. + +The first task outputs a file with the date to the `the-output` directory. `the-output` is mapped to the new name +`demo-disk`. The artifact `demo-disk` is now available in the rest of the job plan for future steps to take as inputs. 
+ +The second task reads and prints the contents of the file under the new name `demo-disk`. + +The third task reads and prints the contents of the file under another name, `generic-input`. The `demo-disk` artifact +in the job plan is mapped to `generic-input`. + +The fourth task tries to use the artifact named `the-output` as its input. This task fails to even start because there +was no artifact with the name `the-output` available in the [job plan](../../jobs.md#job-schema); it was remapped to +`demo-disk`. + +Here's a visualization of the job. + +![](assets/task-inputs-outputs-03.gif) + +```yaml title="mapping-artifacts.yml" linenums="1" +busybox: &busybox #YAML anchor + type: registry-image + source: + repository: busybox + +jobs: + - name: the-job + plan: + - task: create-one-output + # The task config has the artifact `the-output` + # output_mapping will rename `the-output` to `demo-disk` + # in the rest of the job's plan + output_mapping: + the-output: demo-disk + config: + platform: linux + image_resource: *busybox + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file + # this task expects the artifact `demo-disk` so no mapping is needed + - task: read-output-from-previous-step + config: + platform: linux + image_resource: *busybox + inputs: + - name: demo-disk + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./demo-disk/file + - task: rename-and-read-output + # This task expects the artifact `generic-input`. + # input_mapping will map the task's `generic-input` to + # the job plans `demo-disk` artifact. + # `demo-disk` is renamed to `generic-input`. 
+ input_mapping: + generic-input: demo-disk + config: + platform: linux + image_resource: *busybox + inputs: + - name: generic-input + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./generic-input/file + - task: try-to-read-the-output + input_mapping: + generic-input: demo-disk + config: + platform: linux + image_resource: *busybox + # `the-output` is not available in the job plan + # so this task will error while initializing + # since there's no artiact named `the-output` in + # the job's plan + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + cat ./generic-input/file +``` + +Set and run this pipeline to see the results yourself. Save the pipeline in a file called `mapping-artifacts.yml`. + +```shell +fly -t tutorial set-pipeline -p mapping-artifacts -c mapping-artifacts.yml +fly -t tutorial unpause-pipeline -p mapping-artifacts +fly -t tutorial trigger-job --job mapping-artifacts/the-job --watch +``` + +## 4) - Adding Files to an Existing Artifact + +This pipeline will also have two jobs in order to illustrate this point. What happens if we add a file to an output? If +you think back to example two you may already know the answer. + +The first task will create `the-output` with `file1`. The second task will add `file2` to the `the-output`. The last +task will read the contents of `file1` and `file2`. + +As long as you re-declare the input as an output in the second task you can modify any of your outputs. + +This means you can pass something between a bunch of tasks and have each task add or modify something in the artifact. + +Here's a visualization of the job. 
+ +![](assets/task-inputs-outputs-04.gif) + +```yaml title="existing-artifact.yml" linenums="1" +jobs: + - name: add-file-to-output + plan: + - task: create-one-output + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file1 + - task: add-file-to-previous-output + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + # this task lists the same artifact as + # its input and output + inputs: + - name: the-output + outputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output/file2 + - task: read-output-from-previous-step + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + inputs: + - name: the-output + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./the-output + cat ./the-output/file1 ./the-output/file2 +``` + +Set and run this pipeline to see the results yourself. Save the pipeline in a file called `existing-artifact.yml`. + +```shell +fly -t tutorial set-pipeline -p existing-artifact -c existing-artifact.yml +fly -t tutorial unpause-pipeline -p existing-artifact +fly -t tutorial trigger-job --job existing-artifact/add-file-to-output --watch +``` + +## 5) - Task With Multiple Inputs and Outputs + +What happens if you have a task that has multiple outputs and a second task that only lists one of the outputs? Does the +second task get the extra outputs from the first task? + +The answer is no. A task will only get the artifacts that match the name of the inputs listed in the task's config. + +Here's a visualization of the job. 
+ +![](assets/task-inputs-outputs-05.gif) + +```yaml title="multiple-artifacts.yml" linenums="1" +jobs: + - name: multiple-outputs + plan: + - task: create-three-outputs + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + outputs: + - name: the-output-1 + - name: the-output-2 + - name: the-output-3 + run: + path: /bin/sh + args: + - -cx + - | + ls -lah + date > ./the-output-1/file + date > ./the-output-2/file + date > ./the-output-3/file + - task: take-one-output + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + # only one of the three outputs are + # listed as inputs + inputs: + - name: the-output-1 + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./ + cat ./the-output-1/file + - task: take-two-outputs + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + # this task pulls in the other + # two outputs, just for fun! + inputs: + - name: the-output-2 + - name: the-output-3 + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./ + cat ./the-output-2/file + cat ./the-output-3/file +``` + +Set and run this pipeline to see the results yourself. Save the pipeline in a file called `multiple-artifacts.yml`. + +```shell +fly -t tutorial set-pipeline -p multiple-artifacts -c multiple-artifacts.yml +fly -t tutorial unpause-pipeline -p multiple-artifacts +fly -t tutorial trigger-job --job multiple-artifacts/multiple-outputs --watch +``` + +## 6) - Get Steps Generate Artifacts + +The majority of Concourse pipelines have at least one [resource](../../resources/index.md), which means they have at +least one [`get` step](../../steps/get.md). Using a [`get` step](../../steps/get.md) in a job makes an artifact with the +name of the get step available for later steps in the job plan to consume as inputs. + +Here's a visualization of the job. 
+ +![](assets/task-inputs-outputs-06.gif) + +```yaml title="get-artifact.yml" linenums="1" +resources: + - name: concourse-examples + type: git + source: + uri: "https://github.com/concourse/examples" + +jobs: + - name: get-job + plan: + # there will be an artifact named + # "concourse-examples" available in the job plan + - get: concourse-examples + - task: take-one-output + config: + platform: linux + image_resource: + type: registry-image + source: + repository: busybox + inputs: + - name: concourse-examples + run: + path: /bin/sh + args: + - -cx + - | + ls -lah ./ + cat ./concourse-examples/README.md +``` + +Set and run this pipeline to see the results yourself. Save the pipeline in a file called `get-artifact.yml`. + +```shell +fly -t tutorial set-pipeline -p get-artifact -c get-artifact.yml +fly -t tutorial unpause-pipeline -p get-artifact +fly -t tutorial trigger-job --job get-artifact/get-job --watch +``` \ No newline at end of file diff --git a/docs/docs/how-to/pipeline-guides/time-triggered-pipelines.md b/docs/docs/how-to/pipeline-guides/time-triggered-pipelines.md new file mode 100644 index 00000000..759a35ac --- /dev/null +++ b/docs/docs/how-to/pipeline-guides/time-triggered-pipelines.md @@ -0,0 +1,120 @@ +--- +title: Time Triggered Pipeline Patterns +--- + +The [time resource](https://github.com/concourse/time-resource/) produces a +new [version](../../getting-started/resources.md#versions) for the time interval that was declared in its definition in +the pipeline configuration file. 
+ +The two most common usages are having the time resource trigger on an interval: + +```yaml +resources: + - name: trigger-every-3-minutes + type: time + source: + interval: 3m +``` + +Or trigger once within a certain time range: + +```yaml +resources: + - name: trigger-daily-between-1am-and-2am + type: time + source: + start: 1:00 AM + stop: 2:00 AM + location: America/Toronto +``` + +Check the README of the [time resource](https://github.com/concourse/time-resource/) for more details. + +## 1) - Single Time Trigger + +The following is an example of a pipeline that is triggered by a time resource on a pre-determined interval. + +```yaml +resources: + - name: trigger-every-3-minutes + type: time + source: + interval: 3m + +jobs: + - name: run-forrest-run + plan: + - get: trigger-every-3-minutes + trigger: true + # can add other steps to run in this job + + - name: run-bubba-run + plan: + - get: trigger-every-3-minutes + trigger: true + passed: + - run-forrest-run + # can add other steps to run in this job +``` + +![](assets/time-triggered-pipelines-01.png) + +## 2) - Multiple Time Triggers + +As an enhancement to the previous sample with a single time trigger, this pipeline example implements two time resource +triggers and the ability to manually kick it off outside the time resources schedules. + +The first time you set up a pipeline like this you will need to manually trigger it in order to satisfy the passed +constraint of the `manual-trigger` resource. Once one version is available that satisfies the passed constraint all +future triggers by the other resources will work as expected. 
+ +```yaml +resources: + - name: trigger-every-4-minutes + type: time + source: + interval: 4m + - name: trigger-every-10-minutes + type: time + source: + interval: 10m + - name: manual-trigger + type: time + source: + interval: 1m + +jobs: + - name: manual-trigger + plan: + - put: manual-trigger + + - name: run-forrest-run + plan: + - get: trigger-every-4-minutes + trigger: true + - get: trigger-every-10-minutes + trigger: true + - get: manual-trigger + trigger: true + passed: + - manual-trigger + # can add other steps to run in this job + + - name: run-bubba-run + plan: + - get: trigger-every-4-minutes + trigger: true + passed: + - run-forrest-run + - get: trigger-every-10-minutes + trigger: true + passed: + - run-forrest-run + - get: manual-trigger + trigger: true + passed: + - run-forrest-run + # can add other steps to run in this job +``` + +![](assets/time-triggered-pipelines-02.png) \ No newline at end of file diff --git a/docs/docs/index.md b/docs/docs/index.md new file mode 100644 index 00000000..a48226c6 --- /dev/null +++ b/docs/docs/index.md @@ -0,0 +1,36 @@ +--- +title: Docs +--- + +Concourse is a pipeline-based continuous thing-doer. + +The term "pipeline" has become widespread in CI discussions, so being precise about what this means is important; +Concourse's pipelines differ significantly from others. + +Pipelines are built around Resources, which represent all external state, and Jobs, which interact with them. Concourse +pipelines function as dependency flows, similar to distributed Makefiles. Pipelines are designed to be self-contained to +minimize server-wide configuration. Maximizing portability also reduces risk, making it simpler for projects to recover +from CI disruptions. + +Resources like the git resource and s3 resource are used to express source code, dependencies, deployments, and other +external states. This interface also models more abstract concepts like scheduled or interval triggers, via the time +resource. 
+ +Resource Types are defined within the pipeline itself, making the pipelines more self-sufficient while keeping Concourse +lean and versatile without needing a complex plugin system. + +Jobs are sequences of get, put, and task steps to execute. These steps determine the job's inputs and outputs. Jobs are +designed to be idempotent and loosely coupled, allowing the pipeline to evolve with project needs without requiring +engineers to maintain too much context simultaneously. + +Everything in Concourse runs in a container. Instead of modifying workers to install build tools, Tasks define their own +container image (typically using Docker images via the registry-image resource). + +## ...What? + +Concourse admittedly has a steeper learning curve initially, and depending on your background it might seem like a lot +to grasp. A key goal of this project is for that curve to flatten out shortly after and lead to greater productivity and +reduced stress over time. + +If this all sounds confusing, that's OK - you may want to simply continue onward, start experimenting a bit, and use the +above as a quick reference of the "big picture" as your understanding develops. \ No newline at end of file diff --git a/docs/docs/install/concourse-cli.md b/docs/docs/install/concourse-cli.md new file mode 100644 index 00000000..c0329eef --- /dev/null +++ b/docs/docs/install/concourse-cli.md @@ -0,0 +1,54 @@ +--- +title: The concourse CLI +--- + +# The `concourse` CLI + +The `concourse` CLI can be downloaded from +the [latest GitHub release](https://github.com/concourse/concourse/releases/latest) - make sure to grab the appropriate +archive for your platform. Each `concourse-*` archive contains the following files: + +``` +concourse/bin/concourse +concourse/bin/gdn # Linux only +concourse/fly-assets/... +concourse/resource-types/... 
# Linux only +``` + +The Linux release is the largest among all the platforms because it is prepackaged with +a [bundle of resource types](running-worker.md#bundled-resource-types) like +the [git](https://github.com/concourse/git-resource), [time](https://github.com/concourse/time-resource/), +and [registry-image](https://github.com/concourse/registry-image-resource/) resources. Resources only run on Linux +workers, that's why the other platforms are not bundled with resources; resources don't currently exist for non-linux +platforms. + +When extracted, the `concourse` binary will auto-discover its sibling assets based on its file location, so you may +extract it anywhere. On Linux a typical install location is `/usr/local/concourse`: + +```shell +tar -zxf concourse-*.tgz -C /usr/local +``` + +From there, you can either add `/usr/local/concourse/bin` to your `$PATH`, or just execute +`/usr/local/concourse/bin/concourse` directly. + +## Configuring `concourse` + +All Concourse `web` and `worker` node configuration is defined statically via flags. For a full list of flags, you can +pass `--help` to any command. + +```shell title="CLI Commands" +concourse web --help +concourse worker --help +concourse quickstart --help +concourse migrate --help +concourse generate-key --help +concourse land-worker --help +concourse retire-worker --help +``` + +Each flag can also be set via an environment variable. The env var for each flag is based on the flag name uppercased, +preceded with `CONCOURSE_` and dashes (`-`) replaced with underscores (`_`). These are also shown in `--help`. + +Various sections in documentation may refer to configuration via env vars rather than flags, but they are both +equivalent and interchangeable. Env vars are simply easier to reference in isolation and are more useful to copy-paste. 
\ No newline at end of file diff --git a/docs/docs/install/generating-keys.md b/docs/docs/install/generating-keys.md new file mode 100644 index 00000000..02c7f9ca --- /dev/null +++ b/docs/docs/install/generating-keys.md @@ -0,0 +1,67 @@ +--- +title: Generating Keys +--- + +## Generating the Keys + +Concourse's various components use RSA keys to verify tokens and worker registration requests. + +A minimal deployment will require the following keys: + +* **Session Signing Key** + * Used by the [`web` node](running-web.md) for signing and verifying user session tokens. +* **TSA Host Key** + * Used by the [`web` node](running-web.md) for the SSH worker registration gateway server ("TSA"). + * The public key is given to each [`worker` node](running-worker.md) to verify the remote host when connecting via + SSH. +* **Worker Key** + * Each [`worker` node](running-worker.md) verifies its registration with the [`web` node](running-web.md) via a SSH + key. + * The public key must be listed in the [`web` node](running-web.md)'s authorized worker keys file in order for the + worker to register. + +To generate these keys, run: + +```shell +concourse generate-key -t rsa -f ./session_signing_key +concourse generate-key -t ssh -f ./tsa_host_key +concourse generate-key -t ssh -f ./worker_key +``` + +or use `ssh-keygen`: + +```shell +ssh-keygen -t rsa -b 4096 -m PEM -f ./session_signing_key +ssh-keygen -t rsa -b 4096 -m PEM -f ./tsa_host_key +ssh-keygen -t rsa -b 4096 -m PEM -f ./worker_key +``` + +At this point you should have the following files: + +* `session_signing_key` +* `tsa_host_key` +* `tsa_host_key.pub` +* `worker_key` +* `worker_key.pub` + +You can remove the `session_signing_key.pub` file if you have one, it is not needed by any process in Concourse. + +## Multiple Worker Keys + +Currently you have one `worker_key`. You can use this one key-pair with multiple [`worker` node](running-worker.md)s. 
+Another good strategy is to have each worker or group of workers use a key that's unique to that one worker or group of +workers. + +In the second case you will end up with multiple private and public worker keys. The [`web` node](running-web.md) needs +to know about all of the public worker keys. To pass all public worker keys to the [`web` node](running-web.md) create a +file that contains all of the worker public keys. A common name for this file is `authorized_worker_keys.pub`. The file +should look like this, with one public key per line. + +```shell +$ cat authorized_worker_keys.pub +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCgKtVnbGRJ7Y63QKoO+loS... +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDU6lA4gSRYIc4MXzphJ2l5... +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDgNU7KBz/QQusPO52pNcea... +``` + +You should now have all the necessary keys needed to deploy Web and Worker nodes. \ No newline at end of file diff --git a/docs/docs/install/index.md b/docs/docs/install/index.md new file mode 100644 index 00000000..4e63fc25 --- /dev/null +++ b/docs/docs/install/index.md @@ -0,0 +1,30 @@ +--- +title: Install +--- + +A Concourse installation is composed of a [`web` node](running-web.md), a [`worker` node](running-worker.md), and +a [PostgreSQL node](running-postgres.md). + +There are many ways to deploy Concourse, depending on your personal preference. +The [Quick Start](../getting-started/quick-start.md) guide shows how to get Concourse up and running quickly via Docker +Compose, and there is also an official [Concourse Helm chart](https://github.com/concourse/concourse-chart). + +The documentation found here will primarily focus on the `concourse` CLI, which is the lowest common denominator, and +can also be directly used if you want to just run Concourse yourself on real hardware or your own managed VMs. + +The high-level steps to follow for installing Concourse are: + +1. Setup a Postgres database +2. Generate Secrets for the web and worker nodes +3. 
Install the web node +4. Install the worker node + +!!! note + + We don't document every configuration option for the `web` and `worker` commands. To view all flags you can + run the following `docker` commands. + + ```shell + docker run -t concourse/concourse web --help + docker run -t concourse/concourse worker --help + ``` \ No newline at end of file diff --git a/docs/docs/install/running-postgres.md b/docs/docs/install/running-postgres.md new file mode 100644 index 00000000..46253a6a --- /dev/null +++ b/docs/docs/install/running-postgres.md @@ -0,0 +1,59 @@ +--- +title: Running a PostgreSQL Node +--- + +Concourse uses [PostgreSQL](https://www.postgresql.org/) for storing all data and coordinating work in a multi-[ +`web` node](running-web.md) installation. + +## Prerequisites + +[PostgreSQL](https://www.postgresql.org/) v11 or above is required, though the latest available version is recommended. + +## Running PostgreSQL + +How this node is managed is up to you; Concourse doesn't actually have much of an opinion on it, it just needs a +database. By default Concourse will try connecting to a database named `atc`. + +How to install PostgreSQL is really dependent on your platform. Please refer to your Linux distribution or operating +system's documentation. + +For the most part, the instruction on Linux should look something like this: + +```shell +sudo apt install postgresql +sudo su postgres -c "createuser $(whoami)" +sudo su postgres -c "createdb --owner=$(whoami) atc" +``` + +This will install PostgreSQL (assuming your distro uses `apt`), create a user, and create a database that the current +UNIX user can access, assuming this same user is going to be running the[`web` node](running-web.md). This is a +reasonable default for distros like Ubuntu and Debian which default PostgreSQL to `peer` auth. + +## Resource utilization + +**CPU usage**: this is one of the most volatile metrics, and one we try pretty hard to keep down. 
There will be +near-constant database queries running, and while we try to keep them very simple, there is always more work to do. +Expect to feed your database with at least a couple cores, ideally four to eight. Monitor this closely as the size of +your deployment and the amount of traffic it's handling increases, and scale accordingly. + +**Memory usage**: similar to CPU usage, but not quite as volatile. + +**Disk usage**: pipeline configurations and various bookkeeping metadata for keeping track of jobs, builds, resources, +containers, and volumes. In addition, **all build logs are stored in the database**. This is the primary source of disk +usage. To mitigate this, log retention can be defined by pipeline authors by using [ +`job.build_log_retention`](https://concourse-ci.org/jobs.html#schema.job.build_log_retention). Concourse operators can +also configure a default [Build log retention](running-web.md#build-log-retention) policy that applies to all pipelines. + +**Bandwidth usage**: well, it's a database, so it most definitely uses the network. Something important to consider here +is the number of simultaneous connections that the database server itself will allow. Postgres exposes a [ +`max_connections`](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS) +configuration variable, and depending on how many web nodes you are running and the size of +their [connection pool](running-web.md#database-connection-pooling), you may need to tune these two numbers against each +other. + +**Highly available**: Up to you. Clustered PostgreSQL is kind of new and probably tricky to deploy, but there are +various cloud solutions for this. 
+ +**Outbound traffic**: None + +**Inbound traffic**: Only ever from the `web` node \ No newline at end of file diff --git a/docs/docs/install/running-web.md b/docs/docs/install/running-web.md new file mode 100644 index 00000000..8f50d310 --- /dev/null +++ b/docs/docs/install/running-web.md @@ -0,0 +1,393 @@ +--- +title: Running a web Node +--- + +# Running a `web` node + +The `web` node is responsible for running the web UI, API, and as well as performing all pipeline scheduling. It's +basically the brain of Concourse. + +## Prerequisites + +Nothing special - the `web` node is a pretty simple Go application that can be run like +a [12-factor app](https://en.wikipedia.org/wiki/Twelve-Factor_App_methodology). + +## Running `concourse web` + +The `concourse` CLI can run as a `web` node via the `web` subcommand. + +Before running it, let's configure a local user so we can log in: + +```properties +CONCOURSE_ADD_LOCAL_USER=myuser:mypass +CONCOURSE_MAIN_TEAM_LOCAL_USER=myuser +``` + +This will configure a single user, `myuser`, with the password `mypass`. You'll probably want to change those to +sensible values, and later you may want to configure a proper auth provider - check +out [Auth & Teams](https://concourse-ci.org/auth.html) whenever you're ready. + +Next, you'll need to configure the session signing key, the SSH key for the worker gateway, and the authorized worker +key. Check [Generating Keys](generating-keys.md) to learn what these are and how they are created. + +```properties +CONCOURSE_SESSION_SIGNING_KEY=path/to/session_signing_key +CONCOURSE_TSA_HOST_KEY=path/to/tsa_host_key +CONCOURSE_TSA_AUTHORIZED_KEYS=path/to/authorized_worker_keys.pub +``` + +Finally, `web` needs to know how to reach your Postgres database. 
This can be set like so: + +```properties +CONCOURSE_POSTGRES_HOST=127.0.0.1 # default +CONCOURSE_POSTGRES_PORT=5432 # default +CONCOURSE_POSTGRES_DATABASE=atc # default +CONCOURSE_POSTGRES_USER=my-user +CONCOURSE_POSTGRES_PASSWORD=my-password +``` + +If you're running PostgreSQL locally, you can probably just point it to the socket and rely on the `peer` auth: + +```properties +CONCOURSE_POSTGRES_SOCKET=/var/run/postgresql +``` + +Now that everything's set, run: + +```shell +concourse web +``` + +All logs will be emitted to `stdout`, with any panics or lower-level errors being emitted to `stderr`. + +### Resource utilization + +CPU usage: peaks during pipeline scheduling, primarily when scheduling [Jobs](https://concourse-ci.org/jobs.html). +Mitigated by adding more `web` nodes. In this regard, `web` nodes can be considered compute-heavy more than anything +else at large scale. + +Memory usage: not very well classified at the moment as it's not generally a concern. Give it a few gigabytes and keep +an eye on it. + +Disk usage: none + +Bandwidth usage: aside from handling external traffic, the `web` node will at times have to stream bits out from one +worker and into another while executing [Steps](https://concourse-ci.org/steps.html). + +Highly available: `yes`; web nodes can all be configured the same (aside from `--peer-address`) and placed behind a load +balancer. Periodic tasks like garbage-collection will not be duplicated for each node. + +Horizontally scalable: yes; they will coordinate workloads using the database, resulting in less work for each node and +thus lower CPU usage. 
+ +Outbound traffic: + +* `db` on its configured port for persistence +* `db` on its configured port for locking and coordinating in a multi-`web` node deployment +* other `web` nodes (possibly itself) on an [ephemeral port](https://en.wikipedia.org/wiki/Ephemeral_port) when a worker + is forwarded through the web node's TSA + +Inbound traffic: + +* `worker` connects to the TSA on port `2222` for registration +* `worker` downloads inputs from the ATC during [`fly execute`](https://concourse-ci.org/tasks.html#running-tasks) via + its external URL +* external traffic to the ATC API via the web UI and [`fly` CLI](https://concourse-ci.org/fly.html) + +## Operating a `web` node + +The `web` nodes themselves are stateless - they don't store anything on disk, and coordinate entirely using the +database. + +### Scaling + +The [`web` node](running-web.md) can be scaled up for high availability. They'll also +roughly share their scheduling workloads, using the database to synchronize. This is done by just running more `web` +commands on different machines, and optionally putting them behind a load balancer. + +To run a cluster of [`web` nodes](running-web.md), you'll first need to ensure they're all pointing to the same +PostgreSQL server. + +Next, you'll need to configure a peer address. This is a DNS or IP address that can be used to reach this `web` node +from other `web` nodes. Typically this uses a private IP, like so: + +```properties +CONCOURSE_PEER_ADDRESS=10.10.0.1 +``` + +This address will be used for forwarded worker connections, which listen on +the [ephemeral port](https://en.wikipedia.org/wiki/Ephemeral_port) range. 
+ +Finally, if all of these nodes are going to be accessed through a load balancer, you'll need to configure the external +URL that will be used to reach your Concourse cluster: + +```properties +CONCOURSE_EXTERNAL_URL=https://ci.example.com +``` + +Aside from the peer URL, all configuration must be consistent across all `web` nodes in the cluster to ensure consistent +results. + +#### Database connection pooling + +You may wish to configure the max number of parallel database connections that each node makes. There are two pools to +configure: one for serving API requests, and one used for all the backend work such as pipeline scheduling. + +```properties +CONCOURSE_API_MAX_CONNS=10 # default +CONCOURSE_BACKEND_MAX_CONNS=50 # default +``` + +There are some non-configurable connection pools. They take up the following number of connections per pool: + +* Garbage Collection: 5 +* Lock: 1 +* Worker Registration: 1 + +The sum of these numbers across all `web` nodes should not be greater than the maximum number of simultaneous +connections your Postgres server will allow. See [ +`db` node resource utilization](running-postgres.md#resource-utilization) for more +information. + +For example, if 3 `web` nodes are configured with the values shown above then your PostgreSQL server should be +configured with a connection limit of at least 201: `(10 + 50 + 5 + 1 + 1) * 3 web nodes`. + +### Reloading worker authorized key + +While [Running `concourse web`](running-web.md#running-concourse-web), the authorized worker key +file, which contains all public keys for the workers, is loaded at startup. During the lifecycle of a [ +`web` node](running-web.md) new `worker` keys might be added or old ones removed. To +perform a live reload of this file you can send a `SIGHUP` signal to the `concourse web` process. The process will +remain running and Concourse will reload the authorized worker key file. 
+ +### Restarting & Upgrading + +The `web` nodes can be killed and restarted willy-nilly. No draining is necessary; if the `web` node was orchestrating a +build it will continue where it left off when it comes back, or the build will be picked up by one of the other `web` +nodes. + +To upgrade a `web` node, stop its process and start a new one using the newly installed `concourse`. Any database +migrations will be run automatically on start. If `web` nodes are started in parallel, only one will run the migrations. + +We don't currently guarantee a lack of funny-business if you're running mixed Concourse versions - database migrations +can perform modifications that confuse other `web` nodes. So there may be some turbulence during a rolling upgrade, but +everything should stabilize once all `web` nodes are running the latest version. + +If you want more control over when the database migrations happen and know if they were successful you can use the +`concourse migrate` command. The `migrate` command accepts the same `CONCOURSE_POSTGRES_*` env vars as the +`concourse web` command. + +### Downgrading + +If you're stuck in a pinch and need to downgrade from one version of Concourse to another, you can use the +`concourse migrate` command. + +First, grab the desired migration version by running the following: + +```shell +# make sure this is the *old* Concourse binary +$ concourse migrate --supported-db-version +1551110547 +``` + +That number (yours will be different) is the expected migration version for that version of Concourse. + +Next, run the following with the new Concourse binary: + +```shell +concourse migrate --migrate-db-to-version=1551110547 +``` + +This will need the same `CONCOURSE_POSTGRES_*` configuration described in [Running +`concourse web`](running-web.md#running-concourse-web). + +Once this completes, switch all `web` nodes back to the older `concourse` binary and you should be good to go. 
+ +## Configuring the `web` node + +### Giving your cluster a name + +If you've got many Concourse clusters that you switch between, you can make it slightly easier to notice which one +you're on by giving each cluster a name: + +```properties +CONCOURSE_CLUSTER_NAME=production +``` + +When set, this name will be shown in the top bar when viewing the dashboard. + +### Configuring ingress traffic + +If your web nodes are going to be accessed through multiple network layers, you will need to set `CONCOURSE_EXTERNAL_URL` to a +URL accessible by your Concourse users. If you don't set this property, logging in will incorrectly redirect to its +default value of `127.0.0.1`. + +If your web node(s) will be behind a load balancer or reverse proxy then you will need to ensure connections made by [
`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept) are properly handled by upgrading the connection. +Here is a sample nginx configuration that upgrades connections made by +[`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept). 
+ +```nginx linenums="1" +server { + server_name ci.example.com; + + add_header Strict-Transport-Security "max-age=31536000" always; + ssl_stapling on; + ssl_stapling_verify on; + + # Proxy main concourse traffic + location / { + proxy_pass http://concourse.local:8080/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Protocol $scheme; + proxy_set_header X-Forwarded-Host $http_host; + } + + # Proxy fly intercept traffic + location ~ /hijack$ { + proxy_pass http://concourse.local:8080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Protocol $scheme; + proxy_set_header X-Forwarded-Host $http_host; + # Upgrade connection + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + } +} +``` + +### TLS via Let's Encrypt + +Concourse can be configured to automatically acquire a TLS certificate via [Let's Encrypt](https://letsencrypt.org/): + +```properties +# Enable TLS +CONCOURSE_TLS_BIND_PORT=443 + +# Enable Let's Encrypt +CONCOURSE_ENABLE_LETS_ENCRYPT=true +``` + +!!! warning + + Concourse's Let's Encrypt integration works by storing the TLS certificate and key in the database, so it is + imperative that you enable [database encryption](https://concourse-ci.org/encryption.html) as well. + +By default, Concourse will reach out +to [Let's Encrypt's ACME CA directory](https://acme-v02.api.letsencrypt.org/directory). An alternative URL can be +configured like so: + +```properties +CONCOURSE_LETS_ENCRYPT_ACME_URL=https://acme.example.com/directory +``` + +In order to negotiate the certificate, your `web` node must be reachable by the ACME server. 
There are +intentionally [no publicly listed IP addresses to whitelist](https://letsencrypt.org/docs/faq/), so this typically means +just making your `web` node publicly reachable. + +### Build log retention + +Build logs are stored in the DB - if they are not cleaned up every once in a while, the storage usage for build logs +will continue to grow as more builds run. While this is usually fine for small Concourse instances, as you scale up, you +may run into storage concerns. + +To clean up old build logs, you can configure Concourse to periodically scan for builds whose logs should be reaped +based on a log retention policy, skipping over any paused pipelines and jobs. When a build's logs are reaped, they are +no longer visible in the UI. + +Concourse can be configured with a default build log retention policy for all jobs: + +```properties +CONCOURSE_DEFAULT_BUILD_LOGS_TO_RETAIN=50 +CONCOURSE_DEFAULT_DAYS_TO_RETAIN_BUILD_LOGS=14 +``` + +With these settings, Concourse will keep the latest 50 builds for each job. If a job runs more than 50 builds in 14 +days, all of those builds will be retained until 14 days after they ran. + +Some jobs have differing retention requirements - you can configure [
`build_log_retention_policy` schema](https://concourse-ci.org/jobs.html#schema.build_log_retention_policy) on a +job-by-job basis. + +You can also configure Concourse with maximum values for build log retention policies to prevent jobs from retaining +their build logs for too long: + +```properties +CONCOURSE_MAX_BUILD_LOGS_TO_RETAIN=100 +CONCOURSE_MAX_DAYS_TO_RETAIN_BUILD_LOGS=30 +``` + +With these settings, [
`build_log_retention_policy.builds`](https://concourse-ci.org/jobs.html#schema.build_log_retention_policy.builds) is +capped at 100, and [
`build_log_retention_policy.days`](https://concourse-ci.org/jobs.html#schema.build_log_retention_policy.days) is capped +at 30. 
+ +### Enabling audit logs + +A very simplistic form of audit logging can be enabled with the following vars: + +```properties +# Enable auditing for all api requests connected to builds. +CONCOURSE_ENABLE_BUILD_AUDITING=true + +# Enable auditing for all api requests connected to containers. +CONCOURSE_ENABLE_CONTAINER_AUDITING=true + +# Enable auditing for all api requests connected to jobs. +CONCOURSE_ENABLE_JOB_AUDITING=true + +# Enable auditing for all api requests connected to pipelines. +CONCOURSE_ENABLE_PIPELINE_AUDITING=true + +# Enable auditing for all api requests connected to resources. +CONCOURSE_ENABLE_RESOURCE_AUDITING=true + +# Enable auditing for all api requests connected to system transactions. +CONCOURSE_ENABLE_SYSTEM_AUDITING=true + +# Enable auditing for all api requests connected to teams. +CONCOURSE_ENABLE_TEAM_AUDITING=true + +# Enable auditing for all api requests connected to workers. +CONCOURSE_ENABLE_WORKER_AUDITING=true + +# Enable auditing for all api requests connected to volumes. +CONCOURSE_ENABLE_VOLUME_AUDITING=true +``` + +When enabled, API requests will result in an info-level log line like so: + +[//]: # (@formatter:off) +```json +{"timestamp":"2019-05-09T14:41:54.880381537Z","level":"info","source":"atc","message":"atc.audit","data":{"action":"Info","parameters":{},"user":"test"}} +{"timestamp":"2019-05-09T14:42:36.704864093Z","level":"info","source":"atc","message":"atc.audit","data":{"action":"GetPipeline","parameters":{":pipeline_name":["booklit"],":team_name":["main"]},"user":"test"}} +``` +[//]: # (@formatter:on) + +### Configuring defaults for resource types + +Defaults for the "core" resource +types ([those that show up under the Concourse org](https://github.com/concourse?q=-resource)) that comes with Concourse +can be set cluster-wide by passing in a configuration file. The format of the file is the name of the resource type +followed by an arbitrary configuration. 
+ +Documentation for each resource type's configuration is in each implementation's `README`. + +```properties +CONCOURSE_BASE_RESOURCE_TYPE_DEFAULTS=./defaults.yml +``` + +For example, a `defaults.yml` that configures the entire cluster to use a registry mirror would have: + +```yaml +registry-image: + registry_mirror: + host: https://registry.mirror.example.com +``` \ No newline at end of file diff --git a/docs/docs/install/running-worker.md b/docs/docs/install/running-worker.md new file mode 100644 index 00000000..321d9abd --- /dev/null +++ b/docs/docs/install/running-worker.md @@ -0,0 +1,666 @@ +--- +title: Running a worker node +--- + +# Running a `worker` node + +The `worker` node registers with the [`web` node](running-web.md) and is then used for executing builds and performing +resource `check`s. It doesn't really decide much on its own. + +## Prerequisites + +* Linux: + * We test and support the following distributions. Minimum kernel version tested is 5.15. + * Ubuntu 22.04 + * Ubuntu 24.04 + * Other Requirements: + * User namespaces must be enabled. + * To enforce memory limits on tasks, memory + swap accounting must be enabled. +* Windows/Darwin: + * no special requirements (that we know of). + +!!! note + + Windows containers are currently not supported and Darwin does not have native containers. Steps will run inside + a temporary directory on the Windows/Darwin worker. Any dependencies needed for your tasks (e.g. git, .NET, golang, + ssh) should be pre-installed on the worker. Windows/Darwin workers do not come with any resource types. + +## Running `concourse worker` + +The `concourse` CLI can run as a `worker` node via the `worker` subcommand. + +First, you'll need to configure a directory for the worker to store data: + +```properties +CONCOURSE_WORK_DIR=/opt/concourse/worker +``` + +This is where all the builds run, and where all resources are fetched in to, so make sure it's backed by enough storage. 
+ +Next, point the worker at your [`web` node](running-web.md) like so: + +```properties +CONCOURSE_TSA_HOST=10.0.2.15:2222 +CONCOURSE_TSA_PUBLIC_KEY=path/to/tsa_host_key.pub +CONCOURSE_TSA_WORKER_PRIVATE_KEY=path/to/worker_key +``` + +Finally start the worker: + +```shell +# run with -E to forward env config, or just set it all as root +sudo -E concourse worker +``` + +Note that the worker must be run as `root` because it orchestrates containers. + +All logs will be emitted to `stdout`, with any panics or lower-level errors being emitted to `stderr`. + +### Resource utilization + +**CPU usage**: almost entirely subject to pipeline workloads. More resources configured will result in more checking, +and in-flight builds will use as much CPU as they want. + +**Memory usage**: also subject to pipeline workloads. Expect usage to increase with the number of containers on the +worker and spike as builds run. + +**Bandwidth usage**: again, almost entirely subject to pipeline workloads. Expect spikes from periodic checking, though +the intervals should spread out over enough time. Resource fetching and pushing will also use arbitrary bandwidth. + +**Disk usage**: arbitrary data will be written as builds run, and resource caches will be kept and garbage collected on +their own life cycle. We suggest going for a larger disk size if it's not too much trouble. All state on disk must not +outlive the worker itself; it is all ephemeral. If the worker is re-created (i.e. fresh VM/container and all processes +were killed), it should be brought back with an empty disk. + +**Highly available**: not applicable. Workers are inherently singletons, as they're being used as drivers running +entirely different workloads. + +**Horizontally scalable**: yes; workers directly correlate to your capacity required by however many pipelines, +resources, and in-flight builds you want to run. It makes sense to scale them up and down with demand. 
+ +**Outbound traffic**: + +* External traffic to arbitrary locations as a result of periodic resource checking and running builds +* External traffic to the `web` node's configured external URL when downloading the inputs for a [
  `fly execute`](https://concourse-ci.org/tasks.html#running-tasks) +* External traffic to the `web` node's TSA port (`2222`) for registering the worker +* If P2P streaming is enabled there will be traffic to other workers. + +**Inbound traffic**: + +* From the [`web` node](running-web.md) on port `7777` (Garden) and `7788` (BaggageClaim). These ports do not need to be + exposed, they are forwarded to the web node via the ssh connection on port `2222`. +* If P2P streaming is enabled there will be traffic from other workers. + +## Operating a `worker` node + +The `worker` nodes are designed to be stateless and as interchangeable as +possible. [Tasks](https://concourse-ci.org/tasks.html) and [Resources](https://concourse-ci.org/resources.html) bring +their own Docker images, so you should never have to install dependencies on the worker. Windows and Darwin workers are +the exception to this. Any dependencies should be pre-installed on Windows and Darwin workers. + +In Concourse, all important data is represented by [Resources](https://concourse-ci.org/resources.html), so the workers +themselves are dispensable. Any data in the work-dir is ephemeral and should go away when the worker machine is +removed - it should not be persisted between worker VM or container re-creates. + +### Scaling Workers + +More workers should be added to accommodate more pipelines. To know when this is necessary you should probably set +up [Metrics](https://concourse-ci.org/metrics.html) and keep an eye on container counts. If average container count +starts to approach 200 or so per worker, you should probably add another worker. Load average is another metric to keep +an eye on. 
+ +To add a worker, just create another machine for the worker and follow the [Running +`concourse worker`](running-worker.md#running-a-worker-node) instructions again. + +!!! note + + It doesn't make sense to run multiple workers on one machine since they'll both be contending for the same + physical resources. Workers should be given their own VMs or physical machines to maximize resource usage. + +#### Horizontal vs Vertical Scaling + +The answer to whether you should scale your workers horizontally or vertically depends heavily on what workloads your +pipelines are running. Anecdotally though, we have seen that a lot of smaller workers (horizontal scaling) is usually +better than a few large workers (vertical scaling). + +Again, this is not an absolute answer! You will have to test this out against the workloads your pipelines demand and +adjust based on the [Metrics](https://concourse-ci.org/metrics.html) that you are tracking. + +### Worker Heartbeating & Stalling + +Workers will continuously heartbeat to the Concourse cluster in order to remain registered and healthy. If a worker +hasn't checked in after a while, possibly due to a network error, being overloaded, or having crashed, the web node will +transition its state to `stalled` and new workloads will not be scheduled on that worker until it recovers. + +If the worker remains in this state and cannot be recovered, it can be removed using the [ +`fly prune-worker`](https://concourse-ci.org/administration.html#fly-prune-worker) command. + +### Restarting a Worker + +Workers can be restarted in-place by sending `SIGTERM` to the worker process and starting it back up. Containers will +remain running and Concourse will reattach to builds that were in flight. + +This is a pretty aggressive way to restart a worker, and may result in errored builds - there are a few moving parts +involved and we're still working on making this airtight. 
+ +A safer way to restart a worker is to land it by sending `SIGUSR1` to the `worker` process. This will switch the worker +to the `landing` state and Concourse will stop scheduling new work on it. When all builds running on the worker have +finished, the process will exit. + +You may want to enforce a timeout for draining - that way a stuck build won't prevent your workers from being upgraded. +This can be enforced by common tools like `start-stop-daemon`: + +```shell +start-stop-daemon \ + --pidfile worker.pid \ + --stop \ + --retry USR1/300/TERM/15/KILL +``` + +This will send `SIGUSR1`, wait up to 5 minutes, and then send `SIGTERM`. If it's _still_ running, it will be killed +after an additional 15 seconds. + +Once the timeout is enforced, there's still a chance that builds that were running will continue when the worker comes +back. + +### Gracefully Removing a Worker + +When a worker machine is going away, it should be _retired_. This is similar to _landing_, except at the end the worker +is completely unregistered, along with its volumes and containers. This should be done when a worker's VM or container +is being destroyed. + +To retire a worker, send `SIGUSR2` to the `worker` process. This will switch the worker to `retiring` state, and +Concourse will stop scheduling new work on it. When all builds running on the worker have finished, the worker will be +removed and the `worker` process will exit. + +Just like with landing, you may want to enforce a timeout for draining - that way a stuck build won't prevent your +workers from being upgraded. This can be enforced by common tools like `start-stop-daemon`: + +```shell +start-stop-daemon \ + --pidfile worker.pid \ + --stop \ + --retry USR2/300/TERM/15/KILL +``` + +This will send `SIGUSR2`, wait up to 5 minutes, and then send `SIGTERM`. If it's _still_ running, it will be killed +after an additional 15 seconds. 
+ +## Configuring the `worker` node + +### Tagging Workers + +If there's something special about your worker and you'd like to target builds at it specifically, you can configure +tags like so: + +```shell +CONCOURSE_TAG="tag-1,tag-2" +``` + +A tagged worker is taken out of the default placement logic. Tagged workers will not be used for any +untagged [Steps](https://concourse-ci.org/steps.html). + +To run build steps on a tagged worker, specify the [`tags`](https://concourse-ci.org/tags-step.html#schema.tags) on any +particular step in your [job](https://concourse-ci.org/jobs.html). + +To perform resource `check`s on a tagged worker, specify [ +`tags`](https://concourse-ci.org/resources.html#schema.resource.tags) on the resource declaration. + +### Team Workers + +If you want to isolate [**all workloads +**](https://concourse-ci.org/global-resources.html#complications-with-reusing-containers) for +a [team](https://concourse-ci.org/managing-teams.html) then you can configure a worker to belong to a single team like +so: + +```properties +CONCOURSE_TEAM="lightweavers" +``` + +Once an untagged team worker is registered Concourse will schedule all untagged builds for that team on its team worker( +s). Builds for this team will no longer be scheduled on any untagged, non-team workers. + +It is possible to have a Concourse cluster made up of only team workers and have zero non-team workers, though this is +not a common setup because resource utilization across all workers ends up underutilized. It is useful though if you +have a particular team with heavy workloads that usually bothers other teams pipelines. + +#### Tags and Team Workers + +When you have a worker configured with tag(s) and a team like so: + +```properties +CONCOURSE_TAG="tag-1,tag-2" +CONCOURSE_TEAM="lightweavers" +``` + +Only steps that are tagged and from the specified team will be scheduled on such a worker. Any untagged work the team +has will land on either: + +1. 
Untagged team workers belonging to the team, or +2. Untagged workers not configured to a specific team + +### Healthcheck Endpoint + +The worker will automatically listen on port `8888` as its healthcheck endpoint. It will return a `HTTP 200` status code +with an empty body on a successful check. A successful check means the worker can reach +the [Garden and BaggageClaim servers](https://concourse-ci.org/internals.html#architecture-worker). + +The healthcheck endpoint is configurable through three variables: + +```shell +concourse worker --healthcheck-bind-ip= +# IP address on which to listen for health checking requests. (default: 0.0.0.0) + +concourse worker --healthcheck-bind-port +# Port on which to listen for health checking requests. (default: 8888) + +concourse worker --healthcheck-timeout +# HTTP timeout for the full duration of health checking. (default: 5s) +``` + +### Resource Types + +!!! note + + The following section only applies to Linux workers. Resource types are simply Linux container images and therefore + can't be run on Windows or Darwin workers. + +#### Bundled Resource Types + +Workers come prepackaged with a bundle of resource types. They are included in the tarball from +the [GitHub release page](https://github.com/concourse/concourse/releases) and are part of +the [concourse/concourse image](https://hub.docker.com/r/concourse/concourse). + +To view the resource types available on a worker run: + +```shell +fly workers --details +``` + +If you want more details, like the version number of each resource, you can run: + +```shell +fly curl api/v1/workers +``` + +#### Installing or Upgrading Bundled Resource Types + +You may want to upgrade the bundled resource types outside of Concourse upgrades or even install additional resource +types on your workers to reduce the polling on some external image repository +like [Docker Hub](https://hub.docker.com/). 
+ +We will use the [git resource](https://github.com/concourse/git-resource) as our example. We will assume your Concourse +installation is at `/usr/local/concourse`. + +First, pull and create a container of the resource you're installing/upgrading. Grab the ID of the container that Docker +creates. + +```shell +$ docker run -d concourse/git-resource +b253417142565cd5eb43902e94a2cf355d5354b583fbc686488c9a153584c6ba +``` + +Export the container's file system into a gzip compressed tar archive named `rootfs.tgz` + +```shell +docker export b253417142 | gzip > rootfs.tgz +``` + +Create a file called `resource_metadata.json` and populate it with the following contents. Make sure the `type` does not +conflict with an existing resource type when you're installing a new resource type. In our example here we're calling +the type `gitv2` to avoid conflicting with the pre-existing `git` resource. + +```json +{ + "type": "gitv2", + "version": "1.13.0", + "privileged": false, + "unique_version_history": false +} +``` + +At this point you should have two files: `rootfs.tgz` and `resource_metadata.json`. + +Create a new directory under the `resource-types` folder in your Concourse installation directory. By convention, it +should be the same name as the `type`. + +```shell +mkdir /usr/local/concourse/resource-types/gitv2 +``` + +Place the `rootfs.tgz` and `resource_metadata.json` inside the +folder. 
[Restart your worker](running-worker.md#restarting-a-worker) and verify the new resource type is on there by +running one of the following commands: + +```shell +fly workers --details +# or +fly curl api/v1/workers +``` + +You can also verify that Concourse can create a container with the `rootfs.tgz` you made by running a simple pipeline: + +```yaml +resources: + - name: some-resource + type: gitv2 #change to your resource type + source: + uri: https://github.com/concourse/git-resource.git + +jobs: + - name: simple-job + plan: + - get: some-resource +``` + +### Configuring Runtimes + +The worker can be run with multiple container +runtimes - [containerd](https://github.com/containerd/containerd/), [Guardian](https://github.com/cloudfoundry/guardian), +and [Houdini](https://github.com/vito/houdini) (an experimental and the only runtime for Darwin and Windows). Only +`containerd` and `Guardian` are meant for production use. `Guardian` is the default runtime for Concourse. + +!!! note "Note about Architecture" + + The web node (ATC) talks to all 3 runtimes via a single interface called the + [Garden](https://github.com/cloudfoundry/garden) server. While Guardian comes packaged with a Garden server and + its flags in Concourse are unfortunately prefixed with `--garden-*`, Guardian (a runtime) and Garden + (an interface and server) are two separate tools. An analogy for Garden would be the [Container Runtime + Interface (CRI)](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/) used in + Kubernetes. Kubernetes uses containerd via CRI. Concourse uses containerd via Garden. + +#### `containerd` runtime + +To use the `containerd` runtime manually set the `--runtime` (`CONCOURSE_RUNTIME`) to `containerd` on the +`concourse worker` command. + +The following is a list of the `containerd` runtime specific flags for Concourse that can be set. They are all optional +and have default values. 
+ +``` +Containerd Configuration: + --containerd-config= Path to a config file to use for the Containerd daemon. [$CONCOURSE_CONTAINERD_CONFIG] + --containerd-bin= Path to a containerd executable (non-absolute names get resolved from $PATH). [$CONCOURSE_CONTAINERD_BIN] + --containerd-init-bin= Path to an init executable (non-absolute names get resolved from $PATH). (default: /usr/local/concourse/bin/init) [$CONCOURSE_CONTAINERD_INIT_BIN] + --containerd-cni-plugins-dir= Path to CNI network plugins. (default: /usr/local/concourse/bin) [$CONCOURSE_CONTAINERD_CNI_PLUGINS_DIR] + --containerd-log-level Minimum level of logs to see. (default: info) [$CONCOURSE_CONTAINERD_LOG_LEVEL] + --containerd-request-timeout= How long to wait for requests to Containerd to complete. 0 means no timeout. (default: 5m) [$CONCOURSE_CONTAINERD_REQUEST_TIMEOUT] + --containerd-max-containers= Max container capacity. 0 means no limit. (default: 250) [$CONCOURSE_CONTAINERD_MAX_CONTAINERS] + --containerd-privileged-mode= How many privileges privileged containers get. full is equivalent to root on host. ignore means no extra privileges. fuse-only means enough to use fuse-overlayfs. (default: full) [$CONCOURSE_CONTAINERD_PRIVILEGED_MODE] + +Containerd Container Networking: + --containerd-external-ip= IP address to use to reach container's mapped ports. Autodetected if not specified. [$CONCOURSE_CONTAINERD_EXTERNAL_IP] + --containerd-dns-server= DNS server IP address to use instead of automatically determined servers. Can be specified multiple times. [$CONCOURSE_CONTAINERD_DNS_SERVER] + --containerd-restricted-network= Network ranges to which traffic from containers will be restricted. Can be specified multiple times. [$CONCOURSE_CONTAINERD_RESTRICTED_NETWORK] + --containerd-additional-hosts= Additional entries to add to /etc/hosts in containers. [$CONCOURSE_CONTAINERD_ADDITIONAL_HOSTS] + --containerd-network-pool= Network range to use for dynamically allocated container subnets. 
(default: 10.80.0.0/16) [$CONCOURSE_CONTAINERD_NETWORK_POOL] + --containerd-mtu= MTU size for container network interfaces. Defaults to the MTU of the interface used for outbound access by the host. [$CONCOURSE_CONTAINERD_MTU] + --containerd-allow-host-access Allow containers to reach the host's network. This is turned off by default. [$CONCOURSE_CONTAINERD_ALLOW_HOST_ACCESS] + +DNS Proxy Configuration: + --containerd-dns-proxy-enable Enable proxy DNS server. Note: this will enable containers to access the host network. [$CONCOURSE_CONTAINERD_DNS_PROXY_ENABLE] +``` + +!!! warning + + Make sure to read [A note on allowing host access](running-worker.md#a-note-on-allowing-host-access-and-dns-proxy) + and DNS proxy to understand the implications of using `--containerd-allow-host-access` and + `--containerd-dns-proxy-enable` + +#### Transitioning from Guardian to containerd + +If you are transitioning from `Guardian` to `containerd` you will need to convert any `--garden-*` ( +`CONCOURSE_GARDEN_*`) flags to their `containerd` (`CONCOURSE_CONTAINERD_*`) counterparts: + +| Guardian Flags | Containerd Flags | +|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------| +| `--garden-request-timeout`
`CONCOURSE_GARDEN_REQUEST_TIMEOUT` | `--containerd-request-timeout`
`CONCOURSE_CONTAINERD_REQUEST_TIMEOUT` | +| `--garden-dns-proxy-enable`
`CONCOURSE_GARDEN_DNS_PROXY_ENABLE` | `--containerd-dns-proxy-enable`
`CONCOURSE_CONTAINERD_DNS_PROXY_ENABLE` | +| _No equivalent CLI flag_
`CONCOURSE_GARDEN_ALLOW_HOST_ACCESS` | `--containerd-allow-host-access`
`CONCOURSE_CONTAINERD_ALLOW_HOST_ACCESS` | +| `--garden-network-pool`
`CONCOURSE_GARDEN_NETWORK_POOL` | `--containerd-network-pool`
`CONCOURSE_CONTAINERD_NETWORK_POOL` | +| `--garden-max-containers`
`CONCOURSE_GARDEN_MAX_CONTAINERS` | `--containerd-max-containers`
`CONCOURSE_CONTAINERD_MAX_CONTAINERS` | +| _No equivalent CLI flag_
`CONCOURSE_GARDEN_DENY_NETWORKS` | `--containerd-restricted-network`
`CONCOURSE_CONTAINERD_RESTRICTED_NETWORK` | +| _No equivalent CLI flag or ENV option.
Configured through_ `garden_config.ini` | `--containerd-additional-hosts`
`CONCOURSE_CONTAINERD_ADDITIONAL_HOSTS` | +| _No equivalent CLI flag_
`CONCOURSE_GARDEN_DNS_SERVER` | `--containerd-dns-server`
`CONCOURSE_CONTAINERD_DNS_SERVER` | +| _No equivalent CLI flag_
`CONCOURSE_GARDEN_EXTERNAL_IP` | `--containerd-external-ip`
`CONCOURSE_CONTAINERD_EXTERNAL_IP` | +| _No equivalent CLI flag_
`CONCOURSE_GARDEN_MTU` | `--containerd-mtu`
`CONCOURSE_CONTAINERD_MTU` | + +#### `Guardian` runtime + +Guardian is currently the default runtime for Concourse. It can also be set by setting the `--runtime` flag to +`guardian` on the `concourse worker` command. + +The `concourse worker` command automatically configures and runs `Guardian` using the `gdn` binary, but depending on the +environment you're running Concourse in, you may need to pop open the hood and configure a few things. + +The `gdn` server can be configured in two ways: + +1. By creating a `config.ini` file and passing it as `--garden-config` (or `CONCOURSE_GARDEN_CONFIG`). The .ini file + should look something like this: + ```ini + [server] + flag-name=flag-value + ``` + To learn which flags can be set, consult `gdn server --help`. Each flag listed can be set under the `[server]` + heading. + +2. By setting `CONCOURSE_GARDEN_*` environment variables. This is primarily supported for backwards compatibility, and + these variables are not present in `concourse worker --help`. They are translated to flags passed to `gdn server` by + lower-casing the `*` portion and replacing underscores with hyphens. + +#### Troubleshooting and fixing DNS resolution + +!!! note + + The Guardian runtime took care of a lot of container creation operations for Concourse in the past. It was very + user-friendly for the project to use as a container runtime. While implementing the containerd runtime most + reported bugs were actually a difference in containerd's default behaviour compared to Guardian's. Currently + Concourse's containerd runtime mostly behaves like the Guardian runtime did. Most of the following DNS section + should apply to both runtimes. + +By default, containers created by the Guardian or containerd (will refer to both as _runtime_) runtime will carry over +the `/etc/resolv.conf` from the host into the container. This is often fine, but some Linux distributions configure a +special `127.x.x.x` DNS resolver (e.g. `systemd-resolved`). 
+ +When the runtime copies the `resolv.conf` over, it removes these entries as they won't be reachable from the container's +network namespace. As a result, your containers may not have any valid nameservers configured. + +To diagnose this problem you can [`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept) into a failing +container and check which nameservers are in `/etc/resolv.conf`: + +```shell +$ fly -t ci intercept -j concourse/concourse +bash-5.0$ grep nameserver /etc/resolv.conf +bash-5.0$ +``` + +In this case it is empty, as the host only listed a single `127.0.0.53` address which was then stripped out. To fix this +you'll need to explicitly configure DNS instead of relying on the default runtime behavior. + +##### Pointing to external DNS servers + +If you have no need for special DNS resolution within your Concourse containers, you can configure your containers to +use specific DNS server addresses external to the VM. + +The Guardian and containerd runtimes can have their DNS servers configured with flags or envs vars. 
+ +=== "DNS via flags (containerd)" + + ```shell + concourse worker --containerd-dns-server="1.1.1.1" --containerd-dns-server="8.8.8.8" + ``` + +=== "DNS via env vars" + + ```properties + # containerd runtime + CONCOURSE_CONTAINERD_DNS_SERVER="1.1.1.1,8.8.8.8" + # Guardian runtime + CONCOURSE_GARDEN_DNS_SERVER="1.1.1.1,8.8.8.8" + ``` + +=== "`config.ini` (Guardian)" + + ```ini + [server] + ; configure Google DNS + dns-server = 8.8.8.8 + dns-server = 8.8.4.4 + ``` + +To verify this solves your problem you can [`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept) into a +container and check which nameservers are in `/etc/resolv.conf`: + +```shell +$ fly -t ci intercept -j my-pipeline/the-job +bash-5.0$ cat /etc/resolv.conf +nameserver 1.1.1.1 +nameserver 8.8.8.8 +bash-5.0$ ping google.com +PING google.com (108.177.111.139): 56 data bytes +64 bytes from 108.177.111.139: seq=0 ttl=47 time=2.672 ms +64 bytes from 108.177.111.139: seq=1 ttl=47 time=0.911 ms +``` + +##### Using a local DNS server + +If you would like to use Consul, `dnsmasq`, or some other DNS server running on the worker VM, you'll have to configure +the LAN address of the VM as the DNS server and allow the containers to reach the address, like so: + +=== "Local DNS via flags (containerd)" + + ```shell + concourse worker --containerd-dns-server="10.0.1.3" --containerd-allow-host-access="true" + ``` + +=== "Local DNS via env vars" + + ```properties + # containerd runtime + CONCOURSE_CONTAINERD_DNS_SERVER="10.0.1.3" + CONCOURSE_CONTAINERD_ALLOW_HOST_ACCESS="true" + # Guardian runtime + CONCOURSE_GARDEN_DNS_SERVER="10.0.1.3" + CONCOURSE_GARDEN_ALLOW_HOST_ACCESS="true" + ``` + +=== "`config.ini` (Guardian)" + + ```ini + [server] + ; internal IP of the worker machine + dns-server=10.0.1.3 + + ; allow containers to reach the above IP + allow-host-access=true + ``` + +!!! 
warning + + Make sure to read [A note on allowing host access](running-worker.md#a-note-on-allowing-host-access-and-dns-proxy) + and DNS proxy to understand the implications of using `allow-host-access` + +To validate whether the changes have taken effect, you can [ +`fly intercept`](https://concourse-ci.org/builds.html#fly-intercept) into any container and check `/etc/resolv.conf` +once again: + +```shell +$ fly -t ci intercept -j my-pipeline/the-job +bash-5.0$ cat /etc/resolv.conf +nameserver 10.1.2.3 +bash-5.0$ nslookup concourse-ci.org +Server: 10.1.2.3 +Address: 10.1.2.3#53 + +Non-authoritative answer: +Name: concourse-ci.org +Address: 185.199.108.153 +Name: concourse-ci.org +Address: 185.199.109.153 +Name: concourse-ci.org +Address: 185.199.110.153 +Name: concourse-ci.org +Address: 185.199.111.153 +``` + +If `nslookup` times out or fails, you may need to open up firewalls or security group configuration so that the worker +VM can send UDP/TCP packets to itself. + +##### A note on allowing host access and DNS proxy + +Setting `allow-host-access` will, well, allow containers to access your host VM's network. If you don't trust your +container workloads, you may not want to allow this. With host network access, containers will be able to reach out to +any other locally running network processes running on the worker including the garden and baggageclaim servers **which +would allow them to issue commands and manipulate other containers and volumes on the same worker**. + +Setting `dns-proxy-enable` will also enable `allow-host-access` (since the dns proxy will be run on the host, therefore +requiring host access be enabled). + +### Configuring Peer-to-Peer Volume Streaming + +Peer-to-Peer (P2P) volume streaming enables the workers to stream volumes directly to each other instead of always +streaming volumes through the web node(s). 
This can reduce the time it takes for individual steps in a job to start and +reduce the amount of network traffic used by the Concourse cluster. + +!!! warning "Experimental Feature" + + This feature is experimental. It is not as robust as the default volume streaming setup which always goes + through web nodes. + +**Pre-Requisites** + +* All worker nodes need to be able to reach each other via IP address. This usually means they are on the same LAN. You + can test this by trying to ping one worker from another worker. If even one worker does not meet this requirement then + you cannot use P2P volume streaming. +* The baggageclaim port (`7788` is the default) is open to traffic on all worker nodes. You can verify the port is open + and reaching the baggageclaim API server by hitting the `/volumes` endpoint. +
+
+ ```shell
+ curl http://<worker-ip>:7788/volumes
+ ```
+
+To enable P2P volume streaming you need to configure some settings on the web and worker nodes. Configure the worker
+nodes first. Configure the web node(s) last.
+
+#### P2P Worker Configuration
+
+* `CONCOURSE_BAGGAGECLAIM_BIND_IP=0.0.0.0` - _Required_. The worker needs to listen for traffic over `127.0.0.1` (to
+ receive info from the web node) as well as its LAN IP in a P2P setup. Therefore, we need to set the IP baggageclaim
+ binds to `0.0.0.0`.
+* `CONCOURSE_BAGGAGECLAIM_P2P_INTERFACE_NAME_PATTERN=eth0` - _Optional_. Regular expression to match a network interface
+ for P2P streaming. This is how a worker determines its own LAN IP address, by looking it up via the LAN interface
+ specified by this flag.
+
+
+ You can determine the name of the LAN interface for any worker by listing all network interfaces and noting which + interface has the LAN IP that you want the worker to use. +
+
+ To view all available network interfaces on your worker: + * On Linux run `ip addr list` + * On MacOS run `ifconfig` + * On Windows run `ipconfig`. Windows network interface names are very different from Unix device names. Example + network interface names for Windows include: +
+ ``` + Ethernet 4 + Local Area Connection* 2 + Local Area Connection* 12 + Wi-Fi 5 + Bluetooth Network Connection 2 + Loopback Pseudo-Interface 1 + ``` +* `CONCOURSE_BAGGAGECLAIM_P2P_INTERFACE_FAMILY=4` - _Optional_. Tells the worker to use IPv4 or IPv6. Defaults to `4` + for IPv4. Set to `6` for IPv6. + +#### P2P Web Configuration + +You need to tell the web node(s) to use P2P volume streaming. + +```shell +CONCOURSE_ENABLE_P2P_VOLUME_STREAMING=true +``` + +Once that flag is set and the web node is restarted, P2P volume streaming will start occurring in your Concourse +cluster. \ No newline at end of file diff --git a/docs/docs/install/upgrading-concourse.md b/docs/docs/install/upgrading-concourse.md new file mode 100644 index 00000000..faffe690 --- /dev/null +++ b/docs/docs/install/upgrading-concourse.md @@ -0,0 +1,49 @@ +--- +title: Upgrading Concourse +--- + +Be careful to check the "Breaking Changes" in the release notes - in particular, you'll want to look for any flags that +have changed. + +## Upgrading the Web Node + +The web node is upgraded by stopping the Concourse process, swapping out the `concourse` binary with the new one, and +re-starting it. + +Each [`web` node](running-web.md) will automatically run database migrations on start-up and lock via the database to +ensure only one of the web nodes runs the migrations. We currently do not guarantee zero-downtime upgrades, as +migrations may make changes that confuse the older web nodes. This should resolve as each web node is upgraded, and +shouldn't result in any inconsistent state. 
+ +Typically, Concourse can be upgraded from any version to any other version, though around 3.x and 4.x we made some +changes to how migrations are run, and as a result the following upgrade paths must be followed: + +| Current Version | Upgrade Path | +|------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| < [v3.6.0](https://github.com/concourse/concourse/releases/tag/v3.6.0) | [v3.6.0](https://github.com/concourse/concourse/releases/tag/v3.6.0) -> [v4.0.0](https://github.com/concourse/concourse/releases/tag/v4.0.0) -> latest | +| = [v3.6.0](https://github.com/concourse/concourse/releases/tag/v3.6.0) | [v4.0.0](https://github.com/concourse/concourse/releases/tag/v4.0.0) -> latest | + +We'll try to minimize this kind of thing in the future. + +Lastly, you will want to overwrite the contents of `concourse/fly-assets` with the contents from +the [GitHub release tarball](https://github.com/concourse/concourse/releases) so users can [ +`fly sync`](https://concourse-ci.org/fly.html#fly-sync) to the correct version. + +## Upgrading the Worker Node + +The worker node is upgraded by stopping the Concourse process, swapping out the `concourse` binary with the new one, and +re-starting it. + +### Linux Workers + +The Linux tarball from the [GitHub release page](https://github.com/concourse/concourse/releases) contains extra assets +that you will want to ensure are also upgraded at the same time. 
Make sure you overwrite the contents of the following +directories: + +* `concourse/bin/...` - Other binaries like `gdn`, `runc`, and `containerd` are in this directory +* `concourse/resource-types/...` - The location of the + default [resource-types](https://concourse-ci.org/resource-types.html) included with each Concourse release + +### Darwin and Windows Workers + +There are no additional steps for upgrading Darwin and Windows workers. \ No newline at end of file diff --git a/images/concourse_architecture.png b/docs/docs/internals/assets/index-01.png similarity index 100% rename from images/concourse_architecture.png rename to docs/docs/internals/assets/index-01.png diff --git a/docs/docs/internals/build-tracker.md b/docs/docs/internals/build-tracker.md new file mode 100644 index 00000000..be1b05e8 --- /dev/null +++ b/docs/docs/internals/build-tracker.md @@ -0,0 +1,7 @@ +--- +title: Build Tracker +--- + +The build tracker is the component that runs the execution of a build. It picks up any started builds, which can be +orphaned builds (builds that an ATC started but did not finish) or builds that have just been scheduled. There is one +build tracker per ATC, which runs on an interval that is defaulted to 10 seconds. \ No newline at end of file diff --git a/docs/docs/internals/checker.md b/docs/docs/internals/checker.md new file mode 100644 index 00000000..18a645fb --- /dev/null +++ b/docs/docs/internals/checker.md @@ -0,0 +1,46 @@ +--- +title: Resource Checker +--- + +[Resources](../resources/index.md) represent external state such as a git repository, files in an S3 bucket, or anything +else that changes over time. By modelling these as resources, it allows you to use this external state as inputs (or +triggers) to your workloads. + +## When are resources checked? + +The component that schedules resource checks is called the **resource checker**. 
The rate at which these checks happen +is called the check interval (configurable via `CONCOURSE_LIDAR_SCANNER_INTERVAL`). There's an obvious tradeoff, whereby +the more frequently you poll, the bigger the strain on Concourse (as well as the external source). However, if you want +to pick up those new commits as quickly as possible, then you need to poll as often as possible. + +The resource checker uses the [`resource.check_every`](../resources/index.md#resource-schema) interval in order to +figure out if a resource needs to be checked. A resource's `check_every` interval dictates how often it should be +checked for new versions, with a default of 1 minute. If that seems like a lot of checking, it is, but it's how +Concourse keeps everything snappy. You can configure this interval independently for each resource using `check_every`. + +If your external service supports it, you can set [`resource.webhook_token`](../resources/index.md#resource-schema) to +eliminate the need for periodic checking altogether. If a `webhook_token` is configured, the external service can notify +Concourse when to check for new versions. Note that configuring a `webhook_token` will not stop Concourse from +periodically checking your resource. If you wish to rely solely on webhooks for detecting new versions, you can +set `check_every` to `never`. + +On every interval tick, the resource checker will see if there are any resources that need to be checked. It does this +by first finding resources which are used as inputs to jobs, and then comparing the current time against the last time +each resource was checked. If it has been longer than a resource's configured `check_every` interval, a new check will +be scheduled. In practice this means that if a resource has a `check_every` of `1m`, it is not guaranteed to be checked +precisely every 60 seconds. `check_every` simply sets a lower bound on the time between checks. 
+ +When the resource checker finds a resource to check (either because its `check_every` interval elapsed, or because its +configured `webhook_token` was triggered), it schedules a new build that invokes +the [`check` script](../resource-types/implementing.md#check-check-for-new-versions) of the resource's +underlying [resource type](../resource-types/index.md). + +## What do resource checks produce? + +The whole point of running checks is to produce versions. Concourse's [Build Scheduler](scheduler.md) is centered around +the idea of resource versions. It's how Concourse determines that something is new and a new build needs to be +triggered. + +The versions produced by each resource are unique to the underlying [resource type](../resource-types/index.md). For +instance, the `git` resource type uses commit SHAs as versions. The `registry-image` resource uses the image digest and +tag in the version. \ No newline at end of file diff --git a/docs/docs/internals/garbage-collector.md b/docs/docs/internals/garbage-collector.md new file mode 100644 index 00000000..4916a9c2 --- /dev/null +++ b/docs/docs/internals/garbage-collector.md @@ -0,0 +1,43 @@ +--- +title: Garbage Collector +--- + +Concourse runs everything in isolated environments by creating fresh containers and volumes to ensure things can safely +run in a repeatable environment, isolated from other workloads running on the same worker. + +This introduces a new problem of knowing when Concourse should remove these containers and volumes. Safely identifying +things for removal and then getting rid of them, releasing their resources, is the process of _garbage collection_. + +## Goals + +Let's define our metrics for success: + +* **Safe.** There should never be a case where a build is running and a container or volume is removed out from under + it, causing the build to fail. Resource checking should also never result in errors from check containers being + removed. 
No one should even know garbage collection is happening. +* **Airtight.** Everything Concourse creates, whether it's a container or volume on a worker or an entry in the + database, should never leak. Each object should have a fully defined lifecycle such that there is a clear end to its + use. The ATC should be interruptible at any point in time and at the very least be able to remove any state it had + created beforehand. +* **Resilient.** Garbage collection should never be outpaced by the workload. A single misbehaving worker should not + prevent garbage collection from being performed on other workers. A slow delete of a volume should not prevent garbage + collecting of other things on the same worker. + +## How it Works + +The garbage collector is a batch operation that runs on an interval with a default of 30 seconds. It's important to note +that the collector must be able to run frequently enough to not be outpaced by the workload producing things, and so the +batch operation should be able to complete pretty quickly. + +The batch operation first performs garbage collection within the database alone, removing rows that are no longer +needed. The removal of rows from one stage will often result in removals in a later stage. There are individual +collectors for each object, such as the volume collector or the container collector, and they are all run +asynchronously. + +After the initial pass of garbage collection in the database, there should now be a set of containers and volumes that +meet criteria for garbage collection. These two are a bit more complicated to garbage-collect; they both require talking +to a worker, and waiting on a potentially slow delete. + +Containers and volumes are the costliest resources consumed by Concourse. There are also many of them created over time +as builds execute and pipelines perform their resource checking. 
Therefore it is important to parallelize this aspect of +garbage collection so that one slow delete or one slow worker does not cause them to pile up. \ No newline at end of file diff --git a/docs/docs/internals/index.md b/docs/docs/internals/index.md new file mode 100644 index 00000000..81d1963a --- /dev/null +++ b/docs/docs/internals/index.md @@ -0,0 +1,134 @@ +--- +title: Internals +--- + +This section provides a deeper understanding of some of the concepts surrounding Concourse. + +An understanding of the basics of Concourse concepts, such as pipelines, jobs, etc, is recommended as parts of this +section might assume a level of knowledge from them. This section is not necessary for using Concourse but are more for +experienced users that want to dig deeper into how Concourse works. + +## Basic architecture + +Concourse is a fairly simple distributed system built up from the following components. You'll see them referenced here +and there throughout the documentation, so you may want to skim this page just to get an idea of what they are. + +![](assets/index-01.png) + +## ATC: web UI & build scheduler + +The ATC is the heart of Concourse. It runs the web UI and API and is responsible for all pipeline scheduling. It +connects to PostgreSQL, which it uses to store pipeline data (including build logs). + +Multiple ATCs can be running as one cluster; as long as they're all pointing to the same database, they'll synchronize +using basic locking mechanisms and roughly spread work across the cluster. + +The ATC by default listens on port `8080`, and is usually co-located with the [TSA](#tsa-worker-registration-forwarding) +and sitting behind a load balancer. + +!!! note + + For [`fly intercept`](../builds.md#fly-intercept) to function, make sure your load balancer is configured to do TCP + or SSL forwarding, not HTTP or HTTPS. + +There are multiple components within the ATC that each have their own set of responsibilities. 
The main components +consist of the [checker](checker.md), [scheduler](scheduler.md), [build tracker](build-tracker.md), and +the [garbage collector](garbage-collector.md). + +The [checker](checker.md)'s responsibility is to continuously checks for new versions of resources. +The [scheduler](scheduler.md) is responsible for scheduling builds for a job and the [build tracker](build-tracker.md) +is responsible for running any scheduled builds. The [garbage collector](garbage-collector.md) is the cleanup mechanism +for removing any unused or outdated objects, such as containers and volumes. + +All the components in a Concourse deployment can be viewed in the _components_ table in the database as of v5.7.0. The +intervals that the components run at can also be adjusted through editing that table, as well as pausing the component +from running entirely. + +## TSA: worker registration & forwarding + +The TSA is a custom-built SSH server that is used solely for securely +registering [workers](../install/running-worker.md) with the [ATC](#atc-web-ui-build-scheduler). + +The TSA by default listens on port `2222`, and is usually co-located with the [ATC](#atc-web-ui-build-scheduler) and +sitting behind a load balancer. + +The TSA implements CLI over the SSH connection, supporting the following commands: + +* The `forward-worker` command is used to reverse-tunnel a worker's addresses through the TSA and register the forwarded + connections with the ATC. This allows workers running in arbitrary networks to register securely, so long as they can + reach the TSA. This is much safer than opening the worker up to the outside world. +* The `land-worker` command is sent from the worker when landing, and initiates the state change to `LANDING` through + the ATC. +* The `retire-worker` command is sent from the worker when retiring, and initiates the state change to `RETIRING` + through the ATC. 
+* The `delete-worker` command is sent from the worker when draining is interrupted while a worker is retiring. It + removes the worker from the ATC. +* The `sweep-containers` command is sent periodically to facilitate garbage collection of containers which can be + removed from the worker. It returns a list of handles for containers in the `DESTROYING` state, and it is the worker's + job to subsequently destroy them. +* The `report-containers` command is sent along with the list of all container handles on the worker. The ATC uses this + to update the database, removing any `DESTROYING` containers which are no longer in the set of handles, and marking + any `CREATED` containers that are not present as missing. +* The `sweep-volumes` command is sent periodically to facilitate garbage collection of volumes which can be removed from + the worker. It returns a list of handles for volumes in the `DESTROYING` state, and it is the worker's job to + subsequently destroy them. +* The `report-volumes` command is sent along with the list of all volume handles on the worker. The ATC uses this to + update the database, removing any `DESTROYING` volumes which are no longer in the set of handles, and marking + any `CREATED` volumes that are not present as missing. + +## Workers Architecture + +Workers are machines running [Garden](https://github.com/cloudfoundry-incubator/garden) +and [Baggageclaim](https://github.com/concourse/concourse/tree/master/worker/baggageclaim) servers and registering +themselves via the [TSA](#tsa-worker-registration-forwarding). + +!!! note + + Windows and Darwin workers also run Garden and Baggageclaim servers but do not run containers. They both use + [houdini](https://github.com/vito/houdini) to fake making containers. Windows containers are not supported and + Darwin does not have native container technology. 
+ +Workers have no important state configured on their machines, as everything runs in a container and thus shouldn't care +about what packages are installed on the host (well, except for those that allow it to be a worker in the first place). +This is very different from workers in other non-containerized CI solutions, where the state of packages on the worker +is crucial to whether your pipeline works or not. + +Each worker registers itself with the Concourse cluster via the [TSA](#tsa-worker-registration-forwarding). + +Workers by default listen on port `7777` for Garden and port `7788` for Baggageclaim. Connections to both servers are +forwarded over the SSH connection made to the [TSA](#tsa-worker-registration-forwarding). + +### The worker lifecycle + +#### **RUNNING** + +: A worker in this state is registered with the cluster and ready to start running containers and storing volumes. + +#### **STALLED** + +: A worker in this state was previously registered with the cluster, but stopped advertising itself for some reason. +Usually this is due to network connectivity issues, or the worker stopping unexpectedly. + +: If the worker remains in this state and cannot be recovered, it can be removed using +the [`fly prune-worker`](../operation/administration.md#fly-prune-worker) command. + +#### **LANDING** + +: The `concourse land-worker` command will put a worker in the `LANDING` state to safely drain its assignments for +temporary downtime. + +: The ATC will wait for builds on the worker for jobs which are uninterruptible to finish, and transition the worker +into `LANDED` state. + +#### **LANDED** + +: A worker in this state has successfully waited for all uninterruptible jobs on it after having `concourse land-worker` +called. It will no longer be used to schedule any new containers or create volumes until it registers as `RUNNING` +again. 
+ +#### **RETIRING** + +: The `concourse retire-worker` command will put a worker in the `RETIRING` state to remove it from the cluster +permanently. + +: The ATC will wait for builds on the worker for jobs which are uninterruptible to finish, and remove the worker. \ No newline at end of file diff --git a/docs/docs/internals/scheduler.md b/docs/docs/internals/scheduler.md new file mode 100644 index 00000000..5d84210b --- /dev/null +++ b/docs/docs/internals/scheduler.md @@ -0,0 +1,67 @@ +--- +title: Build Scheduler +--- + +!!! warning + + As of the v6.0.0 release, there have been many changes to the scheduler, so it would be advisable to assume that + this documentation should only be used for Concourse deployments v6.0.0 and above. + +Builds represent each execution of a [job](../jobs.md). Figuring out when to schedule a new job build is the +responsibility of the **build scheduler**. The scheduling of new job builds can be dependent on many different factors +such as when a new version of a resource is discovered, when a dependent upstream build finishes, or when a user +manually triggers a build. + +The build scheduler is a global component, where it deals with all the jobs within a deployment. It runs on an interval +with a default of 10 seconds. If there are multiple ATCs, only one of the ATC's scheduler component will run per +interval tick in order to ensure that there will be no duplicated work between ATC nodes. + +The subcomponent used to figure out whether a build can be scheduled is called the [algorithm](#algorithm). + +## Algorithm + +The algorithm is a subcomponent of the scheduler which is used to determine the input versions to the next build of a +job. There are many factors that contribute to figuring out the next input versions. 
It can be anything that affects +which resource versions will be used to schedule a build, such as `version` constraints or `passed` constraints in +a [`get` step](../steps/get.md), disabling versions through the web UI, etc. The algorithm can also fail to determine a +successful set of input versions, which the error will be propagated to the preparation view in the build page. + +If the algorithm computes a successful set of input versions, it will figure out whether the versions it computed can be +used to produce a new build. This is done by comparing the [trigger-able](../steps/get.md) input versions to the +versions used by the previous build and if any of them have a different version, then the scheduler will know to +schedule a new build. Conversely, if the input versions produced by the algorithm are the same as the previous build, +then the scheduler will not create a new build. + +## Scheduling behavior + +The scheduler will schedule a new build if any of the versions produced by the algorithm for `trigger: true` resources +has not been used in any previous build of the job. + +What this means is if the algorithm runs and computes an input version, the scheduler will create a new build as long as +that version has not been used by any previous build's version for that same input. Even if that version has been used +by a build 2 months ago, the scheduler will **not** schedule a new build because that version has been previously used +in a build of the job. + +If there are any input versions that are different from any previous build, it will trigger a new build. + +## Scheduling on demand + +The scheduler runs on an interval, but rather than scheduling all the jobs within a deployment on every tick, it only +schedules the jobs that need to be _scheduled_. + +First, the scheduler determines which jobs need to be scheduled. 
Below are all the reasons why Concourse will think a
job needs to be scheduled:

* Detecting new versions of a resource through a check
* Saving a new version through a put
* A build finishes for an upstream job (through passed constraints)
* Enabling/Disabling a resource version that has not been used in a previous build
* Pinning/Unpinning a resource version that has not been used in a previous build
* Setting a pipeline
* Updating a resource's `resource_config`
* Manually triggering a build
* Rerunning a build
* Multiple versions available for a `version: every` constraint

Each job that is scheduled will use the algorithm to determine what inputs its next build should have. Then the build is
scheduled and picked up by the [Build Tracker](build-tracker.md). \ No newline at end of file diff --git a/docs/docs/jobs.md b/docs/docs/jobs.md new file mode 100644 index 00000000..bfba88b8 --- /dev/null +++ b/docs/docs/jobs.md @@ -0,0 +1,105 @@ +--- +title: Jobs +--- + +Jobs determine the actions of your pipeline. They determine how [resources](resources/index.md) progress through it, and +how the pipeline is visualized. + +The most important attribute of a job is its build plan, configured as job.plan. This determines the sequence +of [Steps](steps/index.md) to execute in any builds of the job. + +A pipeline's jobs are listed under [`pipeline.jobs`](pipelines/index.md#pipeline-schema) with the following schema: + +## `job` schema + +## Managing Jobs + +### `fly jobs` + +To list the jobs configured in a pipeline, run: + +```shell +fly -t example jobs -p my-pipeline +``` + +### `fly trigger-job` + +To immediately queue a new build of a job, run: + +```shell +fly -t example trigger-job --job my-pipeline/my-job +``` + +This will enqueue a new build of the `my-job` job in the `my-pipeline` pipeline. 
+ +
To start watching the newly created build, append the `--watch` flag like so:

```shell
fly -t example trigger-job --job my-pipeline/my-job --watch
```

You can also queue new builds by clicking the `+` button on the job or build pages in the web UI.

### `fly rerun-build`

To queue a new build of a job with exactly the same inputs as a given build of the same job, run:

```shell
fly -t example rerun-build --job my-pipeline/my-job --build 4
```

This will enqueue a new build of the `my-job` job in the `my-pipeline` pipeline, using the same input versions as build
number 4.

To start watching the newly created build, append the `--watch` flag like so:

```shell
fly -t example rerun-build --job my-pipeline/my-job --build 4 --watch
```

You can also rerun builds by visiting the build page for the build in question in the web UI and clicking the rerun
button.

### `fly pause-job`

To prevent scheduling and running builds of a job, run:

```shell
fly -t example pause-job --job my-pipeline/my-job
```

This will prevent pending builds of the job from being scheduled, though builds that are in-flight will still run, and
pending builds will still be created as normal.

### `fly unpause-job`

To resume scheduling of a job, run:

```shell
fly -t example unpause-job --job my-pipeline/my-job
```

This will resume scheduling of builds queued for the job.

### `fly clear-task-cache`

If you've got a [task cache](tasks.md#task-config-schema) that you need to clear out for whatever reason, this can be
done like so:

```shell
fly -t example clear-task-cache --job my-pipeline/my-job --step my-step-name
```

This will immediately invalidate the caches - they'll be garbage collected asynchronously and subsequent builds will run
with empty caches. 
+ +You can also clear out a particular path for the given step's cache, using `--cache-path`: + +```shell +fly -t example clear-task-cache \ + --job my-pipeline/my-job \ + --step my-step-name \ + --cache-path go/pkg +``` + +If `--cache-path` is not specified, all caches for the given step will be cleared. \ No newline at end of file diff --git a/docs/docs/observation.md b/docs/docs/observation.md new file mode 100644 index 00000000..a66287ef --- /dev/null +++ b/docs/docs/observation.md @@ -0,0 +1,73 @@ +--- +title: Observation +--- + +This section outlines everything you need to know for observing the state of your pipelines. + +## The Dashboard + +The dashboard, available at the default route (`/`), provides a bird's-eye view of the Concourse cluster. All visible +pipelines across all teams are listed here. A high-density (HD) view is available at `/hd`. + +## `cc.xml` + +The Concourse API can return the status of a team's pipelines in a format compatible with tools +like [CCMenu](http://ccmenu.org/). This endpoint is available at the following route: + +``` +/api/v1/teams/{team}/cc.xml +``` + +## Badges + +The Concourse API supports returning SVG badges indicating the status of a job: + +``` +/api/v1/teams/{team}/pipelines/{pipeline}/jobs/{job}/badge +``` + +... and for an entire pipeline: + +``` +/api/v1/teams/{team}/pipelines/{pipeline}/badge +``` + +This can be used to annotate your READMEs with a build status badge like so: + +```markdown +![CI](https://ci.concourse-ci.org/api/v1/teams/main/pipelines/concourse/jobs/unit/badge) +``` + +... which should render the following: + +![CI](https://ci.concourse-ci.org/api/v1/teams/main/pipelines/concourse/jobs/unit/badge) + +You can also specify a custom title for the badge with a title query parameter: + +``` +/api/v1/teams/{team}/pipelines/{pipeline}/badge?title=hello +``` + +or + +``` +/api/v1/teams/{team}/pipelines/{pipeline}/jobs/{job}/badge?title=hello +``` + +... 
which should render the following: + +![CI](https://ci.concourse-ci.org/api/v1/teams/main/pipelines/concourse/jobs/unit/badge?title=hello) + +If you want to have the image link to your pipeline/job in a README, you can make it like so: + +```html + + Concourse Pipeline Status + +``` + +## Pipeline Visibility + +Pipelines may be exposed so that they can be monitored without having to authenticate. For more information, +see [Pipeline & Build Visibility](auth-and-teams/exposing.md). \ No newline at end of file diff --git a/docs/docs/operation/administration.md b/docs/docs/operation/administration.md new file mode 100644 index 00000000..02280709 --- /dev/null +++ b/docs/docs/operation/administration.md @@ -0,0 +1,85 @@ +--- +title: Administration +--- + +## `fly workers` + +To list the currently registered workers, including additional metadata, run: + +```shell +fly -t example workers +``` + +This can be useful for monitoring the status of your workers, if you suspect that one keeps dropping out of the pool or +getting tasked with too many containers, etc. + +## `fly prune-worker` + +To remove a stalled, landing, landed, or retiring worker, run: + +```shell +fly -t example prune-worker --worker worker-name +``` + +To prune _all_ stalled workers, run: + +```shell +fly -t example prune-worker --all-stalled +``` + +This is for those cases where you know a worker is not coming back. + +!!! note + + Running workers cannot be pruned, since they'll just re-register themselves anyway. + +## `fly land-worker` + +To initiate landing of a worker and eventually (after draining) cause it to exit, run: + +```shell +fly -t example land-worker --worker worker-name +``` + +## `fly containers` + +To list the active containers across all your workers, run: + +```shell +fly -t example containers +``` + +This can be useful when discovering the containers available for [`fly intercept`](../builds.md#fly-intercept)ing. 
+ +## `fly volumes` + +To list the active volumes across all your workers, run: + +```shell +fly -t example volumes +``` + +This can be useful to observe the caches warming across your cluster, and could be a good indicator of disk use. + +## `fly curl` + +To execute an arbitrary API request, you can run something like the following: + +```shell +fly -t example curl /api/v1/info +``` + +This command is just a shim that runs `curl` under the hood. To pass flags to `curl`, pass a `--` argument after the +path so that `fly` can distinguish them from its own flags: + +```shell +fly -t example curl /api/v1/builds -- \ + -X PUT \ + -H "Content-type: application/json" \ + -d @plan.json +``` + +!!! note + + If you use this command the assumption is that you know what you're doing. If you find yourself using this command + often, let us know - perhaps there's a missing command! \ No newline at end of file diff --git a/docs/docs/operation/container-placement.md b/docs/docs/operation/container-placement.md new file mode 100644 index 00000000..c807fd2b --- /dev/null +++ b/docs/docs/operation/container-placement.md @@ -0,0 +1,149 @@ +--- +title: Container Placement +--- + +Each [step](../steps/index.md) in a build is executed inside a container. The [`web` node](../install/running-web.md) +distributes containers across the worker cluster depending on the configured strategy. If no workers satisfy the +configured strategy, the [step](../steps/index.md) will block until a worker becomes available. + +## The `volume-locality` strategy + +When using `volume-locality`, the [`web` node](../install/running-web.md) places [`task` step](../steps/task.md) +and [`put` step](../steps/put.md) containers on workers where a majority of their inputs are already present. **This is +the default strategy**. 
+ +The advantage of this approach is that it reduces the likelihood that large artifacts will have to be streamed from +one [`worker` node](../install/running-worker.md), through the [`web` node](../install/running-web.md), and to the +target `worker` node. For large artifacts, this can result in quite a bit of overhead. + +The disadvantage of this approach is that it can sometimes result in builds "gravitating" to a particular worker and +overloading it, at least until the resource caches warm across the worker pool. This disadvantage can be partially +mitigated using the (currently experimental) [`limit-active-volumes` strategy](#the-limit-active-volumes-strategy) in +conjunction with [Chaining Placement Strategies](#chaining-placement-strategies). + +If your builds tend to be light on artifacts and heavy on task execution, you may want to try +the [`fewest-build-containers` strategy](#the-fewest-build-containers-strategy) or the (currently +experimental) [`limit-active-tasks` strategy](#the-limit-active-tasks-strategy). + +## The `fewest-build-containers` strategy + +When using the `fewest-build-containers` strategy, step containers (`get`, `put`, `task`) are placed on the worker that +has the fewest build containers (i.e. containers for other steps of other builds). + +!!! info + + Containers used for resource checks are not counted because they are long-living containers that get re-used for + multiple checks, and therefore consume very little resources on the worker. + +To use this strategy, set the following env var on the [`web` node](../install/running-web.md): + +```properties +CONCOURSE_CONTAINER_PLACEMENT_STRATEGY=fewest-build-containers +``` + +## The `random` strategy + +With the `random` strategy, the [`web` node](../install/running-web.md) places `get`, `put`, and `task` containers on +any worker, ignoring any affinity. + +As this is truly random, this will be fine until one day it's not fine. 
+ +To use this strategy, set the following env var on the [`web` node](../install/running-web.md): + +```properties +CONCOURSE_CONTAINER_PLACEMENT_STRATEGY=random +``` + +## The `limit-active-tasks` strategy + +!!! warning "Experimental Feature" + + `limit-active-tasks` is an experimental feature. + +When selecting the `limit-active-tasks` placement strategy, each `task` executed on a worker will increase the number +of "active tasks" on that worker by one. When the task completes the number is decreased by one. +The [`web` node](../install/running-web.md) then places `get`, `put`, and `task` containers on the worker that currently +has the _least amount of active tasks_. + +Additionally, `max-active-tasks-per-worker` can be set to an integer of 1 or more, in which case a worker will not +execute more than that amount of **tasks**. A value of 0 means that there is no limit on the maximum number of active +tasks on the workers. If no worker can be selected because all of them already have `max-active-tasks-per-worker` active +tasks, then the task will wait for a free worker, periodically polling the pool. The +metric `concourse_steps_waiting{type="task"}` is emitted to monitor these events. Note that the parameter does not apply +to `get` and `put` steps which will always be scheduled on the worker with the fewest active tasks. + +```properties +CONCOURSE_CONTAINER_PLACEMENT_STRATEGY=limit-active-tasks +``` + +and, optionally + +```properties +CONCOURSE_MAX_ACTIVE_TASKS_PER_WORKER=1 +``` + +## The `limit-active-containers` strategy + +!!! warning "Experimental Feature" + + `limit-active-containers` is an experimental feature. + +The `limit-active-containers` placement strategy rejects workers that already have too many containers. It makes no +effort to find the worker with the fewest number of containers present, and is therefore most useful when combined with +other placement strategies by [Chaining Placement Strategies](#chaining-placement-strategies). 
+ +`max-active-containers-per-worker` can be set to an integer of 1 or more, in which case a worker will not execute more +than that amount of **containers**. If unset (or set to a value of 0), the `limit-active-containers` strategy has no +effect - if this is your only placement strategy, workers will be chosen at random. + +```properties +CONCOURSE_CONTAINER_PLACEMENT_STRATEGY=limit-active-containers +CONCOURSE_MAX_ACTIVE_CONTAINERS_PER_WORKER=200 +``` + +## The `limit-active-volumes` strategy + +!!! warning "Experimental Feature" + + `limit-active-volumes` is an experimental feature. + +The `limit-active-volumes` placement strategy rejects workers that already have too many volumes. It makes no effort to +find the worker with the fewest number of volumes present, and is therefore most useful when combined with other +placement strategies by [Chaining Placement Strategies](#chaining-placement-strategies). + +`max-active-volumes-per-worker` can be set to be an integer of 1 or more, in which case a worker will not execute more +than that amount of **volumes**. If unset (or set to a value of 0), the `limit-active-volumes` strategy has no effect - +if this is your only placement strategy, workers will be chosen at random. + +```properties +CONCOURSE_CONTAINER_PLACEMENT_STRATEGY=limit-active-volumes +CONCOURSE_MAX_ACTIVE_VOLUMES_PER_WORKER=200 +``` + +## Chaining Placement Strategies + +Container placement strategies can be chained together to apply multiple strategies in sequence. The first strategy in +the chain receives the entire set of workers, filtering the set down in some way, and passing that new set of workers to +the next strategy in the chain. If the last strategy in the chain returns multiple workers, one will be chosen at +random. 
+ +For instance, consider the following configuration: + +```properties +CONCOURSE_CONTAINER_PLACEMENT_STRATEGY=limit-active-containers,limit-active-volumes,volume-locality,fewest-build-containers +CONCOURSE_MAX_ACTIVE_CONTAINERS_PER_WORKER=200 +CONCOURSE_MAX_ACTIVE_VOLUMES_PER_WORKER=100 +``` + +This defines a chain of 4 placement strategies, plus the implicit `random` strategy. Let's look at what each strategy +accomplishes: + +1. [`limit-active-containers` strategy](#the-limit-active-containers-strategy) removes all workers that already have + more than 200 containers +2. [`limit-active-volumes` strategy](#the-limit-active-volumes-strategy) removes all remaining workers that already have + more than 100 volumes +3. [`volume-locality` strategy](#the-volume-locality-strategy) keeps only the remaining worker(s) that have the most + inputs locally. This can keep more than one worker in the case of a tie +4. [`fewest-build-containers` strategy](#the-fewest-build-containers-strategy) will attempt to break ties by selecting + the worker with fewer build containers. If all the remaining workers have the exact same number of containers, one + will be selected at random \ No newline at end of file diff --git a/docs/docs/operation/creds/aws-secrets.md b/docs/docs/operation/creds/aws-secrets.md new file mode 100644 index 00000000..c886b628 --- /dev/null +++ b/docs/docs/operation/creds/aws-secrets.md @@ -0,0 +1,184 @@ +--- +title: The AWS Secrets Manager credential manager +--- + +## Configuration + +In order to integrate with AWS Secrets Manager for credential management, the web node must be configured with: + +* an access key and secret key, or a session token +* the AWS region that your parameters are stored within. + +If no access key, secret key, or session token is provided, Concourse will attempt to use environment variables or the +instance credentials assigned to the instance. 
+ +The web node's configuration specifies the following: + +**`aws-secretsmanager-access-key`**: string + +: A valid AWS access key. + +: Environment variable `CONCOURSE_AWS_SECRETSMANAGER_ACCESS_KEY`. + +**`aws-secretsmanager-secret-key`**: string + +: The secret key that corresponds to the access key defined above. + +: Environment variable `CONCOURSE_AWS_SECRETSMANAGER_SECRET_KEY`. + +**`aws-secretsmanager-session-token`**: string + +: A valid AWS session token. + +: Environment variable `CONCOURSE_AWS_SECRETSMANAGER_SESSION_TOKEN`. + +**`aws-secretsmanager-region`**: string + +: The AWS region that requests to Secrets Manager will be sent to. + +: Environment variable `CONCOURSE_AWS_SECRETSMANAGER_REGION`. + +**`aws-secretsmanager-pipeline-secret-template`**: string + +: The base path used when attempting to locate a pipeline-level secret. + +: Environment variable `CONCOURSE_AWS_SECRETSMANAGER_PIPELINE_SECRET_TEMPLATE`. + +: !!! example + + Default: `/concourse/{{.Team}}/{{.Pipeline}}/{{.Secret}}` + +**`aws-secretsmanager-team-secret-template`**: string + +: The base path used when attempting to locate a team-level secret. + +: Environment variable `CONCOURSE_AWS_SECRETSMANAGER_TEAM_SECRET_TEMPLATE`. + +: !!! example + + Default: `/concourse/{{.Team}}/{{.Secret}}` + +For example, to launch the ATC and enable Secrets Manager, you may configure: + +```shell +concourse web ... \ + --aws-secretsmanager-region us-east-1 \ + --aws-secretsmanager-access-key AKIAIOSFODNN7EXAMPLE \ + --aws-secretsmanager-secret-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + +# or use env variables +CONCOURSE_AWS_SECRETSMANAGER_REGION="us-east-1" \ +CONCOURSE_AWS_SECRETSMANAGER_ACCESS_KEY="AKIAIOSFODNN7EXAMPLE" \ +CONCOURSE_AWS_SECRETSMANAGER_SECRET_KEY="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ +concourse web ... 
+``` + +A more secure method is to configure +an [IAM role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) for your EC2 ATC +instance so that credentials are fetched automatically from the EC2 metadata service. + +## Saving credentials in AWS + +It seems to be best to use the 'other type of secret' option and the 'plaintext' entry (otherwise your secrets will be +interpolated as JSON) for best results. Make sure your secret locations match the lookup templates exactly; include the +leading `/`, for example. + +## IAM Permissions + +The following is an example of an IAM policy that can be used to grant permissions to an IAM user or instance role. Note +that the `Resource` section can contain a wildcard to a secret or be restricted to an individual secret. In order for +the health check to work properly (see [Scaling](#scaling)), Concourse needs to have access to +the `__concourse-health-check` secret. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowAccessToSecretManagerParameters", + "Effect": "Allow", + "Action": [ + "secretsmanager:ListSecrets" + ], + "Resource": "*" + }, + { + "Sid": "AllowAccessGetSecret", + "Effect": "Allow", + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + "Resource": [ + "arn:aws:secretsmanager:*:*:secret:/concourse/*", + "arn:aws:secretsmanager:*:*:secret:__concourse-health-check-??????" 
+ ] + } + ] +} +``` + +If you wish to restrict concourse to only have access to secrets for a specific pipeline, you can +replace `"arn:aws:secretsmanager:*:*:secret:/concourse/*"` in the example above with: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowAccessToSecretManagerParameters", + "Effect": "Allow", + "Action": [ + "secretsmanager:ListSecrets" + ], + "Resource": "*" + }, + { + "Sid": "AllowAccessGetSecret", + "Effect": "Allow", + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + "Resource": [ + "arn:aws:secretsmanager:*:*:secret:/concourse/TEAM_NAME/*", + "arn:aws:secretsmanager:*:*:secret:/concourse/TEAM_NAME/PIPELINE_NAME/*", + "arn:aws:secretsmanager:*:*:secret:__concourse-health-check-??????" + ] + } + ] +} +``` + +where `TEAM_NAME` and `PIPELINE_NAME` are replaced with the team and name of the pipeline in question. + +For more information on how to use IAM roles to restrict access to Secrets Manager, review +the [official documentation](https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_identity-based-policies.html). + +## Credential Lookup Rules + +When resolving a parameter such as `((foo_param))`, Concourse will look in the following paths, in order: + +* `/concourse/TEAM_NAME/PIPELINE_NAME/foo_param` +* `/concourse/TEAM_NAME/foo_param` + +The leading `/concourse` can be changed by specifying `--aws-secretsmanager-pipeline-secret-template` +or `--aws-secretsmanager-team-secret-template` variables. + +!!! note + + If Concourse does not have [permission](#iam-permissions) to access the pipeline-scoped paths, then credential + lookups will fail even for credentials which are stored at the team level. + +## Scaling + +If your cluster has a large workload, in particular if there are many resources, Concourse can generate a lot of traffic +to AWS and subsequently get rate-limited. 
+ +As long as Concourse has permission to get the value of the `__concourse-health-check` secret, you should be able to +measure an error rate by polling the `/api/v1/info/creds` endpoint when authenticated as +a [Concourse Admin](../../auth-and-teams/user-roles.md#concourse-admin). + +Depending on your workflow for updating secrets and your reliability requirements it may be +worth [Caching credentials](caching.md) and/or [Retrying failed fetches](retrying-failed.md) to mitigate +rate-limit-related errors. \ No newline at end of file diff --git a/docs/docs/operation/creds/aws-ssm.md b/docs/docs/operation/creds/aws-ssm.md new file mode 100644 index 00000000..62c2a355 --- /dev/null +++ b/docs/docs/operation/creds/aws-ssm.md @@ -0,0 +1,133 @@ +--- +title: The AWS SSM credential manager +--- + +## Configuration + +The ATC is configured with an access key and secret key or session token and the AWS region that your parameters are +stored within. If no access key, secret key, or session token is provided, Concourse will attempt to use environment +variables or the instance credentials assigned to the instance. + +The ATC's configuration specifies the following: + +**`aws-ssm-access-key`**: string + +: A valid AWS access key. + +: Environment variable `CONCOURSE_AWS_SSM_ACCESS_KEY`. + +**`aws-ssm-secret-key`**: string + +: The secret key that corresponds to the access key defined above. + +: Environment variable `CONCOURSE_AWS_SSM_SECRET_KEY`. + +**`aws-ssm-session-token`**: string + +: A valid AWS session token. + +: Environment variable `CONCOURSE_AWS_SSM_SESSION_TOKEN`. + +**`aws-ssm-region`**: string + +: The AWS region that requests to parameter store will be sent to. + +: Environment variable `CONCOURSE_AWS_SSM_REGION`. + +**`aws-ssm-pipeline-secret-template`**: string + +: The base path used when attempting to locate a pipeline-level secret. + +: Environment variable `CONCOURSE_AWS_SSM_PIPELINE_SECRET_TEMPLATE`. + +: !!! 
example + + Default: `/concourse/{{.Team}}/{{.Pipeline}}/{{.Secret}}` + +**`aws-ssm-team-secret-template`**: string + +: The base path used when attempting to locate a team-level secret. + +: Environment variable `CONCOURSE_AWS_SSM_TEAM_SECRET_TEMPLATE`. + +: !!! example + + Default: `/concourse/{{.Team}}/{{.Secret}}` + +For example, to launch the ATC and enable the parameter store, you may configure: + +```shell +concourse web ... \ + --aws-ssm-region us-east-1 \ + --aws-ssm-access-key AKIAIOSFODNN7EXAMPLE \ + --aws-ssm-secret-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + +# or use env variables +CONCOURSE_AWS_SSM_REGION="us-east-1" \ +CONCOURSE_AWS_SSM_ACCESS_KEY="AKIAIOSFODNN7EXAMPLE" \ +CONCOURSE_AWS_SSM_SECRET_KEY="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ +concourse web ... +``` + +A more secure method is to configure +an [IAM role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) for your EC2 ATC +instance so that credentials are fetched automatically from the EC2 metadata service. + +## IAM Permissions + +The following is an example of an IAM policy that can be used to grant permissions to an IAM user or instance role. Note +that the `Resource` section can contain a wildcard to a parameter or be restricted to an individual parameter. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowAccessToSsmParameters", + "Effect": "Allow", + "Action": [ + "ssm:GetParameter", + "ssm:GetParametersByPath" + ], + "Resource": [ + "arn:aws:ssm:::parameter/concourse/*", + "arn:aws:ssm:::parameter/concourse/TEAM_NAME/*", + "arn:aws:ssm:::parameter/concourse/TEAM_NAME/PIPELINE_NAME/*" + ] + }, + { + "Sid": "AllowAccessToDecryptSsmParameters", + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:DescribeKey" + ], + "Resource": "arn:aws:kms:::key/KMS_KEY_ID" + }, + { + "Sid": "AllowListKeys", + "Effect": "Allow", + "Action": [ + "kms:ListAliases", + "kms:ListKeys" + ], + "Resource": "*" + } + ] +} +``` + +Note that the `TEAM_NAME`, `PIPELINE_NAME`, and `KMS_KEY_ID` text above should be replaced to fit your Concourse setup. + +For more information on how to use IAM roles to restrict access to SSM parameters, review +the [official documentation](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-access.html). + +## Credential Lookup Rules + +When resolving a parameter such as `((foo_param))`, Concourse will look in the following paths, in order: + +* `/concourse/TEAM_NAME/PIPELINE_NAME/foo_param` +* `/concourse/TEAM_NAME/foo_param` + +The leading `/concourse` can be changed by specifying `--aws-ssm-pipeline-secret-template` +or `--aws-ssm-team-secret-template` variables. \ No newline at end of file diff --git a/docs/docs/operation/creds/caching.md b/docs/docs/operation/creds/caching.md new file mode 100644 index 00000000..41eeaff2 --- /dev/null +++ b/docs/docs/operation/creds/caching.md @@ -0,0 +1,34 @@ +--- +title: Caching credentials +--- + +By default, credentials are fetched each time they're used. When many pipelines are configured this can result in a ton +of requests to the credential server. 
+ +To reduce load on your credential server you may want to enable caching by setting the following env on +the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_SECRET_CACHE_ENABLED=true +``` + +Enabling secret caching will cache secrets from both [credential managers](index.md) and +from [var sources](../../vars.md#var-sources-experimental). + +By default, credentials will be cached for one minute at a time. This value can be increased to further reduce load on +the server like so: + +```properties +CONCOURSE_SECRET_CACHE_DURATION=5m # increase from 1m default +``` + +Credential cache duration can also be determined by the credential manager itself - for example, if Vault returns a +lease duration for a credential, the shorter value between the configured cache duration and the credential's lease +duration will be used. + +By default, the _absence_ of a credential is also cached for 10 seconds so that Concourse doesn't keep looking for a +misconfigured credential. This duration can be configured like so: + +```properties +CONCOURSE_SECRET_CACHE_DURATION_NOTFOUND=1s # decrease from 10s default +``` \ No newline at end of file diff --git a/docs/docs/operation/creds/conjur.md b/docs/docs/operation/creds/conjur.md new file mode 100644 index 00000000..f0ac0349 --- /dev/null +++ b/docs/docs/operation/creds/conjur.md @@ -0,0 +1,135 @@ +--- +title: The Conjur credential manager +--- + +## Configuration + +Concourse can be configured to pull credentials from a [CyberArk Conjur](https://conjur.org/) instance. + +The ATC is configured with a Conjur host username and api key or session token. If no host username, api key, or session +token is provided, Concourse will attempt to use environment variables. + +The ATC's configuration specifies the following: + +**`conjur-appliance-url`**: string + +: URL of the Conjur instance. + +: Environment variable `CONCOURSE_CONJUR_APPLIANCE_URL`. 
+ +
**`conjur-account`**: string

: The name of the Conjur account to authenticate against.

: Environment variable `CONCOURSE_CONJUR_ACCOUNT`.

**`conjur-authn-login`**: string

: A valid Conjur host username.

: Environment variable `CONCOURSE_CONJUR_AUTHN_LOGIN`.

**`conjur-authn-api-key`**: string

: The api key that corresponds to the Conjur host username.

: Environment variable `CONCOURSE_CONJUR_AUTHN_API_KEY`.

**`conjur-authn-token-file`**: string

: Token file used if Conjur instance is running in k8s or iam.

: Environment variable `CONCOURSE_CONJUR_AUTHN_TOKEN_FILE`.

**`conjur-cert-file`**: string

: Cert file used if conjur instance is using a self-signed cert.

: Environment variable `CONCOURSE_CONJUR_CERT_FILE`.

**`conjur-pipeline-secret-template`**: string

: The base path used when attempting to locate a pipeline-level secret.

: Environment variable `CONCOURSE_CONJUR_PIPELINE_SECRET_TEMPLATE`.

: !!! example

    Default: `/concourse/{{.Team}}/{{.Pipeline}}/{{.Secret}}`

**`conjur-team-secret-template`**: string

: The base path used when attempting to locate a team-level secret.

: Environment variable `CONCOURSE_CONJUR_TEAM_SECRET_TEMPLATE`.

: !!! example

    Default: `/concourse/{{.Team}}/{{.Secret}}`

**`conjur-secret-template`**: string

: The base path used when attempting to locate a vault or safe level secret.

: Environment variable `CONCOURSE_CONJUR_SECRET_TEMPLATE`.

: !!! example

    Default: `vaultName/{{.Secret}}`

For example, to launch the ATC and enable Conjur, you may configure:

```shell
\ + --conjur-appliance-url https://conjur-master.local \ + --conjur-account conjur \ + --conjur-authn-login host/concourse/dev \ + --conjur-authn-api-key 107eaqz167jkzm2q8wjv4mnyj0z12gfkws9wq9gzsjt29v2sn7yvy + +# or use env variables +CONCOURSE_CONJUR_APPLIANCE_URL="https://conjur-master.local" \ +CONCOURSE_CONJUR_ACCOUNT="conjur" \ +CONCOURSE_CONJUR_AUTHN_LOGIN="host/concourse/dev" \ +CONCOURSE_CONJUR_AUTHN_API_KEY="107eaqz167jkzm2q8wjv4mnyj0z12gfkws9wq9gzsjt29v2sn7yvy" \ +concourse web ... +``` + +## Conjur Permissions + +The following is an example Conjur policy that can be used to grant permissions to a Conjur host. In this +example `host/concourse` will have permissions to read and update all the secrets within the `TEAM_NAME` +and `PIPELINE_NAME` policies. + +```yaml +- !host concourse +- !policy + id: concourse + owner: !host concourse + body: + - !policy + id: TEAM_NAME + body: + - !variable team-secret-variable + - !policy + id: PIPELINE_NAME + body: + - !variable pipeline-secret-variable +``` + +Note that the `TEAM_NAME` and `PIPELINE_NAME` text above should be replaced to fit your Concourse setup. + +For more information on how to create and load Conjur policies, review +the [official documentation](https://docs.conjur.org/Latest/en/Content/Operations/Policy/policy-overview.htm?tocpath=Fundamentals%7CPolicy%20Management%7C_____0). + +## Credential Lookup Rules + +When resolving a parameter such as `((foo_param))`, Concourse will look in the following paths, in order: + +* `/concourse/TEAM_NAME/PIPELINE_NAME/foo_param` +* `/concourse/TEAM_NAME/foo_param` +* `vaultName/foo_param` + +The leading `/concourse` can be changed by specifying `--conjur-pipeline-secret-template` +or `--conjur-team-secret-template` variables. + +The leading `vaultName` can be changed by specifying `--conjur-secret-template` variable. 
\ No newline at end of file
diff --git a/docs/docs/operation/creds/credhub.md b/docs/docs/operation/creds/credhub.md
new file mode 100644
index 00000000..717d2cd2
--- /dev/null
+++ b/docs/docs/operation/creds/credhub.md
@@ -0,0 +1,37 @@
+---
+title: The CredHub credential manager
+---
+
+## Configuration
+
+The ATC is statically configured with a CredHub server URL with TLS and client config.
+
+For example, to point the ATC at an internal CredHub server with TLS signed by a local CA, using client id and secret,
+you may configure:
+
+```shell
+concourse web ... \
+    --credhub-url https://10.2.0.3:9000 \
+    --credhub-ca-cert /etc/my-ca.cert \
+    --credhub-client-id db02de05-fa39-4855-059b-67221c5c2f63 \
+    --credhub-client-secret 6a174c20-f6de-a53c-74d2-6018fcceff64
+```
+
+## Credential Lookup Rules
+
+When resolving a parameter such as `((foo_param))`, it will look in the following paths, in order:
+
+* `/concourse/TEAM_NAME/PIPELINE_NAME/foo_param`
+* `/concourse/TEAM_NAME/foo_param`
+
+The leading `/concourse` can be changed by specifying `--credhub-path-prefix`.
+
+CredHub credentials actually have different types, which may contain multiple values. For example, the `user` type
+specifies both `username` and `password`. You can specify the field to grab via `.` syntax,
+e.g. `((foo_param.username))`.
+
+If the action is being run in the context of a pipeline (e.g. a `check` or a step in a build of a job), the ATC will
+first look in the pipeline path. If it's not found there, it will look in the team path. This allows credentials to be
+scoped widely if they're common across many pipelines.
+
+If an action is being run in a one-off build, the ATC will only look in the team path. 
\ No newline at end of file diff --git a/docs/docs/operation/creds/id-token.md b/docs/docs/operation/creds/id-token.md new file mode 100644 index 00000000..0ac08339 --- /dev/null +++ b/docs/docs/operation/creds/id-token.md @@ -0,0 +1,375 @@ +--- +title: The IDToken credential manager +--- + +This idtoken credential manager is a bit special. It doesn't load any credentials from an external source but instead +generates [JWTs](https://datatracker.ietf.org/doc/html/rfc7519) which are signed by concourse and contain information +about the pipeline/job that is currently running. It can NOT be used as a cluster-wide credential manager, but must +instead be used as a [var source](../../vars.md#var-sources-experimental). + +These JWTs can be used to authenticate with external services via "identity federation" with the identity of the +pipeline. + +Examples for services that support authentication via JWTs are: + +* [Vault](https://vaultproject.io/) +* [AWS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers.html) +* [Azure](https://learn.microsoft.com/en-us/graph/api/resources/federatedidentitycredentials-overview?view=graph-rest-1.0) + +External services can verify if JWTs are actually issued by your Concourse, by checking the signatures on the JWTs +against the public keys published by your Concourse. + +The public keys for verification are published as [JWKS](https://datatracker.ietf.org/doc/html/rfc7517) at: + +``` +https://your-concourse-server.com/.well-known/jwks.json +``` + +Concourse also offers a [OIDC Discovery Endpoint](https://openid.net/specs/openid-connect-discovery-1_0.html), which +allows external services to auto-discover the JWKS-URL. + +## Usage + +You create a [var source](../../vars.md#var-sources-experimental) of type `idtoken` with the configuration you want ( +see [Configuration](#configuration)) in your pipeline. 
That var source then exposes a single
+field, `token`, which contains the JWT and can be used in any step of your pipeline.
+
+You can also have multiple `idtoken` var sources in the same pipeline, each with different audiences, lifetimes etc.
+
+```yaml
+var_sources:
+  - name: myidtoken
+    type: idtoken
+    config:
+      audience: [ "sts.amazonaws.com" ]
+
+jobs:
+  - name: print-creds
+    plan:
+      - task: print
+        config:
+          platform: linux
+          image_resource:
+            type: mock
+            source: { mirror_self: true }
+          run:
+            path: bash
+            args:
+              - -c
+              - |
+                echo myidtoken: ((myidtoken:token))
+```
+
+## Configuration
+
+You can pass several config options to the `idtoken` var source to customize the generated JWTs. For example, you can
+configure the `aud` claim, token expiration, or granularity of the `sub` claim.
+See [`idtoken` var source](../../vars.md#id-token) for all config options.
+
+### Subject Scope
+
+Some external services (like AWS) only perform exact-matches on a token's sub-claim and ignore most other claims. To
+enable use-cases like "_all pipelines of a team should be able to assume an AWS-Role_", Concourse offers the option to
+configure how granular the `sub` claim's value should be.
+
+This is configured via the `subject_scope` setting of the [`idtoken` var source](../../vars.md#id-token).
+
+Depending on the value of `subject_scope`, the content of the JWT's `sub` claim will differ:
+
+| `subject_scope` | `sub` Value in JWT                                           |
+|-----------------|--------------------------------------------------------------|
+| `team`          | `<team_name>`                                                |
+| `pipeline`      | `<team_name>/<pipeline_name>`                                |
+| `instance`      | `<team_name>/<pipeline_name>/<instance_vars>`[^1]            |
+| `job`           | `<team_name>/<pipeline_name>/<instance_vars>/<job_name>`[^2] |
+
+[^1]: Instance vars are rendered as comma-separated key-value pairs. e.g. `my-var:my-value,hello:world`
+[^2]:
+
+    If a path element is empty (for example because you chose `job` on a pipeline with no instance-vars), the empty
+    element is still added. e.g. `my-team/my-pipeline//my-job`. 
Note the double forward-slashes between the pipeline and + job name, where instance vars would go. + +This way all your pipelines can simply get a token with `subject_scope: team` and use this token to assume an AWS-Role +that matches on `sub: "your_team_name"`. + +## Example JWT + +The generated tokens usually look something like this: + +``` +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwczovL3lvdXItY29uY291cnNlLmV4YW1wbGUuY29tIiwiZXhwIjoxNzUxMDE1NzM0LCJhdWQiOlsiYXBpOi8vQXp1cmVBRFRva2VuRXhjaGFuZ2UiXSwic3ViIjoibWFpbi9leGFtcGxlLXBpcGVsaW5lIiwidGVhbSI6Im1haW4iLCJwaXBlbGluZSI6ImV4YW1wbGUtcGlwZWxpbmUiLCJqb2IiOiJleGFtcGxlLWpvYiJ9.my7l44tH0wfz8vc6z3fMmzTMxZ8_orhjcsOti3BKSNo +``` + +And after decoding, looks like this: + +```json +{ + "aud": "sts.amazonaws.com", + "exp": 1751282764, + "iat": 1751279164, + "iss": "https://your-concourse-server.com", + "job": "print-creds", + "pipeline": "mypipeline", + "sub": "main/mypipeline", + "team": "main" +} +``` + +Here is a short explanation of the different claims: + +* `iss`: Who issued the token (always contains the external URL of your Concourse) +* `exp`: When the token will expire +* `aud`: Who the token is intended for. (In the above example it's for Azure's Identity Federation API) +* `team`: The team of the pipeline this token was generated for +* `pipeline`: The pipeline this token was generated for +* `job`: The name of the job (inside the pipeline) this token was generated for +* `instance_vars`: Any instance vars for the pipeline (if it is an instanced pipeline). Will be a comma-separated list + of key-value pairs. e.g. `hello:world,my-var:my-value` +* `sub`: A combination of team + pipeline + instance_vars + job. Which parts are used here is configurable, + see [Subject Scope](#subject-scope). + +## Automatic Key Rotation + +Concourse will automatically rotate the signing keys used for creating the JWTs. The default rotation period +is `7 days`. 
The previously used keys are being kept around for a while (by default `24h`) so that verification of
+currently existing JWTs doesn't fail during key rotation.
+
+This behavior can be configured via the following ATC flags:
+
+* `CONCOURSE_SIGNING_KEY_ROTATION_PERIOD`: How often to rotate the signing keys. Default: `7d`. A value of `0` means
+  don't rotate at all.
+* `CONCOURSE_SIGNING_KEY_GRACE_PERIOD`: How long to keep previously used signing keys published in the JWKs after they
+  have been rotated. Default: `24h`.
+* `CONCOURSE_SIGNING_KEY_CHECK_INTERVAL`: How often to check if new keys are needed or if old ones should be removed.
+  Default: `10m`
+
+## Examples
+
+### Vault
+
+You can use JWTs to authenticate
+with [HashiCorp Vault](https://developer.hashicorp.com/vault/docs/auth/jwt#jwt-authentication). This way your pipelines
+can directly communicate with Vault and use all of its features, beyond what Concourse's native Vault-integration
+offers.
+
+First enable the JWT auth method in your Vault Server:
+
+```shell
+vault auth enable jwt
+```
+
+Now configure the JWT auth method to accept JWTs issued by your Concourse:
+
+```shell
+vault write auth/jwt/config \
+    oidc_discovery_url="https://<YOUR_CONCOURSE_URL>" \
+    default_role="demo"
+```
+
+Lastly, configure a role for JWT auth. Make sure to use the same value in your pipeline that you used for
+_bound_audiences_ (the best would be the URL of your Vault). _bound_subject_ must be the sub-claim value of your JWT, if
+you use the _subject_scope_ setting to change the contents of your sub-claim, adapt this accordingly!
+
+```shell
+vault write auth/jwt/role/demo \
+    role_type="jwt" \
+    user_claim="sub" \
+    bound_subject="main/your-pipeline" \
+    bound_audiences="my-vault-server.com" \
+    policies=webapps \
+    ttl=1h
+```
+
+This role will allow the holder of a JWT with aud: "`my-vault-server.com`" and sub: "`main/your-pipeline`" to get a
+Vault token with the Vault-policy `webapps`. 
If the policy you want to assign has a different name, simply change it in
+the above example. Make sure to adapt the value for `bound_subject` according to your team and pipeline name.
+
+Pipelines can now do the following:
+
+```yaml
+var_sources:
+  - name: vaulttoken
+    type: idtoken
+    config:
+      audience: [ "my-vault-server.com" ]
+
+jobs:
+  - name: vault-login
+    plan:
+      - task: login
+        config:
+          platform: linux
+          image_resource:
+            type: registry-image
+            source: { repository: hashicorp/vault }
+          run:
+            path: sh
+            args:
+              - -e
+              - -c
+              - |
+                export VAULT_ADDR=https://my-vault-server.com
+                vault write auth/jwt/login \
+                    role=demo \
+                    jwt=((vaulttoken:token)) \
+                    --format=json > vault-response.json
+                echo "Now do something with the token in vault-response.json"
+```
+
+You don't have to create a role and a policy for every single one of your pipelines! You can use claims from the JWT with
+Vault's [policy templating](https://developer.hashicorp.com/vault/tutorials/policies/policy-templating) feature. This
+way you can define a policy that allows a pipeline to read all the secrets it would usually have access to using
+Concourse's native Vault-integration:
+
+```hcl
+path "concourse/metadata/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.team }}" {
+  capabilities = ["list"]
+}
+
+path "concourse/data/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.team }}/+" {
+  capabilities = ["read"]
+}
+
+path "concourse/metadata/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.team }}/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.pipeline }}" {
+  capabilities = ["list"]
+}
+
+path "concourse/metadata/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.team }}/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.pipeline }}/*" {
+  capabilities = ["read", "list"]
+}
+
+path "concourse/data/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.team }}/{{ identity.entity.aliases.<MOUNT_ACCESSOR>.metadata.pipeline }}/*" {
+  capabilities = ["read", "list"]
+}
+```
+
+!!! note
+
+    Make sure to set `<MOUNT_ACCESSOR>` to the actual mount-accessor value of your JWT Auth method! 
You can use `vault + auth list --format=json | jq -r '."jwt/".accessor'` to get the accessor for your jwt auth method. + +With a policy like this you don't need to configure `bound_subject` in your JWT auth role. Every single pipeline can +simply use the same role and the policy will take care that they can only access secrets meant for them. However, you +need to explicitly configure claim to metadata mapping: + +```shell +vault write auth/jwt/role/demo \ + role_type="jwt"\ + user_claim="sub" \ + bound_subject= \ + bound_audiences="my-vault-server.com" \ + policies=pipeline-new \ + claim_mappings='team=team' \ + claim_mappings='pipeline=pipeline' \ + ttl=1h +``` + +### AWS + +AWS +supports [federation with external identity providers](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers.html). +Using this, you can allow identities managed by an external identity provider to perform actions in your AWS account. + +In this scenario the external identity provider is Concourse and the identities are teams/pipelines/jobs. This means you +are able to grant a specific pipeline or job permission to perform actions in AWS (like deploying something), all +without managing IAM users or dealing with long-lived credentials. + +First you need +to [create an OpenID Connect identity provider](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html) +in your AWS Account. Set _Provider URL_ to the external URL of your Concourse server. For _Audience_, you can choose any +string you like, but using a value like `sts.amazonaws.com` is recommended. You have to use the same string later in the +configuration of your [`idtoken` var source](../../vars.md#id-token). + +Next you will need +to [create an IAM-Role that can be assumed using your JWT](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_oidc.html#idp_oidc_Create). +Set _Identity Provider_ to the value you previously set _Audience_ to. 
Add a condition on the sub-claim with
+type `StringEquals` and value `yourteam/yourpipeline`. This will allow ONLY that specific pipeline (and any instanced
+versions of it) to assume that IAM Role using a JWT. If you use the `subject_scope` setting to change the contents of
+your sub-claim, adapt this condition accordingly! In the next step you will be able to choose which AWS permissions your
+role will get.
+
+Now you can use
+the [AWS AssumeRoleWithWebIdentity API operation](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html)
+to assume your role via a JWT issued by Concourse. The easiest way to do this is via
+the [assume-role-with-web-identity AWS CLI command](https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role-with-web-identity.html):
+
+```yaml
+var_sources:
+  - name: awstoken
+    type: idtoken
+    config:
+      audience: [ "sts.amazonaws.com" ]
+
+jobs:
+  - name: aws-login
+    plan:
+      - task: print
+        config:
+          platform: linux
+          image_resource:
+            type: registry-image
+            source: { repository: amazon/aws-cli }
+          run:
+            path: bash
+            args:
+              - -e
+              - -c
+              - |
+                aws sts assume-role-with-web-identity \
+                    --role-session-name Concourse \
+                    --role-arn arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_NAME> \
+                    --web-identity-token ((awstoken:token)) > creds.json
+                echo "Now do something with the temporary credentials in creds.json"
+```
+
+### Azure
+
+Azure also supports a way to grant the holder of a JWT permissions in the Cloud. This is done via a feature
+called [Federated Credentials](https://learn.microsoft.com/en-us/graph/api/resources/federatedidentitycredentials-overview?view=graph-rest-1.0).
+
+First, [create an EntraID App Registration](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app).
+This app registration will be the service principal used by your pipeline. 
+
+Now [create a federated credential](https://learn.microsoft.com/en-us/entra/workload-id/workload-identity-federation-create-trust?pivots=identity-wif-apps-methods-azp#other-identity-providers)
+for the app registration you just created.
+
+For _Scenario_ select "Other". For _Issuer_ set it to the external URL of your Concourse server. For _Type_ select "
+Explicit subject identifier" and set _Value_ to `<team_name>/<pipeline_name>` of the pipeline that should be able to use
+the identity. If you use the `subject_scope` setting to change the contents of your sub-claim, change this setting here
+accordingly.
+
+You can now assign IAM permissions to the identity of the app registration, which define what the identity is allowed to
+do in your Azure subscription.
+
+Your pipeline can now use the `az cli` to log in to Azure using a JWT generated by Concourse:
+
+```yaml
+var_sources:
+  - name: azuretoken
+    type: idtoken
+    config:
+      audience: [ "api://AzureADTokenExchange" ]
+
+jobs:
+  - name: azure-deploy
+    plan:
+      - task: login
+        config:
+          platform: linux
+          image_resource:
+            type: registry-image
+            source: { repository: mcr.microsoft.com/azure-cli }
+          run:
+            path: bash
+            args:
+              - -e
+              - -c
+              - |
+                echo ((azuretoken:token))
+                az login --service-principal \
+                    -u <APP_CLIENT_ID> \
+                    --tenant <TENANT_ID> \
+                    --federated-token ((azuretoken:token))
+                echo "You are now authenticated with Azure. Do something with it!"
+```
\ No newline at end of file
diff --git a/docs/docs/operation/creds/index.md b/docs/docs/operation/creds/index.md
new file mode 100644
index 00000000..e4536763
--- /dev/null
+++ b/docs/docs/operation/creds/index.md
@@ -0,0 +1,63 @@
+---
+title: Credential Management
+---
+
+Going beyond [Encryption](../encryption.md), explicit credential management will provide credentials to your builds for
+a brief amount of time, without being persisted anywhere. 
It also allows for credentials to be rotated and managed +external to the pipeline or team, and prevents them from being revealed by [ +`fly get-pipeline`](../../pipelines/managing-pipelines.md#fly-get-pipeline). + +Credential management works by replacing the credentials with `((vars))` in your pipeline or task config. When the +Concourse is about to run the step or `check` that is configured with vars, it will resolve them by fetching the values +from the credential manager. If the values are not present, the action will error. + +The following configurations can be parameterized with a credential manager: + +* resource.source under pipeline.resources +* resource_type.source under pipeline.resource_types +* resource.webhook_token under pipeline.resources +* task step params on a task step in a pipeline +* [Tasks](../../tasks.md) in their entirety - whether from task step file or task step config in a pipeline, or a config + executed with [fly execute](../../tasks.md) + +Where these values are looked up and how the credential manager is configured depends on the backend. Consult the +relevant section below for whichever backend you want to use. + +
+ +- :simple-vault: Vault + + --- + [:octicons-arrow-right-24: Configure](vault.md) + +- :material-lock: CredHub + + --- + [:octicons-arrow-right-24: Configure](credhub.md) + +- :material-aws: AWS SSM + + --- + [:octicons-arrow-right-24: Configure](aws-ssm.md) + +- :material-aws: AWS Secrets Manager + + --- + [:octicons-arrow-right-24: Configure](aws-secrets.md) + +- :material-kubernetes: Kubernetes + + --- + [:octicons-arrow-right-24: Configure](kubernetes.md) + +- :material-lock: Conjur + + --- + [:octicons-arrow-right-24: Configure](conjur.md) + +- :material-openid: IDToken + + --- + [:octicons-arrow-right-24: Configure](id-token.md) + +
\ No newline at end of file diff --git a/docs/docs/operation/creds/kubernetes.md b/docs/docs/operation/creds/kubernetes.md new file mode 100644 index 00000000..073a9a0f --- /dev/null +++ b/docs/docs/operation/creds/kubernetes.md @@ -0,0 +1,26 @@ +--- +title: Kubernetes Credential Manager +--- + +Concourse can be configured to pull credentials +from [Kubernetes `secret` objects](https://kubernetes.io/docs/concepts/configuration/secret). + +To configure it, either enable the in-cluster client by setting the following environment variable on +the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_KUBERNETES_IN_CLUSTER=true +``` + +or set the path to a `kubeconfig` file: + +```properties +CONCOURSE_KUBERNETES_CONFIG_PATH=~/.kube/config +``` + +## Credential lookup rules + +When resolving a parameter such as `((foo))`, Concourse will look for it in the following order in the namespace +configured for that team: + +## Configuring Kubernetes RBAC \ No newline at end of file diff --git a/docs/docs/operation/creds/redacting.md b/docs/docs/operation/creds/redacting.md new file mode 100644 index 00000000..54885947 --- /dev/null +++ b/docs/docs/operation/creds/redacting.md @@ -0,0 +1,62 @@ +--- +title: Redacting credentials +--- + +Concourse can be configured to automatically redact credentials from build output like so: + +```properties +CONCOURSE_ENABLE_REDACT_SECRETS=true +``` + +This behavior is off by default as there is likely a CPU performance overhead on +the [`web` nodes](../../install/running-web.md) involved with enabling it. It will be on by default once we've confirmed +that it performs well enough at large scale. + +When enabled, Concourse will keep track of the credential values which were used in a build. When writing build logs to +the database, it will replace any occurrence of these values with the text `((redacted))`. 
+ +Say you're running a task which runs the following script: + +```shell +set -e -u -x + +echo $SECRET > some-file +sha1sum some-file +``` + +!!! note + + The `set -x` - the root cause of many accidental credential leaks. + +Let's say you have a job which runs this task, providing the `$SECRET` parameter using a credential manager `((var))`: + +```yaml +plan: + - task: use-secret + file: # ... + params: { SECRET: ((some-var)) } +``` + +With `hello` in `some-var`, this will result in the following build output: + +```shell ++ echo ((redacted)) ++ sha1sum some-file +f572d396fae9206628714fb2ce00f72e94f2258f some-file +``` + +Going a step further, what happens when that var has multiple lines of output, like `"hello\ngoodbye"`? + +```shell ++ echo ((redacted)) ((redacted)) ++ sha1sum some-file +638e5ebcd06a5208906960aa5fbe1d4ebd022771 some-file +``` + +What happened here? Well, because we didn't quote the `$SECRET` var arg to `echo`, it squashed the lines together into +arguments. This _could_ have confused our redacting logic and resulted in leaking the credential, but because Concourse +redacts secret values line-by-line, we're still OK. This will also help with JSON marshalled credential values, which +get interspersed with `\n` in a string literal. + +Although Concourse tries to be thorough in its redacting of credentials, the best way to prevent credential leakage is +to not accidentally print them in the first place. Think of this as an airbag, not a seatbelt! \ No newline at end of file diff --git a/docs/docs/operation/creds/retrying-failed.md b/docs/docs/operation/creds/retrying-failed.md new file mode 100644 index 00000000..f33a79cc --- /dev/null +++ b/docs/docs/operation/creds/retrying-failed.md @@ -0,0 +1,20 @@ +--- +title: Retrying failed fetches +--- + +When a request to the credential manager fails due to an intermittent error (e.g. a timeout or `connection refused`), +Concourse will automatically try the request again up to 5 times before giving up. 
After all attempts fail, the error +will be surfaced in the UI for the resource check or build step that initiated the request. + +The retry logic can be configured by specifying the following env on the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_SECRET_RETRY_ATTEMPTS=5 # how many times to try +CONCOURSE_SECRET_RETRY_INTERVAL=10s # how long to wait between attempts +``` + +!!! note "Vault Credential Manager" + + As Vault API client already does retry which has covered the retry conditions of this general secret fetching retry, + if a deployment uses Vault credential manager, `CONCOURSE_SECRET_RETRY_ATTEMPTS` can be set to 0 (or a small value, + like 1 or 2) in order to avoid duplicate retries. \ No newline at end of file diff --git a/docs/docs/operation/creds/vault.md b/docs/docs/operation/creds/vault.md new file mode 100644 index 00000000..02036ed2 --- /dev/null +++ b/docs/docs/operation/creds/vault.md @@ -0,0 +1,318 @@ +--- +title: The Vault credential manager +--- + +Concourse can be configured to pull credentials from a [Vault](https://vaultproject.io/) instance. + +To configure this, first configure the URL of your Vault server by setting the following env on +the [`web` node](../../install/running-web.md): + +```properties +CONCOURSE_VAULT_URL=https://vault.example.com:8200 +``` + +You may also need to configure the CA cert for Vault: + +```properties +CONCOURSE_VAULT_CA_CERT=path/to/ca.crt +``` + +You'll also need to configure how the `web` node authenticates with Vault - +see [Authenticating with Vault](https://concourse-ci.org/vault-credential-manager.html#authenticating-with-vault) for +more details as that step is quite involved. + +## Credential lookup rules + +Vault lets you organize secrets into hierarchies, which is useful for when they should be accessible for particular +pipelines or teams. 
When you have a parameter like `((foo))` in a pipeline definition, Concourse will (by default) look
+for it in the following paths, in order:
+
+* `/concourse/TEAM_NAME/PIPELINE_NAME/foo`
+* `/concourse/TEAM_NAME/foo`
+
+Vault credentials are actually key-value, so for `((foo))` Concourse will default to the field name `value`. You can
+specify the field to grab via `.` syntax, e.g. `((foo.bar))`.
+
+If you have multiple, intermediate levels in your path, you can use the `/` separator to reach your intended field,
+e.g. `((foo/bar/baz.qux))`.
+
+When executing a one-off task, there is no pipeline: so in this case, only the team path `/concourse/TEAM_NAME/foo` is
+searched.
+
+There are several ways to customize the lookup logic:
+
+1. Add a "shared path", for secrets common to all teams.
+2. Change the team- and pipeline-dependent path templates.
+3. Change the path prefix from `/concourse` to something else.
+4. Set a [Vault namespace](https://www.vaultproject.io/docs/enterprise/namespaces/) for isolation within a Vault
+   Enterprise installation.
+
+Each of these can be controlled by Concourse command line flags, or environment variables.
+
+### Configuring a shared path
+
+A "shared path" can also be configured for credentials that you would like to share across all teams and pipelines,
+foregoing the default team/pipeline namespacing. Use with care!
+
+```properties
+CONCOURSE_VAULT_SHARED_PATH=some-shared-path
+```
+
+This path must exist under the configured path prefix. The above configuration would correspond
+to `/concourse/some-shared-path` with the default `/concourse` prefix.
+
+### Changing the path templates
+
+You can choose your own list of templates, which will expand to team- or pipeline-specific paths. These are subject to
+the path prefix. 
By default, the templates used are: + +```properties +CONCOURSE_VAULT_LOOKUP_TEMPLATES=/{{.Team}}/{{.Pipeline}}/{{.Secret}},/{{.Team}}/{{.Secret}} +``` + +When secrets are to be looked up, these are evaluated subject to the configured path prefix, where `{{.Team}}` expands +to the current team, `{{.Pipeline}}` to the current pipeline (if any), and `{{.Secret}}` to the name of the secret. So +if the settings are: + +```properties +CONCOURSE_VAULT_PATH_PREFIX=/secrets +CONCOURSE_VAULT_LOOKUP_TEMPLATES=/{{.Team}}/concourse/{{.Pipeline}}/{{.Secret}},/{{.Team}}/concourse/{{.Secret}},/common/{{.Secret}} +``` + +and `((password))` is used in team `myteam` and pipeline `mypipeline`, Concourse will look for the following, in order: + +1. `/secrets/myteam/concourse/mypipeline/password` +2. `/secrets/myteam/concourse/password` +3. `/secrets/common/password` + +### Changing the path prefix + +The leading `/concourse` can be changed by specifying the following: + +```properties +CONCOURSE_VAULT_PATH_PREFIX=/some-other-prefix +``` + +### Using a Vault namespace + +If you are using Vault Enterprise, you can make secret lookups and authentication happen under a namespace. + +```properties +CONCOURSE_VAULT_NAMESPACE=chosen/namespace/path +``` + +This setting applies to all teams equally. + +## Configuring the secrets engine + +Concourse is currently limited to looking under a single path, meaning enabling only one secrets engine is +supported: `kv`, or `kv_v2`. This may change in the future - we're still collecting ideas +in [RFC #21](https://github.com/concourse/rfcs/pull/21). + +Using kv version 2 enables versioned secrets and the ability to restore previous versions or deleted secrets. Concourse +will read the latest version of a secret at all times and if it is deleted it will appear as if the secret does not +exist. More information regarding the Vault KV backend and the differences in versions can be +found [here](https://www.vaultproject.io/docs/secrets/kv). 
+ +So, let's configure the kv secrets engine and mount it at `/concourse`: + +```shell +vault secrets enable -version=1 -path=concourse kv +``` + +To enable kv_v2 and versioned secrets: + +```shell +vault secrets enable -version=2 -path=concourse kv +``` + +Next, you'll want to create a policy to allow Concourse to read from this path. + +```hcl +path "concourse/*" { + capabilities = ["read"] +} +``` + +Save this to `concourse-policy.hcl`, and then run: + +```shell +vault policy write concourse ./concourse-policy.hcl +``` + +This configuration will allow Concourse to read all credentials under `/concourse`. This should match your configured +path prefix. + +## Authenticating with Vault + +There are many ways to authenticate with a Vault server. The `web` node can be configured with either a token or an +arbitrary auth backend and arbitrary auth params, so just about all of them should be configurable. + +When the `web` node acquires a token, either by logging in with an auth backend or by being given one directly, it will +continuously renew the token to ensure it doesn't expire. The renewal interval is half of the token's lease duration. + +### Using a periodic token + +The simplest way to authenticate is by generating a periodic token: + +```shell +$ vault token create --policy concourse --period 1h +Key Value +--- ----- +token s.mSNnbhGAqxK2ZbMasOQ91rIA +token_accessor 0qsib5YcYvROm86cT08IFxIT +token_duration 1h +token_renewable true +token_policies [concourse default] +``` + +!!! warning + + Choose your `--period` wisely, as the timer starts counting down as soon as the token is created. You should also + use a duration long enough to account for any planned `web` node downtime. 
+ +Once you have the token, just set the following env on the `web` node: + +```properties +CONCOURSE_VAULT_CLIENT_TOKEN=s.mSNnbhGAqxK2ZbMasOQ91rIA +``` + +Periodic tokens are the quickest way to get started, but they have one fatal flaw: if the `web` node is down for longer +than the token's configured period, the token will expire and a new one will have to be created and configured. This can +be avoided by using the [`approle` auth backend](#using-the-approle-auth-backend). + +### Using the `approle` auth backend + +The [`approle`](https://www.vaultproject.io/docs/auth/approle.html) backend allows for an _app_ (in this case, +Concourse) to authenticate with a _role_ pre-configured in Vault. + +With this backend, the [`web` node](../../install/running-web.md) is configured with a `role_id` corresponding to a +pre-configured role, and a `secret_id` which is used to authenticate and acquire a token. + +The `approle` backend must first be configured in Vault. Vault's `approle` backend allows for a few parameters which you +may want to set to determine the permissions and lifecycle of its issued tokens: + +`policies=names` + +: This determines the policies (comma-separated) to set on each token. Be sure to set one that has access to the secrets +path - see [Configuring the secrets engine](#configuring-the-secrets-engine) for more information. + +`token_ttl=duration` + +: This determines the TTL for each token granted. The token can be continuously renewed, as long as it is renewed before +the TTL elapses. + +`token_max_ttl=duration` + +: This sets a maximum lifetime for each token, after which the token can no longer be renewed. + +: If configured, be sure to set the same value on the `web` node so that it can re-auth before this duration is reached: +```properties +CONCOURSE_VAULT_AUTH_BACKEND_MAX_TTL=1h +``` + +`period=duration` + +: If configured, tokens issued will be [periodic](https://www.vaultproject.io/docs/concepts/tokens.html#periodic-tokens) +. 
Periodic tokens are not bound by any configured max TTL, and can be renewed continuously. It does not make sense to +configure both `period` and `token_max_ttl` as the max TTL will be ignored. + +`token_num_uses=count` + +: This sets a limit on how often a token can be used. **We do not recommend setting this value**, as it will effectively +hamstring Concourse after a few credential acquisitions. The `web` node does not currently know to re-acquire a token +when this limit is reached. + +`secret_id_ttl=duration` and `secret_id_num_uses=count` + +: These two configurations will result in the secret ID expiring after the configured time or configured number of +log-ins, respectively. + +: You should only set these if you have something periodically re-generating secret IDs and re-configuring your `web` +nodes accordingly. + +Given all that, a typical configuration may look something like this: + +```shell +$ vault auth enable approle +Success! Enabled approle auth method at: approle/ +$ vault write auth/approle/role/concourse policies=concourse period=1h +Success! Data written to: auth/approle/role/concourse +``` + +Now that the backend is configured, we'll need to obtain the `role_id` and generate a `secret_id`: + +```shell +$ vault read auth/approle/role/concourse/role-id +Key Value +--- ----- +role_id 5f3420cd-3c66-2eff-8bcc-0e8e258a7d18 +$ vault write -f auth/approle/role/concourse/secret-id +Key Value +--- ----- +secret_id f7ec2ac8-ad07-026a-3e1c-4c9781423155 +secret_id_accessor 1bd17fc6-dae1-0c82-d325-3b8f9b5654ee +``` + +These should then be set on the [`web` node](../../install/running-web.md) like so: + +```properties +CONCOURSE_VAULT_AUTH_BACKEND="approle" +CONCOURSE_VAULT_AUTH_PARAM="role_id:5f3420cd-3c66-2eff-8bcc-0e8e258a7d18,secret_id:f7ec2ac8-ad07-026a-3e1c-4c9781423155" +``` + +### Using the `cert` auth backend + +The [`cert`](https://www.vaultproject.io/docs/auth/cert.html) auth method allows authentication using SSL/TLS client +certificates. 
+ +With this backend, the [`web` node](../../install/running-web.md) is configured with a client cert and a client key. +Vault must be configured with TLS, which you should be almost certainly be doing anyway. + +The `cert` backend must first be configured in Vault. Vault's `cert` backend allows for a few parameters which you may +want to set to determine the lifecycle of its issued tokens: + +`policies=names` + +: This determines the policies (comma-separated) to set on each token. Be sure to set one that has access to the secrets +path - +see [Configuring the secrets engine](https://concourse-ci.org/vault-credential-manager.html#configuring-the-secrets-engine) +for more information. + +`ttl=duration` + +: This determines the TTL for each token granted. The token can be continuously renewed, as long as it is renewed before +the TTL elapses. + +`max_ttl=duration` + +: This sets a maximum lifetime for each token, after which the token can no longer be renewed. + +: If configured, be sure to set the same value on the `web` node so that it can re-auth before this duration is reached: +```properties +CONCOURSE_VAULT_AUTH_BACKEND_MAX_TTL=1h +``` + +`period=duration` +: If configured, tokens issued will be [ +_periodic_](https://www.vaultproject.io/docs/concepts/tokens.html#periodic-tokens). Periodic tokens are not bound by any +configured max TTL, and can be renewed continuously. It does not make sense to configure both `period` and `max_ttl` as +the max TTL will be ignored. + +```shell +$ vault auth enable cert +Success! Enabled cert auth method at: cert/ +$ vault write auth/cert/certs/concourse policies=concourse certificate=@out/vault-ca.crt ttl=1h +Success! 
Data written to: auth/cert/certs/concourse +``` + +Once that's all set up, you'll just need to configure the client cert and key on the `web` node like so: + +```properties +CONCOURSE_VAULT_AUTH_BACKEND="cert" +CONCOURSE_VAULT_CLIENT_CERT=vault-certs/concourse.crt +CONCOURSE_VAULT_CLIENT_KEY=vault-certs/concourse.key +``` + +In this case no additional auth params are necessary, as the Vault's TLS auth backend will check the certificate against +all roles if no name is specified. \ No newline at end of file diff --git a/docs/docs/operation/encryption.md b/docs/docs/operation/encryption.md new file mode 100644 index 00000000..eb37da65 --- /dev/null +++ b/docs/docs/operation/encryption.md @@ -0,0 +1,90 @@ +--- +title: Encryption +--- + +Automating everything means authorizing something to automate many things. This makes CI systems a high-risk target for +security leaks. + +Concourse pipelines are loaded with credentials: resources are configured with private keys, tasks are given credentials +to servers they integrate via [credential manager variables](creds/index.md), [`task` step `vars`](../steps/task.md), +or [`task` step `params`](../steps/task.md), etc. If someone gets their hands on your config, they have access to +everything. + +To mitigate this, Concourse supports encrypting sensitive information before it reaches the database. This way the +plaintext credentials only exist in memory for as long as they need to, and if someone gains access to your database, +they can't so easily gain the keys to the kingdom. + +We strongly encourage anyone running Concourse to configure encryption. Going further, it's best to have Concourse not +store the credentials in the first place, in which case you may want to +configure [credential management](creds/index.md) as well. + +## What's encrypted? 
+ +The following values are expected to contain credentials, and so will be encrypted: + +* Resource [`resource.sources`](../resources/index.md#resource-schema), as they often contain private keys and other + credentials for writing to (or simply granting access to) the resource. +* Resource type [`resource_type.sources`](../resource-types/index.md#resource_type-schema), for the same reason as + above, though this is probably a less common use case. +* Pipeline [`task` step `vars`](../steps/task.md) and [`task` step `params`](../steps/task.md), in case they contain + sensitive information such as usernames and/or passwords. +* Put step [`put` step `params`](../steps/put.md) and get step [`get` step `params`](../steps/get.md) are also + encrypted, even though they rarely should contain credentials (they're usually in [ + `resource.source`](../resources/index.md#resource-schema)). +* Team auth configurations, as they often contain things like GitHub or other oAuth client secrets. + +!!! note + + The actual implementation encrypts things in a more heavy-handed way than the above list implies. For example, + pipeline configs are actually encrypted as one large blob. + +Notably, the following things are NOT encrypted: + +* Build logs. If your jobs are outputting credentials, encryption won't help you. We have chosen not to tackle this + initially as it would introduce a performance burden for what is not as much of an obvious win. +* Resource versions. These should never contain credentials, and are often meaningless on their own. +* Resource metadata. These are visible to anyone if your pipeline + is [exposed](https://concourse-ci.org/managing-pipelines.html#fly-expose-pipeline), and should never contain + credentials. +* Pipeline names, job names, etc. - anything else that is not a high-risk target for credential leakage, as opposed to + regular information leaks. +

+ Resources and jobs in particular exist in their own tables, with their names in plaintext, and only their config + encrypted. In this way, names are not protected, even though the pipeline config itself is also stored as one big + encrypted blob. + +## Enabling Encryption + +To enable encryption, you'll just need to come up with a 16 or 32-byte random character sequence and configure it as +`--encryption-key` flag to the `web` command. For BOSH, this is the [ +`encryption_key`](https://bosh.io/jobs/web?source=github.com/concourse/concourse-bosh-release#p=encryption_key) +property. + +On startup, the [`web` node](../install/running-web.md) will encrypt all existing plaintext data, and any new data being +written will be encrypted before it's sent over the network to the database. + +The initial bulk encryption shouldn't take too long, but it will scale linearly with the amount of data that you have, +and if another ATC is running it'll suddenly not be able to read the data until it's also given the key. So, expect some +downtime. + +## Rotating the Encryption Key + +To swap out the encryption key, you'll need to pass the previous key as `--old-encryption-key` (or [ +`old_encryption_key`](https://bosh.io/jobs/web?source=github.com/concourse/concourse-bosh-release#p=old_encryption_key)), +and the new key as `--encryption-key` (or [ +`encryption_key`](https://bosh.io/jobs/web?source=github.com/concourse/concourse-bosh-release#p=encryption_key)). + +On startup, the [`web` node](../install/running-web.md) will decrypt all existing data and re-encrypt it with the new +key, in one go. If it encounters a row which is already encrypted with the new key, it will continue on (as may be the +case when restarting with the flags again, or if the ATC died in the middle of rotating). 
If the ATC encounters a row which cannot be decrypted with either the old key or the new one, it will log loudly and
fail to start, telling you which row it choked on.
+ +## Benefits of Global Resources + +### Fewer resource `check`s to perform + +With global resources, all resources that have the same configuration will share the same version history and share only +one checking interval. This reduces load on the worker and on the external services that the resources point to. + +For example, prior to global resources if there were three resources with the same configuration between three team's +pipelines it would result in three check containers performing three resource checks every minute to fetch the versions. + +With global resources, this configuration will result in only one check container and one resource check every minute to +fetch versions for all the resources. + +Since there will be only one resource check for all resources that have the same configuration, the resource that has +the shortest [`resource.check_every`](../resources/index.md#resource-schema) configured will result in its pipeline +running the checks for that resource configuration. + +#### Complications with reusing containers + +There is an exception to sharing check containers within a deployment, which is workers belonging to a team and workers +with tags. + +If a resource has [`resource.tags`](../resources/index.md#resource-schema) configured, and the resource's check interval +ends up acquiring the checking lock, if a check container already exists with the same resource config elsewhere, it +will reuse the container, otherwise a container will be created on a worker matching the appropriate tags. + +Similarly, if a team has its own workers, and their check interval ended up acquiring the lock, it will try to re-use a +container with the same resource config from the shared worker pool, rather than creating a new container on the team's +workers. + +This is a bit complicated to reason about, and we plan to stop re-using `check` containers to simplify all of this. 
+See [concourse/concourse#3079](https://github.com/concourse/concourse/issues/3079) for more information. + +### Reducing redundant data + +The majority of Concourse resources will benefit from having versions shared globally because most resource versions +have an external source of truth. + +For example, a `check` for the [`git` resource](https://github.com/concourse/git-resource) that pulls in the +`concourse/concourse` repository will always return the same set of versions as an equivalent resource pointing to the +same repository. By consolidating the `check`s and the versions, there will essentially only be one set of versions +collected from the repository and saved into the database. + +### Reliable Resource Version History + +Prior to global resources, a resource's version history was directly associated to the resource name. This meant that +any changes to a resource's configuration without changing its name would basically append the versions from the new +configuration after the old versions, which are no longer accurate to the current configuration. + +Global resources instead associates the resource versions to the resource's `resource.type` and `resource.source`. +Therefore, whenever a resource definition changes, the versions will "reset" and change along with it, resulting in +truthful and reliable version histories. + +## Risks and Side Effects + +### Sharing versions doesn't work well for all resource types + +Sharing versions isn't always a good idea. For example, the [ +`time` resource](https://github.com/concourse/time-resource) is often used to generate versions on an interval so that +jobs can fire periodically. If version history were to be shared for all users with e.g. a 10-minute interval, that +would lead to a thundering herd of builds storming your workers, leading to load spikes and a lot of unhappy builds. 
+ +We are working toward a solution to the [`time` resource](https://github.com/concourse/time-resource)'s thundering herd +problem - namely, to not model time as a resource, and instead model it as a [ +`var_source`](../vars.md#var_source-schema). We are tracking progress toward this goal +in [concourse/concourse#5815](https://github.com/concourse/concourse/issues/5815). + +Another case where version history shouldn't be shared is when resources "automagically" learn their auth credentials +using things like IAM roles. In these cases, the credentials aren't in the `resource.source`. If version history were to +be shared, anyone could configure the same `source:`, not specifying any credentials, and see the version history +discovered by some other pipeline that ran its checks on workers that had access via IAM roles. + +For this reason, any resource types that acquire credentials outside of `source:` should not share version history. +Granted, the user won't be able to fetch these versions, but it's still an information leak. + +IAM roles are a bit of a thorn in our side when it comes to designing features like this. We're planning on introducing +support for them in a way that doesn't have this problem +in [concourse/concourse#3023](https://github.com/concourse/concourse/issues/3023). + +### Intercepting check containers is no longer safe + +Now that `check` containers are shared across teams, it would be dangerous to allow anyone +to [`fly intercept`](../builds.md#fly-intercept) to `check` containers. For this reason, this capability is limited +to [admin users](../auth-and-teams/user-roles.md#concourse-admin). + +We recognize that this will make it a bit more difficult for end users to debug things like failing checks. We plan to +improve this by introducing a way to provision a _new_ `check` container to facilitate debugging. 
+See [concourse/concourse#3344](https://github.com/concourse/concourse/issues/3344) for more information. \ No newline at end of file diff --git a/docs/docs/operation/index.md b/docs/docs/operation/index.md new file mode 100644 index 00000000..cfff8c8d --- /dev/null +++ b/docs/docs/operation/index.md @@ -0,0 +1,10 @@ +--- +title: Operation +--- + +The following sections describes operator-focused features and tools that Concourse provides, such as monitoring and +credential management. + +These concepts are not required to operate Concourse, but are for users that are looking to extend the capabilities of +managing a Concourse deployment. For users that are new to these concepts, we do recommend learning how to set +up [Credential Management](creds/index.md) and [Encryption](encryption.md). \ No newline at end of file diff --git a/docs/docs/operation/metrics.md b/docs/docs/operation/metrics.md new file mode 100644 index 00000000..1314a05e --- /dev/null +++ b/docs/docs/operation/metrics.md @@ -0,0 +1,29 @@ +--- +title: Metrics +--- + +Metrics are essential in understanding how any large system is behaving and performing. Concourse can emit metrics about +both the system health itself and about the builds that it is running. Operators can tap into these metrics in order to +observe the health of the system. + +## Configuring Metrics + +The [`web` node](../install/running-web.md) can be configured to emit metrics on start. + +Currently supported metrics emitters are InfluxDB, NewRelic, Prometheus, and Datadog. There is also a dummy emitter that +will just spit the metrics out in to the logs at `DEBUG` level, which can be enabled with the `--emit-to-logs` flag. + +Regardless of your metrics emitter, you can set `CONCOURSE_METRICS_BUFFER_SIZE` to determine how many metrics emissions +are sent at a time. Increasing this number can be helpful if sending metrics is regularly failing (due to rate limiting +or network failures) or if latency is particularly high. 
+ +There are various flags for different emitters; run `concourse web --help` and look for "Metric Emitter" to see what's +available. + +## What's emitted? + +This reference section lists of all the metrics that Concourse emits via the Prometheus emitter. + +To make this document easy to maintain, Prometheus is used as the "source of truth" - primarily because it has help text +built-in, making this list easy to generate. Treat this list as a reference when looking for the equivalent metric names +for your emitter of choice. \ No newline at end of file diff --git a/docs/docs/operation/opa-integration.md b/docs/docs/operation/opa-integration.md new file mode 100644 index 00000000..0054d996 --- /dev/null +++ b/docs/docs/operation/opa-integration.md @@ -0,0 +1,230 @@ +--- +title: Open Policy Agent Integration +--- + +!!! note + + The [Open Policy Agent](https://www.openpolicyagent.org/docs/latest/) (OPA, pronounced “oh-pa”) is an open source, + general-purpose policy engine that unifies policy enforcement across the stack. + +OPA allows you to create arbitrary rules within Concourse without having to add a new feature to Concourse. You could +even recreate Concourse's [RBAC system](../auth-and-teams/user-roles.md) using OPA. + +More likely use-cases are to enforce rules your organization may have, such as not using certain container images or +disallowing the use of privileged workloads. With OPA you can be as general or fine-grained as you want, enforcing these +rules at the team or pipeline level. + +The next few sections explain how to configure Concourse to talk to an OPA server and how to write OPA rules for +Concourse. + +## Configuring Concourse + +There are four configuration options you need to set on the `concourse web` nodes to have them interact with OPA. + +`CONCOURSE_OPA_URL`: The OPA policy check endpoint. + +: Should point to a specific `package/rule` that contains all Concourse rules for your cluster. 
+ +: _Example_: `http://opa-endpoint.com/v1/data/concourse/decision` + +`CONCOURSE_POLICY_CHECK_FILTER_HTTP_METHOD`: API http methods to go through policy check. + +: You will need to make sure these match up with an API action in the next two configuration options. + +: _Example_: `PUT,POST` + +`CONCOURSE_POLICY_CHECK_FILTER_ACTION`: Actions in this list will go through policy check. + +: _Example_: `ListWorkers,ListContainers` + +`CONCOURSE_POLICY_CHECK_FILTER_ACTION_SKIP`: Actions in this list will not go through policy check + +: _Example_: `PausePipeline,UnpausePipeline` + +For the last three configuration options you can refer +to [this list of routes](https://github.com/concourse/concourse/blob/master/atc/routes.go) for a list of API actions and +their respective HTTP method. There are also some [Special Actions](#special-actions) not directly in the API. + +## Writing OPA Rules + +On the OPA server you'll need to create a package and policy for Concourse. This should match up with the endpoint +provided to Concourse. The [OPA documentation](https://www.openpolicyagent.org/docs/latest/) has a good guide explaining +how to generally write OPA rules and set up an OPA server. + +For any actions that Concourse has been configured to filter it will send a JSON request to the OPA server with the +following details. Top-level data directly under the `input` key will be present for most actions. The information under +the `data` key will differ based on the action being checked. + +This sample JSON payload is what OPA is sent when a user sets a pipeline. The `data` key contains the pipeline in JSON +format. 
+ +```json +{ + "input": { + "service": "concourse", + "cluster_name": "dev", + "cluster_version": "7.4.0", + "http_method": "PUT", + "action": "SaveConfig", + "user": "test", + "team": "main", + "pipeline": "check-pipeline", + "data": { + "jobs": [ + { + "name": "test", + "plan": [ + { + "get": "tiny" + }, + { + "config": { + "image_resource": { + "source": { + "repository": "busybox" + }, + "type": "registry-image" + }, + "platform": "linux", + "run": { + "args": [ + "-exc", + "echo hello" + ], + "path": "sh" + } + }, + "task": "a-task" + } + ] + } + ] + } + } +} +``` + +An OPA rule can respond to Concourse with three fields: + +
+ +* `allowed` (_required_): Boolean type. Setting to `False` will deny the action unless the `block` field is `False`. +* `block` (_optional_): Boolean type. If set to `False` and `allowed` is `True` this creates a soft-policy enforcement. + The action will be allowed and the `reasons` will still be printed to the web UI like a warning message. (1) +* `reasons` (_optional_): List of string type. If an action is denied based on the `allowed` field then the reason(s) + will be displayed in the UI. + +
+ +1. Not setting `block` is the same as setting `"block": true`. + +Here is an example OPA policy. By default, it will allow whatever action it has been sent. It will deny the action if +one or more of the three deny rules are true. + +```rego title="concourse.rego" linenums="1" +package concourse + +default decision = {"allowed": true} + +decision = {"allowed": false, "reasons": reasons} { + count(deny) > 0 + reasons := deny +} + +deny["cannot use docker-image types"] { + input.action == "UseImage" + input.data.image_type == "docker-image" +} + +deny["cannot run privileged tasks"] { + input.action == "SaveConfig" + input.data.jobs[_].plan[_].privileged +} + +deny["cannot use privileged resource types"] { + input.action == "SaveConfig" + input.data.resource_types[_].privileged +} +``` + +## Special Actions + +Most of the actions you can filter for come directly from the list +of [API actions](../auth-and-teams/user-roles.md#action-matrix). There are currently two special actions you can also +filter on. + +### `UseImage` + +Before Concourse starts a container you can check what image it is going to use to create the container. Depending on +the `image_type` the `image_source` field may contain other fields. The JSON payload for this action will look similar +to the following example: + +```json +{ + "input": { + "service": "concourse", + "cluster_name": "dev", + "cluster_version": "7.4.0", + "action": "UseImage", + "team": "main", + "pipeline": "simple", + "data": { + "image_type": "registry-image", + "privileged": true, + "image_source": { + "repository": "alpine", + "tag": "latest" + } + } + } +} +``` + +### `SetPipeline` + +This action occurs whenever a [`set_pipeline` step](../steps/set-pipeline.md) is run. 
The JSON payload for this action +will contain the pipeline config in JSON format under the `data` key: + +```json +{ + "input": { + "service": "concourse", + "cluster_name": "dev", + "cluster_version": "7.4.0", + "action": "SetPipeline", + "team": "main", + "pipeline": "simple", + "data": { + "jobs": [ + { + "name": "test", + "plan": [ + { + "get": "tiny" + }, + { + "config": { + "image_resource": { + "source": { + "repository": "busybox" + }, + "type": "registry-image" + }, + "platform": "linux", + "run": { + "args": [ + "-exc", + "echo hello" + ], + "path": "sh" + } + }, + "task": "a-task" + } + ] + } + ] + } + } +} +``` \ No newline at end of file diff --git a/docs/docs/operation/security-hardening.md b/docs/docs/operation/security-hardening.md new file mode 100644 index 00000000..1259c12d --- /dev/null +++ b/docs/docs/operation/security-hardening.md @@ -0,0 +1,49 @@ +--- +title: Security Hardening +--- + +Concourse tasks run in containers, which provide a degree of isolation from the host. However, if inadequate attention +is paid to security, anyone with the ability to update pipelines or modify a script called in a task might be able to +escape from the container and take control of the host. From there, they could access other host resources, interfere +with pipelines they might not otherwise have access to, and collect credentials. + +Following the guidance in this section can help you to greatly reduce the risk of a container escape. + +## Keeping your kernel up-to-date + +Containers run in different Linux namespaces on the same Linux kernel as the host system. Vulnerabilities in the kernel +version you run can allow for local privilege escalation - which in the Concourse context means allowing an escape from +a Concourse task to full root privileges on the host. 
+ +You can greatly reduce the risk of container escapes by staying up to date with your kernel version, tracking either the +latest release, or the latest kernel from a Linux distribution with a reputable security programme. + +## Locking down privileged mode + +By default, privileged mode (i.e. tasks with `privileged: true` on the task step) grants containers a very wide set of +Linux capabilities, without any restrictions on syscalls allowed. These privileges are enough to load a kernel module ( +allowing arbitrary privilege escalation and container escape), as well as direct access to all host devices. As such, by +default, privileged tasks are equivalent to full root access on the host. + +If you are running a worker using the containerd container runtime, Concourse provides some options to reduce the risk +of container escapes through privileged tasks. + +The `--containerd-privileged-mode=ignore` (or by environment variable, `CONCOURSE_CONTAINERD_PRIVILEGED_MODE=ignore`) +option to the worker is the most restrictive, but most secure option. It makes Concourse treat privileged tasks the same +as normal tasks (i.e. grants no extra privileges, effectively disabling privileged tasks). While this is secure, it is +also restrictive if you want to do things like build or run containers inside tasks. + +The `--containerd-privileged-mode=fuse-only` (or by environment variable, +`CONCOURSE_CONTAINERD_PRIVILEGED_MODE=fuse-only`) option to the worker makes it possible to secure privileged tasks +against container escape, while still allowing privileged tasks to build container images with buildah, and run them +with podman from inside the task. + +!!! warning "Caution" + + For the fuse-only privileged mode option to be secure against escapes from privileged tasks, you must run your + worker in a container with user namespaces enabled. 
Privileged containers in fuse-only mode have `CAP_SYS_ADMIN` + capability, which is harmless when in a non-default user namespace, but equivalent to full root on the host + otherwise. When running the worker in a Docker or podman container, refer to the + [Docker](https://docs.docker.com/engine/security/userns-remap/#enable-userns-remap-on-the-daemon) or + [Podman](https://docs.podman.io/en/latest/markdown/podman-run.1.html#subuidname-name) docs to learn how to set up + user namespaces. \ No newline at end of file diff --git a/docs/docs/operation/tracing.md b/docs/docs/operation/tracing.md new file mode 100644 index 00000000..2339f958 --- /dev/null +++ b/docs/docs/operation/tracing.md @@ -0,0 +1,105 @@ +--- +title: Tracing +--- + +!!! warning "Experimental Feature" + + Tracing is an experimental feature. + +Tracing in Concourse enables the delivery of traces related to the internal processes that go into running builds, and +other internal operations, breaking them down by time, and component. + +It leverages the ([OpenTelemetry](https://opentelemetry.io/)) SDK to allow support for many platforms. 
Currently, tracing
can be configured to integrate with:
To export spans to the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) via the OTLP
Exporter, specify your collector access endpoint:
+ +* `scanner.Run` -- An execution of the [Resource Checker](../internals/checker.md), responsible for determining which + resources need to be checked. + * `scanner.check` -- This operation simply represents inserting the check in the database. +* `scheduler.Run` -- This represents one tick of the [Build Scheduler](../internals/scheduler.md). + * `schedule-job` -- this is the same operation scoped to a single job. + * `Algorithm.Compute` -- this is where the [Algorithm](../internals/scheduler.md#algorithm) determines inputs + for a job. Each of the resolvers below describes a different strategy for determining inputs, depending on the + job's config. + * `individualResolver.Resolve` -- This is used to determine versions input to a [ + `get` step](../steps/get.md) without `passed` constraints. + * `groupResolver.Resolve` -- This is the juicy part of the algorithm, which deals with `passed` constraints. + * `pinnedResolver.Resolve` -- This operation is used to determine inputs + when [Version Pinning](../resources/resource-versions.md) is at play. + * `job.EnsurePendingBuildExists` -- This is where a new build, if deemed necessary by scheduling constraints, + will be inserted into the database. This operation follows from `scanner.Run` above and will appear under the + same trace as the check which produced the resource version responsible for triggering the new build. +* `build` -- this is the primary operation performed by the [Build Tracker](../internals/build-tracker.md). When a build + is automatically triggered, this span follows from the `job.EnsurePendingBuildExists` operation which created the + build, appearing in the same trace. + * `get` -- this tracks the execution of a [`get` step](../steps/get.md). + * `put` -- this tracks the execution of a [`put` step](../steps/put.md). + * `task` -- this tracks the execution of a [`task` step](../steps/task.md). + * `set_pipeline` -- this tracks the execution of a [`set_pipeline` step](../steps/set-pipeline.md). 
+ * `load_var` -- this tracks the execution of a [`load_var` step](../steps/load-var.md). \ No newline at end of file diff --git a/docs/docs/operation/tuning.md b/docs/docs/operation/tuning.md new file mode 100644 index 00000000..05e81cd1 --- /dev/null +++ b/docs/docs/operation/tuning.md @@ -0,0 +1,149 @@ +--- +title: Performance Tuning +--- + +By default, Concourse is configured to feel very snappy. This is good for when you are first trying out Concourse or +using it on a small team with a few dozen pipelines. + +When you begin trying to scale Concourse is where fires can start breaking out. This section will go over some +configuration values in Concourse that you can change to make scaling easier. + +## The Big Caveat + +Track [Metrics](metrics.md)! Everything you read next could be all for nothing if you don't have metrics to track where +the bottlenecks are in your Concourse system. We highly suggest tracking metrics so you have a clear before and after +picture for any changes you make and to clearly see if you're moving things in the right direction. + +## Build Logs + +Is the size of your database growing dramatically? Can't keep up with the storage costs? Then you should probably +configure some default log retention settings. + +By default, Concourse will not delete any of your logs from your pipelines. You have to opt in to having Concourse +automatically delete build logs for you. You can set a time-based retention policy and/or a policy based on the number +of logs a job generates. + +### `CONCOURSE_DEFAULT_BUILD_LOGS_TO_RETAIN` + +Determines how many build logs to retain per job by default. If you set this to `10` then any jobs in your pipelines +that have more than ten builds will have the extra logs for those builds deleted. + +Users can override this value in their pipelines. + +### `CONCOURSE_MAX_BUILD_LOGS_TO_RETAIN` + +Determines how many build logs to retain per job. Users cannot override this setting. 
+ +### `CONCOURSE_DEFAULT_DAYS_TO_RETAIN_BUILD_LOGS` + +Determines how old build logs have to be before they are deleted. Setting this to a value like `10` will result in any +build logs older than 10 days to be deleted. + +Users can override this value in their pipelines. + +### `CONCOURSE_MAX_DAYS_TO_RETAIN_BUILD_LOGS` + +Determines how old build logs have to be before they are deleted. Users cannot override this setting in their pipelines. + +## Resource Checking + +By default, Concourse checks any given resource every ~1min. This makes Concourse feel snappy when you first start using +it. Once you start trying to scale though the amount of checks can begin to feel aggressive. The following settings can +help you reduce the load caused by resource checking. + +### `CONCOURSE_RESOURCE_CHECKING_INTERVAL` + +This is where the default value for 1min checks comes from. Changing this value changes the default checking interval +for all resources. Users can override this value when defining a resource with the [ +`resource.check_every`](../resources/index.md#resource-schema) field. + +### `CONCOURSE_RESOURCE_WITH_WEBHOOK_CHECKING_INTERVAL` + +Same as the previous var but only applies to resources with webhooks. Could use this to disable resource checking of +resources that use webhooks by setting it to a large value like `99h`. + +### `CONCOURSE_MAX_CHECKS_PER_SECOND` + +Maximum number of checks that can be started per second. This will be calculated as +`(# of resources)/(resource checking interval)`. If you're finding that too many resource checks are running at once and +consuming a lot of resources on your workers then you can use this var to reduce the overall load. + +A value of `-1` will remove this maximum limit of checks per second. + +## Pipeline Management + +Here are some flags you can set on the web node to help manage the amount of resources pipelines consume. These flags +are mostly about ensuring pipelines don't run forever without good reason. 
+ +### `CONCOURSE_PAUSE_PIPELINES_AFTER` + +This flag takes a number representing the number of days since a pipeline last ran before it's automatically paused. So +specifying `90` means any pipelines that last ran 91 days ago will be automatically paused. + +For large instances it can be common for users to set a pipeline and then forget about it. The pipeline may never run +another job again and be forgotten forever. Even if the jobs in the pipeline never run Concourse will still be running +resource checks for that pipeline, if any resources are defined. By setting this flag you can ensure that any pipelines +that meet this criteria will be automatically paused and not consume resources long-term. For some large instances this +can mean up to 50% of pipelines eventually being paused. + +### `CONCOURSE_DEFAULT_TASK_{CPU/MEMORY}_LIMIT` + +Global defaults for CPU and memory you can set. Only applies to tasks, not resource containers (`check/get/put` steps). +You can read more about how to set these limits on the [`task` step `container_limits`](../steps/task.md) page. + +Users can override these values in their pipelines. + +### `CONCOURSE_DEFAULT_{GET/PUT/TASK}_TIMEOUT` + +Global defaults for how long the mentioned step takes to execute. Useful if you're finding your users write pipelines +with tasks that get stuck or never end. Ensures that every build eventually finishes. + +Users can override these values in their pipelines. + +## Container Placement + +If you find that workers keep crashing due to high CPU and/or memory usage then you could try specifying a custom +container placement strategy or strategy chain. The [Container Placement](container-placement.md) page has some examples +of container placement strategy chains you can use. + +## Garbage Collection + +When jobs fail or error out in Concourse their resources are not immediately cleaned up. The container and storage space +remain on a worker for some period of time before they get garbage collected. 
If you want to make the garbage collector +more aggressive you can change the following settings on your web node: + +### `CONCOURSE_GC_FAILED_GRACE_PERIOD` + +This env var only applies to containers where the job failed and has the longest grace period among all the other GC +grace periods. It has a default value of `120h` (five days). + +The reason the default value is so long is so users don't feel rushed to investigate their failed job. A job can fail +over a weekend and users can investigate the failed jobs containers when they come back on Monday. + +Failed containers get garbage collected as soon as a new build of the job is kicked off. So you don't have to worry about failed +containers always hanging around for five days. They'll only hang around for that long if they're the most recent build +of a job. + +If you notice a lot of containers and volumes hanging around that are tied to failed jobs you can try reducing this +setting to fewer days or even a few hours. + +### Other GC Grace Periods + +Depending on what a container was used for and its exit condition, there are various flags you can adjust to make +Concourse GC these resources faster or slower. The following env vars cover the cases where you probably don't need the +container hanging around for very long. They have a default value of `5m`. + +* `CONCOURSE_GC_ONE_OFF_GRACE_PERIOD` - Period after which one-off build containers will be garbage-collected +* `CONCOURSE_GC_MISSING_GRACE_PERIOD` - Period after which containers and volumes that were created but went missing + from the worker will be garbage-collected +* `CONCOURSE_GC_HIJACK_GRACE_PERIOD` - Period after which hijacked containers will be garbage-collected + +## Web To Worker Ratio + +This is anecdotal, and you should adjust based on your metrics of your web nodes. A starting ratio of web to workers is +1:6; one web instance for every six workers. + +The core Concourse team runs two web nodes and 16 workers, a 1:8 ratio. 
We can get away with this lower web to worker +ratio because we don't have that many users actively interacting with the web UI on a daily basis; less than 10 active +users. Since we're only one team using the instance we have fewer pipelines than an instance supporting multiple teams +would. \ No newline at end of file diff --git a/docs/docs/pipelines/grouping-pipelines.md b/docs/docs/pipelines/grouping-pipelines.md new file mode 100644 index 00000000..0b662c00 --- /dev/null +++ b/docs/docs/pipelines/grouping-pipelines.md @@ -0,0 +1,212 @@ +--- +title: Grouping Pipelines +--- + +!!! warning "Experimental Feature" + + Instanced Pipelines/Instance Groups are currently experimental, and are subject to change. + + To experiment with Instanced Pipelines on your deployment, you need to set the feature flag + `--enable-pipeline-instances` (`CONCOURSE_ENABLE_PIPELINE_INSTANCES=true`) + +Although pipelines operate independently of one another, it's not uncommon to have several pipelines that are highly +related, and possibly derived from the same pipeline template. It's useful to be able to group these pipelines to reduce +clutter and improve navigation. For this, Concourse has the concept of Instanced Pipelines and Instance Groups, where an +Instance Group composes several related Instanced Pipelines. + +For instance, suppose you support multiple version lines of your software (v1.0.x and v2.0.x, say), and want a pipeline +for each version line in order to facilitate delivering patch releases. You create a common pipeline template that +uses [Vars](../vars.md) to specialize each pipeline: + +```yaml +resources: + - name: repo + type: git + source: + uri: git@... + # The only difference between the pipelines is the git branch to use + branch: release/v((version)) + +jobs: + - name: test + plan: [ ... ] + + - name: deploy-to-staging + plan: [ ... ] + + - name: release + plan: [ ... 
] +``` + +Before Concourse v7.0.0, you might set multiple pipelines with the version information encoded in the pipeline name, +e.g.: + +```shell +fly -t example set-pipeline \ + --pipeline release-1.0.x \ + --config template.yml \ + --var version=1.0.x + +fly -t example set-pipeline \ + --pipeline release-2.0.x \ + --config template.yml \ + --var version=2.0.x +``` + +The downside to this approach is that things can get disorganized quickly as the number of pipelines increases, which +can make the UI cluttered and hard to navigate. Additionally, not everything can easily be encoded into the pipeline +name, especially with the restrictions on [identifiers](../config-basics.md#identifier-schema) - while it's readable in +this case, it can get unwieldy as the number of variables in the template grows. + +The recommended approach is to construct an Instance Group where each version has its own Instanced Pipeline: + +```shell +fly -t example set-pipeline \ + --pipeline release \ + --config template.yml \ + --instance-var version=1.0.x + +fly -t example set-pipeline \ + --pipeline release \ + --config template.yml \ + --instance-var version=2.0.x +``` + +There are only a few differences from the previous approach in terms of creating the pipelines: + +1. We give each Instanced Pipeline the same name (in this case, `release`), and +2. We use the `--instance-var` flag instead of `--var`. Doing so makes the variable name and value a part of the + pipeline's identifier ([Managing Instanced Pipelines](#managing-instanced-pipelines) describes how to work with + Instanced Pipelines in [fly](../fly.md)) + +!!! warning + + The `-i` or `--instance-var` flag behaves like the `-y` or `--yaml-var`, meaning instance vars can hold arbitrary + YAML/JSON data. The `-v` or `--var` flag, on the other hand, only defines strings. See + [Static vars](../vars.md#static-vars) to learn the difference between the flags + +!!! 
note + + There are no [fly](../fly.md) commands for constructing an Instance Group - Concourse logically groups all + Instanced Pipelines with the same name into a single Instance Group. Instanced Pipelines have the same pipeline + semantics as other pipelines - they are just organized and identified in a different way. + +## Managing Instanced Pipelines + +Instanced Pipelines can be managed via [fly](../fly.md) as described in [Managing Pipelines](managing-pipelines.md), +with one important distinction - since instance vars are a part of the pipeline's identifier, the `--pipeline` flag must +include both the name of the Instance Group as well as the instance vars. The `--pipeline` flag takes the form: + +```shell +fly ... --pipeline group/var1:value1,var2:value2 +``` + +As a concrete example, to pause the `release` Instanced Pipeline with `version:1.0.x`, you would issue the following +command: + +```shell +fly -t example pause-pipeline --pipeline release/version:1.0.x +``` + +Let's look at a more complicated example - suppose you have an Instanced Pipeline that was set using one of the +following commands: + +```shell +fly -t example set-pipeline \ + --pipeline upgrade \ + --config template.yml \ + --instance-var version.from=1.0.0 \ + --instance-var version.to=2.0.0 \ + --instance-var branch=feature/foo +# ...or equivalently +fly -t example set-pipeline \ + --pipeline upgrade \ + --config template.yml \ + --instance-var 'version={from: 1.0.0, to: 2.0.0}' \ + --instance-var branch=feature/foo +``` + +!!! tip + + Using dot-notation here (as in the first command) is recommended since YAML is finicky about spaces. + + For instance, had we used `--instance-var 'version={from:1.0.0, to:2.0.0}'` (no spaces between keys and values), + we would end up with the following object (represented as JSON): + + ```json + {"from:1.0.0": null, "to:2.0.0": null} + ``` + + Specifying each field individually using dot-notation is harder to mess up. 
+ +Here, there are two instance vars: `version`, that contains the object `{"from": "1.0.0", "to": "2.0.0"}`, and `branch`, +that contains the string `"feature/foo"`. In order to pause this pipeline, you could issue one of the following +commands: + +```shell +fly -t example pause-pipeline \ + --pipeline 'upgrade/version.from:1.0.0,version.to:2.0.0,branch:"feature/foo"' +# ... or equivalently +fly -t example pause-pipeline \ + --pipeline 'upgrade/version:{from: 1.0.0, to: 2.0.0},branch:"feature/foo"' +``` + +For accessing sub-fields of an object, we can either use dot-notation as described +in [Providing static values for vars](setting-pipelines.md#providing-static-values-for-vars), or we can define the +object in full as valid YAML. + +!!! warning + + If the instance var name or value contains a "special character" (`.`, `,`, `/`, `{`, `}`, or whitespace), it + must be surrounded by double quotes `"`. Depending on your shell, this usually means the entire flag must be + quoted, since otherwise your shell will try to expand the quotes. + +### `fly order-instanced-pipelines` + +To configure the ordering of instanced pipelines within an individual instance group, run: + +```shell +fly -t example order-instanced-pipelines \ + --group group \ + --pipeline key1:value1 \ + --pipeline key2:value2 \ + --pipeline key3:value3 +``` + +!!! note + + This command only ensures that the given pipelines are in the given order. If there are other pipelines that you + haven't included in the command, they may appear in-between, before, or after the given set. + +!!! warning + + If you want to reorder pipelines outside of an individual instance group, you should use the + [`fly order-pipelines`](managing-pipelines.md#fly-order-pipelines) command. + +### Managing Jobs and Resources + +[Managing Jobs](../jobs.md) and [Managing Resources](../resources/managing-resources.md) walk you through some of the +commands you can use to manage jobs and resources within pipelines. 
For Instanced Pipelines, we need to encode the +instance vars in the `--job` and `--resource` flags. These flags now take the form: + +```shell +fly ... --job group/var1:value1,var2:value2/job +``` + +and + +```shell +fly ... --resource group/var1:value1,var2:value2/resource +``` + +For instance, to trigger the `test` job of `release/version:1.0.x`, we issue the following command: + +```shell +fly -t example trigger-job --job release/version:1.0.x/test +``` + +To check the `repo` resource of `release/version:1.0.x`, we issue the following command: + +```shell +fly -t example check-resource --resource release/version:1.0.x/repo +``` \ No newline at end of file diff --git a/docs/docs/pipelines/index.md b/docs/docs/pipelines/index.md new file mode 100644 index 00000000..1f3daf2e --- /dev/null +++ b/docs/docs/pipelines/index.md @@ -0,0 +1,133 @@ +--- +title: Pipelines +--- + +A pipeline is the result of configuring [Jobs](../jobs.md) and [Resources](../resources/index.md) together. When +you configure a pipeline, it takes on a life of its own, to continuously detect resource versions and automatically +queue new builds for jobs as they have new available inputs. + +The name of a pipeline has a few restrictions that are outlined here: [ +`identifier` schema](../config-basics.md#identifier-schema). + +Pipelines are configured via [`fly set-pipeline`](setting-pipelines.md#fly-set-pipeline) or the [ +`set_pipeline` step](https://concourse-ci.org/set-pipeline-step.html#set-pipeline) as +declarative [YAML files](../config-basics.md#intro-to-yaml) which conform to the following schema: + +## `pipeline` schema + +??? warning "**jobs**: `[`**[job](../jobs.md#job-schema)**`]`" + + A set of [jobs](../jobs.md) for the pipeline to continuously schedule. At least one job is required for + a pipeline to be valid. + +??? info "**resources**: `[`**resource**`]`" + + A set of resources for the pipeline to continuously check. + +??? 
info "**resource_types**: `[`**resource_type**`]`" + + A set of resource types for resources within the pipeline to use. + +??? info "**var_sources**: `[`**var_source**`]`" + + A set of Var sources for the pipeline to use. + +??? info "**groups**: `[`**group**`]`" + + A list of job groups to use for organizing jobs in the web UI. + + Groups have no functional effect on your pipeline. They are purely for making it easier to grok large pipelines + in the web UI. + + !!! note + + Once you have added groups to your pipeline, all jobs must be in a group. + + ??? example "Grouping Jobs" + + The following example will make the "tests" group the default view (since it's listed first), separating the later jobs into a "publish" group: + + ```yaml + groups: + - name: test + jobs: + - unit + - integration + - name: publish + jobs: + - deploy + - shipit + ``` + + This would display two tabs at the top of the home page: "test" and "publish". + + For a real world example of how groups can be used to simplify navigation and provide logical grouping, + see the groups used at the top of the page in the [Concourse pipeline](https://ci.concourse-ci.org/). + + ### `group_config` schema + + ??? warning "**name**: **[`identifier`](../config-basics.md#identifier-schema)**" + + A unique name for the group. This should be short and simple as it will be used as the tab name for navigation. + + ??? info "**jobs**: [`job.name`]" + + A list of jobs that should appear in this group. A job may appear in multiple groups. Neighbours of jobs in the + current group will also appear on the same page in order to give context of the location of the group in + the pipeline. 
+ + You may also use any valid [glob](https://www.man7.org/linux/man-pages/man7/glob.7.html) to represent several + jobs, e.g.: + + ```yaml + groups: + - name: develop + jobs: + - terraform-* + - test + - deploy-{dev,staging} + - name: ship + jobs: + - deploy-prod + - name: all + jobs: + - "*" + ``` + + In this example, the `develop` group will match `terraform-apply`, `terraform-destroy`, `test`, `deploy-dev`, + `deploy-staging`. The `ship` group will only match `deploy-prod`. The `all` group will match all jobs in the + pipeline. + + !!! warning "Note" + + Depending on how it's used, *, {, and } have special meaning in YAML, and may need to be quoted + (as was done in the all job above) + +??? info "**display**: **display_config**" + + !!! warning "Experimental Feature" + + Display was introduced in Concourse v6.6.0. It is considered an **experimental** feature. + + Visual configurations for personalizing your pipeline. + + ??? example "Background image" + + The following example will display an image in the background of the pipeline it is configured on. + + ```yaml + display: + background_image: https://avatars1.githubusercontent.com/u/7809479?s=400&v=4 + ``` + + ### `display_config` schema + + ??? info "**background_image**: [`string`](../config-basics.md#string-schema)" + + Allows users to specify a custom background image for the pipeline. Must be an http, https, or relative URL. + + ??? info "**background_filter**: [`string`](../config-basics.md#string-schema)" + + Default _`opacity(30%) grayscale(100%)`_. Allows users to specify custom + [CSS filters](https://developer.mozilla.org/en-US/docs/Web/CSS/filter) that are applied to the + `background_image`. 
\ No newline at end of file diff --git a/docs/docs/pipelines/managing-pipelines.md b/docs/docs/pipelines/managing-pipelines.md new file mode 100644 index 00000000..3e939a25 --- /dev/null +++ b/docs/docs/pipelines/managing-pipelines.md @@ -0,0 +1,139 @@ +--- +title: Managing Pipelines +--- + +## `fly pipelines` + +To list the currently-configured pipelines and their paused state, run: + +```shell +fly -t example pipelines +``` + +By default, archived pipelines are not included in the output of this command. To view archived pipelines, +provide `--include-archived` flag. + +## `fly rename-pipeline` + +To rename a pipeline, run: + +```shell +fly -t example rename-pipeline \ + --old-name my-pipeline \ + --new-name my-cool-pipeline +``` + +All job history is retained when renaming a pipeline. + +## `fly pause-pipeline` + +To pause a pipeline, run: + +```shell +fly -t example pause-pipeline --pipeline my-pipeline +``` + +This will prevent jobs from being scheduled and stop the periodic checking for new versions of resources. Builds that +are in-flight will still finish. + +## `fly unpause-pipeline` + +To unpause a pipeline, run: + +```shell +fly -t example unpause-pipeline --pipeline my-pipeline +``` + +This will resume job scheduling and resource checking. + +## `fly expose-pipeline` + +By default, newly configured pipelines are only visible to the pipeline's team. To make a pipeline viewable by other +teams and unauthenticated users, run: + +```shell +fly -t example expose-pipeline --pipeline my-pipeline +``` + +This feature is useful if you're using Concourse for an open source project and you'd like your community to be able to +see into your build pipeline. + +To undo this change, see [`fly hide-pipeline`](#fly-hide-pipeline). + +Exposing a pipeline reveals basically everything except for build output and resource metadata. + +To expose a resource's metadata, [resource.public](../resources/index.md) must be set to `true`. 
+ +To expose a job's build output, [job.public](../jobs.md) must be set to `true`. This will also reveal resource metadata +for any [`get` step](../steps/get.md) or [`put` steps](../steps/put.md) in the build output. + +## `fly hide-pipeline` + +If you realize that you've made a terrible mistake in [exposing your pipeline](#fly-expose-pipeline), you can run: + +```shell +fly -t example hide-pipeline --pipeline my-pipeline +``` + +If you're panicking you can run the command's short form, `hp`, instead. + +## `fly get-pipeline` + +Fly can be used to fetch and update the configuration for your pipelines. This is achieved by using +the [`fly get-pipeline`](#fly-get-pipeline) and [`fly set-pipeline`](setting-pipelines.md#fly-set-pipeline) +commands. For example, to fetch the current configuration of your `my-pipeline` Concourse pipeline and print it +on `STDOUT` run the following: + +```shell +fly -t example get-pipeline --pipeline my-pipeline +``` + +To get JSON instead of YAML you can use the `-j` or `--json` argument. This can be useful when inspecting your config +with [jq](http://stedolan.github.io/jq/). + +## `fly destroy-pipeline` + +Every now and then you just don't want a pipeline to be around anymore. Running `fly destroy-pipeline` will stop the +pipeline activity and remove all data collected by the pipeline, including build history and collected versions. + +For example, to destroy the `my-pipeline` pipeline, you would run: + +```shell +fly -t example destroy-pipeline --pipeline my-pipeline +``` + +## `fly order-pipelines` + +To configure the ordering of pipelines, run: + +```shell +fly -t example order-pipelines \ + --pipeline pipeline-1 \ + --pipeline pipeline-2 \ + --pipeline pipeline-3 +``` + +Note that this command only ensures that the given pipelines are in the given order. If there are other pipelines that +you haven't included in the command, they may appear in-between, before, or after the given set. + +!!! 
warning + + If you want to reorder instanced pipelines within an individual instance group, you should use + the [`fly order-instanced-pipelines`](grouping-pipelines.md#fly-order-instanced-pipelines) command. + +## `fly archive-pipeline` + +A pipeline can be archived via fly. This means that the pipeline will be paused and hidden from the web UI. The pipeline +config will be deleted (so any secrets or interpolated [Vars](../vars.md) will be removed) while the build logs will be +retained. + +```shell +fly -t example archive-pipeline -p pipeline-1 +``` + +To unarchive a pipeline, simply set the pipeline again with the same name +using [fly set-pipeline](setting-pipelines.md#fly-set-pipeline). If a job in the new pipeline has the same name as a job +in the archived pipeline, the old build logs for that job will be restored. + +Note that because the config is deleted, [`fly get-pipeline`](#fly-get-pipeline) will no longer work for archived +pipelines. \ No newline at end of file diff --git a/docs/docs/pipelines/setting-pipelines.md b/docs/docs/pipelines/setting-pipelines.md new file mode 100644 index 00000000..6d5a2f20 --- /dev/null +++ b/docs/docs/pipelines/setting-pipelines.md @@ -0,0 +1,148 @@ +--- +title: Setting Pipelines +--- + +Pipelines are configured entirely via the [`fly` CLI](../fly.md) or the [ +`set_pipeline` step](https://concourse-ci.org/set-pipeline-step.html#set-pipeline). There is no GUI for configuring +pipelines. + +## `fly set-pipeline` + +To submit a pipeline configuration to Concourse from a file on your local disk you can use the `-c` or `--config` flag, +like so: + +```shell +fly -t example set-pipeline \ + --pipeline my-pipeline \ + --config pipeline.yml +``` + +This will present a diff of the changes and ask you to confirm the changes. If you accept then Concourse's pipeline +configuration will switch to the pipeline definition in the YAML file specified. 
+ +The `-c` or `--config` flag can also take in the value `-` to indicate reading from `stdin`: + +```shell +cat pipeline.yml | fly -t example set-pipeline \ + --pipeline my-pipeline \ + --config - +``` + +Note that reading from `stdin` disables the confirmation prompt - the pipeline will be set automatically. + +### Providing static values for vars + +The pipeline configuration can contain [Vars](../vars.md) which may be replaced +with [static values](../vars.md#static-vars) or [loaded at runtime](../vars.md#dynamic-vars). This allows for +credentials to be extracted from a pipeline config, making it safe to check in to a public repository or pass around. + +For example, if you have a `pipeline.yml` as follows: + +```yaml +resources: + - name: private-repo + type: git + source: + uri: git@... + branch: master + private_key: ((private-repo-key)) +``` + +... you could then configure this pipeline like so: + +```shell +fly -t example set-pipeline \ + --pipeline my-pipeline \ + --config pipeline.yml \ + --var "private-repo-key=$(cat id_rsa)" +``` + +Or, if you had a `vars.yml` as follows: + +```yaml +private-repo-key: | + -----BEGIN RSA PRIVATE KEY----- + ... + -----END RSA PRIVATE KEY----- +``` + +... you could configure it like so: + +```shell +fly -t example set-pipeline \ + --pipeline my-pipeline \ + --config pipeline.yml \ + --load-vars-from vars.yml +``` + +You can use nested fields in your `pipeline.yml` as follows: + +```yaml +resources: + - name: private-repo + type: git + source: + uri: git@((repo.uri)) + branch: ((repo.branch)) + private_key: (("github.com".private-repo-key)) +``` + +... you could configure it by `--load-vars-from` with a `vars.yml` as follows: + +```yaml +repo: + uri: github.com/... + branch: master +github.com: + private-repo-key: | + -----BEGIN RSA PRIVATE KEY----- + ... + -----END RSA PRIVATE KEY----- +``` + +... 
or you could also configure it by passing the vars as flags: + +```shell +fly -t example set-pipeline \ + --pipeline my-pipeline \ + --config pipeline.yml \ + --var "repo.uri=github.com" \ + --var "repo.branch=master" \ + --var "\"github.com\".private-repo-key=$(cat id_rsa)" +``` + +When configuring a pipeline, any vars not provided statically will be left to +resolve [at runtime](../vars.md#dynamic-vars). To check that all vars are resolvable, you can pass the `--check-creds` +flag: + +```shell +fly -t example set-pipeline \ + --pipeline my-pipeline \ + --config pipeline.yml \ + --load-vars-from vars.yml \ + --check-creds +``` + +This will fill in all statically-provided vars and then attempt to resolve all remaining vars server-side. If any fail +to resolve, configuring the pipeline will fail. + +## `fly validate-pipeline` + +To validate a local pipeline configuration without submitting it to Concourse, run `validate-pipeline`: + +```shell +fly validate-pipeline --config pipeline.yml +``` + +By default, pipeline errors will cause `validate-pipeline` to fail, but warnings won't. To fail on both errors and +warnings, pass the `--strict` flag. + +## `fly format-pipeline` + +To format a pipeline config in a "canonical" form (i.e. keys are in normal order, with `name` first for example), run: + +```shell +fly format-pipeline --config pipeline.yml +``` + +This will print the formatted pipeline config to `stdout`. To update the file in-place, pass `--write/-w`. 
\ No newline at end of file diff --git a/docs/docs/resource-types/implementing.md b/docs/docs/resource-types/implementing.md new file mode 100644 index 00000000..0ae976c6 --- /dev/null +++ b/docs/docs/resource-types/implementing.md @@ -0,0 +1,323 @@ +--- +title: Implementing a Resource Type +--- + +A resource type is implemented by a container image with three scripts: + +* `/opt/resource/check` for checking for new versions of the resource +* `/opt/resource/in` for pulling a version of the resource down +* `/opt/resource/out` for idempotently pushing a version up + +Distributing resource types as containers allows them to package their own dependencies. For example, the `git` resource +comes with the `git` binary pre-installed. + +All resources must implement all three actions, though the actions can just be no-ops (which still must be correctly +implemented as detailed below). + +Resources can emit logs to the user by writing to `stderr`. ANSI escape codes (coloring, cursor movement, etc.) will be +interpreted properly by the web UI, so you should make your output pretty. + +## `check`: Check for new versions. + +A resource type's `check` script is invoked to detect new versions of the resource. It is given the configured source +and current version on `stdin`, and must print the array of new versions, in chronological order (oldest first), +to `stdout`, including the requested version if it's still valid. + +The request body will have the following fields: + +
+ +* `source` is an arbitrary JSON object which specifies the location of the resource (1), including any credentials. This + is passed verbatim from the [resource configuration](../resources/index.md). + +* `version` is a JSON object with `string` fields, used to uniquely identify an instance of the resource. For `git` this + would be the commit's SHA. + +
+ +1. For the `git` resource this would be the repo URI, the branch, and the private key, if necessary. + +For example, here's what the input for the `git` resource may look like: + +```json +{ + "source": { + "uri": "git://some-uri", + "branch": "develop", + "private_key": "..." + }, + "version": { + "ref": "61cbef" + } +} +``` + +Upon receiving this payload the `git` resource would probably do something like: + +```shell +[ -d /tmp/repo ] || git clone git://some-uri /tmp/repo +cd /tmp/repo +git pull && git log 61cbef..HEAD +``` + +Note that it conditionally clones; the container for checking versions is reused between checks, so that it can +efficiently pull rather than cloning every time. + +And the output, assuming `d74e01` is the commit immediately after `61cbef`: + +```json +[ + { + "ref": "61cbef" + }, + { + "ref": "d74e01" + }, + { + "ref": "7154fe" + } +] +``` + +The list may be empty, if there are no versions available at the source. If the given version is already the latest, an +array with that version as the sole entry should be listed. + +If your resource is unable to determine which versions are newer than the given version (e.g. if it's a git commit that +was `push -f`ed over), then the current version of your resource should be returned (i.e. the new `HEAD`). + +## `in`: Fetch a given resource. + +The `in` script is passed a destination directory as command line argument `$1`, and is given on `stdin` the configured +source and a precise version of the resource to fetch. + +The script must fetch the resource and place it in the given directory. + +If the desired resource version is unavailable (for example, if it was deleted), the script must exit with error. + +The script must emit the fetched version, and may emit metadata as a list of key-value pairs. This data is intended for +public consumption and will make it upstream, intended to be shown on the build's page. 
+
+The request will contain the following fields:
+
+* `source` is the same value as passed to [check](#check-check-for-new-versions).
+* `version` is the same type of value passed to [check](#check-check-for-new-versions), and specifies the version to
+  fetch.
+* `params` is an arbitrary JSON object passed along verbatim from get step params on a [get step](../steps/get.md).
+
+Example request, in this case for the `git` resource:
+
+```json
+{
+  "source": {
+    "uri": "git://some-uri",
+    "branch": "develop",
+    "private_key": "..."
+  },
+  "version": {
+    "ref": "61cebf"
+  }
+}
+```
+
+Upon receiving this payload the `git` resource would probably do something like:
+
+```shell
+git clone --branch develop git://some-uri $1
+cd $1
+git checkout 61cebf
+```
+
+And output:
+
+```json
+{
+  "version": {
+    "ref": "61cebf"
+  },
+  "metadata": [
+    {
+      "name": "commit",
+      "value": "61cebf"
+    },
+    {
+      "name": "author",
+      "value": "Hulk Hogan"
+    }
+  ]
+}
+```
+
+## `out`: Update a resource.
+
+The `out` script is passed a path to the directory containing the build's full set of sources as command line
+argument `$1`, and is given on `stdin` the configured params and the resource's source configuration.
+
+The script must emit the resulting version of the resource. For example, the `git` resource emits the SHA of the commit
+that it has just pushed.
+
+Additionally, the script may emit metadata as a list of key-value pairs. This data is intended for public consumption
+and will make it upstream, intended to be shown on the build's page.
+
+The request will contain the following fields:
+
+* `source` is the same value as passed to [check](#check-check-for-new-versions).
+* `params` is an arbitrary JSON object passed along verbatim from put step params on a [`put` step](../steps/put.md).
+
+Example request, in this case for the `git` resource:
+
+```json
+{
+  "params": {
+    "branch": "develop",
+    "repo": "some-repo"
+  },
+  "source": {
+    "uri": "git@...",
+    "private_key": "..."
+ } +} +``` + +Upon receiving this payload the `git` resource would probably do something like: + +```shell +cd $1/some-repo +git push origin develop +``` + +And output: + +```json +{ + "version": { + "ref": "61cebf" + }, + "metadata": [ + { + "name": "commit", + "value": "61cebf" + }, + { + "name": "author", + "value": "Mick Foley" + } + ] +} +``` + +## Metadata + +When used in a [`get` step](../steps/get.md) or a [`put` step](../steps/put.md), metadata about the running build is +made available via the following environment variables: + +`$BUILD_ID` + +: The internal identifier for the build. Right now this is numeric, but it may become a UUID in the future. Treat +it as an absolute reference to the build. + +`$BUILD_NAME` + +: The build number within the build's job. + +`$BUILD_JOB_NAME` + +: The name of the build's job. + +`$BUILD_PIPELINE_NAME` + +: The name of the pipeline that the build's job lives in. + +`$BUILD_PIPELINE_INSTANCE_VARS` + +: The instance vars of the instanced pipeline that the build's job lives in, serialized as JSON. +See [Grouping Pipelines](../pipelines/grouping-pipelines.md) for a definition of instanced pipelines. + +`$BUILD_TEAM_NAME` + +: The team that the build belongs to. + +`$BUILD_CREATED_BY` + +: The username that created the build. By default, it is not available. +See [`expose_build_created_by`](../resources/index.md#resource-schema) for how to opt in. This metadata field is not +made available to the [`get` step](../steps/get.md). + +`$ATC_EXTERNAL_URL` + +: The public URL for your ATC; useful for debugging. + +If the build is a one-off, `$BUILD_NAME`, `$BUILD_JOB_NAME`, `$BUILD_PIPELINE_NAME`, and `$BUILD_PIPELINE_INSTANCE_VARS` +will not be set. + +Additionally, `$BUILD_PIPELINE_INSTANCE_VARS` will not be set if the build's pipeline has no instance vars (i.e. is not +an instanced pipeline). + +None of these variables are available to [check](#check-check-for-new-versions). 
+ +These variables should be used solely for annotating things with metadata for traceability, i.e. for linking to the +build in an alert or annotating an automated commit to facilitate its origin discovery. + +They should _not_ be used to emulate versioning (e.g. by using the increasing build number). They are not provided +to [`task` steps](../steps/task.md) to avoid this anti-pattern. + +## Certificate Propagation + +Certificates can be automatically propagated into each resource container, if the worker is configured to do so. The +BOSH release configures this automatically, while the `concourse` binary must be given a `--certs-dir` flag pointing to +the path containing the CA certificate bundle. + +The worker's certificate directory will then be always mounted at `/etc/ssl/certs`, read-only, in each resource +container created on the worker. There's no single standard path for this, so we picked one that would work out of the +box in most cases. + +This approach to certificate configuration is similar in mindset to the propagation of `http_proxy`/`https_proxy` - +certs are kind of a baseline assumption when deploying software, so Concourse should do its best to respect it +out-of-the-box, especially as they're often used in tandem with a man-in-the-middle corporate SSL proxy. (In this way it +doesn't feel too much like the anti-pattern of hand-tuning workers.) + +## Testing resources locally using docker + +To test an already packaged resource (a docker image) outside concourse, you need to: + +1. If, for instance, you are testing the `out` behaviour of the `git` resource, create a json file with `source` + configuration of the resource and the `params` the `put` step expects. Such a file for the `git` resource would + contain the following (or similar): +
+ ```json + { + "source": { + "uri": "git://some-uri", + "branch": "develop", + "private_key": "..." + }, + "params": { + "repository": ".", + "rebase": true + } + } + ``` + Save this file to out-config.json in your working directory. +2. Then run the `/opt/resource/out` script with its inputs provided via `stdin` like so (using the `docker` cli as an + example): +
+ ```shell + docker run --rm -i -v "${PWD}:${PWD}" -w "${PWD}" \ + concourse/git-resource /opt/resource/out . < out-config.json + ``` + +!!! warning + + This example needs modification depending on the resource you are testing and your local environment. See the notes + below for details. + +1. If you use the exact configuration in this example, the git resource will print an error about the format of the + private key being invalid. Adjust the content `out-config.json` as necessary to get it working with your resource. +2. If the resource you are testing uses [Metadata](#metadata), you will need to provide the required metadata as + environment variables to your `docker run` command like so: +
+    ```shell
+    docker run --rm -i -e ATC_EXTERNAL_URL="https://concourse.example.com" \
+      -e BUILD_NAME=620 \
+      -v "${PWD}:${PWD}" \
+      -w "${PWD}" concourse/git-resource /opt/resource/out . < out-config.json
+    ```
\ No newline at end of file
diff --git a/docs/docs/resource-types/index.md b/docs/docs/resource-types/index.md
new file mode 100644
index 00000000..d745e0f2
--- /dev/null
+++ b/docs/docs/resource-types/index.md
@@ -0,0 +1,38 @@
+---
+title: Resource Types
+---
+
+Each resource in a pipeline has a `type`. The resource's type determines what versions are detected, the bits that are fetched when the resource's [get step](../steps/get.md) runs, and the side effect that occurs when the resource's [put step](../steps/put.md) runs.
+
+Concourse comes with a few "core" resource types to cover common use cases like `git` and `s3` - the rest are developed and supported by the Concourse community. An exhaustive list of all resource types is available in the [Resource Types catalog](https://resource-types.concourse-ci.org/).
+
+A pipeline's resource types are listed under [pipeline.resource_types](../pipelines/index.md#pipeline-schema) with the following schema:
+
+## `resource_type` schema
+
+??? example "Using an `rss` resource type to subscribe to RSS feeds"
+
+    Resource Types can be used to extend the functionality of your pipeline and provide deeper integrations. This
+    example uses one to trigger a job whenever a new [Dinosaur Comic](http://www.qwantz.com/) is out.
+ + ```yaml + --- + resource_types: + - name: rss + type: registry-image + source: + repository: suhlig/concourse-rss-resource + tag: latest + + resources: + - name: booklit-releases + type: rss + source: + url: http://www.qwantz.com/rssfeed.php + + jobs: + - name: announce + plan: + - get: booklit-releases + trigger: true + ``` \ No newline at end of file diff --git a/docs/docs/resource-types/managing-types.md b/docs/docs/resource-types/managing-types.md new file mode 100644 index 00000000..05ede06a --- /dev/null +++ b/docs/docs/resource-types/managing-types.md @@ -0,0 +1,13 @@ +--- +title: Managing Resource Types +--- + +## `fly check-resource-type` + +To force immediate checking for new versions of a resource type, rather than waiting for the periodic checking, run: + +```shell +fly -t example check-resource-type --resource-type my-pipeline/my-resource-type +``` + +This can be useful for forcing an update if you're iterating on your own resource type implementation. \ No newline at end of file diff --git a/docs/docs/resources/index.md b/docs/docs/resources/index.md new file mode 100644 index 00000000..6cd8c9b7 --- /dev/null +++ b/docs/docs/resources/index.md @@ -0,0 +1,19 @@ +--- +title: Resources +--- + +Resources are the heart and soul of Concourse. They represent all external inputs to and outputs of [jobs](../jobs.md) +in the pipeline. + +Each resource represents a versioned artifact with an external source of truth. Configuring the same resource in any +pipeline on any Concourse cluster will behave the exact same way. Concourse will continuously `check` each configured +resource to discover new versions. These versions then flow through the pipeline via [`get` steps](../steps/get.md) +configured on [Jobs](../jobs.md). + +More concretely, resources are containers that run on your workers. +See [Implementing a Resource Type](../resource-types/implementing.md) for more details. 
+ +A pipeline's resources are listed under [`pipeline.resources`](../pipelines/index.md#pipeline-schema) with the following +schema. + +## `resource` schema \ No newline at end of file diff --git a/docs/docs/resources/managing-resources.md b/docs/docs/resources/managing-resources.md new file mode 100644 index 00000000..9ee2131b --- /dev/null +++ b/docs/docs/resources/managing-resources.md @@ -0,0 +1,119 @@ +--- +title: Managing Resources +--- + +## `fly check-resource` + +To force immediate checking for new versions of a resource, rather than waiting for the periodic checking, run: + +```shell +fly -t example check-resource --resource my-pipeline/my-resource +``` + +To check from a particular version, including the given version, append the `--from` flag like so: + +```shell +fly -t example check-resource --resource my-pipeline/my-resource \ + --from ref:abcdef +``` + +This can be useful for collecting versions that are older than the current ones, given that a newly configured resource +will only start from the latest version. + +!!! note + + The `ref:` prefix is resource-dependent. For example, the _bosh-io-release_ resource might use `version:11.2` in + place of `ref:abcdef`. + +## `fly pin-resource` + +To pin a resource to a specific version of that resource, run: + +```shell +fly -t example pin-resource --resource my-pipeline/my-resource \ + --version ref:bceaf +``` + +!!! note + + The version needs to be provided as a key-value pair. For the git resource the `ref:` prefix is used while the + _registry_ resource might use `digest` as a prefix like `digest:sha256:94be7d7b`. + +A comment can be provided using the `--comment` flag, which is then also visible in the UI: + +```shell +fly -t example pin-resource --resource my-pipeline/my-resource \ + --version ref:abcdef \ + --comment "Some reason" +``` + +This can, for example, be used to pull in a fixed version of an external dependency which might break your build in a +new release. 
After the problem has been resolved, the pin can be removed. Another example could be running a build with +a set of older inputs when needed. + +To remove the pin on a resource use: + +```shell +fly -t example unpin-resource --resource my-pipeline/my-resource +``` + +You can also pin a resource via the UI by clicking on the pin button next to the desired version on the resource page. A +default comment is automatically generated containing your username and a timestamp. This comment can be edited. + +## `fly enable-resource-version` + +To enable a specific version of a resource, run: + +```shell +fly -t example enable-resource-version --resource my-pipeline/my-resource \ + --version ref:bceaf +``` + +!!! note + + The version needs to be provided as a key-value pair. For the git resource the `ref:` prefix is used while the + _registry_ resource might use `digest` as a prefix like `digest:sha256:94be7d7b`. + +This command is idempotent. Enabling an already enabled resource version will do nothing. + +You can also enable a resource version via the UI by clicking on the check mark button next to the desired version on +the resource page. + +## `fly disable-resource-version` + +To disable a specific version of a resource, run: + +```shell +fly -t example disable-resource-version --resource my-pipeline/my-resource \ + --version ref:bceaf +``` +!!! note + + The version needs to be provided as a key-value pair. For the git resource the `ref:` prefix is used while the + _registry_ resource might use `digest` as a prefix like `digest:sha256:94be7d7b`. + +This command is idempotent. Disabling an already disabled resource version will do nothing. + +You can also disable a resource version via the UI by clicking on the check mark button next to the desired version on +the resource page. 
+ +## `fly clear-resource-cache` + +If you've got a resource cache that you need to clear out for whatever reason, this can be done like so: + +```shell +fly -t example clear-resource-cache -r my-pipeline/my-resource +``` + +This will immediately invalidate all the caches related to that resource - they'll be garbage collected asynchronously +and subsequent builds will run with empty caches. + +You can also clear out a particular version for the given resource cache, using `-v`: + +```shell +fly -t example clear-resource-cache \ + -r my-pipeline/my-resource \ + -v ref:abcdef +``` + +If `-v` is not specified, all caches for the given resource will be cleared. \ No newline at end of file diff --git a/docs/docs/resources/resource-versions.md b/docs/docs/resources/resource-versions.md new file mode 100644 index 00000000..3ebed740 --- /dev/null +++ b/docs/docs/resources/resource-versions.md @@ -0,0 +1,73 @@ +--- +title: Resource Versions +--- + +As you may know, resources represent external state that changes over time. But how do we track those changes in a +generic way that will properly represent all the different resource types? That is where resource versions are +introduced. Concourse uses versions to represent the exact changes of a resource over time. + +The versions of a resource are directly dependent on its resource configuration +and [resource type](../resource-types/index.md). Each resource type has its own definition of what its versions should +be. For example, the versions of a git resource would be the commits of the GitHub repository and the versions of a +docker image resource are the image digests. + +If you want to figure out what determines the version of a resource type, it is typically outlined in the `check` +behavior for the resource type. For example, the git resource uses commits as +versions [git resource type check behavior](https://github.com/concourse/git-resource#check-check-for-new-commits). 
+ +## Where do they come from and what are they used for? + +The [resource checker](../internals/checker.md) is responsible for checking for new versions of a resource. These +versions are then saved to the database and can be viewed from the resource page in the web UI. + +Resource versions are used by the [build scheduler](../internals/scheduler.md) in order to schedule new builds for a +job. + +## Version Pinning + +A common job workflow is to use the latest version of a resource in order to trigger new builds. This works most of the +time until you run into a situation where you need to run the job using an old version of a resource. Concourse provides +a solution to this, which is called resource pinning. + +There are two different ways to pin a resource: through the pipeline config and through the web UI. Within the pipeline +config, you can either pin the resource to a version through the [resource configuration](index.md#resource-schema) or +through a [get step version configuration](../steps/get.md). If you would like to pin through the web UI, the +functionality can be found in the resource version history page which is accessed through clicking into the resource +within the pipeline page. + +Pinning through the pipeline config is useful for a more permanent pinned state. If a resource is pinned through the +pipeline config, it cannot be modified through the web UI and can only be changed through modifying and resetting the +pipeline config. + +Pinning through the web UI is useful for reactionary pinning of a resource. For example, it can be used in the event of +a broken upstream dependency. + +If you had a version pinned in the web UI and then pinned it through the pipeline config, the pipeline config pinned +version will take precedence. + +A pinned version is associated to a resource and can be viewed in the resource page (excluding the case that the version +was pinned on a get step). 
This pinned version will be propagated throughout the pipeline and used by the jobs that take +that pinned resource as an input. If there is a job that has a passed constraint on a pinned resource, this means that +the input is only valid if that pinned version has been used by the passed constraint job. + +Let's say we have a pipeline with two jobs and one resource that is being used as a passed constraint between the two +jobs. If that resource is pinned to a version, the first job will produce a build using the pinned version of the +resource. After that build succeeds, the second job that has a passed constraint on the first will then be able to +trigger off a build because the pinned version has been successfully used by the first job. + +### Unpinning + +When a version is unpinned, Concourse will go back to using the latest available version. This means a new build will be +queued up if the most recent build used the old pinned version and the input has `trigger: true`. + +If you would like to learn more about how version pinning and unpinning works with the build scheduler, you can read +more about it in the [scheduling behavior section](../internals/scheduler.md#scheduling-behavior). + +## Disabling a Version + +A resource version can also be disabled through the web UI on the resource version history page. These disabled versions +will not be used to schedule any further builds for any jobs that use the resource as an input. + +Disabled versions can also be re-enabled through the resource version history page. + +Disabling a version is useful for cases where you know that the version is broken or incompatible. \ No newline at end of file diff --git a/docs/docs/steps/do.md b/docs/docs/steps/do.md new file mode 100644 index 00000000..5ad1a486 --- /dev/null +++ b/docs/docs/steps/do.md @@ -0,0 +1,9 @@ +--- +title: Do Step +--- + +# `do` Step + +Performs the given steps serially, with the same semantics as if they were at the top level step listing. 
Most commonly +used with [`try` step](try.md), [across-step](modifier-and-hooks/across.md), +and [step-hooks](modifier-and-hooks/index.md). \ No newline at end of file diff --git a/docs/docs/steps/get.md b/docs/docs/steps/get.md new file mode 100644 index 00000000..23f66d4b --- /dev/null +++ b/docs/docs/steps/get.md @@ -0,0 +1,7 @@ +--- +title: Get Step +--- + +# `get` Step + +Fetches a version of a [resource](../resources/index.md). Expand each section below for more details and examples. \ No newline at end of file diff --git a/docs/docs/steps/in-parallel.md b/docs/docs/steps/in-parallel.md new file mode 100644 index 00000000..f8ea9c92 --- /dev/null +++ b/docs/docs/steps/in-parallel.md @@ -0,0 +1,13 @@ +--- +title: In Parallel Step +--- + +# `in_parallel` Step + +Performs the given steps in parallel. If any sub-steps in a `parallel` result in a failure or error, the parallel step +as a whole is considered to have failed or errored. Expand each section below for more details and examples. + +## `in_parallel_config` schema + +Instead of passing in a list of steps to `in_parallel` you can pass in the following fields. The list of steps will fall +under the `steps` field. \ No newline at end of file diff --git a/docs/docs/steps/index.md b/docs/docs/steps/index.md new file mode 100644 index 00000000..c5ef0c36 --- /dev/null +++ b/docs/docs/steps/index.md @@ -0,0 +1,30 @@ +--- +title: Steps +--- + +Each [job](../jobs.md) has a single build plan configured as [`job.plan`](../jobs.md#job-schema). A build plan is a +recipe for what to run when a build of the job is created. 
+ +A build plan is a sequence of steps: + +* the [`task` step](task.md) runs a [task](../tasks.md) +* the [`get` step](get.md) fetches a [resource](../resources/index.md) +* the [`put` step](put.md) updates a [resource](../resources/index.md) +* the [`set_pipeline` step](set-pipeline.md) configures a [pipeline](../pipelines/index.md) +* the [`load_var` step](load-var.md) loads a value into a [local var](../vars.md#local-var) +* the [`in_parallel` step](in-parallel.md) runs steps in parallel +* the [`do` step](do.md) runs steps in sequence +* the [`across` step](modifier-and-hooks/across.md) modifier runs a step multiple times; once for each combination of + variable values +* the [`try` step](try.md) attempts to run a step and succeeds even if the step fails + +When a new version is available for a `get` step with `trigger: true` configured, a new build of the job will be created +from the build plan. + +When viewing the job in the pipeline, resources that are used as `get` steps appear as inputs, and resources that are +used in `put` steps appear as outputs. Jobs are rendered downstream of any jobs they reference in `passed` constraints, +connected by the resource. + +If any step in the build plan fails, the build will fail and subsequent steps will not be executed. Additional steps may +be configured to run after failure by configuring [`on_failure`](modifier-and-hooks/on-failure.md) +or [`ensure`](modifier-and-hooks/ensure.md) (or the job equivalents, `job.on_failure` and `job.ensure`). \ No newline at end of file diff --git a/docs/docs/steps/load-var.md b/docs/docs/steps/load-var.md new file mode 100644 index 00000000..fed7b74f --- /dev/null +++ b/docs/docs/steps/load-var.md @@ -0,0 +1,9 @@ +--- +title: Load Var Step +--- + +# `load_var` Step + +Load the value for a var at runtime from a file, making it available to subsequent steps as +a [local build var](../vars.md#local-var) named after the given identifier. 
Expand each section below for more +details and examples. \ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/across.md b/docs/docs/steps/modifier-and-hooks/across.md new file mode 100644 index 00000000..31ff8ce1 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/across.md @@ -0,0 +1,200 @@ +--- +title: Across Step Modifier +--- + +# `across` Step Modifier + +Run a step multiple times with different combinations of variable values. + +!!! warning "Experimental Feature" + + The `across` step is not enabled by default. To enable `across` for your deployment, you must set the feature flag + `CONCOURSE_ENABLE_ACROSS_STEP`. The `across` step may be enabled by default in a future version of Concourse. + +The `across` step can be combined with the [`load_var` step](../load-var.md), +the [`set_pipeline` step](../set-pipeline.md), and [instanced pipelines](../../pipelines/grouping-pipelines.md) to +maintain a dynamically sized group of related pipelines. + +More fields are also available for variable interpolation with the across step. +See [Across Step & Dynamic Vars](../../vars.md#across-step-dynamic-vars) for details. + +!!! note + + Outputs from steps ran within the across step are not available to steps outside of the across step. + +??? example "Across with task step" + + ```yaml + jobs: + - name: job + plan: + - across: + - var: some-text + values: [ "hello-world", "hello-concourse" ] + task: running-((.:some-text)) + config: + platform: linux + image_resource: + type: mock + source: + mirror_self: true + run: + path: echo + args: [ "((.:some-text))" ] + ``` + +??? 
example "Across with input and output mapping" + + ```yaml + resources: + - name: ci + type: git + source: + uri: https://github.com/concourse/examples.git + + jobs: + - name: job + plan: + - get: ci + - across: + - var: pipeline + values: [ "hello-world", "time-triggered" ] + do: + - task: running-((.:pipeline)) + input_mapping: + ((.:pipeline)): ci + output_mapping: + ((.:pipeline)): newci + config: + platform: linux + image_resource: + type: mock + source: + mirror_self: true + inputs: + - name: ((.:pipeline)) + outputs: + - name: ((.:pipeline)) + run: + path: cat + args: [ "((.:pipeline))/pipelines/((.:pipeline)).yml" ] + - task: newci-((.:pipeline)) + config: + platform: linux + image_resource: + type: mock + source: + mirror_self: true + inputs: + - name: newci + run: + path: cat + args: [ "newci/pipelines/((.:pipeline)).yml" ] + ``` + +??? example "Across with `set_pipeline` step" + + ```yaml + resources: + - name: ci + type: git + source: + uri: https://github.com/concourse/examples.git + + jobs: + - name: job + plan: + - get: ci + - across: + - var: pipeline + values: [ "hello-world", "time-triggered" ] + set_pipeline: ((.:pipeline)) + file: ci/pipelines/((.:pipeline)).yml + ``` + +??? example "Across with multiple steps" + + Use the [`do` step](../do.md) to across over multiple steps. + + ```yaml + jobs: + - name: job + plan: + - across: + - var: name + values: [ "Kaladin", "Jasnah" ] + do: # takes a list of steps + - task: saying-hello + config: + platform: linux + image_resource: + type: mock + source: + mirror_self: true + run: + path: echo + args: [ "Hello ((.:name))!" ] + - task: saying-bye + config: + platform: linux + image_resource: + type: mock + source: + mirror_self: true + run: + path: echo + args: [ "Bye ((.:name))!" ] + ``` + +??? example "Multi-branch workflows (instance pipelines)" + + You can use the across step to set a pipeline for each branch in a git repository. 
+ + ```yaml + plan: + - get: release-branches + trigger: true + - get: ci + - load_var: branches + file: release-branches/branches.json + - across: + - var: branch + values: ((.:branches)) + set_pipeline: release + file: ci/pipelines/release.yml + instance_vars: { branch: ((.:branch.name)) } + ``` + + When a new branch is added, a new pipeline will be created. When a branch is deleted, the pipeline will be + automatically archived as described in the [`set_pipeline` step](../set-pipeline.md). + + For a more complete example, refer to [Multi-Branch Workflows](../../how-to/git-guides/multi-branch.md). + +## Limitations + +The `across` step does not work with the [`get` step](../get.md) or [`put` step](../put.md). The names of resources are +not interpolated within across steps. Trying to do the following will not work. + +```yaml +- across: + - var: version + values: [ "1.16", "1.17" ] + do: + - get: go-((.:version)) + # or this + - get: golang + resource: go-((.version)) +``` + +The main reason this does not work is that Concourse determines the inputs for a job before the job starts. Concourse +has no way of determining inputs for a job while it's in the middle of running. + +Current pipeline validation logic will also block you from setting the pipeline at all since Concourse validates the +relationship between all resources and jobs by looking at get and put steps. + +The above example will return an error like this when trying to set the pipeline: + +```yaml +invalid jobs: + jobs.job.plan.do[0].across.get(go): unknown resource 'go-((.:version))' +``` \ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/attempts.md b/docs/docs/steps/modifier-and-hooks/attempts.md new file mode 100644 index 00000000..5b546719 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/attempts.md @@ -0,0 +1,11 @@ +--- +title: Attempts Step Modifier +--- + +# `attempts` Step Modifier + +The total number of times a step should be tried before it should fail, e.g. 
`5` will run the step up to 5 times
\ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/on-error.md b/docs/docs/steps/modifier-and-hooks/on-error.md new file mode 100644 index 00000000..efdb58d6 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/on-error.md @@ -0,0 +1,9 @@ +--- +title: On Error Step Hook +--- + +# `on-error` Step Hook + +A hook step to execute after the parent step if the parent step terminates abnormally in any way other than those +handled by the [`on_abort`](on-abort.md) or [`on_failure`](on-failure.md). This covers scenarios as broad as +configuration mistakes, temporary network issues with the workers, or running longer than a [`timeout`](timeout.md). \ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/on-failure.md b/docs/docs/steps/modifier-and-hooks/on-failure.md new file mode 100644 index 00000000..f7818bd5 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/on-failure.md @@ -0,0 +1,9 @@ +--- +title: On Failure Step Hook +--- + +# `on-failure` Step Hook + +A hook step to execute if the parent step fails. + +This does not "recover" the failure - it will still fail even if the hook step succeeds. \ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/on-success.md b/docs/docs/steps/modifier-and-hooks/on-success.md new file mode 100644 index 00000000..bba0b350 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/on-success.md @@ -0,0 +1,7 @@ +--- +title: On Success Step Hook +--- + +# `on-success` Step Hook + +A hook step to execute if the parent step succeeds. \ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/tags.md b/docs/docs/steps/modifier-and-hooks/tags.md new file mode 100644 index 00000000..e43a3af1 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/tags.md @@ -0,0 +1,11 @@ +--- +title: Tags Step Modifier +--- + +# `tags` Step Modifier + +The tags by which to match workers. The step will be placed within the pool of workers that match all the given set of +tags. 
+ +For example, if `[a, b]` is specified, only workers advertising the a and b tags (in addition to any others) will be +used for running the step. \ No newline at end of file diff --git a/docs/docs/steps/modifier-and-hooks/timeout.md b/docs/docs/steps/modifier-and-hooks/timeout.md new file mode 100644 index 00000000..05fa8643 --- /dev/null +++ b/docs/docs/steps/modifier-and-hooks/timeout.md @@ -0,0 +1,10 @@ +--- +title: Timeout Step Modifier +--- + +# `timeout` Step Modifier + +The amount of time to limit the step's execution to, e.g. `30m` for 30 minutes. + +When exceeded, the step will be interrupted, with the same semantics as aborting the build (except the build will +be `errored`, not `aborted`, to distinguish between human intervention and timeouts being enforced). \ No newline at end of file diff --git a/docs/docs/steps/put.md b/docs/docs/steps/put.md new file mode 100644 index 00000000..b378dbb4 --- /dev/null +++ b/docs/docs/steps/put.md @@ -0,0 +1,7 @@ +--- +title: Put Step +--- + +# `put` Step + +Pushes to the given [resource](../resources/index.md). Expand each section below for more details and examples. \ No newline at end of file diff --git a/docs/docs/steps/set-pipeline.md b/docs/docs/steps/set-pipeline.md new file mode 100644 index 00000000..ef7dab4e --- /dev/null +++ b/docs/docs/steps/set-pipeline.md @@ -0,0 +1,22 @@ +--- +title: Set Pipeline Step +--- + +# `set_pipeline` Step + +Configures a [pipeline](../pipelines/index.md). Expand each section below for more details and examples. + +Pipelines configured with the `set_pipeline` step are connected to the [job](../jobs.md) that configured them and will +be automatically archived in the following scenarios: + +* When the job that previously set a pipeline runs a successful build which did not configure the pipeline (i.e. + the `set_pipeline` step was removed for that specific pipeline). 
+* When the job is removed from its pipeline configuration (see [`job.old_name`](../jobs.md) for renaming instead of + removing). +* When the job's pipeline is archived or destroyed. + +This means any job that uses `set_pipeline` should set all still-desired pipelines in each build, rather than setting +them one-by-one through many builds. + +See [`fly archive-pipeline`](../pipelines/managing-pipelines.md#fly-archive-pipeline) for what happens when a pipeline +is archived. \ No newline at end of file diff --git a/docs/docs/steps/task.md b/docs/docs/steps/task.md new file mode 100644 index 00000000..4b251742 --- /dev/null +++ b/docs/docs/steps/task.md @@ -0,0 +1,11 @@ +--- +title: Task Step +--- + +# `task` Step + +Executes a [task](../tasks.md). Expand each section below for more details and examples. + +When a task completes, the artifacts specified by [`task-config.outputs`](../tasks.md#task-config-schema) will be +registered in the build's artifact namespace. This allows subsequent `task` steps and [`put` steps](put.md) to access +the result of a task. \ No newline at end of file diff --git a/docs/docs/steps/try.md b/docs/docs/steps/try.md new file mode 100644 index 00000000..c2ea45d4 --- /dev/null +++ b/docs/docs/steps/try.md @@ -0,0 +1,5 @@ +--- +title: Try Step +--- + +# `try` Step \ No newline at end of file diff --git a/docs/docs/tasks.md b/docs/docs/tasks.md new file mode 100644 index 00000000..6a3f42fe --- /dev/null +++ b/docs/docs/tasks.md @@ -0,0 +1,431 @@ +--- +title: Tasks +--- + +The smallest configurable unit in a Concourse pipeline is a single task. A task can be thought of as a function +from `task-config.inputs` to `task-config.outputs` that can either succeed or fail. + +Going a bit further, ideally tasks are pure functions: given the same set of inputs, it should either always succeed +with the same outputs or always fail. This is entirely up to your script's level of discipline, however. 
Flaky tests or +dependencies on the internet are the most common source of impurity. + +Once you have a running Concourse deployment, you can start configuring your tasks and executing them interactively from +your terminal with the [Fly](fly.md) command line tool. + +Once you've figured out your task's configuration, you can reuse it for a [Job](jobs.md) in +your [Pipeline](pipelines/index.md). + +Conventionally a task's configuration is placed in the same repository as the code it's testing, possibly under +some `ci` directory. + +A task's configuration specifies the following: + +??? example "Testing a Ruby app" + + This configuration specifies that the task must run with the `ruby:2.1` Docker image with a `my-app` input, and when + the task is executed it will run the `scripts/test` script in the same repo. + + ```yaml + --- + platform: linux + + image_resource: + type: registry-image + source: + repository: ruby + tag: '2.1' + + inputs: + - name: my-app + + run: + path: my-app/scripts/test + ``` + +??? example "Producing outputs from a task" + + A task can configure `task-config.outputs` to produce artifacts that can then be propagated to a [`put` + step](steps/put.md) or another [`task` step](steps/task.md) in the same plan. They can also be downloaded with [fly + execute](#running-tasks-with-fly-execute) by passing `-o`. + + ```yaml + --- + platform: linux + + image_resource: # ... + + inputs: + - name: project-src + + outputs: + - name: built-project + + run: + path: project-src/ci/build + ``` + + ... assuming `project-src/ci/build` looks something like: + + ```shell + #!/bin/bash + + set -e -u -x + + export GOPATH=$PWD/project-src + + go build -o built-project/my-project \ + github.com/concourse/my-project + ``` + + ... 
this task could then be used in a [build plan](steps/index.md) like so: + + ```yaml + plan: + - get: project-src + - task: build-bin + file: project-src/ci/build.yml + - put: project-bin + params: + file: built-project/my-project + ``` + +??? example "Caching ephemeral state" + + The following task and script could be used by a Node project to cache the `node_modules` directory: + + ```yaml + --- + platform: linux + + image_resource: # ... + + inputs: + - name: project-src + + caches: + - path: project-src/node_modules + + run: + path: project-src/ci/build + ``` + + ... assuming `project-src/ci/build` looks something like: + + ```shell + #!/bin/bash + + set -e -u -x + + cd project-src + npm install + + # ... + ``` + + ... this task would cache the contents of `project-src/node_modules` between runs of this task on the same worker. + +??? example "Using an image from a private Docker registry" + + The following external task uses an image from a private registry. Assuming the CA is configured properly on the + workers, SSL should Just Work™. + + External tasks are now fully interpolated using [credential manager variables](operation/creds/index.md) and + [`task` step `vars`](steps/task.md), so you can use template variables in an external task: + + ```yaml + --- + platform: linux + + image_resource: + type: registry-image + source: + repository: my.local.registry:8080/my/image + username: ((myuser)) + password: ((mypass)) + + inputs: + - name: my-app + + run: + path: my-app/scripts/test + args: [ "Hello, world!", "((myparam))" ] + ``` + +## `task-config` schema + +## Running tasks with `fly execute` + +One of the most common use cases of `fly` is taking a local project on your computer and setting it up with a task +configuration to be run inside a container in Concourse. This is useful to build Linux projects on OS X or to avoid all +of those debugging commits when something is configured differently between your local and remote setup. 
+ +You can execute a task like this: + +```shell +fly -t example execute --config tests.yml +``` + +Your files will be uploaded and the task will be executed with them. The working directory name will be used as the +input name. If they do not match, you must specify `-i name=`. Instead, where `name` is the input name from the task +configuration. + +[Fly](fly.md) will automatically capture `SIGINT` and `SIGTERM` and abort the build when received. This allows it to be +transparently composed with other toolchains. + +By default, [`fly execute`](tasks.md#running-tasks-with-fly-execute) will not send extra files or large files in your +current directory that would normally be ignored by your version control system. You can use the `--include-ignored` +flag in order to send ignored files to Concourse along with those that are not ignored. + +If your task needs to run as `root`, then you can specify the `-p` or `--privileged` flag. + +??? example "Providing multiple inputs" + + Tasks in Concourse can take multiple inputs. Up until now we've just been submitting a single input (our current + working directory) that has the same name as the directory. + + Tasks must specify the inputs that they require as `task-config.inputs`. For `fly` to upload these inputs you can + use the `-i` or `--input` arguments with name and path pairs. For example: + + ```shell + fly -t example execute \ + --config build-stemcell.yml \ + --input code=. \ + --input stemcells=../stemcells + ``` + + This would work together with a `build-stemcell.yml` if its inputs: section was as follows: + + ```yaml + inputs: + - name: code + - name: stemcells + ``` + + If you specify an input, then the default input will no longer be added automatically, and you will need to + explicitly list it (as with the `code` input above). + + This feature can be used to mimic other resources and try out input combinations that would normally not be possible + in a pipeline. + +??? 
example "Basing inputs on a job in your pipeline with `inputs-from`" + + If the `--inputs-from` flag is given, the specified job will be looked up in the pipeline, and the one-off build + will base its inputs on those currently configured for the job. + + If any `--input` flags are given (see above), they will override the base set of inputs. + + For example: + + ```shell + fly -t example execute \ + --config task.yml \ + --inputs-from main/integration \ + --input foo=./foo + ``` + + This will trigger a one-off-build using the `task.yml` task config, basing its inputs on the latest candidates for + the `integration` job in the `main` pipeline, with the `foo` input overridden to specify local code to run. + + This can be used to more closely replicate the state in CI when weeding out flakiness, or as a shortcut for local + development so that you don't have to upload every single resource from your local machine. + +??? example "Using an image from a job in your pipeline with `--image`" + + When using `--inputs-from` as above, you can additionally specify which input to use as the task's image by passing + `--image input-name`. + + For example, the following pipeline fetches an image via a [`get` step](steps/get.md) and uses it for [`task` step + `image`](steps/task.md): + + ```yaml + resources: + - name: my-repo + type: git + source: { uri: https://example.com } + + - name: some-image + type: registry-image + source: { repository: ubuntu } + + jobs: + - name: integration + plan: + - get: my-repo + - get: some-image + - task: my-task + file: my-repo/task.yml + image: some-image + ``` + + ... so to run the same task with the same image in a one-off build, you would run: + + ```shell + fly -t example execute \ + --config task.yml \ + --inputs-from main/integration \ + --image some-image + ``` + +??? 
example "Taking artifacts from the build with `--output`" + + If a task specifies outputs, then you're able to extract these back out of the build and back to your local system. + For example: + + ```shell + fly -t example execute \ + --config build-stemcell.yml \ + --input code=. \ + --output stemcell=/tmp/stemcell + ``` + + This would work together with a `build-stemcell.yml`, if its `outputs:` section was as follows: + + ```yaml + outputs: + - name: stemcell + ``` + + This feature is useful to farm work out to your Concourse server to build things in a repeatable manner. + +??? example "Providing values for `params`" + + Any params listed in the task configuration can be specified by using environment variables. + + So, if you have a task with the following params: + + ```yaml + params: + FOO: fizzbuzz + BAR: + ``` + + ... and you run: + + ```shell + BAR=hello; fly execute + ``` + + The task would then run with `BAR` as `"hello"`, and `FOO` as `"fizzbuzz"` (its default value). + +??? example "Providing values for vars" + + Task config files can contain [Vars](vars.md) which can can be set during `fly execute` by using the `-v`, `-y` and + `-l` flags: + + ```shell + fly -t example execute --config tests.yml \ + -l vars.yml \ + -v some_string="Hello World!" \ + -y some_bool=true + ``` + + Any variables not satisfied via the above flags will be deferred to the configured [credential + manager](operation/creds/index.md). + + To satisfy these vars when running the task in a pipeline, see [`task` step `vars`](steps/task.md). + +??? 
example "Targeting a specific worker with `--tag`" + + If you want to execute a task on a worker that has a specific tag, you can do so by passing `--tag`: + + ```shell + fly -t example execute --config task.yml --tag bar + ``` + + This will execute the task specified by `task.yml` on a worker that has been tagged `bar.` + +## Task runtime environment + +A task runs in a new container every time, using the image provided by `task-config.image_resource` as its base +filesystem (i.e. `/`). + +The command specified by `task-config.run` will be executed in a working directory containing each of +the `task-config.inputs`. If any input is missing, the task will not run (and the container will not even be created). + +The working directory will also contain empty directories for each of the `task-config.outputs`. The task must place +artifacts in the output directories for them to be exported. This meshes well with build tools with configurable +destination paths. + +!!! tip + + If your build tools don't support output paths, you can configure an input and output with the same path. The + directory will be populated by the input, and any changes made to the directory will propagate downstream as an + output. + +Any [`task` step `params`](steps/task.md) configured will be set in the environment for the task's command, along with +any environment variables provided by the task's image (i.e. `ENV` rules from your `Dockerfile`). + +The user the command runs as is determined by the image. If you're using a Docker image, this will be the user set by +a `USER` rule in your `Dockerfile`, or `root`, if not specified. + +Another relevant bit of configuration is [`task` step `privileged`](steps/task.md), which determines whether the user +the task runs as will have full privileges (primarily when running as `root`). This is intentionally _not_ configurable +by the task itself, to prevent privilege escalation by submitting pull requests to repositories that contain task +configs. 
+ +Putting all this together, the following task config: + +```yaml +--- +platform: linux + +image_resource: + type: registry-image + source: + repository: golang + tag: '1.6' + +params: + SOME_PARAM: some-default-value + +inputs: + - name: some-input + - name: some-input-with-custom-path + path: some/custom/path + +outputs: + - name: some-output + +run: + path: sh + args: + - -exc + - | + whoami + env + go version + find . + touch some-output/my-built-artifact +``` + +... will produce the following output: + +```shell +$ whoami +root +$ env +USER=root +HOME=/root +GOLANG_DOWNLOAD_SHA256=5470eac05d273c74ff8bac7bef5bad0b5abbd1c4052efbdbc8db45332e836b0b +PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +GOPATH=/go +PWD=/tmp/build/e55deab7 +GOLANG_DOWNLOAD_URL=https://golang.org/dl/go1.6.linux-amd64.tar.gz +GOLANG_VERSION=1.6 +SOME_PARAM=some-default-value +$ go version +go version go1.6 linux/amd64 +$ find . +. +./some-input +./some-input/foo +./some +./some/custom +./some/custom/path +./some/custom/path/bar +./some-output +$ touch some-output/my-built-artifact +``` + +... and propagate `my-built-artifact` to any later [`task` steps](steps/task.md) or [`put` steps](steps/put.md) that +reference the `some-output` artifact, in the same way that this task had `some-input` as an input. \ No newline at end of file diff --git a/docs/docs/vars.md b/docs/docs/vars.md new file mode 100644 index 00000000..365a4cbc --- /dev/null +++ b/docs/docs/vars.md @@ -0,0 +1,371 @@ +--- +title: Vars +--- + +Concourse supports value substitution in YAML configuration by way of `((vars))`. + +Automation entails the use of all kinds of credentials. It's important to keep these values separate from the rest of +your configuration by using vars instead of hardcoding values. 
This allows your configuration to be placed under source +control and allows credentials to be tucked safely away into a secure credential manager like Vault instead of the +Concourse database. + +Aside from credentials, vars may also be used for generic parameterization of pipeline configuration templates, allowing +a single pipeline config file to be configured multiple times with different parameters - e.g. `((branch_name))`. + +## `((var))` syntax + +The full syntax for vars is `((`_`source-name:secret-path.secret-field`_`))`. + +The optional _source-name_ identifies the [var source](#var-sources-experimental) from which the value will be read. If +omitted (along with the `:` delimiter), the [cluster-wide credential manager](#the-cluster-wide-credential-manager) will +be used, or the value may be provided [statically](#static-vars). The special name `.` refers to +the [local var source](#local-var), while any other name refers to a [var source](#var-sources-experimental). + +The required _secret-path_ identifies the location of the credential. The interpretation of this value depends on the +var source type. For example, with Vault this may be a path like `path/to/cred`. For the Kubernetes secret manager this +may just be the name of a secret. For credential managers which support path-based lookup, a secret-path without a +leading / may be queried relative to a predefined set of path prefixes. This is how the Vault credential manager +currently works; `foo` will be queried under `/concourse/(team name)/(pipeline name)/foo`. + +The optional _secret-field_ specifies a field on the fetched secret to read. If omitted, the credential manager may +choose to read a 'default field' from the fetched credential if the field exists. For example, the Vault credential +manager will return the value of the `value` field if present. This is useful for simple single-value credentials where +typing `((foo.value))` would feel verbose. 
+ +The _secret-path_ and _secret-field_ may be surrounded by double quotes `"..."` if they contain special characters +like `.` and `:`. For instance, `((source:"my.secret"."field:1"))` will set the _secret-path_ to `my.secret` and the +_secret-field_ to `field:1`. + +## The "`.`" var source {: #local-var } + +The special var source name `.` refers to a "local var source." + +The precise scope for these "local vars" depends on where they're being used. Currently, the only mechanism that uses +the local var source is the [`load_var` step](steps/load-var.md), which sets a var in a local var source provided to all +steps executed in the build. + +## Interpolation + +Values for vars are substituted structurally. That is, if you have `foo: ((bar))`, whatever value `((bar))` resolves to +will become the value of the `foo` field in the object. This can be a value of any type and structure: a boolean, a +simple string, a multiline credential like a certificate, or a complicated data structure like an array of objects. + +This differs from text-based substitution in that it's impossible for a value to result in broken YAML syntax, and it +relieves the template author from having to worry about things like whitespace alignment. + +When a `((var))` appears adjacent to additional string content, e.g. `foo: hello-((bar))-goodbye`, its value will be +concatenated with the surrounding content. If the `((var))` resolves to a non-string value, an error will be raised. + +If you are using the YAML operator for merging `<<`, you will need to wrap it in double quotes like +so `"<<": ((foobars))`, to avoid a cryptic error message such as "error: yaml: map merge requires map or sequence of +maps as the value". This will allow you to merge in values from various vars. +See [YAML merge specification](https://yaml.org/type/merge.html) for more information on how this normally works. 
+ +## Static vars + +Var values may also be specified statically using the [`set_pipeline` step](steps/set-pipeline.md) +and [`task` step](steps/task.md). + +When running the [`fly` CLI](fly.md) equivalent +commands ([fly set-pipeline](pipelines/setting-pipelines.md#fly-set-pipeline) +and [fly execute](tasks.md#running-tasks-with-fly-execute)), var values may be provided using the following flags: + +* `-v` or `--var NAME=VALUE` sets the string `VALUE` as the value for the var `NAME`. +* `-y` or `--yaml-var NAME=VALUE` parses `VALUE` as YAML and sets it as the value for the var `NAME`. +* `-i` or `--instance-var NAME=VALUE` parses `VALUE` as YAML and sets it as the value for the instance var `NAME`. + See [Grouping Pipelines](pipelines/grouping-pipelines.md) to learn more about instance vars. +* `-l` or `--load-vars-from FILE` loads `FILE`, a YAML document containing mapping var names to values, and sets them + all. + +When used in combination with `-l`, the `-y` and `-v` flags take precedence. This way a vars file may be re-used, +overriding individual values by hand. + +??? example "Setting values with the `task` step" + + Let's say we have a [task config](tasks.md#task-config-schema) like so: + + ```yaml + platform: linux + + image_resource: + type: registry-image + source: + repository: golang + tag: ((tag)) + + inputs: + - name: booklit + + run: + path: booklit/ci/unit + ``` + + We could use [vars](tasks.md#task-config-schema) to run this task against different versions of Go: + + ```yaml + jobs: + - name: unit + plan: + - get: booklit + trigger: true + - task: unit-1.13 + file: booklit/ci/unit.yml + vars: { tag: 1.13 } + - task: unit-1.8 + file: booklit/ci/unit.yml + vars: { tag: 1.8 } + ``` + +??? 
example "Setting values with `-v` and `-y`" + + With a pipeline template like so: + + ```yaml + resources: + - name: booklit + type: booklit + source: + uri: https://github.com/concourse/booklit + branch: ((branch)) + private_key: (("github.com".private_key)) + + jobs: + - name: unit + plan: + - get: booklit + trigger: ((trigger)) + - task: unit + file: booklit/ci/unit.yml + ``` + + Let's say we have a private key in a file called `private_key`. + + The [fly validate-pipeline](pipelines/setting-pipelines.md#fly-validate-pipeline) command may be used to test how + interpolation is applied, by passing the `--output` flag. + + ```shell + fly validate-pipeline \ + -c pipeline.yml \ + -y trigger=true \ + -v \"github.com\".private_key="$(cat private_key)" \ + -v branch=master \ + --output + ``` + + The above incantation should print the following: + + ```yaml + jobs: + - name: unit + plan: + - get: booklit + trigger: true + - file: booklit/ci/unit.yml + task: unit + resources: + - name: booklit + type: booklit + source: + branch: master + private_key: | + -----BEGIN RSA PRIVATE KEY----- + # ... snipped ... + -----END RSA PRIVATE KEY----- + uri: https://github.com/concourse/booklit + ``` + + Note that we had to use `-y` so that the `trigger: true` ends up with a boolean value instead of the + string `"true"`. + +??? example "Loading values from files with `-l`" + + With a pipeline template like so: + + ```yaml + resources: + - name: booklit + type: booklit + source: + uri: https://github.com/concourse/booklit + branch: ((branch)) + private_key: (("github.com".private_key)) + + jobs: + - name: unit + plan: + - get: booklit + trigger: ((trigger)) + - task: unit + file: booklit/ci/unit.yml + ``` + + Let's say I've put the `private_key` var in a file called `vars.yml`, since it's quite large and hard to pass + through flags: + + ```yaml + github.com: + private_key: | + -----BEGIN RSA PRIVATE KEY----- + # ... 
snipped ... + -----END RSA PRIVATE KEY----- + ``` + + The [fly validate-pipeline](pipelines/setting-pipelines.md#fly-validate-pipeline) command may be used to test how + interpolation is applied, by passing the `--output` flag. + + ```shell + fly validate-pipeline \ + -c pipeline.yml \ + -l vars.yml \ + -y trigger=true \ + -v branch=master \ + --output + ``` + + The above incantation should print the following: + + ```yaml + jobs: + - name: unit + plan: + - get: booklit + trigger: true + - task: unit + file: booklit/ci/unit.yml + resources: + - name: booklit + type: booklit + source: + branch: master + private_key: | + -----BEGIN RSA PRIVATE KEY----- + # ... snipped ... + -----END RSA PRIVATE KEY----- + uri: https://github.com/concourse/booklit + ``` + + Note that we had to use `-y` so that the `trigger: true` ends up with a boolean value instead of the + string `"true"`. + +## Dynamic vars + +Concourse can read values from "var sources" - typically credential managers like Vault - at runtime. This keeps them +out of your configuration and prevents sensitive values from being stored in your database. Values will be read from the +var source and optionally [cached](operation/creds/caching.md) to reduce load on the var source. 
+ +The following attributes can be parameterized through a var source: + +* [resource.source](resources/index.md) under [pipeline.resources](pipelines/index.md#pipeline-schema) +* [resource_type.source](resource-types/index.md#resource_type-schema) + under [pipeline.resources](pipelines/index.md#pipeline-schema) +* [resource.webhook_token](resources/index.md#resource-schema) + under [pipeline.resources](pipelines/index.md#pipeline-schema) +* [task step params](tasks.md#task-config-schema) on a task step in a pipeline +* [tasks configuration](tasks.md) in their entirety - whether from task step file or task step config in a pipeline, or + a config executed with [`fly execute`](tasks.md#running-tasks-with-fly-execute) + +Concourse will fetch values for vars as late as possible - i.e. when a step using them is about to execute. This allows +the credentials to have limited lifetime and rapid rotation policies. + +### Across Step & Dynamic Vars + +For the [across step](steps/modifier-and-hooks/across.md), more fields can be dynamically interpolated during runtime: + +* [set_pipeline step](steps/set-pipeline.md) identifier and [file](steps/set-pipeline.md) field +* [task step](steps/task.md) identifier, [input_mapping](steps/task.md), and [output_mapping](steps/task.md), in + addition to the all other fields mentioned above for the task step + +### Var sources (experimental) + +!!! warning "Experimental Feature" + + `var_sources` was introduced in Concourse v5.8.0. It is considered an **experimental** feature until its associated + [RFC](https://github.com/concourse/rfcs/pull/39) is resolved. + +Var sources can be configured for a pipeline via [`pipeline.var_sources`](pipelines/index.md). + +Each var source has a name which is then referenced as the _source-name_ in var syntax, +e.g. `((my-vault:test-user.username))` to fetch the `test-user` var from the `my-vault` var source. +See [`((var))` syntax](#var-syntax) for a detailed explanation of this syntax. 
+ +Currently, only these types are supported: + +* [`vault`](#vault) +* [`dummy`](#dummy) +* [`ssm`](#ssm) +* [`secretmanager`](#secrets-manager) (since v7.7.0) +* [`idtoken`](#id-token) (since v7.14.0) + +In the future we want to make use of something like +the [Prototypes (RFC #37)](https://github.com/concourse/rfcs/pull/37) so that third-party credential managers can be +used just like resource types. + +#### `var_source` schema + +??? warning "name: [string](config-basics.md#string-schema)" + + The name of the `((var))` source. This should be short and simple. This name will be referenced + [`((var))` syntax](#var-syntax) throughout the config. + +!!! info "one of ..." + + === "Vault" + + ??? warning "type: `vault`" + + The `vault` type supports configuring a [Vault](https://www.vaultproject.io/) server as a `((var))` source. + + ??? warning "config: vault_config" + + === "Dummy" + + ??? warning "type: `dummy`" + + The `dummy` type supports configuring a static map of vars to values. + + This is really only useful if you have no better alternative for credential management but still have + sensitive values that you would like to [redact](operation/creds/redacting.md) them from build output. + + ??? warning "config: dummy_config" + + === "SSM" + + ??? warning "type: `ssm`" + + The `SSM` type supports configuring an [AWS Systems Manager](https://aws.amazon.com/systems-manager/) + in a single region as a `((var))` source. + + ??? warning "config: ssm_config" + + === "Secrets Manager" + + ??? warning "type: `secretsmanager`" + + The `secretsmanager` type supports configuring an [AWS Secrets + Manager](https://aws.amazon.com/secrets-manager/) in a single region as a `((var))` source. + + ??? warning "config: secretsmanager_config" + + === "ID Token" + + ??? warning "type: `idtoken`" + + The `idtoken` type issues JWTs which are signed by concourse and contain information about the currently + running pipeline/job. 
+ + These JWTs can be used to authenticate with external services. + + ??? warning "config: idtoken_config" + +### The cluster-wide credential manager + +Concourse can be configured with a single cluster-wide credential manager, which acts as a source for any vars which do +not specify a source name. + +See [Credential Management](operation/creds/index.md) for more information. + +!!! note + + In the future we would like to introduce support for multiple cluster-wide var sources, configured using the + [`var_source` schema](#varsource-schema), and begin deprecating the [cluster-wide credential + manager](#the-cluster-wide-credential-manager). \ No newline at end of file diff --git a/docs/ecosystem/index.md b/docs/ecosystem/index.md new file mode 100644 index 00000000..1c80704a --- /dev/null +++ b/docs/ecosystem/index.md @@ -0,0 +1,77 @@ +--- +title: Ecosystem +--- + +Concourse is utilized by a diverse array of businesses, government agencies, open source projects and non-profit +organizations. The applications of Concourse are as varied as its user community, including CI/CD for applications, +continuous delivery of infrastructure, release integration, test automation, and numerous other use cases! + +If you use Concourse, or your organization offers Concourse-related services, we'd appreciate hearing from you. +Please [submit a pull request](https://github.com/concourse/docs/pulls) adding your organization's name in alphabetical +order to one of the lists below, and help us showcase how many people do things continuously with Concourse. + +## Concourse-as-a-Service + +The following organizations deliver Concourse as a fully-managed cloud solution, eliminating infrastructure overhead and +providing dedicated support. + +* [CentralCI](https://centralci.com/) + +## 3rd Party Service Providers + +The following organizations provide various Concourse-related services, including training, consulting, support and +managed solutions. 
+ +* [Altoros](https://www.altoros.com/) +* [anynines](https://www.anynines.com/) +* [Cycloid](https://www.cycloid.io/) +* [Gstack](https://www.gstack.io/) +* [Pixel Air IO](https://pixelair.io/) +* [SuperOrbital](https://www.superorbital.io/) + +## Who Uses Concourse? + +These organizations have either added themselves to this list, or whose use of Concourse is publicly known. There are +many additional Concourse users who cannot publicly disclose information about their technology stack (typically +financial institutions and security firms). + +1. Altoros +2. anynines +3. Aptomi +4. Armakuni +5. boclips +6. Cerner +7. cloud.gov +8. Cloud Foundry Foundation +9. Comcast +10. Cycloid +11. Electric UI +12. EngineerBetter +13. Express Scripts +14. Fauna +15. Fidelity International +16. Gardener +17. (United Kingdom) Government Digital Services +18. Gstack +19. The Home Depot +20. IBM +21. LeapYear +22. Napoleon Sports & Casino +23. Nasdaq +24. Nokogiri +25. RabbitMQ +26. Resilient Scale +27. SAP +28. Smarsh +29. Springer Nature +30. Stark & Wayne +31. SUSE +32. SuperOrbital +33. Unit 2 Games +34. United States Air Force - Kessel Run +35. Varian +36. Verizon +37. VMware +38. Webfleet Solutions +39. Yahoo! +40. Zipcar \ No newline at end of file diff --git a/docs/examples/git-triggered.md b/docs/examples/git-triggered.md new file mode 100644 index 00000000..daa5a9b2 --- /dev/null +++ b/docs/examples/git-triggered.md @@ -0,0 +1,31 @@ +--- +title: git-triggered job example +hide: + - toc +--- + +# `git`-triggered job example + +The [`git` resource](https://github.com/concourse/git-resource) can be used to trigger a job. + +
+
+ +
+
+ +## Pipeline Configuration + + + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/git-triggered.yml" +``` + +## References + +* [Resources](https://concourse-ci.org/resources.html) +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/golang-lib.md b/docs/examples/golang-lib.md new file mode 100644 index 00000000..14d9ddcb --- /dev/null +++ b/docs/examples/golang-lib.md @@ -0,0 +1,36 @@ +--- +title: Golang library testing example +hide: + - toc +--- + +You can run the tests for a Golang library across any specified versions. + +
+
+ +
+
+ +This example shows how to have multiple versions of a language, environment, or dependency fetched and integrated in to +a [Pipeline](https://concourse-ci.org/pipelines.html). + +For these Docker images, defining them as [Resources](https://concourse-ci.org/resources.html) has two advantages for +this use case. First, this enables the pipeline to be triggered when there are new versions of those images available. +Second, referencing them in the task's [`task` step **`image` +**](https://concourse-ci.org/task-step.html#schema.task.image) param is helpful as it will ensure consistency between +the image versions fetched by the [Resource](https://concourse-ci.org/resources.html) and the image version running in +the job. + +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/golang-lib.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/hello-world.md b/docs/examples/hello-world.md new file mode 100644 index 00000000..0a724e68 --- /dev/null +++ b/docs/examples/hello-world.md @@ -0,0 +1,31 @@ +--- +title: Hello World pipeline +hide: + - toc +--- + +A single job is the simplest form of pipeline. + +
+
+ +
+
+ +While this is less of an example pipeline, this is a simple introduction to a critical primitive to form pipelines. + +Also, due to the fact that there are minimal external factors ([Resources](https://concourse-ci.org/resources.html)) for +the system to check and resolve, this is often used to test overall system health. + +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/hello-world.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/index.md b/docs/examples/index.md new file mode 100644 index 00000000..db659cd2 --- /dev/null +++ b/docs/examples/index.md @@ -0,0 +1,24 @@ +--- +title: Examples +hide: + - toc +--- + +Setting up self-contained Concourse [pipelines](https://concourse-ci.org/pipelines.html) is an excellent way to +experiment before exploring the more comprehensive documentation. + +Each example presents a pipeline YAML snippet which can be copied to a local file and deployed to your instance via [ +`fly set-pipeline`](https://concourse-ci.org/setting-pipelines.html#fly-set-pipeline). From there you can experiment and +modify parts of the configuration to better understand how everything works. All configuration options are detailed in +the [Docs](../docs/index.md). + +For a practical real-world example, +examine [Concourse's own pipeline](https://ci.concourse-ci.org/teams/main/pipelines/concourse) (and +its [configuration](https://github.com/concourse/ci/blob/master/pipelines/concourse.yml)): + +
+
+ +
+
\ No newline at end of file diff --git a/docs/examples/inputs-outputs.md b/docs/examples/inputs-outputs.md new file mode 100644 index 00000000..33e87eaa --- /dev/null +++ b/docs/examples/inputs-outputs.md @@ -0,0 +1,37 @@ +--- +title: Task inputs and outputs example +hide: + - toc +--- + +A task can pass an artifact to another task in the same job. + +
+
+ +
+
+ +Tasks within a job have the ability to pass artifacts directly between them to allow you to process artifacts in many +ways. + +While you are free to create as many jobs as you'd like for your pipeline, you have to use resources to pass artifacts +between them. + +These constructs give you the ability to design a pipeline that can process artifacts in many different ways +via [Tasks](https://concourse-ci.org/tasks.html), and then store those processed artifacts externally +via [Resources](https://concourse-ci.org/resources.html). + +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/task-passing-artifact.yml" +``` + +## References + +* [`task-config.outputs`](https://concourse-ci.org/tasks.html#schema.task-config.outputs) +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/java-app.md b/docs/examples/java-app.md new file mode 100644 index 00000000..249eea4b --- /dev/null +++ b/docs/examples/java-app.md @@ -0,0 +1,26 @@ +--- +title: Java application testing example +hide: + - toc +--- + +You can run the tests for a Java application. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/java.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/job-and-task-hooks.md b/docs/examples/job-and-task-hooks.md new file mode 100644 index 00000000..8373c845 --- /dev/null +++ b/docs/examples/job-and-task-hooks.md @@ -0,0 +1,34 @@ +--- +title: Job & task hooks example +hide: + - toc +--- + +Job hooks like [`job.on_success`](https://concourse-ci.org/jobs.html#schema.job.on_success) and Step hooks like [ +`on_success`](https://concourse-ci.org/on-success-step.html#schema.on_success) are available to perform actions based on +the success, failure, or abortion of a job. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/job-and-task-hooks.yml" +``` + +## References + +* [`job.on_success`](https://concourse-ci.org/jobs.html#schema.job.on_success) +* [`job.on_failure`](https://concourse-ci.org/jobs.html#schema.job.on_failure) +* [`job.on_abort`](https://concourse-ci.org/jobs.html#schema.job.on_abort) +* [`on_success`](https://concourse-ci.org/on-success-step.html#schema.on_success) +* [`on_failure`](https://concourse-ci.org/on-failure-hook.html#schema.on_failure) +* [`on_abort`](https://concourse-ci.org/on-abort-hook.html#schema.on_abort) +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/manually-triggered.md b/docs/examples/manually-triggered.md new file mode 100644 index 00000000..fc2d5bde --- /dev/null +++ b/docs/examples/manually-triggered.md @@ -0,0 +1,27 @@ +--- +title: Manually triggered job example +hide: + - toc +--- + +A job can be triggered by a resource. After it's complete, the next job can run automatically or manually. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/manually-triggered.yml" +``` + +## References + +* [Resources](https://concourse-ci.org/resources.html) +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/nodejs-app.md b/docs/examples/nodejs-app.md new file mode 100644 index 00000000..7c2090e6 --- /dev/null +++ b/docs/examples/nodejs-app.md @@ -0,0 +1,26 @@ +--- +title: Nodejs application testing example +hide: + - toc +--- + +You can run the tests for a Nodejs application. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/nodejs-app-testing.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/php-app.md b/docs/examples/php-app.md new file mode 100644 index 00000000..ac4a0802 --- /dev/null +++ b/docs/examples/php-app.md @@ -0,0 +1,26 @@ +--- +title: PHP application testing example +hide: + - toc +--- + +You can run the tests for a PHP application. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/php-larvel-app-testing.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/pipeline-vars.md b/docs/examples/pipeline-vars.md new file mode 100644 index 00000000..21d45fda --- /dev/null +++ b/docs/examples/pipeline-vars.md @@ -0,0 +1,34 @@ +--- +title: Pipeline ((vars)) example +hide: + - toc +--- + +# Pipeline `((vars))` example + +You can use params in a pipelines configuration file. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/pipeline-vars.yml" +``` + +## Variables + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/vars-file.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/rails-app.md b/docs/examples/rails-app.md new file mode 100644 index 00000000..15a0826e --- /dev/null +++ b/docs/examples/rails-app.md @@ -0,0 +1,26 @@ +--- +title: Rails application testing example +hide: + - toc +--- + +You can run the tests for a Rails application that requires a specific version of Ruby and relies on a Postgres database. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/rails-app-testing.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/serial-job.md b/docs/examples/serial-job.md new file mode 100644 index 00000000..c963fe1e --- /dev/null +++ b/docs/examples/serial-job.md @@ -0,0 +1,33 @@ +--- +title: Serial job example +hide: + - toc +--- + +Setting the [`job.serial`](https://concourse-ci.org/jobs.html#schema.job.serial) flag restricts a job to run one build +at a time. + +
+
+ +
+
+ +By default, jobs are run in parallel. For some use cases this might be ideal (ex. testing all incoming commits from a +repository). For other use cases this might be less ideal (ex. deploying an application). + +You can also set the [`job.max_in_flight`](https://concourse-ci.org/jobs.html#schema.job.max_in_flight) value to 1 to +disable parallel job runs. + +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/serial-job.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/examples/set-pipeline.md b/docs/examples/set-pipeline.md new file mode 100644 index 00000000..9e0cc0b1 --- /dev/null +++ b/docs/examples/set-pipeline.md @@ -0,0 +1,26 @@ +--- +title: Set Pipelines Example +hide: + - toc +--- + +You can set a static set of pipelines from another pipeline on the same team. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/set-pipelines.yml" +``` + +## References + +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [`set-pipeline` step](https://concourse-ci.org/set-pipeline-step.html) \ No newline at end of file diff --git a/docs/examples/time-triggered.md b/docs/examples/time-triggered.md new file mode 100644 index 00000000..91083a76 --- /dev/null +++ b/docs/examples/time-triggered.md @@ -0,0 +1,29 @@ +--- +title: time-triggered job example +hide: + - toc +--- + +# `time`-triggered job example + +The [`time` resource](https://github.com/concourse/time-resource) can be used to trigger a job. + +
+
+ +
+
+ +## Pipeline Configuration + +```yaml linenums="1" +--8<-- "libs/examples/pipelines/time-triggered.yml" +``` + +## References + +* [Resources](https://concourse-ci.org/resources.html) +* [Jobs](https://concourse-ci.org/jobs.html) +* [Steps](https://concourse-ci.org/steps.html) +* [Tasks](https://concourse-ci.org/tasks.html) \ No newline at end of file diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..3d0d7412 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,13 @@ +--- +title: Concourse +template: home.html +hide: + - navigation + - toc +--- + +## Core features + +Concourse is engineered to +be [expressive, versatile, and secure](https://github.com/concourse/rfcs/blob/master/DESIGN_PRINCIPLES.md), remaining +intuitive even as your project complexity grows. \ No newline at end of file diff --git a/docs/libs/examples b/docs/libs/examples new file mode 160000 index 00000000..254c2f62 --- /dev/null +++ b/docs/libs/examples @@ -0,0 +1 @@ +Subproject commit 254c2f62b8e6abc7af810e4f6e6bbe221c33d9be diff --git a/docs/project/index.md b/docs/project/index.md new file mode 100644 index 00000000..0afce375 --- /dev/null +++ b/docs/project/index.md @@ -0,0 +1,76 @@ +--- +title: Project +--- + +Concourse began as a side-project by [`@vito`](https://github.com/vito) and [`@xoebus`](https://github.com/xoebus) +in 2014. Since then, Concourse has evolved into a dedicated community with contributors from all around the world. + +Concourse is a project of the [Cloud Foundry foundation (CFF)](https://www.cloudfoundry.org/), currently led +by [Taylor Silva](https://github.com/taylorsilva/) and [Derek Richard](https://github.com/drich10). The CFF pays for the +infrastructure costs of the project. [Pixel Air IO](https://pixelair.io/), led by Taylor, is currently the main +developer behind Concourse; reviewing and merging Pull Requests, squashing bugs, and stewarding the project and +community. 
+ +## Where is everything? + +* The [Concourse repo](https://github.com/concourse/concourse) houses the main codebase, where planning happens, and + where issues are tracked. +* The [Docs repo](https://github.com/concourse/docs) contains the source for the website you're reading now! +* [GitHub Discussions](https://github.com/concourse/concourse/discussions) are used for support, announcements, idea + sharing, and general conversations. +* The [Concourse blog](https://blog.concourse-ci.org/) features tutorials and updates from the development perspective. +* The [Concourse Discord server](https://discord.gg/MeRxXKW) offers a great space to chat with other contributors. +* The Concourse working group charter is available in the Cloud + Foundry [community repo](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/concourse.md). +* The working group holds public monthly meetings. Past meetings can be viewed in + this [YouTube playlist](https://www.youtube.com/watch?v=Vamezx1SePw&list=PLhuMOCWn4P9ji8ZCY2a-FvMeT7S74-Hhm) and + meeting notes + are [here](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/WORKING-GROUPS.md#concourse). + +## Why make Concourse? + +When working on a substantial project, having a pipeline to reliably test, deploy, and publish the product is essential +for rapid iteration. + +But with every CI system we tried, we found ourselves repeatedly facing the same problems: complex configs buried in +many pages of the web UI, uncertainty about who changed what & when, managing dependencies and state on the workers, +build contamination, frustrating UX... + +Our project was expanding, and with every box we checked and for every worker we manually configured, the anxiety of +having to rebuild everything if something failed grew increasingly. We began writing software to manage our CI instead +of creating the software for the product we intended to build. 
+ +We created Concourse to be a CI system that provides peace of mind. A CI that's straightforward enough to fully +understand and easy to maintain as your project grows; both in the complexity of the product and the size of your team. +We aimed to build a CI with robust abstractions and fewer concepts to learn, making it easier to comprehend and allowing +Concourse to evolve gracefully. + +## How can I help? + +Concourse is a free and Open Source software project that depends on the contributions of sponsors and volunteers +worldwide. + +If you're interested in contributing, head over to GitHub and check out +the [contributing docs](https://github.com/concourse/concourse/blob/master/CONTRIBUTING.md)! + +If you're able to financially contribute to the continued development of Concourse, please reach out +to [Taylor](https://github.com/taylorsilva/). + +## Report a security issue + +To report a security issue, please email [security@concourse-ci.org](mailto:security@concourse-ci.org). + +Security advisories will be published +as [`concourse/concourse` GitHub Security Advisories](https://github.com/concourse/concourse/security/advisories). + +## Thanks + +It's been a long journey and we're grateful to many people for our continued success. We are deeply indebted to all who +help sustain this project, but the extraordinary efforts of the following organizations deserve special recognition. + +### Pivotal + +Concourse wouldn't be what it is today without [Pivotal](https://pivotal.io/). This extends beyond the sponsorship, +which began in early 2015 - without the experiences we gained and the practices we learned while working +on [Cloud Foundry](https://cloudfoundry.org/) and [BOSH](https://bosh.io/), we would have neither the technical +expertise nor the strong opinions that led to Concourse's creation. 
\ No newline at end of file diff --git a/docs/support/index.md b/docs/support/index.md new file mode 100644 index 00000000..13123a42 --- /dev/null +++ b/docs/support/index.md @@ -0,0 +1,13 @@ +--- +title: Support +--- + +## Community Support + +You can ask the community for support either on [Discord](https://discord.gg/MeRxXKW) +or [GitHub Discussions](https://github.com/concourse/concourse/discussions/categories/help-support). + +## Commercial Support + +The maintainers of Concourse provide commercial support for Concourse through [Pixel Air](https://pixelair.io/). If +you're interested in commercial support you can [book a call](https://pixelair.io/contact/) with us. \ No newline at end of file diff --git a/mkdocs.template.yml b/mkdocs.template.yml new file mode 100644 index 00000000..2f639e54 --- /dev/null +++ b/mkdocs.template.yml @@ -0,0 +1,32 @@ +#@ load("@ytt:template", "template") +#@ load("@ytt:data", "data") + +#@ load("redirect.helper.star", "generateRoutes") + +INHERIT: base.yml + +plugins: + - search + - glightbox + - blog: + blog_dir: blog + post_url_date_format: yyyy-MM-dd + post_url_format: "{date}-{slug}" + - redirects: + #@ docs = data.values.docs + redirect_maps: + top-docs: #@ template.replace(generateRoutes(docs.top_level)) + getting-started: #@ template.replace(generateRoutes(docs.getting_started)) + install: #@ template.replace(generateRoutes(docs.install)) + auth: #@ template.replace(generateRoutes(docs.auth)) + pipelines: #@ template.replace(generateRoutes(docs.pipelines)) + resources: #@ template.replace(generateRoutes(docs.resources)) + resource-types: #@ template.replace(generateRoutes(docs.resource_types)) + steps: #@ template.replace(generateRoutes(docs.steps)) + how-to: #@ template.replace(generateRoutes(docs.how_to)) + operation: #@ template.replace(generateRoutes(docs.operation)) + internals: #@ template.replace(generateRoutes(docs.internals)) + examples: #@ template.replace(generateRoutes(data.values.examples)) + project: 
#@ template.replace(generateRoutes(data.values.project)) + ecosystem: #@ template.replace(generateRoutes(data.values.ecosystem)) + support: #@ template.replace(generateRoutes(data.values.support)) \ No newline at end of file diff --git a/images/concourse-pattern.svg b/overrides/assets/images/concourse-pattern.svg similarity index 100% rename from images/concourse-pattern.svg rename to overrides/assets/images/concourse-pattern.svg diff --git a/overrides/assets/stylesheets/home.css b/overrides/assets/stylesheets/home.css new file mode 100644 index 00000000..73c1ef6d --- /dev/null +++ b/overrides/assets/stylesheets/home.css @@ -0,0 +1,178 @@ + +body { + background-image: url('../images/concourse-pattern.svg'); + position: fixed; + top: 0; + left: 0; + width: 100vw; + height: 100vh; + background-size: cover; + background-position: center center; + background-repeat: no-repeat; + background-attachment: fixed; +} + +.centered-section { + display: flex; + align-items: center; + justify-content: center; + height: 100%; +} + +.side-by-side { + background-color: rgba( + 255, + 255, + 255, + 0.2 + ); + border-radius: 8px; + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.9); + text-align: center; + display: grid; + grid-template-columns: 1fr 1fr; + grid-gap: 0 4%; + padding: 3em 1.5em; + margin-left: 1.5em; + margin-right: 1.5em; +} + +.side-by-side .segment { + display: flex; + flex-direction: column; + text-align: left; + padding: 10px; +} + +.splash-intro { + color: #efeeee; +} + +.splash-intro p { + font-size: 1rem; +} + +.splash-intro h1 { + font-size: xx-large; + font-weight: bold; + color: #efeeee; +} + +.splash-intro > * > .text-links { + text-decoration: underline !important; + color: #efeeee; +} + +.splash-intro > * > .text-links:hover { + color: #efeeee; +} + +.quick-start { + width: fit-content; + flex-grow: 0; +} + +.splash-downloads { + background: #2a2929; + justify-self: stretch; + position: relative; +} + +.split-header { + display: flex; + flex-direction: row; + 
justify-content: space-between; +} + +/* The container
- needed to position the dropdown content */ +.dropdown { + position: absolute; + display: inline-block; +} + +/* Dropdown Content (Hidden by Default) */ +.dropdown-content { + display: none; + position: absolute; + background-color: #f1f1f1; + min-width: 160px; + z-index: 1; +} + +/* Links inside the dropdown */ +.dropdown-content a { + color: black; + padding: 12px 16px; + text-decoration: none; + display: block; +} + +/* Change color of dropdown links on hover */ +.dropdown-content a:hover { + background-color: #ddd +} + +/* Show the dropdown menu on hover */ +.dropdown:hover .dropdown-content { + display: block; +} + +/* Change the background color of the dropdown button when the dropdown content is shown */ +.md-button.download-dropdown-btn:hover { + border-left: 1px solid white +} + +.download-btn, .download-dropdown-btn { + height: 3em; +} + +.download-dropdown-btn { + width: fit-content; +} + +.md-button.download-btn { + border-radius: 1em 0 0 1em; +} + +.md-button.download-dropdown-btn { + border-radius: 0 1em 1em 0; + border-left: 1px solid white +} + +.download-ctr { + display: grid; + grid-template-columns: 0.75fr 1.25fr; +} + +.std-flex-box { + display: flex; + justify-content: space-between; + align-items: center; + padding: 0.5em; +} + +.std-flex-box > div > .tool-select { + width: 17em; + height: 2em; +} + +#release-text { + margin-top: 0; + margin-bottom: 0; +} + +.splash-split { + display: flex; + flex-direction: column; + justify-content: space-between; + height: 100%; + margin: 1.5em 1em; + color: white; +} + +@media screen and (max-width: 1219px) { /* Example breakpoint for tablets */ + .side-by-side, .download-ctr { + grid-template-columns: 1fr; /* Two equal columns */ + } +} + diff --git a/overrides/home.html b/overrides/home.html new file mode 100644 index 00000000..ed869011 --- /dev/null +++ b/overrides/home.html @@ -0,0 +1,100 @@ +{% extends "main.html" %} +{% block tabs %} +{{ super() }} + +{% endblock %} + +{% block content %} + + + +
+
+
+

Concourse is an open-source continuous thing-doer.

+

+ Centered around the simple mechanics of + resources, + tasks, and + jobs, Concourse + delivers a versatile approach to automation that excels at + CI + / + CD. +

+ + +
+
+
+
+
+

Download the latest release:

+ + +
+
+ +
+
+
+ concourse server: +
+ +
+ + Download Concourse + +
Apple
+
+ +
Apricot
+
+
+
+ +
+ +
+
+ fly CLI: +
+ +
+ + Download fly + +
Apple
+
+ +
Apricot
+
+
+
+ +
+
+ +
+
+
+
+ +{% endblock %} + +{% block footer %} +{% endblock%} diff --git a/overrides/partials/integrations/analytics/goattracker.html b/overrides/partials/integrations/analytics/goattracker.html new file mode 100644 index 00000000..024475ec --- /dev/null +++ b/overrides/partials/integrations/analytics/goattracker.html @@ -0,0 +1,22 @@ + \ No newline at end of file diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..171495f6 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,1371 @@ +{ + "name": "docs-ci", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "dependencies": { + "@material/web": "^2.4.1" + }, + "devDependencies": { + "@rollup/plugin-node-resolve": "^16.0.3", + "@rollup/plugin-terser": "^0.4.4", + "@rollup/plugin-typescript": "^12.3.0", + "rollup": "^4.52.5", + "sass": "^1.93.3", + "typescript": "^5.9.3" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.11.tgz", + "integrity": "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@lit-labs/ssr-dom-shim": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@lit-labs/ssr-dom-shim/-/ssr-dom-shim-1.4.0.tgz", + "integrity": "sha512-ficsEARKnmmW5njugNYKipTm4SFnbik7CXtoencDZzmzo/dQ+2Q0bgkzJuoJP20Aj0F+izzJjOqsnkd6F/o1bw==", + "license": "BSD-3-Clause" + }, + "node_modules/@lit/reactive-element": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@lit/reactive-element/-/reactive-element-2.1.1.tgz", + "integrity": "sha512-N+dm5PAYdQ8e6UlywyyrgI2t++wFGXfHx+dSJ1oBrg6FAxUj40jId++EaRm80MKX5JnlH1sBsyZ5h0bcZKemCg==", + "license": "BSD-3-Clause", + "dependencies": { + "@lit-labs/ssr-dom-shim": "^1.4.0" + } + }, + "node_modules/@material/web": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@material/web/-/web-2.4.1.tgz", + "integrity": "sha512-0sk9t25acJ72Qv3r0n9r0lgDbPaAKnpm0p+QmEAAwYyZomHxuVbgrrAdtNXaRm7jFyGh+WsTr8bhtvCnpPRFjw==", + "license": "Apache-2.0", + "workspaces": [ + "catalog" + ], + "dependencies": { + "lit": "^2.8.0 || ^3.0.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@parcel/watcher": { + "version": "2.5.1", + "resolved": 
"https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz", + "integrity": "sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "dependencies": { + "detect-libc": "^1.0.3", + "is-glob": "^4.0.3", + "micromatch": "^4.0.5", + "node-addon-api": "^7.0.0" + }, + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.5.1", + "@parcel/watcher-darwin-arm64": "2.5.1", + "@parcel/watcher-darwin-x64": "2.5.1", + "@parcel/watcher-freebsd-x64": "2.5.1", + "@parcel/watcher-linux-arm-glibc": "2.5.1", + "@parcel/watcher-linux-arm-musl": "2.5.1", + "@parcel/watcher-linux-arm64-glibc": "2.5.1", + "@parcel/watcher-linux-arm64-musl": "2.5.1", + "@parcel/watcher-linux-x64-glibc": "2.5.1", + "@parcel/watcher-linux-x64-musl": "2.5.1", + "@parcel/watcher-win32-arm64": "2.5.1", + "@parcel/watcher-win32-ia32": "2.5.1", + "@parcel/watcher-win32-x64": "2.5.1" + } + }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.1.tgz", + "integrity": "sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.1.tgz", + "integrity": "sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==", + "cpu": [ + "arm64" + ], + 
"dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.1.tgz", + "integrity": "sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.1.tgz", + "integrity": "sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.1.tgz", + "integrity": "sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-musl": { + "version": "2.5.1", + "resolved": 
"https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.1.tgz", + "integrity": "sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.1.tgz", + "integrity": "sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.1.tgz", + "integrity": "sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz", + "integrity": "sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.1.tgz", + "integrity": "sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.1.tgz", + "integrity": "sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.1.tgz", + "integrity": "sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.1.tgz", + "integrity": "sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==", + "cpu": [ + "x64" + 
], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@rollup/plugin-node-resolve": { + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.3.tgz", + "integrity": "sha512-lUYM3UBGuM93CnMPG1YocWu7X802BrNF3jW2zny5gQyLQgRFJhV1Sq0Zi74+dh/6NBx1DxFC4b4GXg9wUCG5Qg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "@types/resolve": "1.20.2", + "deepmerge": "^4.2.2", + "is-module": "^1.0.0", + "resolve": "^1.22.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.78.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/plugin-terser": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.4.tgz", + "integrity": "sha512-XHeJC5Bgvs8LfukDwWZp7yeqin6ns8RTl2B9avbejt6tZqsqvVoWI7ZTQrcNsfKEDWBTnTxM8nMDkO2IFFbd0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "serialize-javascript": "^6.0.1", + "smob": "^1.0.0", + "terser": "^5.17.4" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/plugin-typescript": { + "version": "12.3.0", + "resolved": "https://registry.npmjs.org/@rollup/plugin-typescript/-/plugin-typescript-12.3.0.tgz", + "integrity": "sha512-7DP0/p7y3t67+NabT9f8oTBFE6gGkto4SA6Np2oudYmZE/m1dt8RB0SjL1msMxFpLo631qjRCcBlAbq1ml/Big==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.1.0", + "resolve": "^1.22.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.14.0||^3.0.0||^4.0.0", + "tslib": 
"*", + "typescript": ">=3.7.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + }, + "tslib": { + "optional": true + } + } + }, + "node_modules/@rollup/pluginutils": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz", + "integrity": "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz", + "integrity": "sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz", + "integrity": "sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz", + "integrity": "sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + 
"node_modules/@rollup/rollup-darwin-x64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz", + "integrity": "sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz", + "integrity": "sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz", + "integrity": "sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz", + "integrity": "sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz", + "integrity": "sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": 
true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz", + "integrity": "sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz", + "integrity": "sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz", + "integrity": "sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz", + "integrity": "sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz", + "integrity": "sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw==", + "cpu": [ + 
"riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz", + "integrity": "sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz", + "integrity": "sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz", + "integrity": "sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz", + "integrity": "sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz", + "integrity": 
"sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz", + "integrity": "sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz", + "integrity": "sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz", + "integrity": "sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz", + "integrity": "sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": 
"sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/resolve": { + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", + "integrity": "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "license": "MIT" + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/detect-libc": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", + "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "bin": { + "detect-libc": "bin/detect-libc.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", 
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/immutable": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.1.4.tgz", + "integrity": "sha512-p6u1bG3YSnINT5RQmx/yRZBpenIl30kVxkTLDyHLIMk0gict704Q9n+thfDI7lTRm9vXdDYutVzXhzcThxTnXA==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": 
"sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", + "integrity": "sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/lit": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/lit/-/lit-3.3.1.tgz", + "integrity": "sha512-Ksr/8L3PTapbdXJCk+EJVB78jDodUMaP54gD24W186zGRARvwrsPfS60wae/SSCTCNZVPd1chXqio1qHQmu4NA==", + "license": "BSD-3-Clause", + "dependencies": { + "@lit/reactive-element": "^2.1.0", + "lit-element": "^4.2.0", + "lit-html": "^3.3.0" + } + }, + "node_modules/lit-element": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/lit-element/-/lit-element-4.2.1.tgz", + "integrity": "sha512-WGAWRGzirAgyphK2urmYOV72tlvnxw7YfyLDgQ+OZnM9vQQBQnumQ7jUJe6unEzwGU3ahFOjuz1iz1jjrpCPuw==", + "license": "BSD-3-Clause", + "dependencies": { + "@lit-labs/ssr-dom-shim": "^1.4.0", + "@lit/reactive-element": "^2.1.0", + "lit-html": "^3.3.0" + } + }, + 
"node_modules/lit-html": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-3.3.1.tgz", + "integrity": "sha512-S9hbyDu/vs1qNrithiNyeyv64c9yqiW9l+DBgI18fL+MTvOtWoFR0FWiyq1TxaYef5wNlpEmzlXoBlZEO+WjoA==", + "license": "BSD-3-Clause", + "dependencies": { + "@types/trusted-types": "^2.0.2" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/node-addon-api": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", + "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": 
"sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/rollup": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.52.5.tgz", + "integrity": "sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + 
"optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.52.5", + "@rollup/rollup-android-arm64": "4.52.5", + "@rollup/rollup-darwin-arm64": "4.52.5", + "@rollup/rollup-darwin-x64": "4.52.5", + "@rollup/rollup-freebsd-arm64": "4.52.5", + "@rollup/rollup-freebsd-x64": "4.52.5", + "@rollup/rollup-linux-arm-gnueabihf": "4.52.5", + "@rollup/rollup-linux-arm-musleabihf": "4.52.5", + "@rollup/rollup-linux-arm64-gnu": "4.52.5", + "@rollup/rollup-linux-arm64-musl": "4.52.5", + "@rollup/rollup-linux-loong64-gnu": "4.52.5", + "@rollup/rollup-linux-ppc64-gnu": "4.52.5", + "@rollup/rollup-linux-riscv64-gnu": "4.52.5", + "@rollup/rollup-linux-riscv64-musl": "4.52.5", + "@rollup/rollup-linux-s390x-gnu": "4.52.5", + "@rollup/rollup-linux-x64-gnu": "4.52.5", + "@rollup/rollup-linux-x64-musl": "4.52.5", + "@rollup/rollup-openharmony-arm64": "4.52.5", + "@rollup/rollup-win32-arm64-msvc": "4.52.5", + "@rollup/rollup-win32-ia32-msvc": "4.52.5", + "@rollup/rollup-win32-x64-gnu": "4.52.5", + "@rollup/rollup-win32-x64-msvc": "4.52.5", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/sass": { + "version": "1.93.3", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.93.3.tgz", + "integrity": "sha512-elOcIZRTM76dvxNAjqYrucTSI0teAF/L2Lv0s6f6b7FOwcwIuA357bIE871580AjHJuSvLIRUosgV+lIWx6Rgg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chokidar": "^4.0.0", + "immutable": "^5.0.2", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": 
"sass.js" + }, + "engines": { + "node": ">=14.0.0" + }, + "optionalDependencies": { + "@parcel/watcher": "^2.4.1" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/smob": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/smob/-/smob-1.5.0.tgz", + "integrity": "sha512-g6T+p7QO8npa+/hNx9ohv1E5pVCmWrVCUzUXJyLdMmftX6ER0oiWY/w9knEonLpnOp6b6FenKnMfR8gqwWdwig==", + "dev": true, + "license": "MIT" + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/terser": { + "version": "5.44.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.44.0.tgz", + "integrity": "sha512-nIVck8DK+GM/0Frwd+nIhZ84pR/BX7rmXMfYwyg+Sri5oGVE99/E3KvXqpC2xHFxyqXyGHTKBSioxxplrO4I4w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.15.0", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + } + } +} diff --git a/package.json b/package.json index 
a8d011ea..d5682e5d 100644 --- a/package.json +++ b/package.json @@ -1,21 +1,22 @@ { - "name": "docs-search", - "version": "0.0.1", - "repository": "https://github.com/concourse/docs", - "author": "Alex Suraci ", - "license": "MIT", + "type": "module", "scripts": { - "start": "node index.js", - "build": "make" + "build:ytt": "ytt -f mkdocs.template.yml -f redirects > mkdocs.yml", + "build:scss": "sass --style=compressed src/mwc-theme.scss overrides/assets/stylesheets/mwc-theme.css", + "build:mwc": "rollup -c rollup.config.mwc.js", + "build:all": "npm run build:scss && npm run build:mwc && npm run build:ytt", + "build": "npm run build:all && mkdocs build", + "start": "npm run build:all && mkdocs serve --livereload" }, "dependencies": { - "elm": "0.19.1-6", - "gscan": "^5.0.0", - "less": "^3.9.0" + "@material/web": "^2.4.1" }, "devDependencies": { - "elm-format": "^0.8.8", - "elm-oracle": "^1.1.1" - }, - "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e" + "@rollup/plugin-node-resolve": "^16.0.3", + "@rollup/plugin-terser": "^0.4.4", + "@rollup/plugin-typescript": "^12.3.0", + "rollup": "^4.52.5", + "sass": "^1.93.3", + "typescript": "^5.9.3" + } } diff --git a/redirects/redirect.data.yml b/redirects/redirect.data.yml new file mode 100644 index 00000000..94cd58e2 --- /dev/null +++ b/redirects/redirect.data.yml @@ -0,0 +1,299 @@ +#@data/values +--- + +docs: + top_level: + routes: + - from: docs + to: index + html_only: true + - from: fly + to: fly + - from: config-basics + to: config-basics + - from: vars + to: vars + - from: jobs + to: jobs + - from: tasks + to: tasks + - from: builds + to: builds + - from: observation + to: observation + + getting_started: + routes: + - from: getting-started + to: index + - from: quick-start + to: quick-start + - from: tutorial-hello-world + to: hello-world + - from: tutorial-inputs-outputs + to: 
inputs-outputs + - from: tutorials-resources + to: resources + install: + routes: + - from: install + to: index + - from: postgresql-node + to: running-postgres + - from: concourse-cli + to: concourse-cli + - from: concourse-generate-key + to: generating-keys + - from: concourse-web + to: running-web + - from: concourse-worker + to: running-worker + - from: upgrading-concourse + to: upgrading-concourse + auth: + routes: + - from: auth + to: index + - from: main-team + to: main-team + + - from: configuring-auth + to: configuring/index + - from: local-auth + to: configuring/local-user + - from: github-auth + to: configuring/github + - from: gitlab-auth + to: configuring/gitlab + - from: bitbucket-cloud-auth + to: configuring/bitbucket-cloud + - from: cf-uaa-auth + to: configuring/cf-uaa + - from: ldap-auth + to: configuring/ldap + - from: microsoft-auth + to: configuring/microsoft + - from: generic-oidc-auth + to: configuring/generic-oidc + - from: generic-oauth + to: configuring/generic-oauth + - from: generic-saml-auth + to: configuring/generic-saml + + - from: managing-teams + to: managing-teams + - from: user-roles + to: user-roles + - from: exposing + to: exposing + - from: teams-caveats + to: caveats + + pipelines: + routes: + - from: pipelines + to: index + - from: setting-pipelines + to: setting-pipelines + - from: managing-pipelines + to: managing-pipelines + - from: instanced-pipelines + to: grouping-pipelines + resources: + routes: + - from: resources + to: index + - from: resource-versions + to: resource-versions + - from: managing-resources + to: managing-resources + resource_types: + routes: + - from: resource-types + to: index + - from: implementing-resource-types + to: implementing + - from: managing-resource-types + to: managing-types + steps: + routes: + - from: steps + to: index + - from: get-step + to: get + - from: put-step + to: put + - from: task-step + to: task + - from: set-pipeline-step + to: set-pipeline + - from: load-var-step + to: 
load-var + - from: in-parallel-step + to: in-parallel + - from: do-step + to: do + - from: try-step + to: try + - from: modifier-and-hook-steps + to: modifier-and-hooks/index + - from: across-step + to: modifier-and-hooks/across + - from: timeout-step + to: modifier-and-hooks/timeout + - from: attempts-step + to: modifier-and-hooks/attempts + - from: tags-step + to: modifier-and-hooks/tags + - from: on-success-step + to: modifier-and-hooks/on-success + - from: on-failure-hook + to: modifier-and-hooks/on-failure + - from: on-abort-hook + to: modifier-and-hooks/on-abort + - from: on-error-hook + to: modifier-and-hooks/on-error + - from: ensure-hook + to: modifier-and-hooks/ensure + how_to: + routes: + - from: how-to-guides + to: index + + - from: pipeline-guides + to: pipeline-guides/index + - from: managing-pipeline-configurations + to: pipeline-guides/managing-pipeline-configs + - from: common-pipeline-practices + to: pipeline-guides/common-pipeline + - from: exploring-task-input-and-output-scenarios + to: pipeline-guides/task-inputs-outputs + - from: gated-pipeline-patterns + to: pipeline-guides/gated-pipelines + - from: time-triggered-pipeline-patterns + to: pipeline-guides/time-triggered-pipelines + - from: manual-approval-step + to: pipeline-guides/manual-approval + + - from: git-guides + to: git-guides/index + - from: basic-git-operations + to: git-guides/basic + - from: multi-branch-workflows + to: git-guides/multi-branch + + - from: container-image-guides + to: container-image-guides/index + - from: building-and-pushing-an-image + to: container-image-guides/build-push + - from: building-an-image-and-using-it-in-a-task + to: container-image-guides/build-use + + operation: + routes: + - from: operation + to: index + - from: metrics + to: metrics + - from: tracing + to: tracing + - from: encryption + to: encryption + + - from: creds + to: creds/index + - from: vault-credential-manager + to: creds/vault + - from: credhub-credential-manager + to: creds/credhub + 
- from: aws-ssm-credential-manager + to: creds/aws-ssm + - from: aws-asm-credential-manager + to: creds/aws-secrets + - from: kubernetes-credential-manager + to: creds/kubernetes + - from: conjur-credential-manager + to: creds/conjur + - from: idtoken-credential-manager + to: creds/id-token + - from: creds-caching + to: creds/caching + - from: creds-redacting + to: creds/redacting + - from: creds-retry-logic + to: creds/retrying-failed + + - from: security-hardening + to: security-hardening + - from: container-placement + to: container-placement + - from: opa + to: opa-integration + - from: performance-tuning + to: tuning + - from: global-resources + to: global-resources + - from: administration + to: administration + + internals: + routes: + - from: internals + to: index + - from: checker + to: checker + - from: scheduler + to: scheduler + - from: build-tracker + to: build-tracker + - from: garbage-collector + to: garbage-collector + +examples: + routes: + - from: examples + to: index + html_only: true + - from: hello-world-example + to: hello-world + - from: serial-job-example + to: serial-job + - from: pipeline-vars-example + to: pipeline-vars + - from: set-pipelines-example + to: set-pipeline + - from: task-inputs-outputs-example + to: inputs-outputs + - from: time-trigger-example + to: time-triggered + - from: git-trigger-example + to: git-triggered + - from: manual-trigger-example + to: manually-triggered + - from: hooks-example + to: job-and-task-hooks + - from: golang-library-example + to: golang-lib + - from: rails-example + to: rails-app + - from: java-example + to: java-app + - from: nodejs-example + to: nodejs-app + - from: php-example + to: php-app + +project: + routes: + - from: project + to: index + +ecosystem: + routes: + - from: ecosystem + to: index + +support: + routes: + - from: support + to: index \ No newline at end of file diff --git a/redirects/redirect.helper.star b/redirects/redirect.helper.star new file mode 100644 index 
00000000..bb22b040 --- /dev/null +++ b/redirects/redirect.helper.star @@ -0,0 +1,17 @@ +def generateRoutes(section): + created = {} + + for route in section.routes: + toRoute = "{}/{}.md".format(section.root, route["to"]) + + if hasattr(route, "html_only") and route["html_only"]: + created.update({"{}.html.md".format(route["from"]): toRoute}) + else: + created.update({"{}.html.md".format(route["from"]): toRoute}) + created.update({"{}.md".format(route["from"]): toRoute}) + end + + end + + return created +end \ No newline at end of file diff --git a/redirects/redirect.schema.yml b/redirects/redirect.schema.yml new file mode 100644 index 00000000..52babc2b --- /dev/null +++ b/redirects/redirect.schema.yml @@ -0,0 +1,88 @@ +#@data/values-schema +--- + +docs: + top_level: + root: "docs" + routes: + - from: "" + to: "" + html_only: false + getting_started: + root: "docs/getting-started" + routes: + - from: "" + to: "" + install: + root: "docs/install" + routes: + - from: "" + to: "" + auth: + root: "docs/auth-and-teams" + routes: + - from: "" + to: "" + pipelines: + root: "docs/pipelines" + routes: + - from: "" + to: "" + resources: + root: "docs/resources" + routes: + - from: "" + to: "" + resource_types: + root: "docs/resource-types" + routes: + - from: "" + to: "" + steps: + root: "docs/steps" + routes: + - from: "" + to: "" + how_to: + root: "docs/how-to" + routes: + - from: "" + to: "" + operation: + root: "docs/operation" + routes: + - from: "" + to: "" + internals: + root: "docs/internals" + routes: + - from: "" + to: "" + +examples: + root: "examples" + routes: + - from: "" + to: "" + html_only: false + +project: + root: "project" + routes: + - from: "" + to: "" + html_only: true + +ecosystem: + root: "ecosystem" + routes: + - from: "" + to: "" + html_only: true + +support: + root: "support" + routes: + - from: "" + to: "" + html_only: true \ No newline at end of file diff --git a/rollup.config.mwc.js b/rollup.config.mwc.js new file mode 100644 index 
00000000..4b314254 --- /dev/null +++ b/rollup.config.mwc.js @@ -0,0 +1,24 @@ +import { nodeResolve } from '@rollup/plugin-node-resolve'; +import terser from '@rollup/plugin-terser'; +import typescript from "@rollup/plugin-typescript"; + +export default { + input: 'src/mwc-entry.ts', + output: { + // Target the specific overrides asset folder + file: 'overrides/assets/javascripts/mwc-bundle.min.js', + format: 'esm', // MWC requires ES Module format + sourcemap: true, + }, + plugins: [ + // Resolve imports from node_modules + nodeResolve(), + // ⬅️ NEW: Add the TypeScript plugin *before* terser + typescript({ + // Ensure Rollup uses your tsconfig.json + tsconfig: './tsconfig.json' + }), + // Minify the output + terser() + ] +}; \ No newline at end of file diff --git a/src/homepage.ts b/src/homepage.ts new file mode 100644 index 00000000..509fe86c --- /dev/null +++ b/src/homepage.ts @@ -0,0 +1,54 @@ +import {MdTabs} from "@material/web/tabs/tabs.js"; + +export function downloadMacOs(): void { + +} + +export function downloadWindows(): void { + +} + +export function downloadLinux(): void { + +} + +import {ref} from 'lit/directives/ref.js'; +import {MdPrimaryTab} from "@material/web/all"; + +export function setupPrimaryTabPanels() { + const tabsElement = document.getElementById('osPrimaryTags'); + + // Get all the panel elements in order + const panels = [ + document.getElementById('panel-macos'), + document.getElementById('panel-windows'), + document.getElementById('panel-linux') + ]; + + if (tabsElement) { + + const tabs = tabsElement as MdTabs; + + tabs.addEventListener('change', (event) => { + // 1. Get the index of the newly selected tab + const newIndex = (event.target as MdTabs).activeTabIndex; + + // 2. Loop through all panels + panels.forEach((panel: HTMLElement | null, index) => { + if (panel) { + // 3. 
Set 'hidden' to true for all panels except the one matching the index + if (index === newIndex) { + panel.removeAttribute('hidden'); // Show the active panel + } else { + panel.setAttribute('hidden', ''); // Hide all other panels + } + } + + + }); + }); + } + + +} + diff --git a/src/mwc-entry.ts b/src/mwc-entry.ts new file mode 100644 index 00000000..abfe6f01 --- /dev/null +++ b/src/mwc-entry.ts @@ -0,0 +1,34 @@ +// 1. Import and register all Material Web Components +import '@material/web/all.js'; + +import {MdMenu} from "@material/web/all"; + + +// 2. Custom MkDocs Initialization Logic +const document$ = window.document$; + +if (document$) { + // Subscribe to run logic on initial load AND every instant navigation + document$.subscribe(() => { + // downloadMacOs(); + // setupPrimaryTabPanels(); + }); + + // const cached = __md_get("__source", sessionStorage) + const gitInformation: string | null = getValueFromSessionStoragePartialMatch("__source"); + + console.log(gitInformation) +} + +function getValueFromSessionStoragePartialMatch(partialMatch: string): string | null { + for (let i = 0; i < sessionStorage.length; i++) { + const key = sessionStorage.key(i); + // Ensure the key exists and perform the partial match check + if (key && key.includes(partialMatch)) { + // Retrieve the value for the matching key + return sessionStorage.getItem(key); + } + } + // Return null if no key matches the partial string + return null; +} \ No newline at end of file diff --git a/src/mwc-theme.scss b/src/mwc-theme.scss new file mode 100644 index 00000000..877b03a6 --- /dev/null +++ b/src/mwc-theme.scss @@ -0,0 +1,22 @@ +// Colors + +$primary: #2094f3; + +:root > * { + --md-primary-fg-color: #{$primary}; + --md-primary-fg-color--light: #42a5f5; + --md-primary-fg-color--dark: #1975d2; + --md-primary-bg-color: #fff; + --md-primary-bg-color--light: #ffffffb3; +} + +:root { + --md-sys-color-primary: #{$primary}; +} + +@import 
url('https://fonts.googleapis.com/css2?family=Roboto+Slab:wght@400;500;700&display=swap'); + +:root { + --md-ref-typeface-brand: 'Roboto Slab'; + --md-ref-typeface-plain: 'Roboto Slab'; +} \ No newline at end of file diff --git a/src/types/mkdocs-global.d.ts b/src/types/mkdocs-global.d.ts new file mode 100644 index 00000000..916029c7 --- /dev/null +++ b/src/types/mkdocs-global.d.ts @@ -0,0 +1,14 @@ +/** + * Declaration for the document$ global exposed by MkDocs Material. + * It's an Observable (or similar pattern) that emits on every page change/load. + * We define it on the global Window object. + */ +interface Window { + document$?: { + subscribe: (callback: () => void) => void; + }; +} + +// If you access document$ directly as a global (without window.), +// you can also declare it like this: +declare const document$: Window['document$']; \ No newline at end of file diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 00000000..02587bd8 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,24 @@ +// tsconfig.json +{ + "compilerOptions": { + // ------------------ Compilation Targets ------------------ + "target": "ES2020", /* Compile to modern JS. Rollup handles the final bundle format. */ + "module": "ESNext", /* Use modern ES module format. */ + "moduleResolution": "node", /* Standard module resolution. */ + "allowSyntheticDefaultImports": true, /* Allows default imports from modules with no default export. */ + "esModuleInterop": true, /* Compatibility for CommonJS imports (like Lit/MWC modules). */ + "lib": ["ES2020", "DOM"], /* Include modern JS features and browser DOM APIs. */ + + // ------------------ Output and Source Maps ------------------ + "sourceMap": true, /* Generate source map files. */ + "rootDir": "src", /* Specify the root directory of source files. */ + + // ------------------ Type Checking and Strictness ------------------ + "strict": true, /* Enable all strict type-checking options. Highly recommended. 
*/ + "skipLibCheck": true /* Skip type checking of declaration files (faster compilation). */ + }, + "include": [ + "src/**/*.ts", /* Only include TypeScript files in the src folder. */ + "src/**/*.d.ts" // ⬅️ Ensure .d.ts files are included + ] +} \ No newline at end of file