diff --git a/requirements.txt b/requirements.txt
index 637daa3..e7dc010 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,3 +21,4 @@ semantic-version~=2.9.0
kubernetes>=11.0.0,<22.0.0
retrying~=1.3.3
oic==1.3.0
+gefyra~=0.8.1
\ No newline at end of file
diff --git a/setup.py b/setup.py
index fda4a9b..33690f0 100644
--- a/setup.py
+++ b/setup.py
@@ -39,6 +39,7 @@
"python-slugify>=5.0.2,<6.2.0",
"click-didyoumean~=0.3.0",
"requests-toolbelt~=0.9.1",
+ "gefyra~=0.8.1",
],
python_requires="~=3.7",
packages=find_packages(),
diff --git a/tests/cluster/storage/test_cluster_storage.py b/tests/cluster/storage/test_cluster_storage.py
new file mode 100644
index 0000000..d64e69e
--- /dev/null
+++ b/tests/cluster/storage/test_cluster_storage.py
@@ -0,0 +1,41 @@
+import os
+import tempfile
+import unittest
+from pathlib import Path
+from uuid import UUID
+
+from unikube.cluster.storage.cluster_storage import ClusterStorage
+
+
+class ClusterStorageTest(unittest.TestCase):
+ def setUp(self):
+ self.temporary_path_object = tempfile.TemporaryDirectory()
+ self.temporary_path = self.temporary_path_object.name
+
+ def tearDown(self):
+ self.temporary_path_object.cleanup()
+
+ def test_missing_id(self):
+ with self.assertRaises(Exception):
+ _ = ClusterStorage(file_path=self.temporary_path)
+
+ def test_save(self):
+ cluster_storage = ClusterStorage(file_path=self.temporary_path, id=UUID("00000000-0000-0000-0000-000000000000"))
+ cluster_storage.save()
+
+ file = Path(os.path.join(cluster_storage.file_path, cluster_storage.file_name))
+
+ self.assertTrue(file.exists())
+ self.assertEqual(UUID("00000000-0000-0000-0000-000000000000"), cluster_storage.id)
+
+ def test_load(self):
+ cluster_storage_01 = ClusterStorage(
+ file_path=self.temporary_path, id=UUID("00000000-0000-0000-0000-000000000000")
+ )
+ cluster_storage_01.name = "test"
+ cluster_storage_01.save()
+
+ cluster_storage_02 = ClusterStorage(
+ file_path=self.temporary_path, id=UUID("00000000-0000-0000-0000-000000000000")
+ )
+ self.assertEqual("test", cluster_storage_02.name)
diff --git a/tests/console/test_input.py b/tests/console/test_input.py
index 4939d96..4057a42 100644
--- a/tests/console/test_input.py
+++ b/tests/console/test_input.py
@@ -70,7 +70,7 @@ def test_filter_none(self):
filter_ = None
choices_resolved = resolve_duplicates(choices=choices, identifiers=identifiers)
- choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, filter=filter_)
+ choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, _filter=filter_)
assert choices_filtered == [CHOICE_01, CHOICE_02]
def test_filter_empty_list(self):
@@ -79,7 +79,7 @@ def test_filter_empty_list(self):
filter_ = []
choices_resolved = resolve_duplicates(choices=choices, identifiers=identifiers)
- choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, filter=filter_)
+ choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, _filter=filter_)
assert choices_filtered == []
def test_filter_existing_01(self):
@@ -88,7 +88,7 @@ def test_filter_existing_01(self):
filter_ = ["1"]
choices_resolved = resolve_duplicates(choices=choices, identifiers=identifiers)
- choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, filter=filter_)
+ choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, _filter=filter_)
assert choices_filtered == [CHOICE_01]
def test_filter_existing_02(self):
@@ -97,7 +97,7 @@ def test_filter_existing_02(self):
filter_ = ["2"]
choices_resolved = resolve_duplicates(choices=choices, identifiers=identifiers)
- choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, filter=filter_)
+ choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, _filter=filter_)
assert choices_filtered == ["choice"]
def test_filter_non_existing(self):
@@ -106,7 +106,7 @@ def test_filter_non_existing(self):
filter_ = ["3"]
choices_resolved = resolve_duplicates(choices=choices, identifiers=identifiers)
- choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, filter=filter_)
+ choices_filtered = filter_by_identifiers(choices=choices_resolved, identifiers=identifiers, _filter=filter_)
assert choices_filtered == []
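The change here renames the keyword from `filter=` to `_filter=`, matching an updated signature that no longer shadows Python's built-in `filter`. Judging only from how the tests call it, the helper plausibly behaves like this sketch (the real implementation in unikube's console input module is not shown in this diff):

```python
# Inferred from the tests above; illustrative, not the actual implementation.
from typing import List, Optional


def filter_by_identifiers(choices: List[str], identifiers: List[str], _filter: Optional[List[str]]) -> List[str]:
    if _filter is None:
        # None disables filtering entirely
        return choices
    # keep only the choices whose identifier appears in the filter list
    return [choice for choice, identifier in zip(choices, identifiers) if identifier in _filter]
```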
diff --git a/tests/gefyra/Dockerfile b/tests/gefyra/Dockerfile
new file mode 100644
index 0000000..5410dea
--- /dev/null
+++ b/tests/gefyra/Dockerfile
@@ -0,0 +1,5 @@
+FROM ubuntu
+RUN apt update && apt install -y iproute2 iputils-ping python3 traceroute wget curl
+COPY local.py local.py
+# run a simple HTTP server on port 8000
+CMD python3 local.py
\ No newline at end of file
diff --git a/tests/gefyra/local.py b/tests/gefyra/local.py
new file mode 100644
index 0000000..5116f38
--- /dev/null
+++ b/tests/gefyra/local.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+
+import http.server
+import signal
+import socket
+import socketserver
+import sys
+from datetime import datetime
+
+if sys.argv[1:]:
+ port = int(sys.argv[1])
+else:
+ port = 8000
+
+
+class MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
+ def do_GET(self):
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ hostname = socket.gethostname()
+ now = datetime.utcnow()
+ self.wfile.write(
+ bytes(f"
Hello from Gefyra. It is {now} on {hostname}.
".encode("utf-8"))
+ )
+
+
+my_handler = MyHttpRequestHandler
+server = socketserver.ThreadingTCPServer(("", port), my_handler)
+
+
+def signal_handler(signal, frame):
+ try:
+ if server:
+ server.server_close()
+ finally:
+ sys.exit(0)
+
+
+signal.signal(signal.SIGINT, signal_handler)
+try:
+ while True:
+ sys.stdout.flush()
+ server.serve_forever()
+except KeyboardInterrupt:
+ pass
+
+server.server_close()
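`local.py` uses only the standard library, so it can be smoke-tested without the Docker image, e.g. (assuming the server is already running on the default port 8000):

```python
# Minimal client check against a running local.py instance.
from urllib.request import urlopen

with urlopen("http://localhost:8000/", timeout=5) as response:
    assert response.status == 200
    body = response.read().decode("utf-8")

print(body)  # e.g. <html><body>Hello from Gefyra. It is ... on ...</body></html>
```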
diff --git a/tests/gefyra/unikube.yml b/tests/gefyra/unikube.yml
new file mode 100644
index 0000000..82f7cfb
--- /dev/null
+++ b/tests/gefyra/unikube.yml
@@ -0,0 +1,15 @@
+# unikube switch configuration file
+apps:
+ local:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ deployment: buzzword-counter-web
+ container: buzzword-counter
+ command: python3 local.py
+ ports:
+ - 9000:8000
+ volumes:
+ - ./src:/app
+ env:
+ - DJANGO_DEBUG: "True"
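The CLI reads this file through the selector used by `unikube/cli/app.py` further down in this diff; a short sketch of that lookup (the fixture path is only an example):

```python
# Sketch: resolving an app from the unikube.yml above via the selector API
# that app.py in this diff calls (get / get_app / get_deployment).
from unikube.unikubefile.selector import unikube_file_selector

unikube_file = unikube_file_selector.get(path_unikube_file="tests/gefyra/unikube.yml")
app = unikube_file.get_app(name="local")
print(app.get_deployment())  # -> "buzzword-counter-web"
```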
diff --git a/tests/test_cache.py b/tests/test_cache.py
new file mode 100644
index 0000000..9f8e4df
--- /dev/null
+++ b/tests/test_cache.py
@@ -0,0 +1,38 @@
+import os
+import tempfile
+import unittest
+from pathlib import Path
+from uuid import UUID
+
+from unikube.cache import Cache
+
+
+class CacheTest(unittest.TestCase):
+ def setUp(self):
+ self.temporary_path_object = tempfile.TemporaryDirectory()
+ self.temporary_path = self.temporary_path_object.name
+
+ def tearDown(self):
+ self.temporary_path_object.cleanup()
+
+ def test_cache_empty(self):
+ cache = Cache(file_path=self.temporary_path)
+ self.assertEqual(UUID("00000000-0000-0000-0000-000000000000"), cache.userId)
+
+ def test_cache_save(self):
+ cache = Cache(file_path=self.temporary_path)
+ cache.file_path = self.temporary_path
+ cache.save()
+
+ file = Path(os.path.join(cache.file_path, cache.file_name))
+
+ self.assertTrue(file.exists())
+ self.assertEqual(UUID("00000000-0000-0000-0000-000000000000"), cache.userId)
+
+ def test_cache_load(self):
+ cache_01 = Cache(file_path=self.temporary_path)
+ cache_01.userId = UUID("00000000-0000-0000-0000-000000000001")
+ cache_01.save()
+
+ cache_02 = Cache(file_path=self.temporary_path)
+ self.assertEqual(UUID("00000000-0000-0000-0000-000000000001"), cache_02.userId)
diff --git a/tests/test_cli_app.py b/tests/test_cli_app.py
index 4d71283..e01c9c6 100644
--- a/tests/test_cli_app.py
+++ b/tests/test_cli_app.py
@@ -1,26 +1,24 @@
+from unittest.mock import patch
+
from tests.login_testcase import LoginTestCase
+from unikube.authentication.authentication import TokenAuthentication
from unikube.cli import app
-from unikube.commands import ClickContext
-
-
-def check():
- """Function used to mock check function"""
- pass
+from unikube.context import ClickContext
class AppTestCase(LoginTestCase):
- def test_list(self):
+ @patch.object(TokenAuthentication, "check")
+ def test_list(self, *args, **kwargs):
obj = ClickContext()
- obj.auth.check = check
result = self.runner.invoke(
app.list,
obj=obj,
)
assert result.exit_code == 1
- def test_shell_invalid_arguments(self):
+ @patch.object(TokenAuthentication, "check")
+ def test_shell_invalid_arguments(self, *args, **kwargs):
obj = ClickContext()
- obj.auth.check = check
result = self.runner.invoke(
app.shell,
[
diff --git a/tests/test_cli_auth.py b/tests/test_cli_auth.py
index a72c05a..76c46b0 100644
--- a/tests/test_cli_auth.py
+++ b/tests/test_cli_auth.py
@@ -1,94 +1,89 @@
import os
+import unittest
from unittest.mock import patch
-import pytest
from click.testing import CliRunner
+from unikube.authentication.authentication import TokenAuthentication
from unikube.cli import auth
-from unikube.commands import ClickContext
-
-
-def test_login_failed():
- runner = CliRunner()
- result = runner.invoke(
- auth.login,
- ["--email", "test@test.de", "--password", "unsecure"],
- obj=ClickContext(),
- )
- assert "[ERROR] Login failed. Please check email and password.\n" in result.output
- assert result.exit_code == 0
-
-
-def test_login_wrong_token():
- def login(email, password):
- return {"success": True, "response": {"access_token": "WRONG_TOKEN"}}
-
- runner = CliRunner()
- obj = ClickContext()
- obj.auth.login = login
- result = runner.invoke(
- auth.login,
- ["--email", "test@test.de", "--password", "secure"],
- obj=obj,
- )
- assert "[ERROR] Login failed. Your token does not match." in result.output
- assert result.exit_code == 0
-
-
-def test_logout():
- runner = CliRunner()
- result = runner.invoke(
- auth.logout,
- obj=ClickContext(),
- )
- assert result.output == "[INFO] Logout completed.\n"
- assert result.exit_code == 0
-
-
-def test_status_not_logged():
- runner = CliRunner()
- result = runner.invoke(
- auth.status,
- obj=ClickContext(),
- )
- assert result.output == "[INFO] Authentication could not be verified.\n"
- assert result.exit_code == 0
-
-
-def test_status_success():
- def verify():
- return {"success": True}
-
- runner = CliRunner()
- obj = ClickContext()
- obj.auth.verify = verify
- result = runner.invoke(
- auth.status,
- obj=obj,
- )
- assert result.output == "[SUCCESS] Authentication verified.\n"
- assert result.exit_code == 0
-
-
-def test_login_logout_success():
- runner = CliRunner()
-
- email = os.getenv("TESTRUNNER_EMAIL")
- secret = os.getenv("TESTRUNNER_SECRET")
- assert email is not None
- assert secret is not None
-
- result = runner.invoke(
- auth.login,
- ["--email", email, "--password", secret],
- obj=ClickContext(),
- )
- assert "[SUCCESS] Login successful. Hello Testrunner!\n" in result.output
- assert result.exit_code == 0
-
- result = runner.invoke(
- auth.logout,
- obj=ClickContext(),
- )
- assert result.output == "[INFO] Logout completed.\n"
- assert result.exit_code == 0
+from unikube.context import ClickContext
+
+
+class AuthTest(unittest.TestCase):
+ def test_login_failed(self):
+ runner = CliRunner()
+ result = runner.invoke(
+ auth.login,
+ ["--email", "test@test.de", "--password", "unsecure"],
+ obj=ClickContext(),
+ )
+ assert "[ERROR] Login failed. Please check email and password.\n" in result.output
+ assert result.exit_code == 1
+
+ @patch.object(TokenAuthentication, "login")
+ def test_login_wrong_token(self, mock_login):
+ mock_login.return_value = {"success": True, "response": {"access_token": "WRONG_TOKEN"}}
+
+ runner = CliRunner()
+ obj = ClickContext()
+ result = runner.invoke(
+ auth.login,
+ ["--email", "test@test.de", "--password", "secure"],
+ obj=obj,
+ )
+ assert "[ERROR] Login failed." in result.output
+ assert result.exit_code == 1
+
+ def test_logout(self):
+ runner = CliRunner()
+ result = runner.invoke(
+ auth.logout,
+ obj=ClickContext(),
+ )
+ assert result.output == "[INFO] Logout completed.\n"
+ assert result.exit_code == 0
+
+ def test_status_not_logged(self):
+ runner = CliRunner()
+ result = runner.invoke(
+ auth.status,
+ obj=ClickContext(),
+ )
+ assert result.output == "[INFO] Authentication could not be verified.\n"
+ assert result.exit_code == 0
+
+ @patch.object(TokenAuthentication, "verify")
+ def test_status_success(self, mock_verify):
+ mock_verify.return_value = {"success": True}
+
+ runner = CliRunner()
+ obj = ClickContext()
+ result = runner.invoke(
+ auth.status,
+ obj=obj,
+ )
+ assert result.output == "[SUCCESS] Authentication verified.\n"
+ assert result.exit_code == 0
+
+ def test_login_logout_success(self):
+ email = os.getenv("TESTRUNNER_EMAIL")
+ secret = os.getenv("TESTRUNNER_SECRET")
+
+ self.assertIsNotNone(email)
+ self.assertIsNotNone(secret)
+
+ runner = CliRunner()
+ result = runner.invoke(
+ auth.login,
+ ["--email", email, "--password", secret],
+ obj=ClickContext(),
+ )
+ assert "[SUCCESS] Login successful.\n" in result.output
+ assert result.exit_code == 0
+
+ result = runner.invoke(
+ auth.logout,
+ obj=ClickContext(),
+ )
+ assert result.output == "[INFO] Logout completed.\n"
+ assert result.exit_code == 0
diff --git a/tests/test_cli_deck.py b/tests/test_cli_deck.py
index 4445141..d4bce9e 100644
--- a/tests/test_cli_deck.py
+++ b/tests/test_cli_deck.py
@@ -17,7 +17,6 @@ def test_deck_info(self):
self.assertEqual(result.exit_code, 0)
def test_info_not_existing_deck(self):
-
result = self.runner.invoke(
deck.info,
["not_existing_deck"],
@@ -26,7 +25,6 @@ def test_info_not_existing_deck(self):
self.assertIn("[ERROR] Deck name/slug does not exist.\n", result.output)
def test_deck_list(self):
-
result = self.runner.invoke(
deck.list,
obj=ClickContext(),
@@ -45,8 +43,4 @@ def test_deck_ingress(self):
obj=ClickContext(),
)
- self.assertIn(
- "[ERROR] The project cluster does not exist. Please be sure to run 'unikube project up' first.\n",
- result.output,
- )
self.assertEqual(result.exit_code, 1)
diff --git a/tests/test_cli_helper.py b/tests/test_cli_helper.py
index 233fa20..0e829d3 100644
--- a/tests/test_cli_helper.py
+++ b/tests/test_cli_helper.py
@@ -1,6 +1,7 @@
import pytest
from requests import HTTPError, Session
+from unikube.cache import Cache
from unikube.helpers import (
check_environment_type_local_or_exit,
download_manifest,
@@ -40,15 +41,9 @@ def test_download_manifest():
},
}
- class Authentication:
- def refresh(self):
- return {"success": True, "response": {"access_token": ""}}
-
- access_token = ""
- authentication = Authentication()
-
+ cache = Cache()
with pytest.raises(SystemExit) as pytest_wrapped_e:
- _ = download_manifest(deck=deck, authentication=authentication, access_token=access_token)
+ _ = download_manifest(deck=deck, cache=cache)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
diff --git a/tests/test_cli_orga.py b/tests/test_cli_orga.py
index 7fe5d75..c0f011a 100644
--- a/tests/test_cli_orga.py
+++ b/tests/test_cli_orga.py
@@ -34,5 +34,5 @@ def test_orga_list(self):
self.assertIn("id", result.output)
self.assertIn("name", result.output)
- self.assertIn("acme", result.output)
+ self.assertIn("ACME", result.output)
self.assertEqual(result.exit_code, 0)
diff --git a/tests/test_cli_project.py b/tests/test_cli_project.py
index 5e3cc6e..2b4ae85 100644
--- a/tests/test_cli_project.py
+++ b/tests/test_cli_project.py
@@ -1,5 +1,5 @@
from tests.login_testcase import LoginTestCase
-from unikube.cli import orga, project
+from unikube.cli import project
from unikube.commands import ClickContext
diff --git a/unikube/_backup/decorators.py b/unikube/_backup/decorators.py
deleted file mode 100644
index 6fd4e06..0000000
--- a/unikube/_backup/decorators.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from time import sleep
-
-import click
-from utils.console import error
-from utils.exceptions import UnikubeClusterUnavailableError
-from utils.localsystem import K3D, KubeAPI
-from utils.project import AppManager, ProjectManager
-
-
-def retry_command(function):
- def wrapper(*args, **kwargs):
- try:
- res = function(*args, **kwargs)
- except UnikubeClusterUnavailableError:
- error("Cannot reach local cluster.")
- project = ProjectManager().get_active()
- app = AppManager().get_active()
- if click.confirm(f"Should we try to \"project up {project.get('name')}\"?"):
- K3D(project).up(ingress_port=None, workers=None)
- retry_count = 0
- k8s = KubeAPI(project, app)
- while not k8s.is_available and retry_count <= 30:
- sleep(0.5)
- retry_count += 1
- if retry_count == 30:
- error("Could not up project.")
- exit(1)
- res = function(*args, **kwargs)
- else:
- exit(1)
- return res
-
- return wrapper
diff --git a/unikube/_backup/utils_project.py b/unikube/_backup/utils_project.py
deleted file mode 100644
index 8787aba..0000000
--- a/unikube/_backup/utils_project.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# -*- coding: utf-8 -*-
-from typing import List, Optional
-
-from tinydb import Query
-from utils.client import GQLQueryExecutor, get_requests_session
-
-from unikube import settings
-
-
-class ConfigManager:
-
- DB = None
- MISC = settings.MISC
- QUERY = Query()
-
- def set_active(self, _id, slug, **kwargs) -> dict:
- self.DB.update({"cli_active": False})
- obj = self.DB.get(self.QUERY.id == _id)
- if obj:
- obj.update(kwargs)
- obj["cli_active"] = True
- self.DB.write_back([obj])
- else:
- # object gets created for the very first time
- obj = {"id": _id, "name": slug, "cli_active": True}
- obj.update(kwargs)
- self.DB.insert(obj)
- return obj
-
- def update_active(self, obj):
- db_obj = self.DB.get(self.QUERY.id == obj["id"])
- db_obj.update(obj)
- self.DB.write_back([db_obj])
-
- def get_active(self) -> Optional[dict]:
- obj = self.DB.get(self.QUERY.cli_active == True) # noqa
- return obj
-
- def get_all(self) -> List[dict]:
- return self.DB.all()
-
- def delete(self, _id):
- self.DB.remove(self.QUERY.id == _id)
-
-
-class AppManager(ConfigManager):
- DB = settings.config.table("apps")
-
- def unset_app(self):
- self.DB.update({"cli_active": False})
-
-
-class ProjectManager(ConfigManager):
- DB = settings.config.table("projects")
- APP_MGR = AppManager()
-
- def unset_project(self):
- self.DB.update({"cli_active": False})
- self.APP_MGR.unset_app()
-
-
-class AllProjects(GQLQueryExecutor):
- query = """
- {
- projects(organizationId: "") {
- id
- slug
- description
- }
- }
- """
- key = "projects"
-
-
-class ProjectInfo(GQLQueryExecutor):
- query = """
- {
- project(id: "$id") {
- id
- slug
- description
- organization {
- name
- }
- specRepository
- created
- }
- }
- """
- key = "project"
-
-
-class ProjectApps(GQLQueryExecutor):
- query = """
- {
- project(id: "$id") {
- applications {
- id
- slug
- description
- namespace
- environment(level:"local"){
- specsUrl
- }
- }
- }
- }
- """
- key = "project"
-
- def get_data(self, **kwargs):
- data = self._query(**kwargs)["applications"]
- if "filter" in kwargs:
- _filter = kwargs["filter"]
- result = []
- if type(_filter) == list:
- for d in data:
- [d.pop(x, None) for x in _filter]
- result.append(d)
- return result
- else:
- for d in data:
- d.pop(_filter)
- result.append(d)
- return result
- return data
-
-
-class AppSpecs(GQLQueryExecutor):
- query = """
- {
- applications(id: "$id") {
- namespace
- environment(level:"local"){
- specsUrl
- }
- }
- }
- """
-
- key = "applications"
-
-
-class Deployments(GQLQueryExecutor):
- query = """
- {
- application(id: "$id") {
- namespace
- deployments(level: "local") {
- id
- slug
- description
- ports
- isSwitchable
- }
- }
- }
- """
-
- key = "application"
-
-
-def download_specs(url):
- session = get_requests_session()
- r = session.get(settings.DEFAULT_UNIKUBE_GRAPHQL_HOST + url)
- if r.status_code == 200:
- return r.json()
- raise Exception(f"access to K8s specs failed (status {r.status_code})")
diff --git a/unikube/authentication/authentication.py b/unikube/authentication/authentication.py
index 02f47c7..98a7af6 100644
--- a/unikube/authentication/authentication.py
+++ b/unikube/authentication/authentication.py
@@ -1,5 +1,6 @@
import sys
from urllib.parse import urljoin
+from uuid import UUID
import click_spinner
import jwt
@@ -8,8 +9,7 @@
import unikube.cli.console as console
from unikube import settings
from unikube.authentication.types import AuthenticationData
-from unikube.storage.general import LocalStorageGeneral
-from unikube.storage.user import LocalStorageUser
+from unikube.cache import Cache, UserSettings
class IAuthentication:
@@ -35,12 +35,15 @@ def verify_or_refresh(self) -> bool:
class TokenAuthentication(IAuthentication):
def __init__(
self,
- local_storage_general: LocalStorageGeneral,
+ cache: Cache,
timeout=settings.TOKEN_TIMEOUT,
):
- self.local_storage_general = local_storage_general
+ self.cache: Cache = cache
+ try:
+ self.user_id = cache.userId
+ except Exception:
+ self.user_id = None
- self.general_data = self.local_storage_general.get()
self.timeout = timeout
self.url_public_key = urljoin(self.__get_host(), settings.TOKEN_PUBLIC_KEY)
@@ -55,9 +58,8 @@ def __init__(
def __get_host(self) -> str:
try:
- local_storage_user = LocalStorageUser(user_email=self.general_data.authentication.email)
- user_data = local_storage_user.get()
- auth_host = user_data.config.auth_host
+ user_settings = UserSettings(id=self.user_id)
+ auth_host = user_settings.auth_host
if not auth_host:
raise Exception("User data config does not specify an authentication host.")
@@ -99,12 +101,6 @@ def login(
email: str,
password: str,
) -> dict:
- # set/update user config
- local_storage_user = LocalStorageUser(user_email=email)
- user_data = local_storage_user.get()
- user_data.config.auth_host = settings.AUTH_DEFAULT_HOST
- local_storage_user.set(user_data)
-
# access token + refresh token
response_token = self.__request(
url=self.url_login,
@@ -135,24 +131,24 @@ def login(
requesting_party_token = False
# set authentication data
- self.general_data.authentication = AuthenticationData(
+ self.cache.auth = AuthenticationData(
email=email,
access_token=response["response"]["access_token"],
refresh_token=response["response"]["refresh_token"],
requesting_party_token=requesting_party_token,
)
-
- self.local_storage_general.set(self.general_data)
+ self.cache.save()
return response
def logout(self):
- self.general_data.authentication = AuthenticationData()
- self.local_storage_general.set(self.general_data)
+ self.cache.userId = UUID("00000000-0000-0000-0000-000000000000")
+ self.cache.auth = AuthenticationData()
+ self.cache.save()
def verify(self) -> dict:
# keycloak
- access_token = self.general_data.authentication.access_token
+ access_token = self.cache.auth.access_token
response = self.__request(
url=self.url_verify,
data={},
@@ -166,7 +162,7 @@ def verify(self) -> dict:
def refresh(self) -> dict:
# request
- refresh_token = self.general_data.authentication.refresh_token
+ refresh_token = self.cache.auth.refresh_token
response_token = self.__request(
url=self.url_refresh,
data={
@@ -196,11 +192,10 @@ def refresh(self) -> dict:
# update token
if response["success"]:
- self.general_data = self.local_storage_general.get()
- self.general_data.authentication.access_token = response["response"]["access_token"]
- self.general_data.authentication.refresh_token = response["response"]["refresh_token"]
- self.general_data.authentication.requesting_party_token = requesting_party_token
- self.local_storage_general.set(self.general_data)
+ self.cache.auth.access_token = response["response"]["access_token"]
+ self.cache.auth.refresh_token = response["response"]["refresh_token"]
+ self.cache.auth.requesting_party_token = requesting_party_token
+ self.cache.save()
return response
@@ -283,8 +278,3 @@ def token_from_response(self, response):
options={"verify_signature": False},
)
return token
-
-
-def get_authentication():
- token_authentication = TokenAuthentication(local_storage_general=LocalStorageGeneral())
- return token_authentication
diff --git a/unikube/authentication/flow.py b/unikube/authentication/flow.py
new file mode 100644
index 0000000..b170dd3
--- /dev/null
+++ b/unikube/authentication/flow.py
@@ -0,0 +1,143 @@
+import click
+from oic import rndstr
+from oic.oic import Client
+from oic.utils.authn.client import CLIENT_AUTHN_METHOD
+
+import unikube.cli.console as console
+from unikube import settings
+from unikube.authentication.authentication import TokenAuthentication
+from unikube.cache import Cache
+from unikube.cache.cache import UserIDs, UserInfo, UserSettings
+from unikube.graphql_utils import GraphQL
+
+
+def cache_information(cache: Cache):
+ # GraphQL
+ try:
+ graph_ql = GraphQL(cache=cache)
+ data = graph_ql.query(
+ """
+ query {
+ user {
+ id
+ email
+ name
+ familyName
+ givenName
+ avatarImage
+ }
+ allOrganizations {
+ results {
+ id
+ title
+ }
+ }
+ allProjects(limit: 10000) {
+ results {
+ id
+ title
+ organization {
+ id
+ }
+ }
+ }
+ allDecks(limit: 10000) {
+ results {
+ id
+ title
+ project {
+ id
+ }
+ }
+ }
+ }
+ """,
+ )
+
+ user = data.get("user", None)
+ except Exception as e:
+ console.debug(e)
+ console.exit_generic_error()
+
+ # cache user_id
+ try:
+ cache.userId = user["id"]
+ cache.save()
+ except Exception as e:
+ console.debug(e)
+
+ # cache user settings
+ try:
+ user_settings = UserSettings(id=user["id"])
+ user_settings.save()
+ except Exception as e:
+ console.debug(e)
+
+ # cache user information
+ try:
+ user_info = UserInfo(**user)
+ user_info.save()
+ except Exception as e:
+ console.debug(e)
+
+ # cache IDs
+ try:
+ user_ids = UserIDs(id=user["id"])
+ user_ids.refresh(data)
+ user_ids.save()
+ except Exception as e:
+ console.debug(e)
+
+
+def password_flow(ctx, email: str, password: str) -> bool:
+ auth = TokenAuthentication(cache=ctx.cache)
+ response = auth.login(
+ email,
+ password,
+ )
+ if not response["success"]:
+ return False
+
+ try:
+ _ = auth.token_from_response(response)
+ except Exception as e:
+ console.debug(e)
+ console.debug(response)
+ return False
+
+ cache_information(cache=ctx.cache)
+
+ console.success("Login successful.")
+ return True
+
+
+def web_flow(ctx) -> bool:
+ client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
+ issuer = f"{settings.AUTH_DEFAULT_HOST}/auth/realms/unikube"
+ client.provider_config(issuer)
+
+ state = rndstr()
+ nonce = rndstr()
+
+ # 1. run callback server
+ from unikube.authentication.web import run_callback_server
+
+ port = run_callback_server(state, nonce, client, ctx)
+
+ # 2. send to login with redirect url.
+ args = {
+ "client_id": "cli",
+ "response_type": ["token"],
+ "response_mode": "form_post",
+ "scope": ["openid"],
+ "nonce": nonce,
+ "state": state,
+ "redirect_uri": f"http://localhost:{port}",
+ }
+
+ auth_req = client.construct_AuthorizationRequest(request_args=args)
+ login_url = auth_req.request(client.authorization_endpoint)
+ console.info("If your Browser does not open automatically, go to the following URL and login:")
+ console.link(login_url)
+ click.launch(login_url)
+ return True
diff --git a/unikube/authentication/types.py b/unikube/authentication/types.py
index 8d3b512..d560a84 100644
--- a/unikube/authentication/types.py
+++ b/unikube/authentication/types.py
@@ -6,4 +6,3 @@ class AuthenticationData(BaseModel):
access_token: str = ""
refresh_token: str = ""
requesting_party_token: bool = False
- public_key: str = ""
diff --git a/unikube/authentication/web.py b/unikube/authentication/web.py
index 5b02c99..c903f80 100644
--- a/unikube/authentication/web.py
+++ b/unikube/authentication/web.py
@@ -4,9 +4,12 @@
from threading import Thread
from urllib.parse import parse_qs
-from oic.oic import AccessTokenResponse, AuthorizationResponse, Client
+from oic.oic import Client
+from unikube.authentication.authentication import TokenAuthentication
+from unikube.authentication.flow import cache_information
from unikube.authentication.types import AuthenticationData
+from unikube.cache import Cache
from unikube.cli import console
from unikube.context import ClickContext
@@ -50,7 +53,8 @@ def do_POST(self):
if POST["state"] != state:
raise Exception(f"Invalid state: {POST['state']}")
- response = ctx.auth._get_requesting_party_token(POST["access_token"])
+ auth = TokenAuthentication(cache=ctx.cache)
+ response = auth._get_requesting_party_token(POST["access_token"])
login_file = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "login.html"))
text = login_file.read()
@@ -65,20 +69,21 @@ def do_POST(self):
)
else:
try:
- token = ctx.auth.token_from_response(response)
+ token = auth.token_from_response(response)
except Exception as e:
console.debug(e)
console.debug(response)
console.error("Login failed!")
text = "Login failed! Your token does not match."
else:
- ctx.auth.general_data.authentication = AuthenticationData(
+ cache = Cache()
+ cache.auth = AuthenticationData(
email=token["email"],
access_token=response["response"]["access_token"],
refresh_token=response["response"]["refresh_token"],
requesting_party_token=True,
)
- ctx.auth.local_storage_general.set(ctx.auth.general_data)
+ cache_information(cache=cache)
if given_name := token.get("given_name", ""):
greeting = f"Hello {given_name}!"
@@ -86,7 +91,6 @@ def do_POST(self):
greeting = "Hello!"
html_close = "close"
-
text_html = (
f"You have successfully logged in. You can {html_close} this browser tab and return "
f"to the shell."
diff --git a/unikube/cache/README.md b/unikube/cache/README.md
new file mode 100644
index 0000000..1768707
--- /dev/null
+++ b/unikube/cache/README.md
@@ -0,0 +1,33 @@
+# Cache/Storage Structure
+
+All data saved by the CLI is located in `CLI_UNIKUBE_DIRECTORY` and structured as follows:
+
+```text
+- cache.json
+- user
+  - <user_id>
+    - info.json
+    - settings.json
+    - cache
+      - context.json
+      - IDs.json
+- cluster
+  - <cluster_id>
+    - k3d
+    - remote
+```
+
+- `cache.json`: This file is the primary cache and will be loaded by the ClickContext. It contains essential information such as user authentication.
+
+## User/Cluster Split
+
+- `user` folder: This folder contains `<user_id>` folders, which hold user-specific information.
+- `cluster` folder: This folder contains `<cluster_id>` folders, which hold project-specific cluster information.
+
+## User Files
+
+- `info.json`: Information about the user.
+- `settings.json`: User settings.
+- `cache/`: User-specific caches (`context.json` and `IDs.json`).
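To make the layout concrete, here is a short sketch of reading the hierarchy through the classes this diff adds (paths resolve under `settings.CLI_UNIKUBE_DIRECTORY`; the guard assumes the per-user files only exist after a login):

```python
# Sketch: navigating the cache layout described above.
from uuid import UUID

from unikube.cache import Cache, UserInfo, UserSettings

cache = Cache()          # loads <CLI_UNIKUBE_DIRECTORY>/cache.json
user_id = cache.userId   # all-zero UUID means "not logged in"

if user_id != UUID("00000000-0000-0000-0000-000000000000"):
    user_settings = UserSettings(id=user_id)  # user/<user_id>/settings.json
    user_info = UserInfo(id=user_id)          # user/<user_id>/info.json
    print(user_settings.auth_host, user_info.email)
```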
diff --git a/unikube/cache/__init__.py b/unikube/cache/__init__.py
new file mode 100644
index 0000000..a4f2e09
--- /dev/null
+++ b/unikube/cache/__init__.py
@@ -0,0 +1 @@
+from .cache import Cache, UserContext, UserIDs, UserInfo, UserSettings
diff --git a/unikube/cache/base_file_cache.py b/unikube/cache/base_file_cache.py
new file mode 100644
index 0000000..958169c
--- /dev/null
+++ b/unikube/cache/base_file_cache.py
@@ -0,0 +1,57 @@
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import Optional
+
+from pydantic import BaseModel
+
+from unikube import settings
+
+
+class BaseFileCache(BaseModel):
+ timestamp: datetime = datetime.now()
+ file_path: str
+ file_name: str
+
+ def __init__(self, file_name: str, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, **data):
+ if not bool(data):
+ data = self.load(file_path=file_path, file_name=file_name)
+
+ try:
+ if data:
+ super().__init__(file_path=file_path, file_name=file_name, **data)
+ else:
+ super().__init__(file_path=file_path, file_name=file_name)
+ except Exception:
+ # corrupted cache file: remove it and fall back to field defaults
+ file = Path(os.path.join(file_path, file_name))
+ if file.exists():
+ file.unlink()
+ super().__init__(file_path=file_path, file_name=file_name)
+
+ def save(self):
+ # create file if not exists
+ Path(self.file_path).mkdir(parents=True, exist_ok=True)
+
+ # save timestamp
+ self.timestamp = datetime.now()
+ file_location = os.path.join(self.file_path, self.file_name)
+ with open(file_location, "w") as f:
+ json.dump(json.loads(self.json(exclude={"file_path", "file_name"})), f, ensure_ascii=False, indent=4)
+
+ @classmethod
+ def load(cls, file_path: str, file_name: str) -> Optional[dict]:
+ file_location = os.path.join(file_path, file_name)
+ try:
+ with open(file_location, "r") as file:
+ data = json.load(file)
+ return data
+
+ except FileNotFoundError:
+ return None
+
+ except Exception:
+ file = Path(file_location)
+ file.unlink()
+
+ def refresh(self):
+ # implement cache specific refresh method if requested
+ pass
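A minimal subclass illustrates the intended round-trip: fields serialize to JSON on `save()` and are transparently restored by the next construction (the `TokenCache` name and field are purely illustrative):

```python
# Illustrative BaseFileCache subclass; not part of the diff itself.
import tempfile

from unikube.cache.base_file_cache import BaseFileCache


class TokenCache(BaseFileCache):
    token: str = ""

    def __init__(self, file_path: str, file_name: str = "token.json", **data):
        super().__init__(file_path=file_path, file_name=file_name, **data)


with tempfile.TemporaryDirectory() as path:
    first = TokenCache(file_path=path)
    first.token = "abc123"
    first.save()                         # writes <path>/token.json

    second = TokenCache(file_path=path)  # constructor reloads token.json
    assert second.token == "abc123"
```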
diff --git a/unikube/cache/cache.py b/unikube/cache/cache.py
new file mode 100644
index 0000000..32f1dcd
--- /dev/null
+++ b/unikube/cache/cache.py
@@ -0,0 +1,156 @@
+import os
+from typing import Dict, List, Optional
+from uuid import UUID
+
+from pydantic import BaseModel
+
+from unikube import settings
+from unikube.authentication.types import AuthenticationData
+from unikube.cache.base_file_cache import BaseFileCache
+
+
+class Cache(BaseFileCache):
+ userId: UUID = UUID("00000000-0000-0000-0000-000000000000")
+ auth: AuthenticationData = AuthenticationData()
+
+ def __init__(self, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "cache.json", **data):
+ super().__init__(file_path=file_path, file_name=file_name, **data)
+
+
+class UserInfo(BaseFileCache):
+ email: str
+ name: Optional[str]
+ familyName: Optional[str]
+ givenName: Optional[str]
+ avatarImage: Optional[str]
+
+ def __init__(self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "info.json", **data):
+ file_path = os.path.join(file_path, "user", str(id))
+ super().__init__(id=id, file_path=file_path, file_name=file_name, **data)
+
+
+class UserSettings(BaseFileCache):
+ auth_host: str = settings.AUTH_DEFAULT_HOST
+
+ def __init__(
+ self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "settings.json", **data
+ ):
+ file_path = os.path.join(file_path, "user", str(id))
+ super().__init__(file_path=file_path, file_name=file_name, **data)
+
+
+class UserContext(BaseFileCache):
+ organization_id: Optional[UUID] = None
+ project_id: Optional[UUID] = None
+ deck_id: Optional[UUID] = None
+
+ def __init__(
+ self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "context.json", **data
+ ):
+ file_path = os.path.join(file_path, "user", str(id), "cache")
+ super().__init__(file_path=file_path, file_name=file_name, **data)
+
+
+class Organization(BaseModel):
+ title: str
+ project_ids: Optional[List[UUID]] = None
+
+
+class Project(BaseModel):
+ title: str
+ organization_id: UUID
+ deck_ids: Optional[List[UUID]] = None
+
+
+class Deck(BaseModel):
+ title: str
+ project_id: UUID
+
+
+class UserIDs(BaseFileCache):
+ organization: Dict[UUID, Organization] = {}
+ project: Dict[UUID, Project] = {}
+ deck: Dict[UUID, Deck] = {}
+
+ def __init__(self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "IDs.json", **data):
+ file_path = os.path.join(file_path, "user", str(id), "cache")
+ super().__init__(file_path=file_path, file_name=file_name, **data)
+
+ @classmethod
+ def __process_results_organization(cls, data) -> Dict:
+ organization = dict()
+ for item in data["allOrganizations"]["results"]:
+ project_ids = []
+ for project in data["allProjects"]["results"]:
+ if project["organization"]["id"] == item["id"]:
+ project_ids.append(project["id"])
+ organization[item["id"]] = Organization(title=item["title"], project_ids=project_ids or None)
+ return organization
+
+ @classmethod
+ def __process_results_project(cls, data) -> Dict:
+ project = dict()
+ for item in data["allProjects"]["results"]:
+ deck_ids = []
+ for deck in data["allDecks"]["results"]:
+ if deck["project"]["id"] == item["id"]:
+ deck_ids.append(deck["id"])
+ project[item["id"]] = Project(
+ title=item["title"], organization_id=item["organization"]["id"], deck_ids=deck_ids or None
+ )
+ return project
+
+ def refresh(self, data=None):
+ if not data:
+ # GraphQL
+ try:
+ from unikube.graphql_utils import GraphQL
+
+ cache = Cache()
+ graph_ql = GraphQL(cache=cache)
+ data = graph_ql.query(
+ """
+ query {
+ allOrganizations {
+ results {
+ id
+ title
+ }
+ }
+ allProjects(limit: 10000) {
+ results {
+ id
+ title
+ organization {
+ id
+ }
+ }
+ }
+ allDecks(limit: 10000) {
+ results {
+ id
+ title
+ project {
+ id
+ }
+ }
+ }
+ }
+ """,
+ )
+ except Exception as e:
+ from unikube.cli import console
+
+ console.debug(e)
+ return None
+
+ organization = UserIDs.__process_results_organization(data=data)
+ self.organization = organization
+
+ project = UserIDs.__process_results_project(data=data)
+ self.project = project
+
+ deck = dict()
+ for item in data["allDecks"]["results"]:
+ deck[item["id"]] = Deck(title=item["title"], project_id=item["project"]["id"])
+ self.deck = deck
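The maps built by `refresh()` make offline lookups possible; for instance, walking from a deck up to its organization (the user UUID is a placeholder):

```python
# Sketch: deck -> project -> organization lookups over the cached UserIDs.
from uuid import UUID

from unikube.cache import UserIDs

user_ids = UserIDs(id=UUID("00000000-0000-0000-0000-000000000001"))

deck_id = next(iter(user_ids.deck), None)  # any cached deck, if one exists
if deck_id:
    deck = user_ids.deck[deck_id]
    project = user_ids.project[deck.project_id]
    organization = user_ids.organization[project.organization_id]
    print(f"{deck.title} -> {project.title} -> {organization.title}")
```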
diff --git a/unikube/cache/user_cache_context.py b/unikube/cache/user_cache_context.py
new file mode 100644
index 0000000..ebc4177
--- /dev/null
+++ b/unikube/cache/user_cache_context.py
@@ -0,0 +1,18 @@
+import os
+from typing import Optional
+from uuid import UUID
+
+from unikube import settings
+from unikube.cache.base_file_cache import BaseFileCache
+
+
+class UserContext(BaseFileCache):
+ organization_id: Optional[str] = None
+ project_id: Optional[str] = None
+ deck_id: Optional[str] = None
+
+ def __init__(
+ self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "context.json", **data
+ ):
+ file_path = os.path.join(file_path, "user", str(id), "cache")
+ super().__init__(file_path=file_path, file_name=file_name, **data)
diff --git a/unikube/cache/user_info.py b/unikube/cache/user_info.py
new file mode 100644
index 0000000..8610d34
--- /dev/null
+++ b/unikube/cache/user_info.py
@@ -0,0 +1,19 @@
+import os
+from typing import Optional
+from uuid import UUID
+
+from unikube import settings
+from unikube.cache.base_file_cache import BaseFileCache
+
+
+class UserInfo(BaseFileCache):
+ id: UUID
+ email: str
+ name: Optional[str]
+ familyName: Optional[str]
+ givenName: Optional[str]
+ avatarImage: Optional[str]
+
+ def __init__(self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "info.json", **data):
+ file_path = os.path.join(file_path, "user", str(id))
+ super().__init__(file_path=file_path, file_name=file_name, id=id, **data)
diff --git a/unikube/cache/user_settings.py b/unikube/cache/user_settings.py
new file mode 100644
index 0000000..3d076b3
--- /dev/null
+++ b/unikube/cache/user_settings.py
@@ -0,0 +1,16 @@
+import os
+from uuid import UUID
+
+from unikube import settings
+from unikube.cache.base_file_cache import BaseFileCache
+
+
+class UserSettings(BaseFileCache):
+ id: UUID
+ auth_host: str = ""
+
+ def __init__(
+ self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "settings.json", **data
+ ):
+ file_path = os.path.join(file_path, "user", str(id))
+ super().__init__(file_path=file_path, file_name=file_name, id=id, **data)
diff --git a/unikube/cli/app.py b/unikube/cli/app.py
index 85a87ea..5638c7c 100644
--- a/unikube/cli/app.py
+++ b/unikube/cli/app.py
@@ -1,31 +1,17 @@
-import os
-import socket
import sys
-import tempfile
from collections import OrderedDict
from typing import List, Tuple
import click
import click_spinner
-from unikube import settings
from unikube.cli import console
from unikube.cli.helper import age_from_timestamp
+from unikube.cluster.system import Docker, KubeAPI, KubeCtl
from unikube.graphql_utils import GraphQL
-from unikube.local.providers.helper import get_cluster_or_exit
-from unikube.local.system import Docker, KubeAPI, KubeCtl, Telepresence
-from unikube.settings import UNIKUBE_FILE
from unikube.unikubefile.selector import unikube_file_selector
-def _is_local_port_free(port):
- a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if a_socket.connect_ex(("127.0.0.1", int(port))) == 0:
- return False
- else:
- return True
-
-
def get_deck_from_arguments(ctx, organization_id: str, project_id: str, deck_id: str):
# context
organization_id, project_id, deck_id = ctx.context.get_context_ids_from_arguments(
@@ -40,7 +26,7 @@ def get_deck_from_arguments(ctx, organization_id: str, project_id: str, deck_id:
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -56,7 +42,7 @@ def get_deck_from_arguments(ctx, organization_id: str, project_id: str, deck_id:
}
}
""",
- query_variables={"id": deck_id},
+ query_variables={"id": str(deck_id)},
)
deck = data["deck"]
project_id = deck["project"]["id"]
@@ -64,16 +50,7 @@ def get_deck_from_arguments(ctx, organization_id: str, project_id: str, deck_id:
console.debug(e)
console.exit_generic_error()
- # cluster data
- cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
- if project_id not in [cluster.id for cluster in cluster_list]:
- console.info(f"The project cluster for '{project_id}' is not up or does not exist yet.", _exit=True)
-
- cluster_data = ctx.cluster_manager.get(id=project_id)
- if not cluster_data:
- console.error("The cluster could not be found.", _exit=True)
-
- return cluster_data, deck
+ return project_id, deck
def argument_apps(k8s, apps: List[str], multiselect: bool = False) -> List[str]:
@@ -116,14 +93,11 @@ def argument_app(k8s, app: str) -> str:
def list(ctx, organization, project, deck, **kwargs):
"""List all apps."""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
- provider_data = cluster.storage.get()
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
# list
- k8s = KubeAPI(provider_data, deck)
+ k8s = KubeAPI(kubeconfig_path=cluster.get_kubeconfig_path(), deck=deck)
pod_table = []
def _ready_ind(c) -> Tuple[bool, str]:
@@ -170,14 +144,11 @@ def _ready_ind(c) -> Tuple[bool, str]:
def info(ctx, app, organization, project, deck, **kwargs):
"""Display the status for the given app name."""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
- provider_data = cluster.storage.get()
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
# shell
- k8s = KubeAPI(provider_data, deck)
+ k8s = KubeAPI(kubeconfig_path=cluster.get_kubeconfig_path(), deck=deck)
app = argument_app(k8s, app)
# get the data of the selected pod
@@ -253,38 +224,30 @@ def shell(ctx, app, organization=None, project=None, deck=None, container=None,
Drop into an interactive shell.
"""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
- provider_data = cluster.storage.get()
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
# shell
- k8s = KubeAPI(provider_data, deck)
+ k8s = KubeAPI(kubeconfig_path=cluster.get_kubeconfig_path(), deck=deck)
app = argument_app(k8s, app)
# get the data of the selected pod
data = k8s.get_pod(app)
- telepresence = Telepresence(provider_data)
# the corresponding deployment by getting rid of the pod name suffix
deployment = "-".join(data.metadata.name.split("-")[0:-2])
- # 1. check if this pod is of a switched deployment (in case of an active Telepresence)
- if telepresence.is_swapped(deployment, namespace=data.metadata.namespace):
+ # 1. check if this pod is of a switched deployment
+ if cluster.bridge.is_switched(deployment=deployment, namespace=data.metadata.namespace):
# the container name generated in "app switch" for that pod
- container_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
- project=cluster_data.name.lower(), deck=deck["title"].lower(), name=deployment.lower()
- ).replace(":", "")
-
- if Docker().check_running(container_name):
- # 2. Connect to that container
- # 2.a connect using Docker
- Docker().exec(container_name, "/bin/sh", interactive=True)
+ image_name = cluster.bridge.get_docker_image_name(deployment=deployment)
+
+ if Docker().check_running(image_name):
+ # 2. connect to that container using Docker
+ Docker().exec(image_name, "/bin/sh", interactive=True)
else:
console.error(
- "This is a Telepresence Pod with no corresponding Docker container "
- "running in order to connect (inconsistent state?)"
+ "This is a switched app with no corresponding docker container (inconsistent state?).", _exit=True
)
else:
@@ -294,7 +257,7 @@ def shell(ctx, app, organization=None, project=None, deck=None, container=None,
return None
# 2.b connect using kubernetes
- KubeCtl(provider_data).exec_pod(
+ KubeCtl(cluster.get_kubeconfig_path()).exec_pod(
app, deck["environment"][0]["namespace"], "/bin/sh", interactive=True, container=container
)
@@ -314,50 +277,30 @@ def exec(ctx, **kwargs):
@click.option("--organization", "-o", help="Select an organization")
@click.option("--project", "-p", help="Select a project")
@click.option("--deck", "-d", help="Select a deck")
-@click.option("--deployment", help="Specify the deployment if not set in the Unikubefile")
-@click.option("--unikubefile", help="Specify the path to the Unikubefile", type=str)
+@click.option("--unikube-file", help="Specify the path to the Unikubefile", type=str)
@click.option(
"--no-build", "-n", is_flag=True, help="Do not build a new container image for the switch operation", default=False
)
@click.pass_obj
-def switch(
- ctx, app, organization, project, deck, deployment, unikubefile: str = None, no_build: bool = False, **kwargs
-):
+def switch(ctx, app, organization, project, deck, unikube_file: str = None, no_build: bool = False, **kwargs):
"""
Switch a running deployment with a local Docker container.
"""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
- # unikube file input
+ # unikube file
try:
- unikube_file = unikube_file_selector.get(path_unikube_file=unikubefile)
+ unikube_file = unikube_file_selector.get(path_unikube_file=unikube_file)
unikube_file_app = unikube_file.get_app(name=app)
except Exception as e:
console.debug(e)
- console.error("Invalid 'app' argument.", _exit=True)
-
- # 2: Get a deployment
- # 2.1.a Check the deployment identifier
- if not deployment and unikube_file_app:
- # 1.1.b check the unikubefile
- deployment = unikube_file_app.get_deployment()
- if not deployment:
- console.error("Please specify the 'deployment' key of your app in your unikube.yaml.", _exit=True)
- else:
- console.error(
- "Please specify the deployment either using the '--deployment' option or in the unikube.yaml. "
- "Run 'unikube app switch' in a directory containing the unikube.yaml file.",
- _exit=True,
- )
+ console.error("Invalid unikube file 'app' argument.", _exit=True)
- # 2.2 Fetch available "deployment:", deployments
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -379,136 +322,49 @@ def switch(
}
""",
query_variables={
- "id": deck["id"],
+ "id": str(deck["id"]),
},
)
except Exception as e:
console.debug(e)
console.exit_generic_error()
- target_deployment = None
- for _deployment in data["deck"]["deployments"]:
- if _deployment["title"] == deployment:
- target_deployment = _deployment
-
- # 2.3 Check and select deployment data
- if target_deployment is None:
- console.error(
- f"The deployment '{deployment}' you specified could not be found.",
- _exit=True,
- )
+ # select target deployment
+ deployment = unikube_file_app.get_deployment()
+ for target_deployment in data["deck"]["deployments"]:
+ if target_deployment["title"] == deployment:
+ break
+ else:
+ console.error(f"The deployment '{deployment}' you specified could not be found.", _exit=True)
- ports = target_deployment["ports"].split(",")
- deployment = target_deployment["title"]
namespace = deck["environment"][0]["namespace"]
+ ports = target_deployment["ports"].split(",")
- console.info("Please wait while unikube prepares the switch.")
+ # check if deployment exists
with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
- # check telepresence
- provider_data = cluster.storage.get()
- telepresence = Telepresence(provider_data)
-
- available_deployments = telepresence.list(namespace, flat=True)
- if deployment not in available_deployments:
- console.error(
- "The given deployment cannot be switched. " f"You may have to run 'unikube deck install {deck}' first.",
- _exit=True,
- )
-
- is_swapped = telepresence.is_swapped(deployment, namespace)
-
- k8s = KubeAPI(provider_data, deck)
- # service account token, service cert
- service_account_tokens = k8s.get_serviceaccount_tokens(deployment)
-
- # 3: Build an new Docker image
- # 3.1 Grab the docker file
- context, dockerfile, target = unikube_file_app.get_docker_build()
- if not target:
- target = ""
- console.debug(f"{context}, {dockerfile}, {target}")
-
- # 3.2 Set an image name
- image_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
- project=cluster_data.name.replace(" ", "").lower(), deck=deck["title"], name=deployment
+ # TODO:
+ # available_deployments = cluster.bridge.list(namespace, flat=True)
+ # if deployment not in available_deployments:
+ # console.error(
+ # "The given deployment cannot be switched. " f"You may have to run 'unikube deck install {deck}' first.",
+ # _exit=True,
+ # )
+ pass
+
+ # build a (new) docker image
+ console.info("Please wait while unikube prepares the switch.")
+ cluster.bridge.build(deployment, namespace, unikube_file_app, no_build)
+
+ # start switch operation
+ console.info(f"Starting your {cluster.cluster_bridge_type.name} bridge, this may take a while to become effective")
+ cluster.bridge.switch(
+ kubeconfig_path=cluster.get_kubeconfig_path(),
+ deployment=deployment,
+ namespace=namespace,
+ ports=ports,
+ unikube_file_app=unikube_file_app,
)
- docker = Docker()
-
- if is_swapped:
- console.warning("It seems this app is already switched in another process. ")
- if click.confirm("Do you want to kill it and switch here?"):
- telepresence.leave(deployment, namespace, silent=True)
- if docker.check_running(image_name):
- docker.kill(name=image_name)
- else:
- sys.exit(0)
-
- # 3.3 Build image
- if not docker.image_exists(image_name) or not no_build:
- if no_build:
- console.warning(f"Ignoring --no-build since the required image '{image_name}' does not exist")
- console.info(f"Building a Docker image for {dockerfile} with context {context}")
- with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
- status, msg = docker.build(image_name, context, dockerfile, target)
- if not status:
- console.debug(msg)
- console.error("Failed to build Docker image.", _exit=True)
-
- console.info(f"Docker image successfully built: {image_name}")
-
- # 4. Start the Telepresence session
- # 4.1 Set the right intercept port
- port = unikube_file_app.get_port()
- if port is None:
- port = str(ports[0])
- if len(ports) > 1:
- console.warning(
- f"No port specified although there are multiple ports available: {ports}. "
- f"Defaulting to port {port} which might not be correct."
- )
- if port not in ports:
- console.error(f"The specified port {port} is not in the rage of available options: {ports}", _exit=True)
- if not _is_local_port_free(port):
- console.error(
- f"The local port {port} is busy. Please stop the application running on " f"this port and try again.",
- _exit=True,
- )
-
- # 4.2 See if there are volume mounts
- mounts = unikube_file_app.get_mounts()
- console.debug(f"Volumes requested: {mounts}")
- # mount service tokens
- if service_account_tokens:
- tmp_sa_token = tempfile.NamedTemporaryFile(delete=True)
- tmp_sa_cert = tempfile.NamedTemporaryFile(delete=True)
- tmp_sa_token.write(service_account_tokens[0].encode())
- tmp_sa_cert.write(service_account_tokens[1].encode())
- tmp_sa_token.flush()
- tmp_sa_cert.flush()
- mounts.append((tmp_sa_token.name, settings.SERVICE_TOKEN_FILENAME))
- mounts.append((tmp_sa_cert.name, settings.SERVICE_CERT_FILENAME))
- else:
- tmp_sa_token = None
- tmp_sa_cert = None
-
- # 4.3 See if there special env variables
- envs = unikube_file_app.get_environment()
- console.debug(f"Envs requested: {envs}")
-
- # 4.4 See if there is a run command to be executed
- command = unikube_file_app.get_command(port=port)
- console.debug(f"Run command: {command}")
-
- console.info("Starting your container, this may take a while to become effective")
-
- telepresence.swap(deployment, image_name, command, namespace, envs, mounts, port)
- if docker.check_running(image_name):
- docker.kill(name=image_name)
- if tmp_sa_token:
- tmp_sa_token.close()
- tmp_sa_cert.close()
-
@click.command()
@click.argument("app", required=False)
@@ -525,14 +381,11 @@ def logs(ctx, app, container=None, organization=None, project=None, deck=None, f
``-f`` flag.
"""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
- provider_data = cluster.storage.get()
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
# log
- k8s = KubeAPI(provider_data, deck)
+ k8s = KubeAPI(kubeconfig_path=cluster.get_kubeconfig_path(), deck=deck)
app = argument_app(k8s, app)
# get the data of the selected pod
@@ -562,14 +415,11 @@ def env(ctx, app, init, organization, project, deck, **kwargs):
can print the environment variables for all init containers with the ``-i`` flag.
"""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
- provider_data = cluster.storage.get()
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
# env
- k8s = KubeAPI(provider_data, deck)
+ k8s = KubeAPI(kubeconfig_path=cluster.get_kubeconfig_path(), deck=deck)
app = argument_app(k8s, app)
# get the data of the selected pod
@@ -657,14 +507,11 @@ def update(ctx, app, organization, project, deck, **kwargs):
Trigger a forced update of the given app. This command creates a new app instance.
"""
- cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
-
- # get cluster
- cluster = get_cluster_or_exit(ctx, cluster_data.id)
- provider_data = cluster.storage.get()
+ cluster_id, deck = get_deck_from_arguments(ctx, organization, project, deck)
+ cluster = ctx.cluster_manager.select(id=cluster_id, exit_on_exception=True)
# delete pod
- k8s = KubeAPI(provider_data, deck)
+ k8s = KubeAPI(kubeconfig_path=cluster.get_kubeconfig_path(), deck=deck)
apps = argument_apps(k8s, [app] if app else [], multiselect=True)
[k8s.delete_pod(app) for app in apps]
console.info(f"The app(s) {', '.join(apps)} are currently updating and do not exist anymore.")
diff --git a/unikube/cli/auth.py b/unikube/cli/auth.py
index 36a87df..296eccd 100644
--- a/unikube/cli/auth.py
+++ b/unikube/cli/auth.py
@@ -1,12 +1,10 @@
from getpass import getpass
import click
-from oic import rndstr
-from oic.oic import Client
-from oic.utils.authn.client import CLIENT_AUTHN_METHOD
import unikube.cli.console as console
-from unikube import settings
+from unikube.authentication.authentication import TokenAuthentication
+from unikube.authentication.flow import password_flow, web_flow
from unikube.helpers import compare_current_and_latest_versions
@@ -21,68 +19,24 @@ def login(ctx, email, password, **kwargs):
``-e`` for email and enable the direct login method. For a non-interactive login, you can provide
``-p`` along with the password.
"""
+
compare_current_and_latest_versions()
+
+ # select login flow
if email or password:
if not email:
email = click.prompt("email", type=str)
if not password:
password = getpass("password:")
- return password_flow(ctx, email, password)
- return web_flow(ctx)
-
-def password_flow(ctx, email, password):
- response = ctx.auth.login(
- email,
- password,
- )
- if response["success"]:
- try:
- token = ctx.auth.token_from_response(response)
- except Exception as e:
- console.debug(e)
- console.debug(response)
- console.error("Login failed. Your token does not match.")
- return False
-
- if token["given_name"]:
- console.success(f'Login successful. Hello {token["given_name"]}!')
- else:
- console.success("Login successful.")
+ success = password_flow(ctx, email, password)
else:
- console.error("Login failed. Please check email and password.")
- return True
-
-
-def web_flow(ctx):
- client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
- issuer = f"{settings.AUTH_DEFAULT_HOST}/auth/realms/unikube"
- client.provider_config(issuer)
+ success = web_flow(ctx)
- state = rndstr()
- nonce = rndstr()
+ # error
+ if not success:
+ console.error("Login failed. Please check email and password.", _exit=True)
- # 1. run callback server
- from unikube.authentication.web import run_callback_server
-
- port = run_callback_server(state, nonce, client, ctx)
-
- # 2. send to login with redirect url.
- args = {
- "client_id": "cli",
- "response_type": ["token"],
- "response_mode": "form_post",
- "scope": ["openid"],
- "nonce": nonce,
- "state": state,
- "redirect_uri": f"http://localhost:{port}",
- }
-
- auth_req = client.construct_AuthorizationRequest(request_args=args)
- login_url = auth_req.request(client.authorization_endpoint)
- console.info("If your Browser does not open automatically, go to the following URL and login:")
- console.link(login_url)
- click.launch(login_url)
return True
@@ -93,9 +47,10 @@ def logout(ctx, **kwargs):
Log out of a Unikube host.
"""
- ctx.auth.logout()
- console.info("Logout completed.")
+ auth = TokenAuthentication(cache=ctx.cache)
+ auth.logout()
+ console.info("Logout completed.")
return True
@@ -107,17 +62,19 @@ def status(ctx, token=False, **kwargs):
View authentication status.
"""
- response = ctx.auth.verify()
-
# show token information
if token:
- console.info(f"access token: {ctx.auth.general_data.authentication.access_token}")
+ console.info(f"access token: {ctx.cache.auth.access_token}")
console.echo("---")
- console.info(f"refresh token: {ctx.auth.general_data.authentication.refresh_token}")
+ console.info(f"refresh token: {ctx.cache.auth.refresh_token}")
console.echo("---")
- console.info(f"requesting party token: {ctx.auth.general_data.authentication.requesting_party_token}")
+ console.info(f"requesting party token: {ctx.cache.auth.requesting_party_token}")
console.echo("")
+ # verify
+ auth = TokenAuthentication(cache=ctx.cache)
+ response = auth.verify()
+
if response["success"]:
console.success("Authentication verified.")
else:
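The login command now delegates to password_flow and web_flow from unikube.authentication.flow; both are expected to return a bool, and the command exits with a single error message when the chosen flow fails. A minimal sketch of that contract, assuming the new flow module mirrors the removed in-place implementations and the TokenAuthentication usage seen at the other refactored call sites (the login signature is an assumption, not confirmed by this diff):

# Sketch only: the real implementations live in unikube/authentication/flow.py.
from unikube.authentication.authentication import TokenAuthentication

def password_flow(ctx, email: str, password: str) -> bool:
    auth = TokenAuthentication(cache=ctx.cache)
    response = auth.login(email, password)  # assumed signature, as in the removed code
    return bool(response.get("success"))

def web_flow(ctx) -> bool:
    # Opens the browser-based OIDC flow and blocks until the local callback
    # server receives the token; returns False if the user never completes it.
    ...
    return True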
diff --git a/unikube/cli/console/deck.py b/unikube/cli/console/deck.py
index bb85a92..1735816 100644
--- a/unikube/cli/console/deck.py
+++ b/unikube/cli/console/deck.py
@@ -1,48 +1,61 @@
-from typing import Union
+from typing import Optional
+from uuid import UUID
import unikube.cli.console as console
+from unikube.cache import UserIDs
from unikube.cli.console.input import get_identifier_or_pass
from unikube.context.helper import convert_deck_argument_to_uuid
-from unikube.graphql_utils import GraphQL
-
-
-def deck_list(ctx, organization_id: str = None, project_id: str = None) -> Union[None, str]:
- # GraphQL
- try:
- graph_ql = GraphQL(authentication=ctx.auth)
- data = graph_ql.query(
- """
- query($organization_id: UUID, $project_id: UUID) {
- allDecks(organizationId: $organization_id, projectId: $project_id) {
- results {
- title
- id
- project {
- id
- title
- organization {
- id
- }
- }
- }
- }
- }
- """,
- query_variables={
- "organization_id": organization_id,
- "project_id": project_id,
- },
- )
- deck_list = data["allDecks"]["results"]
- except Exception as e:
- console.debug(e)
- console.exit_generic_error()
+
+
+def _filter_deck_list(user_ids: UserIDs, organization_id: UUID, project_id: UUID):
+ if project_id or organization_id:
+ deck_list = {}
+ for id, deck in user_ids.deck.items():
+ deck_project_id = deck.project_id
+
+ project = user_ids.project.get(deck_project_id, None)
+ if project:
+ deck_organization_id = project.organization_id
+ else:
+ deck_organization_id = None
+
+ if ((deck_organization_id == organization_id) or (organization_id is None)) and (
+ (deck_project_id == project_id) or (project_id is None)
+ ):
+ deck_list[id] = deck
+ else:
+ deck_list = user_ids.deck
+
+ return deck_list
+
+
+def deck_list(ctx, organization_id: UUID = None, project_id: UUID = None) -> Optional[UUID]:
+ user_ids = UserIDs(id=ctx.user_id)
+ if not user_ids.deck:
+ user_ids.refresh()
+ user_ids.save()
+
+ # filter
+ deck_list = _filter_deck_list(user_ids=user_ids, organization_id=organization_id, project_id=project_id)
+
+ choices = []
+ identifiers = []
+ help_texts = []
+ for id, deck in deck_list.items():
+ choices.append(deck.title)
+ identifiers.append(str(id))
+
+ project = user_ids.project.get(deck.project_id, None)
+ if project:
+ help_texts.append(project.title)
+ else:
+ help_texts.append(None)
selection = console.list(
message="Please select a deck",
- choices=[deck["title"] for deck in deck_list],
- identifiers=[deck["id"] for deck in deck_list],
- help_texts=[deck["project"]["title"] for deck in deck_list],
+ choices=choices,
+ identifiers=identifiers,
+ help_texts=help_texts,
message_no_choices="No decks available!",
)
if selection is None:
@@ -52,6 +65,6 @@ def deck_list(ctx, organization_id: str = None, project_id: str = None) -> Union
deck_argument = get_identifier_or_pass(selection)
deck_id = convert_deck_argument_to_uuid(
- ctx.auth, argument_value=deck_argument, organization_id=organization_id, project_id=project_id
+ ctx.cache, argument_value=deck_argument, organization_id=organization_id, project_id=project_id
)
return deck_id
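_filter_deck_list treats None as a wildcard: a deck is kept only when it matches the given project_id and, through its project entry, the given organization_id. A quick illustration with stand-in cache entries (SimpleNamespace substitutes for the real UserIDs models):

from types import SimpleNamespace
from uuid import UUID

from unikube.cli.console.deck import _filter_deck_list

org = UUID(int=1)
proj = UUID(int=2)
user_ids = SimpleNamespace(
    deck={UUID(int=10): SimpleNamespace(project_id=proj, title="api")},
    project={proj: SimpleNamespace(organization_id=org, title="backend")},
)

# None acts as a wildcard; a non-matching organization filters the deck out.
assert _filter_deck_list(user_ids, organization_id=None, project_id=None) == user_ids.deck
assert _filter_deck_list(user_ids, organization_id=org, project_id=None) == user_ids.deck
assert _filter_deck_list(user_ids, organization_id=UUID(int=99), project_id=None) == {}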
diff --git a/unikube/cli/console/helpers.py b/unikube/cli/console/helpers.py
index 649aa17..8bf2c5f 100644
--- a/unikube/cli/console/helpers.py
+++ b/unikube/cli/console/helpers.py
@@ -1,82 +1,70 @@
from typing import Optional
+from uuid import UUID
-from unikube.cli import console
-from unikube.graphql_utils import GraphQL
+from unikube.cache.cache import UserIDs
-def organization_id_2_display_name(ctx, id: str = None) -> str:
+def organization_id_2_display_name(ctx, id: UUID = None) -> str:
if not id:
return "-"
- try:
- graph_ql = GraphQL(authentication=ctx.auth)
- data = graph_ql.query(
- """
- query($id: UUID!) {
- organization(id: $id) {
- title
- }
- }
- """,
- query_variables={
- "id": id,
- },
- )
- title = data["organization"]["title"]
- except Exception as e:
- console.debug(e)
- title = "-"
-
- return f"{title} ({id})"
-
-
-def project_id_2_display_name(ctx, id: str = None) -> Optional[str]:
+ user_ids = UserIDs(id=ctx.user_id)
+ organization = user_ids.organization.get(id, None)
+ if organization:
+ if organization.title:
+ return f"{organization.title} ({id})"
+
+ user_ids.refresh()
+ user_ids.save()
+
+ organization = user_ids.organization.get(id, None)
+ if organization:
+ title = organization.title
+ else:
+ title = None
+
+ return f"{title or '-'} ({id})"
+
+
+def project_id_2_display_name(ctx, id: UUID = None) -> Optional[str]:
if not id:
return "-"
- try:
- graph_ql = GraphQL(authentication=ctx.auth)
- data = graph_ql.query(
- """
- query($id: UUID!) {
- project(id: $id) {
- title
- }
- }
- """,
- query_variables={
- "id": id,
- },
- )
- title = data["project"]["title"]
- except Exception as e:
- console.debug(e)
- title = "-"
-
- return f"{title} ({id})"
-
-
-def deck_id_2_display_name(ctx, id: str = None) -> Optional[str]:
+ user_ids = UserIDs(id=ctx.user_id)
+ project = user_ids.project.get(id, None)
+ if project:
+ if project.title:
+ return f"{project.title} ({id})"
+
+ user_ids.refresh()
+ user_ids.save()
+
+ project = user_ids.project.get(id, None)
+ if project:
+ title = project.title
+ else:
+ title = None
+
+ return f"{title or '-'} ({id})"
+
+
+def deck_id_2_display_name(ctx, id: UUID = None) -> Optional[str]:
if not id:
return "-"
- try:
- graph_ql = GraphQL(authentication=ctx.auth)
- data = graph_ql.query(
- """
- query($id: UUID!) {
- deck(id: $id) {
- title
- }
- }
- """,
- query_variables={
- "id": id,
- },
- )
- title = data["deck"]["title"]
- except Exception as e:
- console.debug(e)
- title = "-"
-
- return f"{title} ({id})"
+ user_ids = UserIDs(id=ctx.user_id)
+ deck = user_ids.deck.get(id, None)
+ if deck:
+ if deck.title:
+ return f"{deck.title} ({id})"
+
+ user_ids.refresh()
+ user_ids.save()
+
+ deck = user_ids.deck.get(id, None)
+ if deck:
+ title = deck.title
+ else:
+ title = None
+
+ return f"{title or '-'} ({id})"
diff --git a/unikube/cli/console/input.py b/unikube/cli/console/input.py
index 59b264b..c603a52 100644
--- a/unikube/cli/console/input.py
+++ b/unikube/cli/console/input.py
@@ -1,10 +1,11 @@
import re
-from typing import Any, Callable, List, Union
+from typing import Any, Callable, List, Tuple, Union
from InquirerPy import inquirer
from InquirerPy.utils import InquirerPyValidate
import unikube.cli.console as console
+from unikube.cli.console.prompt import UpdatableFuzzyPrompt
from unikube.settings import INQUIRER_STYLE
@@ -61,13 +62,13 @@ def add_help_text(choice, help_text):
return choices_resolved
-def filter_by_identifiers(choices: List[str], identifiers: List[str], filter: Union[List[str], None]) -> List[str]:
- if filter is None:
+def filter_by_identifiers(choices: List[str], identifiers: List[str], _filter: Union[List[str], None]) -> List[str]:
+ if _filter is None:
return choices
choices_filtered = []
for choice, identifier in zip(choices, identifiers):
- if any(f in choice for f in filter) or identifier in filter:
+ if any(f in choice for f in _filter) or identifier in _filter:
choices_filtered.append(choice)
return choices_filtered
@@ -84,6 +85,22 @@ def exclude_by_identifiers(choices: List[str], identifiers: List[str], excludes:
return choices_excluded
+def prepare_choices(identifiers, choices, help_texts, _filter, allow_duplicates, excludes):
+ # handle duplicates
+ if not allow_duplicates:
+ if identifiers:
+ choices_duplicates = resolve_duplicates(choices=choices, identifiers=identifiers, help_texts=help_texts)
+ else:
+ choices_duplicates = set(choices)
+ else:
+ choices_duplicates = choices
+
+ # filter
+ choices_filtered = filter_by_identifiers(choices=choices_duplicates, identifiers=identifiers, _filter=_filter)
+ # exclude
+ return exclude_by_identifiers(choices=choices_filtered, identifiers=identifiers, excludes=excludes)
+
+
# input
def list(
message: str,
@@ -96,37 +113,29 @@ def list(
message_no_choices: str = "No choices available!",
multiselect: bool = False,
transformer: Callable[[Any], str] = None,
+ update_func: Callable[[], Tuple[List[str], List[str], List[str]]] = None,
) -> Union[None, List[str]]:
+ choices_excluded = prepare_choices(identifiers, choices, help_texts, filter, allow_duplicates, excludes)
+
# choices exist
- if not len(choices) > 0:
+ if not len(choices_excluded) > 0:
console.info(message_no_choices)
return None
- # handle duplicates
- if not allow_duplicates:
- if identifiers:
- choices_duplicates = resolve_duplicates(choices=choices, identifiers=identifiers, help_texts=help_texts)
- else:
- choices_duplicates = set(choices)
- else:
- choices_duplicates = choices
-
- # filter
- choices_filtered = filter_by_identifiers(choices=choices_duplicates, identifiers=identifiers, filter=filter)
+ kwargs = {
+ "message": message,
+ "choices": choices_excluded,
+ "multiselect": multiselect,
+ "transformer": transformer,
+ "keybindings": {"toggle": [{"key": "space"}]},
+ }
- # exclude
- choices_excluded = exclude_by_identifiers(choices=choices_filtered, identifiers=identifiers, excludes=excludes)
+ if update_func:
+ update_wrapper = lambda: prepare_choices(*update_func(), filter, allow_duplicates, excludes) # noqa: E731
+ kwargs.update({"update_func": update_wrapper})
# prompt
- answer = inquirer.fuzzy(
- message=message,
- choices=choices_excluded,
- multiselect=multiselect,
- transformer=transformer,
- keybindings={"toggle": [{"key": "space"}]},
- style=INQUIRER_STYLE,
- amark="✔",
- ).execute()
+ answer = UpdatableFuzzyPrompt(**kwargs).execute()
if not answer:
return None
@@ -157,7 +166,7 @@ def input(
def confirm(
- question: str = "Do want to continue? [N/y]: ",
+ question: str = "Do you want to continue? [N/y]: ",
values: List[str] = ["y", "Y", "yes", "Yes"],
) -> bool:
# confirm action by user input
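prepare_choices chains the duplicate resolution, _filter, and excludes steps so the initial prompt and the update_func refresh path share one code path. The filter semantics, matching the existing tests: an entry matches either an identifier exactly or a substring of a choice:

from unikube.cli.console.input import filter_by_identifiers

choices = ["backend", "frontend"]
identifiers = ["1", "2"]

assert filter_by_identifiers(choices, identifiers, _filter=None) == ["backend", "frontend"]  # no filter: keep all
assert filter_by_identifiers(choices, identifiers, _filter=[]) == []                         # empty filter: keep none
assert filter_by_identifiers(choices, identifiers, _filter=["1"]) == ["backend"]             # identifier match
assert filter_by_identifiers(choices, identifiers, _filter=["front"]) == ["frontend"]        # substring match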
diff --git a/unikube/cli/console/orga.py b/unikube/cli/console/orga.py
index f92ff96..1314d68 100644
--- a/unikube/cli/console/orga.py
+++ b/unikube/cli/console/orga.py
@@ -1,36 +1,22 @@
-from typing import Union
+from typing import Optional
+from uuid import UUID
import unikube.cli.console as console
+from unikube.cache import UserIDs
from unikube.cli.console.input import get_identifier_or_pass
from unikube.context.helper import convert_organization_argument_to_uuid
-from unikube.graphql_utils import GraphQL
-def organization_list(ctx) -> Union[None, str]:
- # GraphQL
- try:
- graph_ql = GraphQL(authentication=ctx.auth)
- data = graph_ql.query(
- """
- query {
- allOrganizations {
- results {
- id
- title
- }
- }
- }
- """
- )
- organization_list = data["allOrganizations"]["results"]
- except Exception as e:
- console.debug(e)
- console.exit_generic_error()
+def organization_list(ctx) -> Optional[UUID]:
+ user_ids = UserIDs(id=ctx.user_id)
+ if not user_ids.organization:
+ user_ids.refresh()
+ user_ids.save()
selection = console.list(
message="Please select an organization",
- choices=[organization["title"] for organization in organization_list],
- identifiers=[organization["id"] for organization in organization_list],
+ choices=[organization.title for _, organization in user_ids.organization.items()],
+ identifiers=[str(id) for id in user_ids.organization.keys()],
message_no_choices="No organizations available!",
)
if selection is None:
@@ -39,5 +25,5 @@ def organization_list(ctx) -> Union[None, str]:
# get identifier if available
organization_argument = get_identifier_or_pass(selection)
- organization_id = convert_organization_argument_to_uuid(ctx.auth, argument_value=organization_argument)
+ organization_id = convert_organization_argument_to_uuid(ctx.cache, argument_value=organization_argument)
return organization_id
diff --git a/unikube/cli/console/project.py b/unikube/cli/console/project.py
index de8feb6..05b0ffd 100644
--- a/unikube/cli/console/project.py
+++ b/unikube/cli/console/project.py
@@ -1,49 +1,73 @@
-from typing import List, Union
+from typing import List, Optional, Tuple
+from uuid import UUID
import unikube.cli.console as console
+from unikube.authentication.authentication import TokenAuthentication
+from unikube.cache.cache import UserIDs
from unikube.cli.console.input import get_identifier_or_pass
from unikube.context.helper import convert_project_argument_to_uuid
-from unikube.graphql_utils import GraphQL
+
+
+def updated_projects(ctx, organization_id) -> Tuple[List[str], List[str], List[str]]:
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
+ ctx.cache = auth.cache
+ user_ids = UserIDs(id=ctx.user_id)
+ user_ids.refresh()
+ user_ids.save()
+
+ if organization_id:
+ project_list = {
+ id: project for id, project in user_ids.project.items() if project.organization_id == organization_id
+ }
+ else:
+ project_list = {id: project for id, project in user_ids.project.items()}
+ identifiers = [str(p) for p in project_list.keys()]
+ projects = list(map(lambda x: x.title, project_list.values()))
+ help_texts = [getattr(user_ids.organization.get(p.organization_id), "title", None) for p in project_list.values()]
+ return identifiers, projects, help_texts
def project_list(
- ctx, organization_id: str = None, filter: List[str] = None, excludes: List[str] = None
-) -> Union[None, str]:
- # GraphQL
- try:
- graph_ql = GraphQL(authentication=ctx.auth)
- data = graph_ql.query(
- """
- query($organization_id: UUID) {
- allProjects(organizationId: $organization_id) {
- results {
- title
- id
- organization {
- id
- title
- }
- }
- }
- }
- """,
- query_variables={
- "organization_id": organization_id,
- },
- )
- project_list = data["allProjects"]["results"]
- except Exception as e:
- console.debug(e)
- console.exit_generic_error()
+ ctx, organization_id: UUID = None, filter: List[str] = None, excludes: List[str] = None
+) -> Optional[UUID]:
+ user_ids = UserIDs(id=ctx.user_id)
+ if not user_ids.project:
+ user_ids.refresh()
+ user_ids.save()
+
+ # filter
+ if organization_id:
+ project_list = {
+ id: project for id, project in user_ids.project.items() if project.organization_id == organization_id
+ }
+ else:
+ project_list = user_ids.project
+
+ choices = []
+ identifiers = []
+ help_texts = []
+ for id, project in project_list.items():
+ choices.append(project.title)
+ identifiers.append(str(id))
+
+ organization = user_ids.organization.get(project.organization_id, None)
+ if organization:
+ help_texts.append(organization.title)
+ else:
+ help_texts.append(None)
+
+ update_func = lambda: updated_projects(ctx, organization_id) # noqa: E731
selection = console.list(
message="Please select a project",
- choices=[project["title"] for project in project_list],
- identifiers=[project["id"] for project in project_list],
+ choices=choices,
+ identifiers=identifiers,
filter=filter,
excludes=excludes,
- help_texts=[project["organization"]["title"] for project in project_list],
+ help_texts=help_texts,
message_no_choices="No projects available!",
+ update_func=update_func,
)
if selection is None:
return None
@@ -52,6 +76,6 @@ def project_list(
project_argument = get_identifier_or_pass(selection)
project_id = convert_project_argument_to_uuid(
- ctx.auth, argument_value=project_argument, organization_id=organization_id
+ ctx.cache, argument_value=project_argument, organization_id=organization_id
)
return project_id
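The update_func handed to console.list must return (identifiers, choices, help_texts) in exactly that order, since list() splats the tuple into prepare_choices. updated_projects also refreshes the auth token first, presumably because the prompt can stay open long enough for it to expire. A static stand-in illustrating the contract (all values made up):

from typing import List, Tuple

def example_update_func() -> Tuple[List[str], List[str], List[str]]:
    identifiers = ["00000000-0000-0000-0000-000000000001"]  # project ids as strings
    choices = ["backend"]                                   # project titles
    help_texts = ["acme"]                                   # organization titles
    return identifiers, choices, help_texts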
diff --git a/unikube/cli/console/prompt.py b/unikube/cli/console/prompt.py
new file mode 100644
index 0000000..e938e39
--- /dev/null
+++ b/unikube/cli/console/prompt.py
@@ -0,0 +1,49 @@
+import asyncio
+from threading import Thread
+from typing import Any, Callable, List
+
+from InquirerPy.prompts import FuzzyPrompt
+
+
+class UpdatableFuzzyPrompt(FuzzyPrompt):
+ """Based on InquirerPy's FuzzyPrompt.
+
+ Takes an update_func which should return choices.
+ The current choices are then replaced by the new ones.
+ """
+
+ def __init__(self, update_func: Callable[[], List[str]] = None, **kwargs) -> None:
+ super(UpdatableFuzzyPrompt, self).__init__(**kwargs)
+ if update_func:
+ self._update_func = update_func
+
+ def _update_choices(self, loop):
+ # Call update function to retrieve choices
+ choices = self._update_func()
+ if not len(choices):
+ return
+ # keep current choice selection
+ # if choice is removed, go to first one
+ loop.create_task(self._update_display(choices))
+
+ async def _update_display(self, choices):
+ self.content_control.choices = self.content_control._get_choices(
+ choices, choices[self.content_control.selected_choice_index]
+ )
+ # internal detail from InquirerPy - needed to format the choice data structure properly
+ self.content_control._format_choices()
+ # Do the update asynchronously
+ choices = await self.content_control._filter_choices(0.01)
+ self.content_control._filtered_choices = choices
+ self._application.renderer.erase()
+ self._application.invalidate()
+
+ def execute(self, raise_keyboard_interrupt: bool = None) -> Any:
+ loop = asyncio.new_event_loop()
+ if hasattr(self, "_update_func"):
+ thread = Thread(target=self._update_choices, args=[loop])
+ thread.start()
+
+ prompt = loop.create_task(self.execute_async())
+ answer = loop.run_until_complete(prompt)
+ return answer
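Usage sketch for UpdatableFuzzyPrompt (interactive, so it needs a TTY): the initial choices render immediately, then update_func is called once on a background thread and its result replaces them. The prompt owns a dedicated event loop so the worker thread has a loop to schedule the redraw task on.

from unikube.cli.console.prompt import UpdatableFuzzyPrompt

def fetch_choices():
    # in unikube this re-queries the backend; a static stand-in here
    return ["project-a", "project-b", "project-c"]

answer = UpdatableFuzzyPrompt(
    message="Please select a project",
    choices=["project-a"],      # shown immediately
    update_func=fetch_choices,  # refreshed asynchronously
).execute()
print(answer)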
diff --git a/unikube/cli/context.py b/unikube/cli/context.py
index c8ade3f..0657ab4 100644
--- a/unikube/cli/context.py
+++ b/unikube/cli/context.py
@@ -1,6 +1,7 @@
import click
import unikube.cli.console as console
+from unikube.cache import UserContext
from unikube.cli.console.helpers import (
deck_id_2_display_name,
organization_id_2_display_name,
@@ -8,7 +9,6 @@
)
from unikube.context.helper import convert_context_arguments
from unikube.graphql_utils import GraphQL
-from unikube.storage.user import get_local_storage_user
def show_context(ctx, context):
@@ -34,7 +34,7 @@ def set(ctx, organization=None, project=None, deck=None, **kwargs):
"""
organization_id, project_id, deck_id = convert_context_arguments(
- auth=ctx.auth, organization_argument=organization, project_argument=project, deck_argument=deck
+ cache=ctx.cache, organization_argument=organization, project_argument=project, deck_argument=deck
)
if not (organization or project or deck):
@@ -44,20 +44,19 @@ def set(ctx, organization=None, project=None, deck=None, **kwargs):
console.echo("")
# user_data / context
- local_storage_user = get_local_storage_user()
- user_data = local_storage_user.get()
+ user_context = UserContext(id=ctx.user_id)
if organization_id:
# set organization
- user_data.context.deck_id = None
- user_data.context.project_id = None
- user_data.context.organization_id = organization_id
- local_storage_user.set(user_data)
+ user_context.deck_id = None
+ user_context.project_id = None
+ user_context.organization_id = organization_id
+ user_context.save()
if project_id:
if not organization_id:
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -78,15 +77,15 @@ def set(ctx, organization=None, project=None, deck=None, **kwargs):
console.exit_generic_error()
# set project
- user_data.context.deck_id = None
- user_data.context.project_id = project_id
- user_data.context.organization_id = organization_id
- local_storage_user.set(user_data)
+ user_context.deck_id = None
+ user_context.project_id = project_id
+ user_context.organization_id = organization_id
+ user_context.save()
if deck_id:
if not organization_id or not project_id:
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -111,12 +110,12 @@ def set(ctx, organization=None, project=None, deck=None, **kwargs):
console.exit_generic_error()
# set deck
- user_data.context.deck_id = deck_id
- user_data.context.project_id = project_id
- user_data.context.organization_id = organization_id
- local_storage_user.set(user_data)
+ user_context.deck_id = deck_id
+ user_context.project_id = project_id
+ user_context.organization_id = organization_id
+ user_context.save()
- show_context(ctx=ctx, context=user_data.context)
+ show_context(ctx=ctx, context=user_context)
@click.command()
@@ -130,32 +129,31 @@ def remove(ctx, organization=None, project=None, deck=None, **kwargs):
"""
# user_data / context
- local_storage_user = get_local_storage_user()
- user_data = local_storage_user.get()
+ user_context = UserContext(id=ctx.user_id)
if organization:
- user_data.context.deck_id = None
- user_data.context.project_id = None
- user_data.context.organization_id = None
- local_storage_user.set(user_data)
+ user_context.deck_id = None
+ user_context.project_id = None
+ user_context.organization_id = None
+ user_context.save()
console.success("Organization context removed.", _exit=True)
if project:
- user_data.context.deck_id = None
- user_data.context.project_id = None
- local_storage_user.set(user_data)
+ user_context.deck_id = None
+ user_context.project_id = None
+ user_context.save()
console.success("Project context removed.", _exit=True)
if deck:
- user_data.context.deck_id = None
- local_storage_user.set(user_data)
+ user_context.deck_id = None
+ user_context.save()
console.success("Deck context removed.", _exit=True)
# remove complete context
- user_data.context.deck_id = None
- user_data.context.project_id = None
- user_data.context.organization_id = None
- local_storage_user.set(user_data)
+ user_context.deck_id = None
+ user_context.project_id = None
+ user_context.organization_id = None
+ user_context.save()
console.success("Context removed.", _exit=True)
@@ -167,7 +165,5 @@ def show(ctx, **kwargs):
"""
# user_data / context
- local_storage_user = get_local_storage_user()
- user_data = local_storage_user.get()
-
- show_context(ctx=ctx, context=user_data.context)
+ user_context = UserContext(id=ctx.user_id)
+ show_context(ctx=ctx, context=user_context)
diff --git a/unikube/cli/deck.py b/unikube/cli/deck.py
index d84dca7..7f26e17 100644
--- a/unikube/cli/deck.py
+++ b/unikube/cli/deck.py
@@ -1,15 +1,17 @@
+from uuid import UUID
+
import click
import unikube.cli.console as console
+from unikube.cluster.system import KubeAPI, KubeCtl
from unikube.graphql_utils import GraphQL
from unikube.helpers import check_environment_type_local_or_exit, download_manifest
-from unikube.local.system import KubeAPI, KubeCtl, Telepresence
def get_deck(ctx, deck_id: str):
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -32,7 +34,7 @@ def get_deck(ctx, deck_id: str):
}
}
""",
- query_variables={"id": deck_id},
+ query_variables={"id": str(deck_id)},
)
deck = data["deck"]
except Exception as e:
@@ -42,30 +44,14 @@ def get_deck(ctx, deck_id: str):
return deck
-def get_cluster(ctx, deck: dict):
- cluster_data = ctx.cluster_manager.get(id=deck["project"]["id"])
- if not cluster_data.name:
- console.error(
- "The project cluster does not exist. Please be sure to run 'unikube project up' first.", _exit=True
- )
-
- cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
-
- # check if kubernetes cluster is running/ready
- if not cluster.ready():
- console.error(f"The project cluster for '{cluster.display_name}' is not running.", _exit=True)
-
- return cluster
-
-
-def get_ingress_data(deck, provider_data):
- ingresss = KubeAPI(provider_data, deck).get_ingress()
+def get_ingress_data(deck, kubeconfig_path: str, publisher_port: str):
+ ingresss = KubeAPI(kubeconfig_path=kubeconfig_path, deck=deck).get_ingress()
ingress_data = []
for ingress in ingresss.items:
hosts = []
paths = []
for rule in ingress.spec.rules:
- hosts.append(f"http://{rule.host}:{provider_data.publisher_port}") # NOSONAR
+ hosts.append(f"http://{rule.host}:{publisher_port}") # NOSONAR
for path in rule.http.paths:
paths.append(f"{path.path} -> {path.backend.service_name}")
# this is an empty line in output
@@ -98,7 +84,7 @@ def list(ctx, organization=None, project=None, **kwargs):
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($organization_id: UUID, $project_id: UUID) {
@@ -117,8 +103,8 @@ def list(ctx, organization=None, project=None, **kwargs):
}
""",
query_variables={
- "organization_id": organization_id,
- "project_id": project_id,
+ "organization_id": str(organization_id) if organization_id else None,
+ "project_id": str(project_id) if project_id else None,
},
)
deck_list = data["allDecks"]["results"]
@@ -171,7 +157,7 @@ def info(ctx, organization=None, project=None, deck=None, **kwargs):
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -184,7 +170,7 @@ def info(ctx, organization=None, project=None, deck=None, **kwargs):
}
}
""",
- query_variables={"id": deck_id},
+ query_variables={"id": str(deck_id)},
)
deck_selected = data["deck"]
except Exception as e:
@@ -229,26 +215,23 @@ def install(ctx, organization=None, project=None, deck=None, **kwargs):
deck = get_deck(ctx, deck_id=deck_id)
# cluster
- cluster = get_cluster(ctx=ctx, deck=deck)
+ cluster = ctx.cluster_manager.select(id=UUID(deck["project"]["id"]), exit_on_exception=True)
+ if not cluster.ready():
+ console.error(f"The project cluster for '{cluster.display_name}' is not running.", _exit=True)
# check environment type
check_environment_type_local_or_exit(deck=deck)
# check for switched app
- provider_data = cluster.storage.get()
- telepresence = Telepresence(provider_data)
- if telepresence.intercept_count():
+ if cluster.bridge.intercept_count():
console.error("It is not possible to install a deck while having an active switch.", _exit=True)
# download manifest
- general_data = ctx.storage_general.get()
- manifest = download_manifest(
- deck=deck, authentication=ctx.auth, access_token=general_data.authentication.access_token
- )
+ manifest = download_manifest(deck=deck, cache=ctx.cache)
# KubeCtl
- provider_data = cluster.storage.get()
- kubectl = KubeCtl(provider_data=provider_data)
+ kubeconfig_path = cluster.get_kubeconfig_path()
+ kubectl = KubeCtl(kubeconfig_path=kubeconfig_path)
namespace = deck["environment"][0]["namespace"]
kubectl.create_namespace(namespace)
with click.progressbar(
@@ -259,7 +242,8 @@ def install(ctx, organization=None, project=None, deck=None, **kwargs):
kubectl.apply_str(namespace, file["content"])
# ingress
- ingress_data = get_ingress_data(deck, provider_data)
+ publisher_port = cluster.storage.provider[cluster.cluster_provider_type].publisher_port
+ ingress_data = get_ingress_data(deck, kubeconfig_path=kubeconfig_path, publisher_port=publisher_port)
if not ingress_data:
console.info("No ingress configuration available.", _exit=True)
@@ -293,16 +277,15 @@ def uninstall(ctx, organization=None, project=None, deck=None, **kwargs):
deck = get_deck(ctx, deck_id=deck_id)
# cluster
- cluster = get_cluster(ctx=ctx, deck=deck)
+ cluster = ctx.cluster_manager.select(id=deck["project"]["id"], exit_on_exception=True)
+ if not cluster.ready():
+ console.error(f"The project cluster for '{cluster.display_name}' is not running.", _exit=True)
# check environment type
check_environment_type_local_or_exit(deck=deck)
# download manifest
- general_data = ctx.storage_general.get()
- manifest = download_manifest(
- deck=deck, authentication=ctx.auth, access_token=general_data.authentication.access_token
- )
+ manifest = download_manifest(deck=deck, cache=ctx.cache)
# KubeCtl
provider_data = cluster.storage.get()
@@ -344,16 +327,21 @@ def ingress(ctx, organization=None, project=None, deck=None, **kwargs):
deck = get_deck(ctx, deck_id=deck_id)
# get cluster
- cluster = get_cluster(ctx=ctx, deck=deck)
- provider_data = cluster.storage.get()
+ cluster = ctx.cluster_manager.select(id=deck["project"]["id"], exit_on_exception=True)
+ if not cluster.ready():
+ console.error(f"The project cluster for '{cluster.display_name}' is not running.", _exit=True)
- ingress_data = get_ingress_data(deck, provider_data)
- console.table(
- ingress_data,
- headers={"name": "Name", "url": "URLs"},
- )
+ kubeconfig_path = cluster.get_kubeconfig_path()
+ publisher_port = cluster.storage.provider[cluster.cluster_provider_type].publisher_port
+ ingress_data = get_ingress_data(deck, kubeconfig_path=kubeconfig_path, publisher_port=publisher_port)
if not ingress_data:
console.warning(
- f"Are you sure the deck is installed? You may have to run 'unikube deck install {deck['title']}' first."
+ f"Are you sure the deck is installed? You may have to run 'unikube deck install {deck['title']}' first.",
+ _exit=True,
)
+
+ console.table(
+ ingress_data,
+ headers={"name": "Name", "url": "URLs"},
+ )
diff --git a/unikube/cli/init.py b/unikube/cli/init.py
index c6820bb..8c6119a 100644
--- a/unikube/cli/init.py
+++ b/unikube/cli/init.py
@@ -4,6 +4,7 @@
import yaml
from pydantic import BaseModel
+from unikube.authentication.authentication import TokenAuthentication
from unikube.cli import console
from unikube.cli.console import confirm, deck_list, organization_list, project_list
@@ -216,7 +217,8 @@ def collect_app_data(ctx) -> UnikubeFileApp:
@click.option("--stdout", "-s", help="Print file output to console.", is_flag=True)
@click.pass_obj
def init(ctx, stdout):
- _ = ctx.auth.refresh()
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
# We plan to support multiple apps in the future.
results = [collect_app_data(ctx)]
diff --git a/unikube/cli/orga.py b/unikube/cli/orga.py
index 7396c40..8759dee 100644
--- a/unikube/cli/orga.py
+++ b/unikube/cli/orga.py
@@ -1,8 +1,8 @@
import click
import unikube.cli.console as console
+from unikube.authentication.authentication import TokenAuthentication
from unikube.graphql_utils import GraphQL
-from unikube.keycloak.permissions import KeycloakPermissions
@click.command()
@@ -12,37 +12,44 @@ def list(ctx, **kwargs):
List all your organizations.
"""
- _ = ctx.auth.refresh()
- context = ctx.context.get()
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
+ ctx.cache = auth.cache
- # keycloak
+ # GraphQL
try:
- keycloak_permissions = KeycloakPermissions(authentication=ctx.auth)
- permission_list = keycloak_permissions.get_permissions_by_scope("organization:*")
+ graph_ql = GraphQL(cache=ctx.cache)
+ data = graph_ql.query(
+ """
+ query {
+ allOrganizations {
+ results {
+ title
+ id
+ description
+ }
+ }
+ }
+ """
+ )
+ organization_list = data["allOrganizations"]["results"]
except Exception as e:
console.debug(e)
console.exit_generic_error()
- # append "(active)"
- if context.organization_id:
- for permission in permission_list:
- if permission.rsid == context.organization_id:
- permission.rsid += " (active)"
-
# console
- organization_list = [
- {
- "id": permission.rsid,
- "name": permission.rsname.replace("organization ", ""),
- }
- for permission in permission_list
- ]
+ if len(organization_list) < 1:
+ console.info(
+ "No organization available. Please go to https://app.unikube.io and create an organization.", _exit=True
+ )
+
console.table(
- data=organization_list,
- headers={
- "id": "id",
- "name": "name",
+ data={
+ "id": [item["id"] for item in organization_list],
+ "title": [item["title"] for item in organization_list],
+ "description": [item["description"] for item in organization_list],
},
+ headers=["id", "name", "description"],
)
@@ -54,7 +61,9 @@ def info(ctx, organization, **kwargs):
Display further information of the selected organization.
"""
- _ = ctx.auth.refresh()
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
+ ctx.cache = auth.cache
# context
organization_id, _, _ = ctx.context.get_context_ids_from_arguments(organization_argument=organization)
@@ -67,7 +76,7 @@ def info(ctx, organization, **kwargs):
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID!) {
@@ -78,7 +87,7 @@ def info(ctx, organization, **kwargs):
}
}
""",
- query_variables={"id": organization_id},
+ query_variables={"id": str(organization_id)},
)
organization_selected = data["organization"]
except Exception as e:
diff --git a/unikube/cli/project.py b/unikube/cli/project.py
index 7f49560..057f040 100644
--- a/unikube/cli/project.py
+++ b/unikube/cli/project.py
@@ -1,17 +1,16 @@
-import sys
-from time import sleep, time
+from uuid import UUID
import click
-import click_spinner
import unikube.cli.console as console
from unikube import settings
+from unikube.authentication.authentication import TokenAuthentication
from unikube.cli.console.helpers import project_id_2_display_name
from unikube.cli.helper import check_ports
+from unikube.cluster.bridge.types import BridgeType
+from unikube.cluster.providers.types import ProviderType
+from unikube.cluster.system import Docker
from unikube.graphql_utils import GraphQL
-from unikube.helpers import check_running_cluster
-from unikube.local.providers.types import K8sProviderType
-from unikube.local.system import Docker, KubeAPI, Telepresence
@click.command()
@@ -22,14 +21,16 @@ def list(ctx, organization, **kwargs):
Display a table of all available project names alongside with the ids.
"""
- _ = ctx.auth.refresh()
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
+ ctx.cache = auth.cache
# context
organization_id, _, _ = ctx.context.get_context_ids_from_arguments(organization_argument=organization)
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($organization_id: UUID) {
@@ -42,7 +43,7 @@ def list(ctx, organization, **kwargs):
}
}
""",
- query_variables={"organization_id": organization_id},
+ query_variables={"organization_id": str(organization_id) if organization_id else None},
)
project_list = data["allProjects"]["results"]
except Exception as e:
@@ -72,7 +73,9 @@ def info(ctx, project=None, organization=None, **kwargs):
Displays the id, title and optional description of the selected project.
"""
- _ = ctx.auth.refresh()
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
+ ctx.cache = auth.cache
# context
organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
@@ -87,7 +90,7 @@ def info(ctx, project=None, organization=None, **kwargs):
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID!) {
@@ -103,7 +106,7 @@ def info(ctx, project=None, organization=None, **kwargs):
}
}
""",
- query_variables={"id": project_id},
+ query_variables={"id": str(project_id)},
)
project_selected = data["project"]
except Exception as e:
@@ -131,15 +134,9 @@ def info(ctx, project=None, organization=None, **kwargs):
@click.argument("project", required=False)
@click.option("--organization", "-o", help="Select an organization")
@click.option("--ingress", help="Overwrite the ingress port for the project from cluster settings", default=None)
-@click.option(
- "--provider",
- "-p",
- help="Specify the Kubernetes provider type for this cluster (default uses k3d)",
- default=settings.UNIKUBE_DEFAULT_PROVIDER_TYPE.name,
-)
-@click.option("--workers", help="Specify count of k3d worker nodes", default=1)
+@click.option("--bridge-type", help="Specify the bridge type", default=settings.UNIKUBE_DEFAULT_BRIDGE_TYPE.name)
@click.pass_obj
-def up(ctx, project=None, organization=None, ingress=None, provider=None, workers=None, **kwargs):
+def up(ctx, project: str = None, organization: str = None, ingress: str = None, bridge_type: str = None, **kwargs):
"""
This command starts or resumes a Kubernetes cluster for the specified project. As it is a selection command, the
project can be specified and/or filtered in several ways:
@@ -150,8 +147,11 @@ def up(ctx, project=None, organization=None, ingress=None, provider=None, worker
"""
- _ = ctx.auth.refresh()
+ auth = TokenAuthentication(cache=ctx.cache)
+ _ = auth.refresh()
+ ctx.cache = auth.cache
+ # docker daemon
if not Docker().daemon_active():
console.error("Docker is not running. Please start Docker before starting a project.", _exit=True)
@@ -160,22 +160,29 @@ def up(ctx, project=None, organization=None, ingress=None, provider=None, worker
organization_argument=organization, project_argument=project
)
+ # bridge type
+ try:
+ bridge_type = BridgeType(bridge_type)
+ except Exception as e:
+ console.debug(e)
+ console.error("Invalid bridge-type parameter.", _exit=True)
+
# cluster information
- cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
- cluster_id_list = [item.id for item in cluster_list]
+ cluster_list = ctx.cluster_manager.get_clusters(ready=True)
+ cluster_ids_exclude = [str(cluster.id) for cluster in cluster_list]
# argument
if not project_id:
- project_id = console.project_list(ctx, organization_id=organization_id, excludes=cluster_id_list)
+ project_id = console.project_list(ctx, organization_id=organization_id, excludes=cluster_ids_exclude)
if not project_id:
return None
- if project_id in cluster_id_list:
+ if str(project_id) in cluster_ids_exclude:
console.info(f"Project '{project_id_2_display_name(ctx=ctx, id=project_id)}' is already up.", _exit=True)
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query($id: UUID) {
@@ -196,31 +203,24 @@ def up(ctx, project=None, organization=None, ingress=None, provider=None, worker
}
""",
query_variables={
- "id": project_id,
+ "id": str(project_id),
},
)
- project_selected = data["project"]
except Exception as e:
console.debug(e)
console.exit_generic_error()
+ project_selected = data.get("project", None)
if not project_selected:
console.info(
f"The project '{project_id_2_display_name(ctx=ctx, id=project_id)}' could not be found.", _exit=True
)
- try:
- cluster_provider_type = K8sProviderType[provider]
- except KeyError:
- console.error(
- f"The provider '{provider}' is not supported. Please use "
- f"one of: {','.join(opt.name for opt in K8sProviderType)}",
- _exit=True,
- )
-
- check_running_cluster(ctx, cluster_provider_type, project_selected)
+ count = ctx.cluster_manager.count_active_clusters()
+ if count > 0:
+ # TODO: limit cluster count???
+ pass
- # get project id
if ingress is None:
ingress = project_selected["clusterSettings"]["port"]
@@ -233,48 +233,17 @@ def up(ctx, project=None, organization=None, ingress=None, provider=None, worker
)
# cluster up
- cluster_data = ctx.cluster_manager.get(id=project_selected["id"])
- cluster_data.name = project_selected["title"]
- ctx.cluster_manager.set(id=project_selected["id"], data=cluster_data)
-
- cluster = ctx.cluster_manager.select(cluster_data=cluster_data, cluster_provider_type=cluster_provider_type)
- console.info(
- f"Setting up a Kubernetes cluster (with provider {provider}) for " f"project '{cluster.display_name}'."
+ cluster_id = UUID(project_selected["id"])
+ provider_type = ProviderType.k3d
+ cluster = ctx.cluster_manager.select(
+ id=cluster_id, provider_type=provider_type, bridge_type=bridge_type, exit_on_exception=True
)
-
- if not cluster.exists():
- console.info(f"Kubernetes cluster for '{cluster.display_name}' does not exist, creating it now.")
- with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
- success = cluster.create(
- ingress_port=ingress,
- workers=workers,
- )
-
- # start
- else:
- console.info(f"Kubernetes cluster for '{cluster.display_name}' already exists, starting it now.")
- with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
- success = cluster.start()
-
- # console
- if success:
- console.info("Now connecting Telepresence daemon. You probably have to enter your 'sudo' password.")
- provider_data = cluster.storage.get()
- k8s = KubeAPI(provider_data)
- timeout = time() + 60 # wait one minute
- while not k8s.is_available or time() > timeout:
- sleep(1)
- if not k8s.is_available:
- console.error(
- "There was an error bringing up the project cluster. The API was not available within the"
- "expiration period.",
- _exit=True,
- )
- Telepresence(cluster.storage.get()).start()
- console.success("The project cluster is up.")
- else:
+ success = cluster.up(ingress=ingress)
+ if not success:
console.error("The project cluster could not be started.")
+ console.success("The project cluster is up.")
+
@click.command()
@click.argument("project", required=False)
@@ -291,12 +260,12 @@ def down(ctx, project=None, organization=None, **kwargs):
)
# cluster
- cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
+ cluster_list = ctx.cluster_manager.get_clusters(ready=True)
# argument
if not project_id:
project_id = console.project_list(
- ctx, organization_id=organization_id, filter=[cluster.id for cluster in cluster_list]
+ ctx, organization_id=organization_id, filter=[str(cluster.id) for cluster in cluster_list]
)
if not project_id:
return None
@@ -308,36 +277,14 @@ def down(ctx, project=None, organization=None, **kwargs):
_exit=True,
)
- # get cluster
- cluster = None
- for cluster_data in cluster_list:
- if cluster_data.id == project_id:
- cluster = ctx.cluster_manager.select(
- cluster_data=cluster_data,
- )
- break
-
- # cluster down
- if not cluster.exists():
- # something went wrong or cluster was already delete from somewhere else
- console.info(f"No Kubernetes cluster to stop for '{cluster.display_name}'", _exit=True)
-
- if not cluster.ready():
- console.info(f"Kubernetes cluster for '{cluster.display_name}' is not running", _exit=True)
-
- console.info("Stopping Telepresence daemon.")
- Telepresence(cluster.storage.get()).stop()
-
# stop cluster
- console.info(f"Stopping Kubernetes cluster for '{cluster.display_name}'")
- success = cluster.stop()
-
- # console
- if success:
- console.success("The project cluster is down.")
- else:
+ cluster = ctx.cluster_manager.select(id=project_id, exit_on_exception=True)
+ success = cluster.down()
+ if not success:
console.error("The cluster could not be stopped.")
+ console.success("The project cluster is down.")
+
@click.command()
@click.argument("project", required=False)
@@ -354,12 +301,14 @@ def delete(ctx, project=None, organization=None, **kwargs):
)
# cluster
- cluster_list = ctx.cluster_manager.get_cluster_list()
+ cluster_list = ctx.cluster_manager.get_clusters()
+ if len(cluster_list) == 0:
+ console.info("No projects found.", _exit=True)
# argument
if not project_id:
project_id = console.project_list(
- ctx, organization_id=organization_id, filter=[cluster.id for cluster in cluster_list]
+ ctx, organization_id=organization_id, filter=[str(cluster.id) for cluster in cluster_list]
)
if not project_id:
return None
@@ -370,36 +319,19 @@ def delete(ctx, project=None, organization=None, **kwargs):
_exit=True,
)
- # initial warning
+ # warning
console.warning("Deleting a project will remove the cluster including all of its data.")
-
- # confirm question
- confirm = input("Do want to continue [N/y]: ")
- if confirm not in ["y", "Y", "yes", "Yes"]:
+ confirmed = console.confirm(question="Do you want to remove the cluster? [N/y]: ")
+ if not confirmed:
console.info("No action taken.", _exit=True)
- # get cluster
- cluster = None
- for cluster_data in cluster_list:
- if cluster_data.id == project_id:
- cluster = ctx.cluster_manager.select(
- cluster_data=cluster_data,
- )
- break
-
# delete cluster
- if not cluster.exists():
- ctx.cluster_manager.delete(cluster.id)
- console.info(f"No Kubernetes cluster to delete for '{cluster.display_name}', nothing to do.", _exit=True)
-
+ cluster = ctx.cluster_manager.select(id=project_id, exit_on_exception=True)
success = cluster.delete()
+ if not success:
+ console.error("The cluster could not be deleted.", _exit=True)
- # console
- if success:
- console.success("The project was deleted successfully.")
- ctx.cluster_manager.delete(cluster.id)
- else:
- console.error("The cluster could not be deleted.")
+ console.success("The project was deleted successfully.")
@click.command()
@@ -411,7 +343,7 @@ def prune(ctx, **kwargs):
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query {
@@ -429,30 +361,20 @@ def prune(ctx, **kwargs):
console.exit_generic_error()
# cluster
- cluster_list = ctx.cluster_manager.get_cluster_list()
+ cluster_list = ctx.cluster_manager.get_clusters()
# select clusters to prune
prune_clusters = []
- for cluster_data in cluster_list:
- if cluster_data.id not in [project["id"] for project in projects]:
- prune_clusters.append(cluster_data)
-
- for cluster_data in prune_clusters:
- console.info(f"It seems like the project for cluster '{cluster_data.name}' has been deleted.")
-
- # confirm question
- confirmed = console.confirm(question="Do want to remove the cluster? [N/y]: ")
- if not confirmed:
- console.info("No action taken.")
- continue
-
- # delete
- try:
- cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
- success = cluster.delete()
- if success:
- console.success("The project was deleted successfully.")
- ctx.cluster_manager.delete(cluster.id)
- except Exception as e:
- console.debug(e)
- console.error("The cluster could not be deleted.")
+ for cluster in cluster_list:
+ if cluster.id not in [UUID(project["id"]) for project in projects]:
+ prune_clusters.append(cluster)
+
+ for cluster in prune_clusters:
+ console.info(f"It seems like the project for cluster '{cluster.display_name}' has been deleted.")
+
+ # delete cluster
+ success = cluster.delete()
+ if not success:
+ console.error("The project could not be deleted.", _exit=True)
+
+ console.success("The project was deleted successfully.")
diff --git a/unikube/cli/system.py b/unikube/cli/system.py
index c9d4af6..eaeebf9 100644
--- a/unikube/cli/system.py
+++ b/unikube/cli/system.py
@@ -4,8 +4,8 @@
import click
import unikube.cli.console as console
+from unikube.cluster.dependency import install_dependency, probe_dependencies
from unikube.helpers import compare_current_and_latest_versions
-from unikube.local.dependency import install_dependency, probe_dependencies
@click.command()
diff --git a/unikube/cli/unikube.py b/unikube/cli/unikube.py
index 826b40d..3657689 100644
--- a/unikube/cli/unikube.py
+++ b/unikube/cli/unikube.py
@@ -1,11 +1,9 @@
import click
import unikube.cli.console as console
+from unikube.cache import UserContext
from unikube.cli.context import show_context
from unikube.graphql_utils import GraphQL
-from unikube.local.providers.helper import get_cluster_or_exit
-from unikube.local.system import Telepresence
-from unikube.storage.user import get_local_storage_user
@click.command()
@@ -16,12 +14,12 @@ def ps(ctx, **kwargs):
"""
# cluster
- cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
+ cluster_list = ctx.cluster_manager.get_clusters(ready=True)
cluster_id_list = [cluster.id for cluster in cluster_list]
# GraphQL
try:
- graph_ql = GraphQL(authentication=ctx.auth)
+ graph_ql = GraphQL(cache=ctx.cache)
data = graph_ql.query(
"""
query {
@@ -59,19 +57,14 @@ def ps(ctx, **kwargs):
# switch
intercept_count = 0
if cluster_data:
- cluster = get_cluster_or_exit(ctx, cluster_data[0]["id"])
- provider_data = cluster.storage.get()
-
- telepresence = Telepresence(provider_data)
- intercept_count = telepresence.intercept_count()
-
- if intercept_count == 0 or not intercept_count:
+ cluster = ctx.cluster_manager.select(cluster_data[0]["id"], exit_on_exception=True)
+ intercept_count = cluster.bridge.intercept_count()
+ if intercept_count == 0:
console.info("No app switched!")
else:
console.info(f"Apps switched: #{intercept_count}")
console.echo("")
# context
- local_storage_user = get_local_storage_user()
- user_data = local_storage_user.get()
- show_context(ctx=ctx, context=user_data.context)
+ user_context = UserContext(id=ctx.user_id)
+ show_context(ctx=ctx, context=user_context)
diff --git a/unikube/cli/utils.py b/unikube/cli/utils.py
index f39145c..c8a5d77 100644
--- a/unikube/cli/utils.py
+++ b/unikube/cli/utils.py
@@ -1,4 +1,5 @@
# heavily inspired by yaspin (https://github.com/pavdmyt/yaspin/blob/master/yaspin/core.py)
+import asyncio
import sys
from functools import wraps
from itertools import cycle
diff --git a/unikube/keycloak/__init__.py b/unikube/cluster/__init__.py
similarity index 100%
rename from unikube/keycloak/__init__.py
rename to unikube/cluster/__init__.py
diff --git a/unikube/cluster/bridge/bridge.py b/unikube/cluster/bridge/bridge.py
new file mode 100644
index 0000000..c8a4977
--- /dev/null
+++ b/unikube/cluster/bridge/bridge.py
@@ -0,0 +1,115 @@
+import socket
+
+from unikube.cli import console
+from unikube.cluster.system import Docker
+from unikube.unikubefile.unikube_file_1_0 import UnikubeFileApp
+
+
+def _is_local_port_free(port):
+ a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ # connect_ex returns 0 when something already listens on the port
+ return a_socket.connect_ex(("127.0.0.1", int(port))) != 0
+ finally:
+ a_socket.close()
+
+
+class AbstractBridge:
+ DOCKER_IMAGE_PREFIX = "bridge"
+ DOCKER_IMAGE_NAME_PREFIX = "bridge-switch"
+
+ @classmethod
+ def get_docker_image(cls, deployment: str):
+ tag = f"{cls.DOCKER_IMAGE_PREFIX}-{deployment}".lower()
+ return tag
+
+ @classmethod
+ def get_docker_image_name(cls, deployment: str):
+ tag = f"{cls.DOCKER_IMAGE_NAME_PREFIX}-{deployment}".lower()
+ return tag
+
+ def intercept_count(self) -> int:
+ return 0
+
+ def pre_cluster_up(self) -> bool:
+ raise NotImplementedError("Bridge pre_cluster_up os not implemented")
+
+ def post_cluster_up(self) -> bool:
+ raise NotImplementedError("Bridge post_cluster_up os not implemented")
+
+ def pre_cluster_down(self) -> bool:
+ raise NotImplementedError("Bridge pre_cluster_down os not implemented")
+
+ def post_cluster_down(self) -> bool:
+ raise NotImplementedError("Bridge post_cluster_down os not implemented")
+
+ def switch(self) -> bool:
+ raise NotImplementedError("Bridge switch is not implemented.")
+
+ def is_switched(self, deployment: str, namespace: str) -> bool:
+ raise NotImplementedError("Bridge is_switched is not implemented.")
+
+ def kill_switch(self, deployment: str, namespace: str) -> bool:
+ image_name = self.get_docker_image(deployment=deployment)
+
+ docker = Docker()
+ if docker.check_running(image_name):
+ docker.kill(name=image_name)
+
+ raise NotImplementedError("Bridge kill_switch is not implemented.")
+
+ def build(self, deployment: str, namespace: str, unikube_file_app, no_build: bool):
+ # grab the docker file
+ context, dockerfile, target = unikube_file_app.get_docker_build()
+ if not target:
+ target = ""
+
+ # check for active switch
+ if self.is_switched(deployment=deployment, namespace=namespace):
+ console.warning("It seems this app is already switched in another process.")
+
+ confirmed = console.confirm(question="Do you want to kill it and switch here? [N/y]: ")
+ if not confirmed:
+ console.error("Switch aborted.", _exit=True)
+
+ self.kill_switch(deployment=deployment, namespace=namespace)
+
+ # build image
+ image = self.get_docker_image(deployment=deployment)
+
+ docker = Docker()
+ if not docker.image_exists(image) or not no_build:
+ if no_build:
+ console.warning(f"Ignoring --no-build since the required image '{image}' does not exist")
+
+ console.info(f"Building docker image for {dockerfile} with context {context}")
+ status, msg = docker.build(image, context, dockerfile, target)
+
+ if not status:
+ console.debug(msg)
+ console.error("Failed to build docker image.", _exit=True)
+
+ console.success(f"Docker image successfully built: {image}")
+ else:
+ console.info(f"Using existing docker image: {image}")
+
+ def _get_intercept_port(self, unikube_file_app: UnikubeFileApp, ports):
+ # set the right intercept port
+ port = unikube_file_app.get_port()
+ if port is None:
+ port = str(ports[0])
+ if len(ports) > 1:
+ console.warning(
+ f"No port specified although there are multiple ports available: {ports}. "
+ f"Defaulting to port {port} which might not be correct."
+ )
+
+ if port not in ports:
+ console.error(f"The specified port {port} is not in the rage of available options: {ports}", _exit=True)
+
+ if not _is_local_port_free(port):
+ console.error(
+ f"The local port {port} is busy. Please stop the application running on this port and try again.",
+ _exit=True,
+ )
+
+ return port
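The four lifecycle hooks are meant to bracket the cluster's own start and stop. A sketch of the expected call order, under the assumption that the cluster classes (not part of this excerpt) wire them in like this:

from unikube.cluster.bridge.bridge import AbstractBridge

def cluster_up(bridge: AbstractBridge, start_cluster) -> bool:
    if not bridge.pre_cluster_up():
        return False
    if not start_cluster():           # provider-specific start, e.g. k3d
        return False
    return bridge.post_cluster_up()   # Gefyra connects itself here

def cluster_down(bridge: AbstractBridge, stop_cluster) -> bool:
    if not bridge.pre_cluster_down():  # Gefyra tears its connection down first
        return False
    if not stop_cluster():
        return False
    return bridge.post_cluster_down()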
diff --git a/unikube/cluster/bridge/gefyra.py b/unikube/cluster/bridge/gefyra.py
new file mode 100644
index 0000000..0e98c5d
--- /dev/null
+++ b/unikube/cluster/bridge/gefyra.py
@@ -0,0 +1,186 @@
+from time import sleep
+from typing import List
+
+from gefyra import api as gefyra_api
+from gefyra.configuration import ClientConfiguration
+
+from unikube.cli import console
+from unikube.cluster.bridge.bridge import AbstractBridge
+from unikube.cluster.system import Docker, KubeAPI
+from unikube.unikubefile.unikube_file_1_0 import UnikubeFileApp
+
+
+class GefyraException(Exception):
+ pass
+
+
+class Gefyra(AbstractBridge):
+ DOCKER_IMAGE_PREFIX = "gefyra"
+ DOCKER_IMAGE_NAME_PREFIX = "gefyra-switch"
+
+ def __init__(self, kubeconfig_path: str):
+ if not kubeconfig_path:
+ raise ValueError("Gefyra does not contain the 'kubeconfig_path' parameter")
+
+ self.config = ClientConfiguration(kube_config_file=kubeconfig_path)
+
+ def intercept_count(self) -> int:
+ try:
+ intercept_requests = gefyra_api.list_interceptrequests(config=self.config)
+ except Exception as e:
+ console.debug(e)
+ return 0
+
+ return len(intercept_requests)
+
+ def pre_cluster_up(self) -> bool:
+ return True
+
+ def post_cluster_up(self) -> bool:
+ try:
+ gefyra_api.up(config=self.config)
+ return True
+ except Exception as e:
+ console.debug(e)
+ return False
+
+ def pre_cluster_down(self) -> bool:
+ try:
+ gefyra_api.down(config=self.config)
+ return True
+ except Exception as e:
+ console.debug(e)
+ return False
+
+ def post_cluster_down(self) -> bool:
+ return True
+
+ def switch(
+ self,
+ kubeconfig_path: str,
+ deployment: str,
+ namespace: str,
+ ports: List[str],
+ unikube_file_app: UnikubeFileApp,
+ *args,
+ **kwargs,
+ ):
+ image = self.get_docker_image(deployment=deployment)
+ image_name = self.get_docker_image_name(deployment=deployment)
+
+ # run
+ port = self._get_intercept_port(unikube_file_app=unikube_file_app, ports=ports)
+ console.debug(f"port: {port}")
+
+ command = unikube_file_app.get_command(port=port)
+ command = " ".join(command)
+ console.debug(f"command: {command}")
+
+ volumes = [":".join(item) for item in unikube_file_app.get_mounts()]
+ console.debug(f"volumes: {volumes}")
+
+ env = ["=".join(item) for item in unikube_file_app.get_environment()]
+ console.debug(f"env: {env}")
+
+ container_name = unikube_file_app.container
+ if not container_name:
+ raise GefyraException("No container name provided. Please add a container to the unikube.yml")
+ console.debug(f"container: {container_name}")
+
+ try:
+ k8s = KubeAPI(kubeconfig_path=kubeconfig_path, namespace=namespace)
+ except Exception as e:
+ console.debug(e)
+ console.error("Does the cluster exist?", _exit=True)
+
+ pods = k8s.get_pods_for_workload(name=deployment, namespace=namespace)
+ for pod in pods:
+ if deployment in pod:
+ break
+ else:
+ raise GefyraException(f"Could not find a pod for deployment: {deployment}")
+
+ console.debug("gefyra run")
+ result = gefyra_api.run(
+ image=image,
+ name=image_name,
+ command=command,
+ volumes=volumes,
+ namespace=namespace,
+ env=env,
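+ # copy the remaining environment from the intercepted pod's container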
+ env_from=f"{pod}/{container_name}",
+ config=self.config,
+ )
+ if not result:
+ console.error("Gefyra run failed.", _exit=True)
+
+ console.debug("docker waiting for container")
+ while not Docker().check_running(image_name):
+ sleep(1)
+
+ # bridge
+ console.debug("gefyra bridge")
+ result = gefyra_api.bridge(
+ name=image_name,
+ namespace=namespace,
+ deployment=deployment,
+ ports=unikube_file_app.ports,
+ container_name=container_name,
+ bridge_name=image,
+ config=self.config,
+ )
+ if not result:
+ console.error("Gefyra bridge failed.", _exit=True)
+
+ _ = console.confirm(question="Press ENTER to stop the switch.")
+
+ # print logs? -> graceful exit currently not working
+ # k8s = KubeAPI(kubeconfig_path=kubeconfig_path, namespace=namespace)
+ # _ = k8s.get_logs(pod=pod, follow=True, container=container_name)
+
+ console.debug("gefyra kill_switch")
+ self.kill_switch(deployment=deployment)
+
+ return True
+
+ def is_switched(self, deployment: str, namespace: str) -> bool:
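+ # gefyra does not track intercepts per deployment here; report whether any intercept is active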
+ try:
+ intercept_requests = gefyra_api.list_interceptrequests(config=self.config)
+ except Exception as e:
+ console.debug(e)
+ return False
+
+ if not intercept_requests:
+ return False
+
+ return True
+
+ def kill_switch(self, deployment: str, *args, **kwargs) -> bool:
+ image = self.get_docker_image(deployment=deployment)
+
+ # unbridge
+ console.debug("gefyra unbridge")
+ try:
+ gefyra_api.unbridge(name=image, config=self.config)
+ except Exception as e:
+ console.debug(e)
+
+ # stop docker container
+ console.debug("gefyra kill docker container")
+ docker = Docker()
+ if docker.check_running(image):
+ docker.kill(name=image)
+
+ return True
+
+
+class GefyraBuilder:
+ def __call__(
+ self,
+ kubeconfig_path: str,
+ **kwargs,
+ ):
+ instance = Gefyra(kubeconfig_path=kubeconfig_path)
+ return instance
diff --git a/unikube/cluster/bridge/telepresence.py b/unikube/cluster/bridge/telepresence.py
new file mode 100644
index 0000000..265f1a1
--- /dev/null
+++ b/unikube/cluster/bridge/telepresence.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+import platform
+import re
+import subprocess
+import tempfile
+from time import sleep, time
+from typing import List, Tuple
+
+from pydantic import BaseModel
+
+import unikube.cli.console as console
+from unikube import settings
+from unikube.cluster.bridge.bridge import AbstractBridge
+from unikube.cluster.system import Docker, KubeAPI, KubeCtl
+from unikube.unikubefile.unikube_file_1_0 import UnikubeFileApp
+
+
+class TelepresenceData(BaseModel):
+ pass
+
+
+class Telepresence(AbstractBridge, KubeCtl):
+ base_command = "telepresence"
+
+ def intercept_count(self) -> int:
+ arguments = ["status"]
+ process = self._execute(arguments)
+ status = process.stdout.readlines()
+
+ # parse intercept count
+ try:
+ intercept_line = status[15]
+ match = re.findall("[ ]{1,}Intercepts[ ]{1,}:(.*)[ ]{1,}total", intercept_line)
+ intercept_count = int(match[0])
+ except Exception as e:
+ console.debug(e)
+ intercept_count = 0
+
+ return intercept_count
+
+ def pre_cluster_up(self) -> bool:
+ return True
+
+ def post_cluster_up(self) -> bool:
+ console.info("Now connecting Telepresence daemon. You probably have to enter your 'sudo' password.")
+ k8s = KubeAPI(kubeconfig_path=self._kubeconfig_path)
+ timeout = time() + 60 # wait one minute
+ while not k8s.is_available and time() < timeout:
+ sleep(1)
+
+ if not k8s.is_available:
+ console.error(
+ "There was an error bringing up the project cluster. The API was not available within the"
+ "expiration period.",
+ _exit=True,
+ )
+
+ # start
+ arguments = ["connect", "--no-report"]
+ process = self._execute(arguments)
+ if process.returncode and process.returncode != 0:
+ # this is a retry
+ process = self._execute(arguments)
+ if process.returncode and process.returncode != 0:
+ console.error(f"Could not start Telepresence daemon: {process.stdout.readlines()}", _exit=False)
+
+ return True
+
+ def pre_cluster_down(self) -> bool:
+ arguments = ["quit", "--no-report"]
+ process = self._execute(arguments)
+ if process.returncode and process.returncode != 0:
+ console.error("Could not stop Telepresence daemon", _exit=False)
+
+ return True
+
+ def post_cluster_down(self) -> bool:
+ return True
+
+ def _execute_intercept(self, arguments) -> subprocess.Popen:
+ cmd = [self.base_command] + arguments
+ kwargs = self._get_kwargs()
+ process = subprocess.Popen(cmd, **kwargs)
+ for stdout_line in iter(process.stdout.readline, ""):
+ print(stdout_line, end="", flush=True)
+ return process
+
+ def __service_account_tokens(self, kubeconfig_path: str, namespace: str, deployment: str, volumes: list):
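+ # write the workload's service account token and CA cert to tempfiles so they can be mounted into the local container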
+ k8s = KubeAPI(kubeconfig_path=kubeconfig_path, namespace=namespace)
+ service_account_tokens = k8s.get_serviceaccount_tokens(deployment)
+
+ if service_account_tokens:
+ tmp_sa_token = tempfile.NamedTemporaryFile(delete=True)
+ tmp_sa_cert = tempfile.NamedTemporaryFile(delete=True)
+ tmp_sa_token.write(service_account_tokens[0].encode())
+ tmp_sa_cert.write(service_account_tokens[1].encode())
+ tmp_sa_token.flush()
+ tmp_sa_cert.flush()
+ volumes.append(f"{tmp_sa_token.name}:{settings.SERVICE_TOKEN_FILENAME}")
+ volumes.append(f"{tmp_sa_cert.name}:{settings.SERVICE_CERT_FILENAME}")
+ else:
+ tmp_sa_token = None
+ tmp_sa_cert = None
+
+ return volumes, tmp_sa_token, tmp_sa_cert
+
+ def switch(
+ self,
+ kubeconfig_path: str,
+ deployment: str,
+ namespace: str,
+ ports: List[str],
+ unikube_file_app: UnikubeFileApp,
+ *args,
+ **kwargs,
+ ):
+ # arguments
+ port = self._get_intercept_port(unikube_file_app=unikube_file_app, ports=ports)
+ console.debug(f"port: {port}")
+
+ command = unikube_file_app.get_command(port=port)
+ command = " ".join(command)
+ console.debug(f"command: {command}")
+
+ volumes = [":".join(item) for item in unikube_file_app.get_mounts()]
+ console.debug(f"volumes: {volumes}")
+
+ env = ["=".join(item) for item in unikube_file_app.get_environment()]
+ console.debug(f"env: {env}")
+
+ # service account tokens
+ volumes, tmp_sa_token, tmp_sa_cert = self.__service_account_tokens(
+ kubeconfig_path=kubeconfig_path, namespace=namespace, deployment=deployment, volumes=volumes
+ )
+
+ # telepresence
+ arguments = ["intercept", "--no-report", deployment]
+ if namespace:
+ arguments += ["--namespace", namespace]
+
+ arguments += ["--port", f"{port}:{port}", "--docker-run", "--"]
+ if platform.system() != "Darwin":
+ arguments.append("--network=host")
+
+ arguments += [
+ f"--dns-search={namespace}",
+ "--rm",
+ ]
+
+ if volumes:
+ for volume in volumes:
+ arguments += ["-v", volume]
+
+ if env:
+ for e in env:
+ arguments += ["--env", e]
+
+ # this name to be retrieved for "app shell" command
+ image_name = self.get_docker_image_name(deployment=deployment)
+ arguments += ["--name", image_name.replace(":", "")]
+ arguments.append(image_name)
+ if command:
+ arguments += ["sh", "-c", command]
+
+ console.debug(arguments)
+ try:
+ process = self._execute_intercept(arguments)
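+ # exit code 137 (SIGKILL) is expected when the local container is stopped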
+ if process.returncode and process.returncode not in (0, 137):
+ console.error(
+ "There was an error with switching the deployment, please find details above", _exit=False
+ )
+ except KeyboardInterrupt:
+ pass
+
+ console.info("Stopping the switch operation. It takes a few seconds to reset the cluster.")
+ self.kill_switch(deployment=deployment, namespace=namespace)
+
+ # service account tokens
+ if tmp_sa_token:
+ tmp_sa_token.close()
+ tmp_sa_cert.close()
+
+ def is_switched(self, deployment, namespace=None) -> bool:
+ deployments = self.__get_deployments(namespace)
+ return any(x[0] == deployment and x[1] == "intercepted" for x in deployments)
+
+ def kill_switch(self, deployment: str, namespace: str) -> bool:
+ # leave
+ arguments = ["leave", "--no-report"]
+ if namespace:
+ arguments.append(f"{deployment}-{namespace}")
+ else:
+ arguments.append(deployment)
+
+ console.debug(arguments)
+ process = self._execute(arguments)
+ if process.returncode and process.returncode != 0:
+ console.error("There was an error with leaving the deployment, please find details above", _exit=False)
+
+ # uninstall
+ arguments = ["uninstall", "--agent", deployment]
+ arguments.append(deployment)
+ if namespace:
+ arguments += ["-n", namespace]
+
+ console.debug(arguments)
+ process = self._execute(arguments)
+ if process.returncode and process.returncode != 0:
+ console.error(
+ "There was an error with uninstalling the traffic agent, please find details above", _exit=False
+ )
+
+ # docker
+ image_name = self.get_docker_image_name(deployment=deployment)
+ docker = Docker()
+ if docker.check_running(image_name):
+ docker.kill(name=image_name)
+
+ def __get_deployments(self, namespace=None) -> List[Tuple[str, str]]:
+ arguments = ["list", "--no-report"]
+ if namespace:
+ arguments += ["--namespace", namespace]
+ process = self._execute(arguments)
+ deployment_list = process.stdout.readlines()
+ result = []
+ if deployment_list:
+ for deployment in deployment_list:
+ try:
+ name, status = map(str.strip, deployment.split(":"))
+ except ValueError:
+ continue
+ if name in ["Intercept name", "State", "Workload kind", "Destination", "Intercepting"]:
+ continue
+ if "intercepted" in status:
+ result.append((name, "intercepted"))
+ else:
+ result.append((name, "ready"))
+ return result
+
+
+class TelepresenceBuilder:
+ def __call__(
+ self,
+ kubeconfig_path: str,
+ **kwargs,
+ ):
+ instance = Telepresence(kubeconfig_path=kubeconfig_path)
+ return instance
diff --git a/unikube/cluster/bridge/types.py b/unikube/cluster/bridge/types.py
new file mode 100644
index 0000000..7353862
--- /dev/null
+++ b/unikube/cluster/bridge/types.py
@@ -0,0 +1,6 @@
+from enum import Enum
+
+
+class BridgeType(Enum):
+ telepresence = "telepresence"
+ gefyra = "gefyra"
diff --git a/unikube/cluster/cluster.py b/unikube/cluster/cluster.py
new file mode 100644
index 0000000..2fc4c7a
--- /dev/null
+++ b/unikube/cluster/cluster.py
@@ -0,0 +1,127 @@
+from typing import Optional
+from uuid import UUID
+
+from unikube.cli import console
+from unikube.cluster.bridge.bridge import AbstractBridge
+from unikube.cluster.bridge.types import BridgeType
+from unikube.cluster.providers.abstract_provider import AbstractProvider
+from unikube.cluster.providers.types import ProviderType
+from unikube.cluster.storage.cluster_storage import ClusterStorage
+from unikube.cluster.system import Docker
+
+
+class Cluster:
+ def __init__(self, id: UUID, display_name: str = None, **kwargs):
+ self.id = id
+ self.__display_name = display_name
+
+ # storage
+ self.storage = ClusterStorage(id=id)
+
+ # provider + bridge
+ self.provider: Optional[AbstractProvider] = None
+ self.bridge: Optional[AbstractBridge] = None
+
+ @property
+ def display_name(self):
+ name = self.__display_name
+ if name:
+ return name
+ return str(self.id)
+
+ @property
+ def cluster_name(self):
+ cluster_name = str(self.id).replace("-", "")
+ return cluster_name[:32] # k3d: cluster name must be <= 32 characters
+
+ @property
+ def cluster_provider_type(self) -> ProviderType:
+ return self.storage.provider_type
+
+ @property
+ def cluster_bridge_type(self) -> Optional[BridgeType]:
+ try:
+ return BridgeType(self.storage.bridge_type)
+ except Exception:
+ return None
+
+ def get_kubeconfig_path(self, provider_type: ProviderType = None):
+ if not provider_type:
+ provider_type = self.cluster_provider_type
+
+ try:
+ kubeconfig_path = self.storage.provider[provider_type.name].kubeconfig_path
+ except Exception:
+ kubeconfig_path = self.provider.kubeconfig_path
+
+ return kubeconfig_path
+
+ def up(self, ingress: str = None, workers: int = None):
+ # pre
+ if self.bridge:
+ success = self.bridge.pre_cluster_up()
+ if not success:
+ console.warning("Bridge up failed.")
+
+ # create/start cluster
+ cluster_exists = self.provider.exists()
+ if not cluster_exists:
+ console.info(f"Kubernetes cluster for '{self.display_name}' does not exist, creating it now.")
+ _ = self.provider.create(
+ ingress_port=ingress,
+ workers=workers,
+ bridge_type=self.cluster_bridge_type,
+ )
+ else:
+ console.info(f"Kubernetes cluster for '{self.display_name}' already exists, starting it now.")
+ self.provider.start()
+
+ # post
+ if self.bridge:
+ success = self.bridge.post_cluster_up()
+ if not success:
+ console.warning("Bridge up failed.")
+
+ return True
+
+ def down(self):
+ # pre
+ if self.bridge:
+ success = self.bridge.pre_cluster_down()
+ if not success:
+ console.warning("Bridge down failed.")
+
+ # stop cluster
+ if not self.provider.exists():
+ console.info(f"No Kubernetes cluster to stop for '{self.display_name}'")
+ return False
+
+ if not self.ready():
+ console.info(f"Kubernetes cluster for '{self.display_name}' is not running")
+ return False
+
+ console.info(f"Stopping Kubernetes cluster for '{self.display_name}'")
+ _ = self.provider.stop()
+
+ # post
+ if self.bridge:
+ success = self.bridge.post_cluster_down()
+ if not success:
+ console.warning("Bridge down failed.")
+
+ return True
+
+ def delete(self):
+ # the cluster must be stopped before it can be deleted
+ if self.ready():
+ console.info(f"Kubernetes cluster for '{self.display_name}' is still running")
+ return False
+
+ # delete
+ console.info(f"Delete kubernetes cluster for '{self.display_name}'")
+ _ = self.provider.delete()
+
+ return True
+
+ def ready(self) -> bool:
+ return Docker().check_running(self.cluster_name)
diff --git a/unikube/local/dependency.py b/unikube/cluster/dependency.py
similarity index 99%
rename from unikube/local/dependency.py
rename to unikube/cluster/dependency.py
index c8c545b..443c0e3 100644
--- a/unikube/local/dependency.py
+++ b/unikube/cluster/dependency.py
@@ -3,7 +3,6 @@
import platform
import re
import subprocess
-from time import sleep
from typing import Dict, List, Optional, Tuple
import click
@@ -11,7 +10,6 @@
import unikube.cli.console as console
from unikube import settings
-from unikube.cli.console import error
class LocalDependency(object):
diff --git a/unikube/local/__init__.py b/unikube/cluster/providers/__init__.py
similarity index 100%
rename from unikube/local/__init__.py
rename to unikube/cluster/providers/__init__.py
diff --git a/unikube/cluster/providers/abstract_provider.py b/unikube/cluster/providers/abstract_provider.py
new file mode 100644
index 0000000..831958d
--- /dev/null
+++ b/unikube/cluster/providers/abstract_provider.py
@@ -0,0 +1,53 @@
+import socket
+from abc import ABC, abstractmethod
+from uuid import UUID
+
+from semantic_version import Version
+
+
+class AbstractProvider(ABC):
+ provider_type = None
+
+ def __init__(
+ self,
+ id: UUID,
+ cluster_name: str = None,
+ ) -> None:
+ self.id = id
+ self.cluster_name = cluster_name
+
+ @staticmethod
+ def _get_random_unused_port() -> int:
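+ # binding to port 0 lets the OS pick a free ephemeral port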
+ tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ tcp.bind(("", 0))
+ _, port = tcp.getsockname()
+ tcp.close()
+ return port
+
+ @abstractmethod
+ def create(self, ingress_port: int = None) -> bool:
+ raise NotImplementedError
+
+ @abstractmethod
+ def start(self) -> bool:
+ raise NotImplementedError
+
+ @abstractmethod
+ def stop(self) -> bool:
+ raise NotImplementedError
+
+ @abstractmethod
+ def delete(self) -> bool:
+ raise NotImplementedError
+
+ @abstractmethod
+ def exists(self) -> bool:
+ raise NotImplementedError
+
+ @abstractmethod
+ def version(self) -> Version:
+ """
+ Return a type that supports meaningful comparisons between versions of the same provider,
+ e.g. (1, 10) > (1, 2), whereas the strings "1.10" < "1.2".
+ """
+ raise NotImplementedError
diff --git a/unikube/cluster/providers/factory.py b/unikube/cluster/providers/factory.py
new file mode 100644
index 0000000..67593a5
--- /dev/null
+++ b/unikube/cluster/providers/factory.py
@@ -0,0 +1,52 @@
+from unikube.cluster.bridge.bridge import AbstractBridge
+from unikube.cluster.bridge.gefyra import GefyraBuilder
+from unikube.cluster.bridge.telepresence import TelepresenceBuilder
+from unikube.cluster.bridge.types import BridgeType
+from unikube.cluster.cluster import Cluster
+from unikube.cluster.providers.abstract_provider import AbstractProvider
+from unikube.cluster.providers.k3d.k3d import K3dBuilder
+from unikube.cluster.providers.types import ProviderType
+
+
+class ClusterFactory:
+ def __init__(self):
+ self._builders = {}
+
+ # provider
+ def register_provider_builder(self, provider_type: ProviderType, builder):
+ self._builders[provider_type.value] = builder
+
+ def __create_provider(self, provider_type: ProviderType, **kwargs) -> AbstractProvider:
+ builder = self._builders.get(provider_type.value)
+ if not builder:
+ raise ValueError(provider_type)
+ return builder(**kwargs)
+
+ # bridge
+ def register_bridge_builder(self, bridge_type: BridgeType, builder):
+ self._builders[bridge_type.value] = builder
+
+ def __create_bridge(self, bridge_type: BridgeType, **kwargs):
+ builder = self._builders.get(bridge_type.value)
+ if not builder:
+ raise ValueError(bridge_type)
+ return builder(**kwargs)
+
+ def get(self, provider_type: ProviderType, bridge_type: BridgeType, **kwargs):
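+ # assemble a Cluster: the provider manages the cluster lifecycle, the bridge handles traffic interception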
+ cluster = Cluster(**kwargs)
+ kwargs["cluster_name"] = cluster.cluster_name
+
+ # build provider
+ cluster.provider = self.__create_provider(provider_type, **kwargs)
+
+ # build bridge
+ kubeconfig_path = cluster.get_kubeconfig_path()
+ cluster.bridge = self.__create_bridge(bridge_type, kubeconfig_path=kubeconfig_path)
+
+ return cluster
+
+
+kubernetes_cluster_factory = ClusterFactory()
+kubernetes_cluster_factory.register_provider_builder(ProviderType.k3d, K3dBuilder())
+kubernetes_cluster_factory.register_bridge_builder(BridgeType.gefyra, GefyraBuilder())
+kubernetes_cluster_factory.register_bridge_builder(BridgeType.telepresence, TelepresenceBuilder())
diff --git a/unikube/local/providers/__init__.py b/unikube/cluster/providers/k3d/__init__.py
similarity index 100%
rename from unikube/local/providers/__init__.py
rename to unikube/cluster/providers/k3d/__init__.py
diff --git a/unikube/local/providers/k3d/k3d.py b/unikube/cluster/providers/k3d/k3d.py
similarity index 55%
rename from unikube/local/providers/k3d/k3d.py
rename to unikube/cluster/providers/k3d/k3d.py
index fc6d9b1..9c9f201 100644
--- a/unikube/local/providers/k3d/k3d.py
+++ b/unikube/cluster/providers/k3d/k3d.py
@@ -1,51 +1,39 @@
import os
import re
+import shutil
import subprocess
from time import sleep
from typing import Dict, List, Optional
+from uuid import UUID
from semantic_version import Version
import unikube.cli.console as console
from unikube import settings
-from unikube.local.providers.abstract_provider import AbstractK8sProvider
-from unikube.local.providers.k3d.storage import K3dStorage
-from unikube.local.providers.types import K8sProviderType
-from unikube.local.system import CMDWrapper
+from unikube.cluster.bridge.types import BridgeType
+from unikube.cluster.providers.abstract_provider import AbstractProvider
+from unikube.cluster.providers.k3d.storage import K3dData
+from unikube.cluster.providers.types import ProviderType
+from unikube.cluster.storage.cluster_storage import ClusterStorage
+from unikube.cluster.system import CMDWrapper
-class K3d(AbstractK8sProvider, CMDWrapper):
- kubernetes_cluster_type = K8sProviderType.k3d
+class K3d(AbstractProvider, CMDWrapper):
+ provider_type = ProviderType.k3d
base_command = "k3d"
_cluster = []
- def __init__(
- self,
- id,
- name: str = None,
- prefix: str = settings.K3D_CLUSTER_PREFIX,
- _debug_output=False,
- ):
+ def __init__(self, id: UUID, cluster_name: str = None, _debug_output=False):
# storage
- storage = K3dStorage(id=id)
+ self.storage = ClusterStorage(id=id)
# abstract kubernetes cluster
- AbstractK8sProvider.__init__(
- self,
- id=id,
- name=name,
- storage=storage,
- )
+ AbstractProvider.__init__(self, id=id, cluster_name=cluster_name)
# CMDWrapper
self._debug_output = _debug_output
- # cluster name
- cluster_name = prefix + self.name.lower()
- cluster_name = cluster_name.replace(" ", "-")
- self.k3d_cluster_name = cluster_name
-
def _clusters(self) -> List[Dict[str, str]]:
if len(self._cluster) == 0:
arguments = ["cluster", "list", "--no-headers"]
@@ -69,8 +57,13 @@ def _clusters(self) -> List[Dict[str, str]]:
self._cluster = clusters
return self._cluster
+ @property
+ def kubeconfig_path(self):
+ config_path = os.path.join(settings.CLI_UNIKUBE_DIRECTORY, "cluster", str(self.id), "kubeconfig.yaml")
+ return config_path
+
def get_kubeconfig(self, wait=10) -> Optional[str]:
- arguments = ["kubeconfig", "get", self.k3d_cluster_name]
+ arguments = ["kubeconfig", "get", self.cluster_name]
# this is a nasty busy wait, but we don't have another chance
for i in range(1, wait):
process = self._execute(arguments)
@@ -81,53 +74,41 @@ def get_kubeconfig(self, wait=10) -> Optional[str]:
sleep(2)
if process.returncode != 0:
- console.error("Something went completely wrong with the cluster spin up (or we got a timeout).")
- else:
- # we now need to write the kubekonfig to a file
- config = process.stdout.read().strip()
- if not os.path.isdir(os.path.join(settings.CLI_KUBECONFIG_DIRECTORY, self.k3d_cluster_name)):
- os.mkdir(os.path.join(settings.CLI_KUBECONFIG_DIRECTORY, self.k3d_cluster_name))
- config_path = os.path.join(
- settings.CLI_KUBECONFIG_DIRECTORY,
- self.k3d_cluster_name,
- "kubeconfig.yaml",
- )
- file = open(config_path, "w+")
- file.write(config)
- file.close()
- return config_path
-
- @staticmethod
- def _get_random_unused_port() -> int:
- import socket
-
- tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- tcp.bind(("", 0))
- addr, port = tcp.getsockname()
- tcp.close()
- return port
+ console.error("Something went completely wrong with the cluster spin up (or we got a timeout).", _exit=True)
+
+ # we now need to write the kubeconfig to a file
+ config = process.stdout.read().strip()
+ if not os.path.isdir(os.path.join(settings.CLI_UNIKUBE_DIRECTORY, "cluster", str(self.id))):
+ os.mkdir(os.path.join(settings.CLI_UNIKUBE_DIRECTORY, "cluster", str(self.id)))
+
+ config_path = self.kubeconfig_path
+ file = open(config_path, "w+")
+ file.write(config)
+ file.close()
+ return config_path
def exists(self) -> bool:
for cluster in self._clusters():
- if cluster["name"] == self.k3d_cluster_name:
+ if cluster["name"] == self.cluster_name:
return True
return False
- def create(
- self,
- ingress_port=None,
- workers=settings.K3D_DEFAULT_WORKERS,
- ):
+ def create(self, ingress_port=None, workers=settings.K3D_DEFAULT_WORKERS, bridge_type: BridgeType = None):
v5plus = self.version().major >= 5
api_port = self._get_random_unused_port()
+
if not ingress_port:
publisher_port = self._get_random_unused_port()
else:
publisher_port = ingress_port
+
+ if not workers:
+ workers = settings.K3D_DEFAULT_WORKERS
+
arguments = [
"cluster",
"create",
- self.k3d_cluster_name,
+ self.cluster_name,
"--agents",
str(workers),
"--api-port",
@@ -140,36 +121,55 @@ def create(
"--timeout",
"120s",
]
+
+ # bridge specific settings
+ if bridge_type == BridgeType.gefyra:
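+ # Gefyra connects via Wireguard and needs UDP port 31820 exposed on an agent node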
+ arguments += [
+ "--port",
+ f"31820:31820/UDP@agent{':0' if v5plus else '[0]'}",
+ ]
+
self._execute(arguments)
- data = self.storage.get()
- data.name = self.k3d_cluster_name
- data.api_port = api_port
- data.publisher_port = publisher_port
- data.kubeconfig_path = self.get_kubeconfig()
- self.storage.set(data)
+ self.storage.name = self.cluster_name
+ self.storage.provider[self.provider_type.name] = K3dData(
+ api_port=api_port,
+ publisher_port=publisher_port,
+ kubeconfig_path=self.get_kubeconfig(),
+ )
+ self.storage.save()
return True
def start(self):
- arguments = ["cluster", "start", self.k3d_cluster_name]
+ arguments = ["cluster", "start", self.cluster_name]
p = self._execute(arguments)
if p.returncode != 0:
return False
- data = self.storage.get()
- data.kubeconfig_path = self.get_kubeconfig()
- self.storage.set(data)
+
+ _ = self.get_kubeconfig()
return True
def stop(self):
- arguments = ["cluster", "stop", self.k3d_cluster_name]
+ arguments = ["cluster", "stop", self.cluster_name]
self._execute(arguments)
return True
def delete(self):
- arguments = ["cluster", "delete", self.k3d_cluster_name]
+ arguments = ["cluster", "delete", self.cluster_name]
self._execute(arguments)
- self.storage.delete()
+
+ try:
+ self.storage.delete()
+ except Exception as e:
+ console.debug(e)
+
+ try:
+ folder_path = os.path.join(settings.CLI_UNIKUBE_DIRECTORY, "cluster", str(self.id))
+ shutil.rmtree(folder_path)
+ except Exception as e:
+ console.debug(e)
+
return True
def version(self) -> Version:
@@ -185,9 +185,9 @@ def __init__(self):
def __call__(
self,
- id,
- name=None,
- **_ignored,
+ id: UUID,
+ cluster_name: str = None,
+ **kwargs,
):
# get instance from cache
instance = self._instances.get(id, None)
@@ -195,11 +195,7 @@ def __call__(
return instance
# create instance
- instance = K3d(
- id,
- name=name,
- prefix=settings.K3D_CLUSTER_PREFIX,
- )
+ instance = K3d(id, cluster_name=cluster_name)
self._instances[id] = instance
return instance
diff --git a/unikube/cluster/providers/k3d/storage.py b/unikube/cluster/providers/k3d/storage.py
new file mode 100644
index 0000000..f0cbbec
--- /dev/null
+++ b/unikube/cluster/providers/k3d/storage.py
@@ -0,0 +1,9 @@
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class K3dData(BaseModel):
+ api_port: str
+ publisher_port: str
+ kubeconfig_path: Optional[str]
diff --git a/unikube/cluster/providers/manager.py b/unikube/cluster/providers/manager.py
new file mode 100644
index 0000000..4b0b6a0
--- /dev/null
+++ b/unikube/cluster/providers/manager.py
@@ -0,0 +1,104 @@
+import os
+from typing import List, Optional
+from uuid import UUID
+
+import unikube.cli.console as console
+from unikube import settings
+from unikube.cluster.bridge.types import BridgeType
+from unikube.cluster.cluster import Cluster
+from unikube.cluster.providers.factory import kubernetes_cluster_factory
+from unikube.cluster.providers.types import ProviderType
+
+
+class ClusterManager:
+ def count_active_clusters(self) -> int:
+ # TODO: determine the number of active clusters
+
+ # for cluster_data in ctx.cluster_manager.get_all():
+ # cluster = ctx.cluster_manager.select(cluster_data=cluster_data, cluster_provider_type=cluster_provider_type)
+ # if cluster.exists() and cluster.ready():
+ # if cluster.name == project_instance["title"] and cluster.id == project_instance["id"]:
+ # console.info(f"Kubernetes cluster for '{cluster.display_name}' is already running.", _exit=True)
+ # else:
+ # console.error(
+ # f"You cannot start multiple projects at the same time. Project {cluster.name} ({cluster.id}) is "
+ # f"currently running. Please run 'unikube project down {cluster.id}' first and "
+ # f"try again.",
+ # _exit=True,
+ # )
+
+ return 0
+
+ def get_cluster_ids(self) -> List[UUID]:
+ folder_path = os.path.join(settings.CLI_UNIKUBE_DIRECTORY, "cluster")
+ if not os.path.isdir(folder_path):
+ return []
+
+ ids = []
+ for folder_name in os.listdir(folder_path):
+ try:
+ cluster_id = UUID(folder_name)
+ ids.append(cluster_id)
+ except Exception:
+ continue
+
+ return ids
+
+ def get_clusters(self, ready: Optional[bool] = None):
+ ls = []
+ for cluster_id in self.get_cluster_ids():
+ for provider_type in ProviderType:
+ for bridge_type in BridgeType:
+ if self.exists(cluster_id, provider_type, bridge_type):
+ # handle ready option
+ cluster = self.select(
+ id=cluster_id,
+ provider_type=provider_type,
+ bridge_type=bridge_type,
+ )
+
+ if ready is not None and cluster.ready() != ready:
+ continue
+
+ # append cluster to list
+ ls.append(cluster)
+ return ls
+
+ def exists(self, id: UUID, provider_type: ProviderType, bridge_type: BridgeType) -> bool:
+ cluster = self.select(id=id, provider_type=provider_type, bridge_type=bridge_type)
+ if cluster:
+ return True
+ return False
+
+ def select(
+ self,
+ id: UUID,
+ name: str = None,
+ provider_type: ProviderType = settings.UNIKUBE_DEFAULT_PROVIDER_TYPE,
+ bridge_type: BridgeType = settings.UNIKUBE_DEFAULT_BRIDGE_TYPE,
+ exit_on_exception: bool = False,
+ ) -> Optional[Cluster]:
+ # create config
+ config = {
+ "id": id,
+ }
+
+ if name:
+ config["display_name"] = name
+
+ # get selected cluster from factory
+ try:
+ cluster = kubernetes_cluster_factory.get(
+ provider_type,
+ bridge_type,
+ **config,
+ )
+ return cluster
+ except Exception as e:
+ console.debug(e)
+
+ if exit_on_exception:
+ console.error("The selected cluster does not exist.", _exit=True)
+
+ return None
diff --git a/unikube/cluster/providers/types.py b/unikube/cluster/providers/types.py
new file mode 100644
index 0000000..c7d6002
--- /dev/null
+++ b/unikube/cluster/providers/types.py
@@ -0,0 +1,5 @@
+from enum import Enum
+
+
+class ProviderType(Enum):
+ k3d = "k3d"
diff --git a/unikube/cluster/storage/base_storage.py b/unikube/cluster/storage/base_storage.py
new file mode 100644
index 0000000..5645f20
--- /dev/null
+++ b/unikube/cluster/storage/base_storage.py
@@ -0,0 +1,57 @@
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, Optional
+
+from pydantic import BaseModel
+
+from unikube import settings
+
+
+class BaseStorage(BaseModel):
+ timestamp: datetime = datetime.now()
+ file_path: str
+ file_name: str
+
+ def __init__(self, file_name: str, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, data: Dict = {}, **kwargs):
+ if not data:
+ data = self.load(file_path=file_path, file_name=file_name)
+
+ if data:
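+ # explicitly passed kwargs take precedence over values loaded from disk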
+ kwargs = {**data, **kwargs}
+
+ super().__init__(file_path=file_path, file_name=file_name, **kwargs)
+
+ @property
+ def file_location(self):
+ return os.path.join(self.file_path, self.file_name)
+
+ def save(self):
+ # create the storage directory if it does not exist
+ Path(self.file_path).mkdir(parents=True, exist_ok=True)
+
+ # update the timestamp and persist the model as JSON
+ self.timestamp = datetime.now()
+ with open(self.file_location, "w") as f:
+ json.dump(json.loads(self.json(exclude={"file_path", "file_name"})), f, ensure_ascii=False, indent=4)
+
+ @classmethod
+ def load(cls, file_path: str, file_name: str) -> Optional[dict]:
+ file_location = os.path.join(file_path, file_name)
+ try:
+ with open(file_location, "r") as file:
+ data = json.load(file)
+ return data
+
+ except FileNotFoundError:
+ return None
+
+ except Exception:
+ # the storage file is corrupt: remove it and fall back to defaults
+ file = Path(file_location)
+ file.unlink()
+ return None
+
+ def delete(self):
+ file = Path(self.file_location)
+ file.unlink()
diff --git a/unikube/cluster/storage/cluster_storage.py b/unikube/cluster/storage/cluster_storage.py
new file mode 100644
index 0000000..e4c7b86
--- /dev/null
+++ b/unikube/cluster/storage/cluster_storage.py
@@ -0,0 +1,23 @@
+import os
+from typing import Dict, Optional
+from uuid import UUID
+
+from unikube import settings
+from unikube.cluster.bridge.telepresence import TelepresenceData
+from unikube.cluster.providers.k3d.storage import K3dData
+from unikube.cluster.storage.base_storage import BaseStorage
+
+
+class ClusterStorage(BaseStorage):
+ id: UUID
+ name: Optional[str] = None
+ provider_type: str = settings.UNIKUBE_DEFAULT_PROVIDER_TYPE.name
+ provider: Dict[str, K3dData] = {}
+ bridge_type: str = settings.UNIKUBE_DEFAULT_BRIDGE_TYPE.name
+ bridge: Dict[str, TelepresenceData] = {}
+
+ def __init__(
+ self, id: UUID, file_path: str = settings.CLI_UNIKUBE_DIRECTORY, file_name: str = "cluster.json", **kwargs
+ ):
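+ # each cluster keeps its state in <file_path>/cluster/<id>/cluster.json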
+ file_path = os.path.join(file_path, "cluster", str(id))
+ super().__init__(id=id, file_path=file_path, file_name=file_name, data=kwargs)
diff --git a/unikube/local/system.py b/unikube/cluster/system.py
similarity index 58%
rename from unikube/local/system.py
rename to unikube/cluster/system.py
index d063148..19436ed 100644
--- a/unikube/local/system.py
+++ b/unikube/cluster/system.py
@@ -1,9 +1,8 @@
# -*- coding: utf-8 -*-
import os
-import platform
-import re
import subprocess
-from typing import List, Tuple, Union
+from pathlib import Path
+from typing import List, Tuple
import click
from kubernetes import client, config, watch
@@ -13,7 +12,10 @@
import unikube.cli.console as console
from unikube import settings
-from unikube.local.exceptions import UnikubeClusterUnavailableError
+
+
+class UnikubeClusterUnavailableError(Exception):
+ pass
class CMDWrapper(object):
@@ -71,15 +73,16 @@ def _get_environment(self):
class KubeCtl(CMDWrapper):
base_command = "kubectl"
- def __init__(self, provider_data, debug_output=False):
- if not provider_data.kubeconfig_path:
- raise ValueError("Project does not contain the kubeconfigPath parameter")
- self._provider_data = provider_data
+ def __init__(self, kubeconfig_path: str, debug_output: bool = False):
+ if not kubeconfig_path:
+ raise ValueError("KubeCtl does not contain the 'kubeconfig_path' parameter")
+
+ self._kubeconfig_path = kubeconfig_path
super(KubeCtl, self).__init__(debug_output)
def _get_environment(self):
env = super(KubeCtl, self)._get_environment()
- env["KUBECONFIG"] = self._provider_data.kubeconfig_path
+ env["KUBECONFIG"] = self._kubeconfig_path
return env
def _get_kwargs(self):
@@ -99,26 +102,6 @@ def delete_str(self, namespace, text: str):
arguments = ["delete", "--namespace", namespace, "-f", "-"]
self._execute(arguments, text)
- def get_ingress_data(self, namespace):
- arguments = ["get", "ingress", "--namespace", namespace]
- process = self._execute(arguments)
- output = process.stdout.read()
- # skip the header
- ingress_lines = output.split("\n")[1:]
- result = []
- for line in ingress_lines:
- line = list(filter(lambda x: x != "", line.split(" ")))
- if line:
- result.append(
- {
- "name": line[0],
- "hosts": line[1],
- "address": line[2],
- "ports": line[3],
- }
- )
- return result
-
def get_pods(self, namespace):
arguments = ["get", "pods", "--namespace", namespace]
process = self._execute(arguments)
@@ -172,7 +155,7 @@ def build(self, tag, context, dockerfile=None, target=None) -> Tuple[bool, str]:
"Could not build the Docker image, please make sure the image can be built",
)
- def check_running(self, name):
+ def check_running(self, name) -> bool:
"""Checks whether an image or a specific container is running."""
arguments = ["ps"]
process = self._execute(arguments)
@@ -224,151 +207,23 @@ def image_exists(self, name):
return False
-class Telepresence(KubeCtl):
- base_command = "telepresence"
+class KubeAPI(object):
+ def __init__(self, kubeconfig_path: str, namespace: str = None, deck=None):
+ file = Path(kubeconfig_path)
+ if not file.is_file():
+ raise Exception(f"kubeconfig does not exist: {kubeconfig_path}")
- def _execute_intercept(self, arguments) -> subprocess.Popen:
- cmd = [self.base_command] + arguments
- kwargs = self._get_kwargs()
- process = subprocess.Popen(cmd, **kwargs)
- for stdout_line in iter(process.stdout.readline, ""):
- print(stdout_line, end="", flush=True)
- return process
+ self._kubeconfig_path = kubeconfig_path
- def swap(self, deployment, image_name, command=None, namespace=None, envs=None, mounts=None, port=None):
- arguments = ["intercept", "--no-report", deployment]
- if namespace:
- arguments = arguments + ["--namespace", namespace]
-
- arguments = arguments + ["--port", f"{port}:{port}", "--docker-run", "--"]
- if platform.system() != "Darwin":
- arguments.append("--network=host")
- arguments += [
- f"--dns-search={namespace}",
- "--rm",
- ]
- if mounts:
- for mount in mounts:
- arguments = arguments + ["-v", f"{mount[0]}:{mount[1]}"]
- if envs:
- for env in envs:
- arguments = arguments + ["--env", f"{env[0]}={env[1]}"]
-
- # this name to be retrieved for "app shell" command
- arguments = arguments + ["--name", image_name.replace(":", "")]
- arguments.append(image_name)
- if command:
- arguments = arguments + ["sh", "-c"] + [f"{' '.join(command)}"]
-
- console.debug(arguments)
- try:
- process = self._execute_intercept(arguments)
- if process.returncode and (process.returncode != 0 and not process.returncode != 137):
- console.error(
- "There was an error with switching the deployment, please find details above", _exit=False
- )
- except KeyboardInterrupt:
- pass
- console.info("Stopping the switch operation. It takes a few seconds to reset the cluster.")
- self.leave(deployment, namespace, silent=True)
- self.uninstall(deployment, namespace, silent=True)
-
- def leave(self, deployment, namespace=None, silent=False):
- arguments = ["leave", "--no-report"]
- if namespace:
- arguments.append(f"{deployment}-{namespace}")
+ if not namespace:
+ if deck:
+ self._namespace = deck["environment"][0]["namespace"] or "default"
+ else:
+ self._namespace = "default"
else:
- arguments.append(deployment)
- console.debug(arguments)
- process = self._execute(arguments)
- if not silent and process.returncode and process.returncode != 0:
- console.error("There was an error with leaving the deployment, please find details above", _exit=False)
-
- def uninstall(self, deployment, namespace=None, silent=False):
- arguments = ["uninstall", "--agent", deployment]
- arguments.append(deployment)
- if namespace:
- arguments += ["-n", namespace]
- console.debug(arguments)
- process = self._execute(arguments)
- if not silent and process.returncode and process.returncode != 0:
- console.error(
- "There was an error with uninstalling the traffic agent, please find details above", _exit=False
- )
+ self._namespace = namespace
- def _get_environment(self):
- env = super(Telepresence, self)._get_environment()
- return env
-
- def start(self) -> None:
- arguments = ["connect", "--no-report"]
- process = self._execute(arguments)
- if process.returncode and process.returncode != 0:
- # this is a retry
- process = self._execute(arguments)
- if process.returncode and process.returncode != 0:
- console.error(f"Could not start Telepresence daemon: {process.stdout.readlines()}", _exit=False)
-
- def stop(self) -> None:
- arguments = ["quit", "--no-report"]
- process = self._execute(arguments)
- if process.returncode and process.returncode != 0:
- console.error("Could not stop Telepresence daemon", _exit=False)
-
- def list(self, namespace=None, flat=False) -> List[str]:
- arguments = ["list", "--no-report"]
- if namespace:
- arguments += ["--namespace", namespace]
- process = self._execute(arguments)
- deployment_list = process.stdout.readlines()
- result = []
- if deployment_list:
- for deployment in deployment_list:
- try:
- name, status = map(str.strip, deployment.split(":"))
- except ValueError:
- continue
- if name in ["Intercept name", "State", "Workload kind", "Destination", "Intercepting"]:
- continue
- if "intercepted" in status:
- result.append((name, "intercepted"))
- else:
- result.append((name, "ready"))
- if flat:
- result = [deployment[0] for deployment in result]
- return result
-
- def is_swapped(self, deployment, namespace=None) -> bool:
- deployments = self.list(namespace)
- swapped = any(filter(lambda x: x[0] == deployment and x[1] == "intercepted", deployments))
- return swapped
-
- def intercept_count(self) -> Union[int, None]:
- arguments = ["status"]
- process = self._execute(arguments)
- status = process.stdout.readlines()
-
- # parse intercept count
- try:
- intercept_line = status[15]
- match = re.findall("[ ]{1,}Intercepts[ ]{1,}:(.*)[ ]{1,}total", intercept_line)
- intercept_count = int(match[0])
- except Exception as e:
- console.debug(e)
- intercept_count = None
-
- return intercept_count
-
-
-class KubeAPI(object):
- def __init__(self, provider_data, deck=None):
- self._provider_data = provider_data
- self._deck = deck
- if self._deck:
- self._namespace = self._deck["environment"][0]["namespace"] or "default"
- else:
- self._namespace = "default"
- self._api_client = config.new_client_from_config(provider_data.kubeconfig_path)
+ self._api_client = config.new_client_from_config(kubeconfig_path)
self._core_api = client.CoreV1Api(self._api_client)
self._networking_api = client.NetworkingV1beta1Api(self._api_client)
@@ -473,3 +328,14 @@ def get_serviceaccount_tokens(self, app_name):
break
if std_out:
return std_out
+
+ def get_pods_for_workload(self, name: str, namespace: str) -> List[str]:
+ result = []
+ name = name.split("-")
+ pods = self._core_api.list_namespaced_pod(namespace)
+ for pod in pods.items:
+ pod_name = pod.metadata.name.split("-")
+ if all(x == y for x, y in zip(name, pod_name)) and len(pod_name) - 2 == len(name):
+ # this pod name contains all segments of the workload name
+ result.append(pod.metadata.name)
+ return result
diff --git a/unikube/context/__init__.py b/unikube/context/__init__.py
index 63c5615..1a07ac8 100644
--- a/unikube/context/__init__.py
+++ b/unikube/context/__init__.py
@@ -1,11 +1,13 @@
class ClickContext(object):
def __init__(self):
- from unikube.authentication.authentication import get_authentication
+ from unikube.cache.cache import Cache
+ from unikube.cluster.providers.manager import ClusterManager
from unikube.context.context import Context
- from unikube.local.providers.manager import K8sClusterManager
- from unikube.storage.general import LocalStorageGeneral
- self.auth = get_authentication()
- self.storage_general = LocalStorageGeneral()
- self.context: Context = Context(auth=self.auth)
- self.cluster_manager = K8sClusterManager()
+ # add cache to the context (loading this cache is required to identify the user)
+ cache = Cache()
+ self.cache = cache
+ self.user_id = cache.userId
+
+ self.context = Context(cache=self.cache)
+ self.cluster_manager = ClusterManager()
diff --git a/unikube/context/context.py b/unikube/context/context.py
index f93d9e0..bf01849 100644
--- a/unikube/context/context.py
+++ b/unikube/context/context.py
@@ -1,12 +1,12 @@
-import os
from abc import ABC, abstractmethod
-from typing import List, Tuple, Union
+from typing import List, Optional, Tuple, Union
+from uuid import UUID
from unikube import settings
+from unikube.cache import Cache, UserContext
from unikube.cli import console
from unikube.context.helper import convert_context_arguments, is_valid_uuid4
from unikube.context.types import ContextData
-from unikube.storage.user import LocalStorageUser, get_local_storage_user
from unikube.unikubefile.selector import unikube_file_selector
from unikube.unikubefile.unikube_file import UnikubeFile
@@ -66,15 +66,19 @@ def get(self, **kwargs) -> ContextData:
class LocalContext(IContext):
- def __init__(self, local_storage_user: Union[LocalStorageUser, None]):
- self.local_storage_user = local_storage_user
+ def __init__(self, user_context: Union[UserContext, None]):
+ self.user_context = user_context
def get(self, **kwargs) -> ContextData:
- if not self.local_storage_user:
+ if not self.user_context:
return ContextData()
- user_data = self.local_storage_user.get()
- return user_data.context
+ context_data = ContextData(
+ organization_id=self.user_context.organization_id or None,
+ project_id=self.user_context.project_id or None,
+ deck_id=self.user_context.deck_id or None,
+ )
+ return context_data
class ContextLogic:
@@ -107,11 +111,11 @@ def get(self) -> ContextData:
class Context:
- def __init__(self, auth):
- self._auth = auth
+ def __init__(self, cache: Cache):
+ self.cache = cache
def get(self, **kwargs) -> ContextData:
- local_storage_user = get_local_storage_user()
+ user_context = UserContext(id=self.cache.userId)
context_logic = ContextLogic(
[
@@ -119,7 +123,7 @@ def get(self, **kwargs) -> ContextData:
click_options={key: kwargs[key] for key in ("organization", "project", "deck") if key in kwargs}
),
UnikubeFileContext(path_unikube_file="unikube.yaml"),
- LocalContext(local_storage_user=local_storage_user),
+ LocalContext(user_context=user_context),
]
)
context = context_logic.get()
@@ -134,10 +138,10 @@ def get(self, **kwargs) -> ContextData:
def get_context_ids_from_arguments(
self, organization_argument: str = None, project_argument: str = None, deck_argument: str = None
- ) -> Tuple[str, str, str]:
+ ) -> Tuple[Optional[UUID], Optional[UUID], Optional[UUID]]:
# convert context argments into ids
organization_id, project_id, deck_id = convert_context_arguments(
- auth=self._auth,
+ cache=self.cache,
organization_argument=organization_argument,
project_argument=project_argument,
deck_argument=deck_argument,
@@ -145,4 +149,21 @@ def get_context_ids_from_arguments(
# consider context
context = self.get(organization=organization_id, project=project_id, deck=deck_id)
- return context.organization_id, context.project_id, context.deck_id
+
+ # convert to UUID
+ if context.organization_id:
+ organization_id = context.organization_id
+ else:
+ organization_id = None
+
+ if context.project_id:
+ project_id = context.project_id
+ else:
+ project_id = None
+
+ if context.deck_id:
+ deck_id = context.deck_id
+ else:
+ deck_id = None
+
+ return organization_id, project_id, deck_id
diff --git a/unikube/context/helper.py b/unikube/context/helper.py
index 506d117..2116f2b 100644
--- a/unikube/context/helper.py
+++ b/unikube/context/helper.py
@@ -1,16 +1,21 @@
from typing import Tuple
from uuid import UUID
+from retrying import retry
from slugify import slugify
+from unikube.cache import UserIDs
from unikube.cli import console
-from unikube.graphql_utils import GraphQL
class ArgumentError(Exception):
pass
+class RetryError(Exception):
+ pass
+
+
# uuid validation
def is_valid_uuid4(uuid: str):
try:
@@ -21,12 +26,14 @@ def is_valid_uuid4(uuid: str):
# context arguments
-def __select_result(argument_value: str, results: list, exception_message: str = "context"):
+def __select_result(argument_value: str, results: dict, exception_message: str = "context") -> UUID:
# slugify
if slugify(argument_value) != argument_value:
- title_list = [item["title"] for item in results]
+ title_list = [item.title for item in results.values()]
else:
- title_list = [slugify(item["title"]) for item in results]
+ title_list = [slugify(item.title) for item in results.values()]
+
+ uuid_list = list(results.keys())
# check if name/title exists and is unique
count = title_list.count(argument_value)
@@ -43,111 +50,105 @@ def __select_result(argument_value: str, results: list, exception_message: str =
raise ArgumentError(f"Invalid {exception_message} name/slug.")
# convert name/title to uuid
- return results[index]["id"]
+ return uuid_list[index]
-def convert_organization_argument_to_uuid(auth, argument_value: str) -> str:
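+# a failed lookup refreshes the cached IDs and raises RetryError, so the retry runs against fresh data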
+@retry(stop_max_attempt_number=2)
+def convert_organization_argument_to_uuid(cache, argument_value: str) -> UUID:
# uuid provided (no conversion required)
if is_valid_uuid4(argument_value):
- return argument_value
+ return UUID(argument_value)
+
+ try:
+ user_ids = UserIDs(id=cache.userId)
+ uuid = __select_result(argument_value, user_ids.organization, exception_message="organization")
+ except Exception as e:
+ user_ids.refresh()
+ raise RetryError(e)
+
+ return uuid
- # get available context options or use provided data (e.g. from previous query)
- graph_ql = GraphQL(authentication=auth)
- data = graph_ql.query(
- """
- query {
- allOrganizations {
- results {
- title
- id
- }
- }
- }
- """
- )
-
- results = data["allOrganizations"]["results"]
- return __select_result(argument_value, results, exception_message="organization")
-
-
-def convert_project_argument_to_uuid(auth, argument_value: str, organization_id: str = None) -> str:
+
+def convert_project_argument_to_uuid(cache, argument_value: str, organization_id: UUID = None) -> UUID:
# uuid provided (no conversion required)
if is_valid_uuid4(argument_value):
- return argument_value
+ return UUID(argument_value)
+
+ try:
+ user_ids = UserIDs(id=cache.userId)
+ projects = user_ids.project
+
+ # filter
+ if organization_id:
+ organization = user_ids.organization.get(organization_id)
+ projects = {key: projects[key] for key in organization.project_ids}
+
+ uuid = __select_result(argument_value, projects, exception_message="project")
+ except Exception as e:
+ user_ids.refresh()
+ raise RetryError(e)
- # get available context options or use provided data (e.g. from previous query)
- graph_ql = GraphQL(authentication=auth)
- data = graph_ql.query(
- """
- query($organization_id: UUID) {
- allProjects(organizationId: $organization_id) {
- results {
- title
- id
- }
- }
- }
- """,
- query_variables={
- "organization_id": organization_id,
- },
- )
-
- results = data["allProjects"]["results"]
- return __select_result(argument_value, results, exception_message="project")
+ return uuid
def convert_deck_argument_to_uuid(
- auth, argument_value: str, organization_id: str = None, project_id: str = None
-) -> str:
+ cache, argument_value: str, organization_id: UUID = None, project_id: UUID = None
+) -> UUID:
# uuid provided (no conversion required)
if is_valid_uuid4(argument_value):
-        return argument_value
+        return UUID(argument_value)
- # get available context options or use provided data (e.g. from previous query)
- graph_ql = GraphQL(authentication=auth)
- data = graph_ql.query(
- """
- query($organization_id: UUID, $project_id: UUID) {
- allDecks(organizationId: $organization_id, projectId: $project_id) {
- results {
- title
- id
- }
- }
- }
- """,
- query_variables={
- "organization_id": organization_id,
- "project_id": project_id,
- },
- )
-
- results = data["allDecks"]["results"]
- return __select_result(argument_value, results, exception_message="deck")
+ try:
+ user_ids = UserIDs(id=cache.userId)
+ decks = user_ids.deck
+
+ project_ids = []
+
+ # filter
+ if organization_id:
+ organization = user_ids.organization.get(organization_id)
+ project_ids = getattr(organization, "project_ids", [])
+
+ if project_id:
+ project_ids = [project_id]
+
+ # collect decks across all matching projects instead of repeatedly narrowing the full dict
+ if project_ids:
+ filtered_decks = {}
+ for id in project_ids:
+ project = user_ids.project.get(id)
+ if project:
+ filtered_decks.update({key: decks[key] for key in project.deck_ids})
+ decks = filtered_decks
+
+ uuid = __select_result(argument_value, decks, exception_message="deck")
+ except Exception as e:
+ user_ids.refresh()
+ raise RetryError(e)
+
+ return uuid
def convert_context_arguments(
- auth, organization_argument: str = None, project_argument: str = None, deck_argument: str = None
+ cache, organization_argument: str = None, project_argument: str = None, deck_argument: str = None
) -> Tuple[str, str, str]:
try:
# organization
if organization_argument:
- organization_id = convert_organization_argument_to_uuid(auth, organization_argument)
+ organization_id = convert_organization_argument_to_uuid(cache, organization_argument)
+ organization_id = str(organization_id)
else:
organization_id = None
# project
if project_argument:
- project_id = convert_project_argument_to_uuid(auth, project_argument, organization_id=organization_id)
+ project_id = convert_project_argument_to_uuid(cache, project_argument, organization_id=organization_id)
+ project_id = str(project_id)
else:
project_id = None
# deck
if deck_argument:
deck_id = convert_deck_argument_to_uuid(
- auth, deck_argument, organization_id=organization_id, project_id=project_id
+ cache, deck_argument, organization_id=organization_id, project_id=project_id
)
+ deck_id = str(deck_id)
else:
deck_id = None
except Exception as e:
diff --git a/unikube/context/types.py b/unikube/context/types.py
index 82961dd..3375c15 100644
--- a/unikube/context/types.py
+++ b/unikube/context/types.py
@@ -1,9 +1,10 @@
from typing import Optional
+from uuid import UUID
from pydantic import BaseModel
class ContextData(BaseModel):
- organization_id: Optional[str] = None
- project_id: Optional[str] = None
- deck_id: Optional[str] = None
+ organization_id: Optional[UUID] = None
+ project_id: Optional[UUID] = None
+ deck_id: Optional[UUID] = None
diff --git a/unikube/graphql_utils.py b/unikube/graphql_utils.py
index 251019b..0ee3b71 100644
--- a/unikube/graphql_utils.py
+++ b/unikube/graphql_utils.py
@@ -3,6 +3,7 @@
from typing import Union
import click_spinner
+import requests
from gql import Client, gql
from gql.transport.exceptions import TransportServerError
from gql.transport.requests import RequestsHTTPTransport
@@ -10,6 +11,7 @@
import unikube.cli.console as console
from unikube import settings
+from unikube.cache import Cache
# EnvironmentType
@@ -30,16 +32,16 @@ def retry_exception(exception):
class GraphQL:
def __init__(
self,
- authentication,
+ cache: Cache,
url=settings.GRAPHQL_URL,
timeout=settings.GRAPHQL_TIMEOUT,
):
self.url = url
self.timeout = timeout
- # automatic token refresh
- self.authentication = authentication
- self.access_token = str(authentication.general_data.authentication.access_token)
+ # cache / access token
+ self.cache = cache
+ self.access_token = str(cache.auth.access_token)
# client
self.client = self._client()
@@ -74,6 +76,17 @@ def query(
) -> Union[dict, None]:
try:
query = gql(query)
+ data = self.client.execute(
+ document=query,
+ variable_values=query_variables,
+ )
+
+ except requests.exceptions.HTTPError:
+ from unikube.authentication.authentication import TokenAuthentication
+
+ auth = TokenAuthentication(cache=self.cache)
+ response = auth.refresh()
+
with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
data = self.client.execute(
document=query,
@@ -81,8 +94,10 @@ def query(
)
except TransportServerError:
- # refresh token
- response = self.authentication.refresh()
+ from unikube.authentication.authentication import TokenAuthentication
+
+ auth = TokenAuthentication(cache=self.cache)
+ response = auth.refresh()
if not response["success"]:
console.exit_login_required()
diff --git a/unikube/helpers.py b/unikube/helpers.py
index d24347c..2eff1b7 100644
--- a/unikube/helpers.py
+++ b/unikube/helpers.py
@@ -1,4 +1,3 @@
-import re
import sys
from pathlib import Path
from urllib.parse import urljoin
@@ -11,10 +10,8 @@
import unikube.cli.console as console
from unikube import settings
from unikube.authentication.authentication import TokenAuthentication
-from unikube.context import ClickContext
+from unikube.cache import Cache
from unikube.graphql_utils import EnvironmentType
-from unikube.local.providers.types import K8sProviderType
-from unikube.local.system import Telepresence
def get_requests_session(access_token) -> Session:
@@ -35,12 +32,12 @@ def download_specs(access_token: str, environment_id: str):
return manifest
-def download_manifest(deck: dict, authentication: TokenAuthentication, access_token: str, environment_index: int = 0):
+def download_manifest(deck: dict, cache: Cache, environment_index: int = 0):
try:
environment_id = deck["environment"][environment_index]["id"]
console.info("Requesting manifests. This process may take a few seconds.")
manifest = download_specs(
- access_token=access_token,
+ access_token=cache.auth.access_token,
environment_id=environment_id,
)
except HTTPError as e:
@@ -55,7 +52,9 @@ def download_manifest(deck: dict, authentication: TokenAuthentication, access_to
elif e.response.status_code == 403:
console.warning("Refreshing access token")
environment_id = deck["environment"][environment_index]["id"]
- response = authentication.refresh()
+
+ auth = TokenAuthentication(cache=cache)
+ response = auth.refresh()
if not response["success"]:
console.exit_login_required()
@@ -93,22 +92,6 @@ def check_environment_type_local_or_exit(deck: dict, environment_index: int = 0)
console.error("This deck cannot be installed locally.", _exit=True)
-def check_running_cluster(ctx: ClickContext, cluster_provider_type: K8sProviderType.k3d, project_instance: dict):
- for cluster_data in ctx.cluster_manager.get_all():
- cluster = ctx.cluster_manager.select(cluster_data=cluster_data, cluster_provider_type=cluster_provider_type)
- if cluster.exists() and cluster.ready():
- if cluster.name == project_instance["title"] and cluster.id == project_instance["id"]:
- Telepresence(cluster.storage.get()).start()
- console.info(f"Kubernetes cluster for '{cluster.display_name}' is already running.", _exit=True)
- else:
- console.error(
- f"You cannot start multiple projects at the same time. Project {cluster.name} ({cluster.id}) is "
- f"currently running. Please run 'unikube project down {cluster.id}' first and "
- f"try again.",
- _exit=True,
- )
-
-
def compare_current_and_latest_versions():
try:
current_version = None
@@ -142,7 +125,3 @@ def compare_current_and_latest_versions():
import traceback
console.info(f"Versions cannot be compared, because of error {traceback.format_exc()}")
-
-
-def compare_decorator(f):
- compare_current_and_latest_versions()
diff --git a/unikube/keycloak/permissions.py b/unikube/keycloak/permissions.py
deleted file mode 100644
index 77868c2..0000000
--- a/unikube/keycloak/permissions.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import fnmatch
-import uuid
-from functools import lru_cache
-from typing import KeysView, List, Optional, Union
-
-import jwt
-from pydantic import BaseModel
-from retrying import retry
-
-from unikube import settings
-from unikube.authentication.authentication import IAuthentication
-from unikube.authentication.types import AuthenticationData
-from unikube.cli import console
-
-
-class KeycloakPermissionData(BaseModel):
- scopes: Optional[List[str]]
- rsid: str
- rsname: str
-
-
-class KeycloakPermissions:
- def __init__(self, authentication: IAuthentication):
- self.authentication = authentication
-
- def _permission_data(self):
- # verify
- response = self.authentication.verify_or_refresh()
- if not response:
- console.exit_login_required()
-
- # get authentication_data
- authentication_data = self.authentication.general_data.authentication
-
- # check for requesting_party_token
- if not authentication_data.requesting_party_token:
- raise Exception("Requesting Party Token (RPT) required.")
-
- # decode requesting_party_token
- requesting_party_token = self._decode_requesting_party_token(
- requesting_party_token=authentication_data.access_token
- )
-
- # convert
- permission_data = KeycloakPermissions._convert(requesting_party_token["authorization"]["permissions"])
-
- return permission_data
-
- def _decode_requesting_party_token(self, requesting_party_token: str) -> dict:
- # decode
- try:
- token = jwt.decode(
- requesting_party_token,
- algorithms=["RS256"],
- audience=settings.TOKEN_AUDIENCE,
- options={"verify_signature": False},
- )
- except Exception as e:
- console.debug(e)
- raise Exception("Requesting Party Token (RPT) could not be decoded.")
-
- return token
-
- @staticmethod
- def _convert(permissions: dict) -> List[KeycloakPermissionData]:
- keycloak_permission_list = []
- for permission_dict in permissions:
- keycloak_permission = KeycloakPermissionData(**permission_dict)
- keycloak_permission_list.append(keycloak_permission)
-
- return keycloak_permission_list
-
- @lru_cache(10)
- def get_permissions_by_scope(self, scope: str) -> List[KeycloakPermissionData]:
- """
-        Return a list of resources with the given scope. Supports filtering with
-        wildcards, e.g. "organization:*".
- """
- permission_data = self._permission_data()
-
- results = []
- for permission in permission_data:
- if permission.scopes:
- # 'scopes': ['organization:view', 'organization:edit']
- matched = fnmatch.filter(permission.scopes, scope)
- if matched:
- results.append(permission)
-
- return results
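The wildcard matching that the deleted `get_permissions_by_scope` relied on is plain `fnmatch`, so a scope query is a shell-style pattern rather than a regex. A self-contained illustration; the organization scopes come from the deleted comments, the third scope is illustrative:

```python
import fnmatch

# Scopes as they appear in a decoded RPT ('scopes' key of a permission entry).
scopes = ["organization:view", "organization:edit", "project:view"]

print(fnmatch.filter(scopes, "organization:*"))  # ['organization:view', 'organization:edit']
print(fnmatch.filter(scopes, "*:view"))          # ['organization:view', 'project:view']
```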
diff --git a/unikube/local/exceptions.py b/unikube/local/exceptions.py
deleted file mode 100644
index 25ef2e7..0000000
--- a/unikube/local/exceptions.py
+++ /dev/null
@@ -1,2 +0,0 @@
-class UnikubeClusterUnavailableError(Exception):
- pass
diff --git a/unikube/local/providers/abstract_provider.py b/unikube/local/providers/abstract_provider.py
deleted file mode 100644
index 58f5096..0000000
--- a/unikube/local/providers/abstract_provider.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any
-
-from semantic_version import Version
-
-from unikube.local.providers.types import K8sProviderData
-from unikube.local.system import Docker
-
-
-class IK8sProviderStorage(ABC):
- @abstractmethod
- def get(self) -> K8sProviderData:
- raise NotImplementedError
-
- @abstractmethod
- def set(self) -> None:
- raise NotImplementedError
-
- @abstractmethod
- def delete(self) -> None:
- raise NotImplementedError
-
-
-class AbstractK8SProviderStorage(IK8sProviderStorage):
- def __init__(
- self,
- id: str,
- storage=None,
- ) -> None:
- super().__init__()
-
- self.id = id
- self.storage = storage
-
- def get(self) -> K8sProviderData:
- return self.storage.get(self.id)
-
- def set(self, data) -> None:
- self.storage.set(self.id, data)
-
- def delete(self) -> None:
- self.storage.delete(self.id)
-
-
-class IK8sProvider(ABC):
- @abstractmethod
- def create(self, ingress_port: int = None) -> bool:
- raise NotImplementedError
-
- @abstractmethod
- def start(self) -> bool:
- raise NotImplementedError
-
- @abstractmethod
- def stop(self) -> bool:
- raise NotImplementedError
-
- @abstractmethod
- def delete(self) -> bool:
- raise NotImplementedError
-
- @abstractmethod
- def exists(self) -> bool:
- raise NotImplementedError
-
- @abstractmethod
- def ready(self) -> bool:
- raise NotImplementedError
-
- @abstractmethod
- def version(self) -> Version:
- """
-        Preferably return a type that compares correctly between versions of the same
-        provider, e.g. (1, 10) > (1, 2), whereas the strings compare as "1.10" < "1.2".
- """
- raise NotImplementedError
-
-
-class AbstractK8sProvider(IK8sProvider):
- provider_type = None
-
- def __init__(
- self,
- id: str,
- name: str = None,
- storage: AbstractK8SProviderStorage = None,
- ) -> None:
- self.id = id
- self.name = name
- self.storage = storage
-
- @property
- def display_name(self):
- name = self.name
- if name:
- return name
-
- id = self.id
- return id
-
- @property
- def k8s_provider_type(self):
- return self.provider_type
-
- def ready(self) -> bool:
- # get name
- provider_data = self.storage.get()
- name = provider_data.name
- if not name:
- return False
-
- return Docker().check_running(name)
diff --git a/unikube/local/providers/factory.py b/unikube/local/providers/factory.py
deleted file mode 100644
index aaa3632..0000000
--- a/unikube/local/providers/factory.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from unikube.local.providers.k3d.k3d import K3dBuilder
-from unikube.local.providers.types import K8sProviderType
-
-
-class K8sClusterFactory:
- def __init__(self):
- self._builders = {}
-
- def register_builder(self, provider_type: K8sProviderType, builder):
- self._builders[provider_type.value] = builder
-
- def __create(self, provider_type: K8sProviderType, **kwargs):
- builder = self._builders.get(provider_type.value)
- if not builder:
- raise ValueError(provider_type)
- return builder(**kwargs)
-
- def get(self, provider_type: K8sProviderType, **kwargs):
- return self.__create(provider_type, **kwargs)
-
-
-kubernetes_cluster_factory = K8sClusterFactory()
-kubernetes_cluster_factory.register_builder(K8sProviderType.k3d, K3dBuilder())
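For reference, the deleted factory is a plain registry keyed on the provider enum's value; builders are callables invoked with the keyword arguments handed to `get()`. The same pattern in isolation, with an illustrative enum and a stand-in builder:

```python
# Minimal sketch of the registry pattern the removed K8sClusterFactory used.
from enum import Enum

class ProviderType(Enum):
    k3d = "k3d"

class Factory:
    def __init__(self):
        self._builders = {}

    def register_builder(self, provider_type, builder):
        # key by enum value, as in the deleted code
        self._builders[provider_type.value] = builder

    def get(self, provider_type, **kwargs):
        builder = self._builders.get(provider_type.value)
        if not builder:
            raise ValueError(provider_type)
        return builder(**kwargs)

factory = Factory()
factory.register_builder(ProviderType.k3d, dict)  # dict stands in for K3dBuilder
cluster = factory.get(ProviderType.k3d, id="cluster-id", name="demo")
```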
diff --git a/unikube/local/providers/helper.py b/unikube/local/providers/helper.py
deleted file mode 100644
index ca24729..0000000
--- a/unikube/local/providers/helper.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from unikube.cli import console
-from unikube.local.providers.abstract_provider import AbstractK8sProvider
-
-
-def get_cluster_or_exit(ctx, project_id) -> AbstractK8sProvider:
- cluster_data = ctx.cluster_manager.get(id=project_id)
- cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
- if not cluster:
- console.error("The project cluster does not exist.", _exit=True)
-
- return cluster
diff --git a/unikube/local/providers/k3d/__init__.py b/unikube/local/providers/k3d/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/unikube/local/providers/k3d/storage.py b/unikube/local/providers/k3d/storage.py
deleted file mode 100644
index 1790647..0000000
--- a/unikube/local/providers/k3d/storage.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from unikube.local.providers.abstract_provider import AbstractK8SProviderStorage
-from unikube.local.providers.k3d.types import K3dData
-from unikube.storage.local_storage import LocalStorage
-
-
-class K3dLocalStorage(LocalStorage):
- table_name = "k3d"
- pydantic_class = K3dData
-
-
-class K3dStorage(AbstractK8SProviderStorage):
- def __init__(self, id: str) -> None:
- super().__init__(
- id=id,
- storage=K3dLocalStorage(),
- )
diff --git a/unikube/local/providers/k3d/types.py b/unikube/local/providers/k3d/types.py
deleted file mode 100644
index 27bd397..0000000
--- a/unikube/local/providers/k3d/types.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from typing import Optional
-
-from unikube.local.providers.types import K8sProviderData
-
-
-class K3dData(K8sProviderData):
- api_port: Optional[str] = None
- publisher_port: Optional[str] = None
- kubeconfig_path: Optional[str] = None
diff --git a/unikube/local/providers/manager.py b/unikube/local/providers/manager.py
deleted file mode 100644
index 57e6516..0000000
--- a/unikube/local/providers/manager.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from typing import List, Union
-
-import unikube.cli.console as console
-from unikube import settings
-from unikube.local.providers.abstract_provider import AbstractK8sProvider
-from unikube.local.providers.factory import kubernetes_cluster_factory
-from unikube.local.providers.types import K8sProviderData, K8sProviderType
-from unikube.storage.local_storage import LocalStorage
-
-
-class K8sClusterManager(LocalStorage):
- table_name = "clusters"
- pydantic_class = K8sProviderData
-
- def get_all(self) -> List[K8sProviderData]:
- cluster_list = []
- for item in self.database.table.all():
- try:
- cluster_data = K8sProviderData(**item)
- cluster_list.append(cluster_data)
- except Exception:
- pass
-
- return cluster_list
-
- def get_cluster_list(self, ready: bool = None):
- ls = []
- for cluster_data in self.get_all():
- for provider_type in K8sProviderType:
- if self.exists(cluster_data, provider_type):
- # handle ready option
- if ready:
- kubernetes_cluster = self.select(
- cluster_data=cluster_data,
- cluster_provider_type=provider_type,
- )
- if not kubernetes_cluster:
- continue
-
- if kubernetes_cluster.ready() != ready:
- continue
-
- # append cluster to list
- ls.append(cluster_data)
- return ls
-
- def exists(
- self,
- cluster_data: K8sProviderData,
- cluster_provider_type: K8sProviderType,
- ) -> bool:
- try:
- _ = self.select(
- cluster_data=cluster_data,
- cluster_provider_type=cluster_provider_type,
- )
- return True
- except Exception:
- return False
-
- def select(
- self,
- cluster_data: K8sProviderData,
- cluster_provider_type: K8sProviderType = settings.UNIKUBE_DEFAULT_PROVIDER_TYPE,
- ) -> Union[AbstractK8sProvider, None]:
- # create config
- config = {
- "id": cluster_data.id,
- }
-
- if cluster_data.name:
- config["name"] = cluster_data.name
-
- # get selected kubernetes cluster from factory
- try:
- kubernetes_cluster = kubernetes_cluster_factory.get(
- cluster_provider_type,
- **config,
- )
- return kubernetes_cluster
- except Exception as e:
- console.debug(e)
- return None
diff --git a/unikube/local/providers/types.py b/unikube/local/providers/types.py
deleted file mode 100644
index 12e804a..0000000
--- a/unikube/local/providers/types.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from enum import Enum
-from typing import Optional
-
-from pydantic.main import BaseModel
-
-
-class K8sProviderType(Enum):
- k3d = "k3d"
-
-
-class K8sProviderData(BaseModel):
- id: str
- name: Optional[str] = None
diff --git a/unikube/settings.py b/unikube/settings.py
index 0b46789..16ceb69 100644
--- a/unikube/settings.py
+++ b/unikube/settings.py
@@ -4,7 +4,8 @@
from InquirerPy import get_style
from unikube.cli.helper import exist_or_create
-from unikube.local.providers.types import K8sProviderType
+from unikube.cluster.bridge.types import BridgeType
+from unikube.cluster.providers.types import ProviderType
# disable warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -13,7 +14,7 @@
CLI_CONFIG_FILE = os.path.expanduser("~/.unikube/config_dev")
exist_or_create(CLI_CONFIG_FILE)
-CLI_KUBECONFIG_DIRECTORY = os.path.expanduser("~/.unikube/")
+CLI_UNIKUBE_DIRECTORY = os.path.expanduser("~/.unikube/")
CLI_TABLEFMT = "psql"
CLI_LOG_LEVEL = "INFO" # DEBUG, INFO, WARNING, ERROR/SUCCESS
@@ -24,7 +25,8 @@
# unikube
UNIKUBE_FILE = "unikube.yaml"
-UNIKUBE_DEFAULT_PROVIDER_TYPE = K8sProviderType.k3d
+UNIKUBE_DEFAULT_PROVIDER_TYPE = ProviderType.k3d
+UNIKUBE_DEFAULT_BRIDGE_TYPE = BridgeType.gefyra
# token
TOKEN_REALM = "unikube"
@@ -54,13 +56,10 @@
K3D_CLI_MIN_VERSION = "3.0.0"
K3D_WEBSITE = "https://github.com/rancher/k3d"
-K3D_CLUSTER_PREFIX = "unikube-"
K3D_DEFAULT_INGRESS_PORT = 80
K3D_DEFAULT_WORKERS = 1
TELEPRESENCE_CLI_MIN_VERSION = "2.3.2"
-TELEPRESENCE_TAG_PREFIX = "telepresence:dev"
-TELEPRESENCE_DOCKER_IMAGE_FORMAT = "{project}-{deck}-{name}-" + TELEPRESENCE_TAG_PREFIX
HOMEBREW_CLI_MIN_VERSION = "3.2.0"
HOMEBREW_WEBSITE = "https://brew.sh/"
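Only the two enum members named in this diff are confirmed (`ProviderType.k3d`, `BridgeType.gefyra`); a sketch of how downstream code can consume the renamed defaults:

```python
from unikube import settings
from unikube.cluster.bridge.types import BridgeType
from unikube.cluster.providers.types import ProviderType

# Enum identity checks against the defaults set above.
assert settings.UNIKUBE_DEFAULT_PROVIDER_TYPE is ProviderType.k3d
assert settings.UNIKUBE_DEFAULT_BRIDGE_TYPE is BridgeType.gefyra
```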
diff --git a/unikube/storage/__init__.py b/unikube/storage/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/unikube/storage/general.py b/unikube/storage/general.py
deleted file mode 100644
index 812d079..0000000
--- a/unikube/storage/general.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from unikube.storage.local_storage import LocalStorage
-from unikube.storage.types import GeneralData
-
-
-class LocalStorageGeneral(LocalStorage):
- table_name = GeneralData().id
- pydantic_class = GeneralData
-
- document_id = GeneralData().id
-
- def get(self) -> GeneralData:
- data = super().get(id=self.document_id)
- return self.pydantic_class(**data.dict())
-
- def set(self, data: GeneralData) -> None:
- super().set(id=self.document_id, data=data)
-
- def delete(self) -> None:
- super().delete(id=self.document_id)
diff --git a/unikube/storage/local_storage.py b/unikube/storage/local_storage.py
deleted file mode 100644
index bc04f40..0000000
--- a/unikube/storage/local_storage.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from abc import ABC, abstractmethod
-
-from unikube.storage.tinydb import TinyDatabase
-from unikube.storage.types import TinyDatabaseData
-
-
-class ILocalStorage(ABC):
- @abstractmethod
- def get(self, id: str):
- raise NotImplementedError
-
- @abstractmethod
- def set(self, id: str, data: TinyDatabaseData):
- raise NotImplementedError
-
- @abstractmethod
- def delete(self, id: str):
- raise NotImplementedError
-
-
-class LocalStorage(ILocalStorage):
- table_name = "local"
- pydantic_class = TinyDatabaseData
-
- def __init__(self) -> None:
- # database / storage
- self.database = TinyDatabase(table_name=self.table_name)
-
- def get(self, id: str, **kwargs) -> TinyDatabaseData:
- try:
- document = self.database.select(id=id)
- return self.pydantic_class(**dict(document))
- except Exception:
- return self.pydantic_class(id=id, **kwargs)
-
- def set(self, id: str, data: TinyDatabaseData) -> None:
- self.database.update(id=id, data=data)
-
- def delete(self, id: str) -> None:
- self.database.delete(id=id)
diff --git a/unikube/storage/tinydb.py b/unikube/storage/tinydb.py
deleted file mode 100644
index 483aed8..0000000
--- a/unikube/storage/tinydb.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from typing import List
-
-from pydantic import BaseModel
-from tinydb import Query, TinyDB
-
-from unikube import settings
-from unikube.storage.types import TinyDatabaseData
-
-
-class TinyDatabase:
- def __init__(
- self,
- table_name="database",
- ):
- self.table_name = table_name
-
- self.db = TinyDB(settings.CLI_CONFIG_FILE)
- self.table = self.db.table(self.table_name)
-
- def select(self, id: str) -> TinyDatabaseData:
- document = self.table.get(Query().id == id)
- return document
-
- def insert(self, data: BaseModel) -> int:
- doc_id = self.table.insert(data.dict())
- return doc_id
-
- def update(self, id: str, data: BaseModel) -> List[int]:
- doc_id_list = self.table.upsert(data.dict(), Query().id == id)
- return doc_id_list
-
- def delete(self, id: str) -> bool:
- doc_id = self.table.remove(Query().id == id)
- if not doc_id:
- return False
- return True
-
- def drop(self):
- self.db.purge_table(self.table_name)
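The deleted wrapper is a thin layer over tinydb; its one non-obvious idiom is upserting on a logical `id` field instead of tinydb's internal document id. A stand-alone reproduction, with an illustrative file path:

```python
from tinydb import Query, TinyDB

db = TinyDB("/tmp/unikube_demo.json")  # illustrative path
table = db.table("clusters")

table.upsert({"id": "abc", "name": "demo"}, Query().id == "abc")     # inserts
table.upsert({"id": "abc", "name": "renamed"}, Query().id == "abc")  # updates in place
print(table.get(Query().id == "abc"))  # {'id': 'abc', 'name': 'renamed'}
```

Note that `purge_table`, used by the deleted `drop()`, is the TinyDB 3.x name; TinyDB 4.x renamed it to `drop_table`.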
diff --git a/unikube/storage/types.py b/unikube/storage/types.py
deleted file mode 100644
index 39a32ad..0000000
--- a/unikube/storage/types.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from pydantic import BaseModel
-
-from unikube.authentication.types import AuthenticationData
-from unikube.context.types import ContextData
-from unikube.types import ConfigurationData
-
-
-class TinyDatabaseData(BaseModel):
- id: str
-
-
-class GeneralData(TinyDatabaseData):
- id: str = "general"
- authentication: AuthenticationData = AuthenticationData()
-
-
-class UserData(TinyDatabaseData):
- context: ContextData = ContextData()
- config: ConfigurationData = ConfigurationData()
diff --git a/unikube/storage/user.py b/unikube/storage/user.py
deleted file mode 100644
index 75ceb4b..0000000
--- a/unikube/storage/user.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from unikube.storage.general import LocalStorageGeneral
-from unikube.storage.local_storage import LocalStorage
-from unikube.storage.types import UserData
-
-
-class LocalStorageUser(LocalStorage):
- table_name = "user"
- pydantic_class = UserData
-
- def __init__(self, user_email) -> None:
- super().__init__()
-
- self.user_email = user_email
-
- def get(self) -> UserData:
- try:
- data = super().get(id=self.user_email)
- return self.pydantic_class(**data.dict())
- except Exception:
- return UserData()
-
- def set(self, data: UserData) -> None:
- super().set(id=self.user_email, data=data)
-
- def delete(self) -> None:
- super().delete(id=self.user_email)
-
-
-def get_local_storage_user():
- try:
- local_storage_general = LocalStorageGeneral()
- general_data = local_storage_general.get()
- local_storage_user = LocalStorageUser(user_email=general_data.authentication.email)
- return local_storage_user
- except Exception:
- return None
diff --git a/unikube/unikubefile/selector.py b/unikube/unikubefile/selector.py
index d8abe1d..f562e78 100644
--- a/unikube/unikubefile/selector.py
+++ b/unikube/unikubefile/selector.py
@@ -38,7 +38,7 @@ def get(self, path_unikube_file: str = None) -> Union[UnikubeFile, UnikubeFile_1
with click.open_file(path_unikube_file) as unikube_file:
data = yaml.load(unikube_file, Loader=yaml.FullLoader)
except FileNotFoundError:
- raise UnikubeFileNotFoundError
+ raise UnikubeFileNotFoundError("Unikube file not found.")
# add & format data
try:
diff --git a/unikube/unikubefile/unikube_file_1_0.py b/unikube/unikubefile/unikube_file_1_0.py
index 3e40cf2..0bc4d26 100644
--- a/unikube/unikubefile/unikube_file_1_0.py
+++ b/unikube/unikubefile/unikube_file_1_0.py
@@ -25,7 +25,9 @@ class UnikubeFileApp(BaseModel):
name: str
build: UnikubeFileBuild
deployment: str
+ container: Optional[str] = None
port: Optional[int] = None
+ ports: Optional[List[str]] = None
command: str
volumes: Optional[List[str]] = None
env: Optional[List[dict]] = None
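The two new optional fields let an app target a specific container inside a deployment and declare multiple port mappings. A sketch of an app entry exercising them; only `container` and `ports` are confirmed by this hunk, the `build` keys are assumptions:

```python
# Equivalent of a unikube.yaml "app" entry; it would be validated via
# UnikubeFileApp(**app_config) if the assumed build keys match UnikubeFileBuild.
app_config = {
    "name": "api",
    "build": {"context": ".", "dockerfile": "Dockerfile"},  # assumed keys
    "deployment": "api-deployment",
    "container": "api",        # new: select a container within the deployment
    "ports": ["8080:8080"],    # new: several mappings instead of the single `port`
    "command": "python manage.py runserver",
}
```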