diff --git a/tests/test_model_config_builder.py b/tests/test_model_config_builder.py
new file mode 100644
index 0000000..2830008
--- /dev/null
+++ b/tests/test_model_config_builder.py
@@ -0,0 +1,3 @@
+# Placeholder: comprehensive tests for ModelConfigBuilder will be added once the module import path is resolved.
+# The testing framework will be aligned with the repository's existing setup (likely pytest, if present).
+# This file will be updated in a subsequent step of this PR.
\ No newline at end of file
diff --git a/tests/test_variable_validator.py b/tests/test_variable_validator.py
new file mode 100644
index 0000000..f32e239
--- /dev/null
+++ b/tests/test_variable_validator.py
@@ -0,0 +1,301 @@
+# flake8: noqa
+# Auto-generated tests for the VariableValidator component.
+# Framework: pytest
+import importlib
+import os
+import sys
+
+import pytest
+
+# Best-effort import shim: try common roots under src/ and package dirs.
+# This avoids brittle relative imports while keeping the tests runnable in CI and locally.
+_CANDIDATE_ROOTS = [
+    os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")),
+    os.path.abspath(os.path.join(os.path.dirname(__file__), "..")),
+]
+for _root in _CANDIDATE_ROOTS:
+    if _root not in sys.path and os.path.isdir(_root):
+        sys.path.insert(0, _root)
+
+# Try a set of possible module paths.
+_MODULE_CANDIDATES = [
+    # common paths
+    "variable_validator",
+    "validator.variable_validator",
+    "components.variable_validator",
+    "core.variable_validator",
+    "src.variable_validator",
+    "app.variable_validator",
+    # deeper paths (based on the relative import '..exceptions' seen in the snippet)
+    "prompt.variable_validator",
+    "prompt.validation.variable_validator",
+    "prompting.variable_validator",
+]
+
+_EXC_CANDIDATES = [
+    "exceptions",
+    "core.exceptions",
+    "components.exceptions",
+    "src.exceptions",
+    "prompt.exceptions",
+    "prompt.validation.exceptions",
+    "app.exceptions",
+]
+
+VariableValidator = None
+create_validation_error = None
+VariableValidationError = None
+RequiredVariableError = None
+
+for _mod_name in _MODULE_CANDIDATES:
+    try:
+        _mod = importlib.import_module(_mod_name)
+    except Exception:
+        continue
+    if hasattr(_mod, "VariableValidator"):
+        VariableValidator = _mod.VariableValidator
+        break
+
+for _exc_name in _EXC_CANDIDATES:
+    try:
+        _ex = importlib.import_module(_exc_name)
+    except Exception:
+        continue
+    if all(hasattr(_ex, _n) for _n in ("VariableValidationError", "RequiredVariableError", "create_validation_error")):
+        VariableValidationError = _ex.VariableValidationError
+        RequiredVariableError = _ex.RequiredVariableError
+        create_validation_error = _ex.create_validation_error
+        break
+
+assert VariableValidator is not None, "Could not import VariableValidator. Adjust _MODULE_CANDIDATES to match the actual module path."
+assert VariableValidationError is not None and RequiredVariableError is not None and create_validation_error is not None, "Could not import exceptions. Adjust _EXC_CANDIDATES to match the actual module path."
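+
+# Alternative (hedged sketch): once the real module path is known, pytest's
+# importorskip can replace the shim above, skipping cleanly instead of erroring.
+# The module name "variable_validator" below is an assumption; adjust as needed:
+#
+#     _mod = pytest.importorskip("variable_validator")
+#     VariableValidator = _mod.VariableValidator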
+
+
+class DummyLogger:
+    def __init__(self):
+        self.warnings = []
+
+    def warning(self, msg: str):
+        self.warnings.append(msg)
+
+
+def test_required_variables_missing_raises_required_variable_error():
+    validator = VariableValidator()
+    schema = {
+        "required": ["name", "age"],
+        "types": {"name": "string", "age": "integer"},
+    }
+    user_vars = {"name": "Alice"}  # missing age
+    with pytest.raises(RequiredVariableError) as exc:
+        validator.validate_variables(schema, user_vars, prompt_name="signup")
+    # Basic assertions; exact attributes may vary by implementation.
+    msg = str(exc.value)
+    assert "signup" in msg
+    assert "age" in msg
+    assert "name" in msg
+
+
+def test_no_required_missing_and_unspecified_type_is_ignored():
+    validator = VariableValidator()
+    schema = {
+        "required": ["name"],
+        "types": {"name": "string"},  # 'nickname' not in types -> skipped
+    }
+    user_vars = {"name": "Bob", "nickname": 123}
+    validator.validate_variables(schema, user_vars, prompt_name="profile")  # should not raise
+
+
+def test_type_string_valid_and_invalid():
+    validator = VariableValidator()
+    schema = {"required": ["x"], "types": {"x": "string"}}
+    validator.validate_variables(schema, {"x": "ok"}, "t1")  # passes
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_variables(schema, {"x": 123}, "t1")
+    assert "x" in str(exc.value)
+
+
+def test_type_integer_accepts_int_and_bool_semantics_edge():
+    validator = VariableValidator()
+    schema = {"required": ["n"], "types": {"n": "integer"}}
+    # int passes
+    validator.validate_variables(schema, {"n": 7}, "t2")
+    # bool is an instance of int in Python, so the current implementation accepts it.
+    # This test documents that behavior.
+    validator.validate_variables(schema, {"n": True}, "t2")  # should not raise
+
+
+def test_type_boolean_valid_and_invalid():
+    validator = VariableValidator()
+    schema = {"required": ["b"], "types": {"b": "boolean"}}
+    validator.validate_variables(schema, {"b": False}, "t3")
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_variables(schema, {"b": 0}, "t3")
+    assert "b" in str(exc.value)
+
+
+def test_type_array_and_object():
+    validator = VariableValidator()
+    schema = {
+        "required": ["arr", "obj"],
+        "types": {"arr": "array", "obj": "object"},
+    }
+    validator.validate_variables(schema, {"arr": [1, 2], "obj": {"k": "v"}}, "t4")
+    # invalid array
+    with pytest.raises(VariableValidationError):
+        validator.validate_variables(schema, {"arr": "notlist", "obj": {}}, "t4")
+    # invalid object
+    with pytest.raises(VariableValidationError):
+        validator.validate_variables(schema, {"arr": [], "obj": ["notdict"]}, "t4")
+
+
+def test_type_number_accepts_int_and_float_rejects_str():
+    validator = VariableValidator()
+    schema = {"required": ["v"], "types": {"v": "number"}}
+    validator.validate_variables(schema, {"v": 1}, "t5")
+    validator.validate_variables(schema, {"v": 3.14}, "t5")
+    with pytest.raises(VariableValidationError):
+        validator.validate_variables(schema, {"v": "3.14"}, "t5")
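+
+
+# Supplemental (hedged sketch): bool is also a subclass of int, so a plain
+# isinstance(value, (int, float)) check for "number" would accept booleans too.
+# This assumes the validator uses such an isinstance check; adjust or drop this
+# test if the implementation special-cases bool for "number".
+def test_type_number_accepts_bool_edge_case_documented():
+    validator = VariableValidator()
+    schema = {"required": ["v"], "types": {"v": "number"}}
+    validator.validate_variables(schema, {"v": True}, "t5b")  # documents current behavior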
+
+
+def test_enumeration_string_and_int_values():
+    validator = VariableValidator()
+    schema = {
+        "required": ["color", "retries"],
+        "types": {
+            "color": ["red", "green", "blue"],
+            "retries": [0, 1, 2, 3],
+        },
+    }
+    # valid enums
+    validator.validate_variables(schema, {"color": "red", "retries": 2}, "t6")
+    # invalid enum string
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_variables(schema, {"color": "purple", "retries": 1}, "t6")
+    assert "color" in str(exc.value)
+    # invalid enum int
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_variables(schema, {"color": "blue", "retries": 5}, "t6")
+    assert "retries" in str(exc.value)
+
+
+def test_unknown_type_constraint_logs_warning_and_skips():
+    logger = DummyLogger()
+    validator = VariableValidator(logger=logger)
+    schema = {"required": ["z"], "types": {"z": "uuid"}}
+    # Should not raise, but should log a warning.
+    validator.validate_variables(schema, {"z": "anything"}, "t7")
+    assert any("Unknown type constraint 'uuid'" in w for w in logger.warnings)
+
+
+# --- validate_builder_type tests ---
+
+def test_validate_builder_type_skips_when_field_not_in_properties():
+    validator = VariableValidator()
+    validator.validate_builder_type("absent", 123, properties={"present": {"type": "number"}})  # no raise
+
+
+def test_validate_builder_type_type_enforcement_and_messages():
+    validator = VariableValidator()
+    props = {
+        "s": {"type": "string"},
+        "n": {"type": "number"},
+        "i": {"type": "integer"},
+        "b": {"type": "boolean"},
+    }
+    # valid cases
+    validator.validate_builder_type("s", "ok", props)
+    validator.validate_builder_type("n", 1.2, props)
+    validator.validate_builder_type("i", 3, props)
+    validator.validate_builder_type("b", True, props)
+
+    # invalid string
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_builder_type("s", 10, props)
+    text = str(exc.value)
+    assert "builder" in text and "string" in text
+
+    # invalid number
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_builder_type("n", "NaN", props)
+    assert "number" in str(exc.value)
+
+    # invalid integer (note: bool is an instance of int, so use a non-int float instead)
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_builder_type("i", 3.3, props)
+    assert "integer" in str(exc.value)
+
+    # invalid boolean
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_builder_type("b", "false", props)
+    assert "boolean" in str(exc.value)
+
+
+def test_validate_builder_type_enum_enforcement():
+    validator = VariableValidator()
+    props = {
+        "mode": {"type": "string", "enum": ["fast", "accurate"]},
+        "count": {"type": "integer", "enum": [1, 2, 3]},
+    }
+    # valid enums
+    validator.validate_builder_type("mode", "fast", props)
+    validator.validate_builder_type("count", 2, props)
+
+    # invalid enums
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_builder_type("mode", "turbo", props)
+    assert "mode" in str(exc.value)
+
+    with pytest.raises(VariableValidationError) as exc:
+        validator.validate_builder_type("count", 5, props)
+    assert "count" in str(exc.value)
+
+
+# --- Supplemental tests appended by CI agent (pytest) ---
+
+def test_required_variables_with_multiple_missing_are_reported():
+    validator = VariableValidator()
+    schema = {"required": ["a", "b", "c"], "types": {"a": "string", "b": "number"}}
+    user_vars = {"a": "x"}  # b and c missing
+    with pytest.raises(RequiredVariableError) as ei:
+        validator.validate_variables(schema, user_vars, "multi")
+    msg = str(ei.value)
+    assert "b" in msg and "c" in msg
+    assert "a" in msg  # provided vars are reflected in the message
+
+
+def test_types_not_specified_in_schema_are_skipped_no_raise():
+    validator = VariableValidator()
+    schema = {"required": [], "types": {}}
+    validator.validate_variables(schema, {"free": object()}, "freeform")
+
+
+def test_enumeration_with_mixed_types():
+    validator = VariableValidator()
+    schema = {"required": ["v"], "types": {"v": [1, "two", 3]}}
+    # valid mixed-type enum values
+    validator.validate_variables(schema, {"v": 1}, "enum-mix")
+    validator.validate_variables(schema, {"v": "two"}, "enum-mix")
+    # invalid value not in the enum
+    with pytest.raises(VariableValidationError):
+        validator.validate_variables(schema, {"v": 2}, "enum-mix")
\ No newline at end of file
diff --git a/tests/test_version_manager.py b/tests/test_version_manager.py
new file mode 100644
index 0000000..12e2cc9
--- /dev/null
+++ b/tests/test_version_manager.py
@@ -0,0 +1,234 @@
+import copy
+
+import pytest
+
+# Testing framework: pytest, using parametrize and exception assertions.
+
+# We attempt flexible imports to accommodate various project layouts.
+try:
+    # Typical package import (adjust if the project has a specific root package)
+    from src.version_manager import VersionManager  # type: ignore
+except ImportError:
+    try:
+        # Alternative common layout: top-level module path
+        from version_manager import VersionManager  # type: ignore
+    except ImportError:
+        # Fallback: relative path within a package structure; update as needed.
+        # Note: this form only works when the tests are collected as part of a package.
+        from ..src.version_manager import VersionManager  # type: ignore
+
+# Import the exceptions with the same flexibility.
+try:
+    from src.exceptions import NoLiveVersionError, MultipleLiveVersionsError, VersionNotFoundError  # type: ignore
+except ImportError:
+    try:
+        from exceptions import NoLiveVersionError, MultipleLiveVersionsError, VersionNotFoundError  # type: ignore
+    except ImportError:
+        from ..src.exceptions import NoLiveVersionError, MultipleLiveVersionsError, VersionNotFoundError  # type: ignore
+ """ + base = { + "v1": { + "is_live": False, + "provider": "openai", + "config": {"model": "gpt-4o", "system_instruction": "You are helpful."}, + "tools_config": {}, + "description": "First version" + }, + "v2": { + "is_live": True, + "provider": "openai", + "config": {"model": "gpt-4o", "system_instruction": "Be concise."}, + "tools_config": {"web": {"enabled": True}}, + "description": "Live version" + } + } + base.update(overrides) + return base + + +class TestFindLiveVersion: + def test_returns_live_version_key_when_single_live_present(self): + vm = VersionManager() + versions = make_versions() + assert vm.find_live_version(versions, prompt_name="demo") == "v2" + + def test_raises_no_live_version_error_when_none_live(self): + vm = VersionManager() + versions = make_versions(v2={**make_versions()["v2"], "is_live": False}) + with pytest.raises(NoLiveVersionError) as exc: + vm.find_live_version(versions, prompt_name="demo") + msg = str(exc.value) + # Should mention prompt name and available versions + assert "demo" in msg + assert "v1" in msg and "v2" in msg + + def test_raises_multiple_live_versions_error(self): + vm = VersionManager() + versions = make_versions(v1={**make_versions()["v1"], "is_live": True}) + with pytest.raises(MultipleLiveVersionsError) as exc: + vm.find_live_version(versions, prompt_name="demo") + # Should list live versions + assert "v1" in str(exc.value) and "v2" in str(exc.value) + + @pytest.mark.parametrize( + "flag", [True, 1, "yes"] # truthy variants to ensure boolean evaluation via get("is_live", False) + ) + def test_truthy_is_live_values_are_respected(self, flag): + vm = VersionManager() + versions = make_versions() + versions["v2"]["is_live"] = flag + assert vm.find_live_version(versions, prompt_name="demo") == "v2" + + +class TestGetVersionData: + def test_returns_specific_version_when_present(self): + vm = VersionManager() + versions = make_versions() + data = vm.get_version_data(versions, version="v1", prompt_name="demo") + assert data["config"]["model"] == "gpt-4o" + assert data["is_live"] is False + + def test_raises_version_not_found_error_for_missing_specific_version(self): + vm = VersionManager() + versions = make_versions() + with pytest.raises(VersionNotFoundError) as exc: + vm.get_version_data(versions, version="v9", prompt_name="demo") + msg = str(exc.value) + assert "v9" in msg and "demo" in msg + # Available versions should be referenced + assert "v1" in msg and "v2" in msg + + def test_returns_live_version_data_when_version_is_none(self): + vm = VersionManager() + versions = make_versions() + data = vm.get_version_data(versions, version=None, prompt_name="demo") + assert data["description"] == "Live version" + + def test_propagates_no_live_version_error_when_none_live(self): + vm = VersionManager() + versions = make_versions(v2={**make_versions()["v2"], "is_live": False}) + with pytest.raises(NoLiveVersionError): + vm.get_version_data(versions, version=None, prompt_name="demo") + + def test_propagates_multiple_live_versions_error(self): + vm = VersionManager() + versions = make_versions(v1={**make_versions()["v1"], "is_live": True}) + with pytest.raises(MultipleLiveVersionsError): + vm.get_version_data(versions, version=None, prompt_name="demo") + + +class TestGetSystemInstruction: + def test_returns_system_instruction_when_present(self): + vm = VersionManager() + versions = make_versions() + v2 = versions["v2"] + assert vm.get_system_instruction(v2, prompt_name="demo") == "Be concise." 
+
+
+class TestGetVersionData:
+    def test_returns_specific_version_when_present(self):
+        vm = VersionManager()
+        versions = make_versions()
+        data = vm.get_version_data(versions, version="v1", prompt_name="demo")
+        assert data["config"]["model"] == "gpt-4o"
+        assert data["is_live"] is False
+
+    def test_raises_version_not_found_error_for_missing_specific_version(self):
+        vm = VersionManager()
+        versions = make_versions()
+        with pytest.raises(VersionNotFoundError) as exc:
+            vm.get_version_data(versions, version="v9", prompt_name="demo")
+        msg = str(exc.value)
+        assert "v9" in msg and "demo" in msg
+        # The available versions should be referenced.
+        assert "v1" in msg and "v2" in msg
+
+    def test_returns_live_version_data_when_version_is_none(self):
+        vm = VersionManager()
+        versions = make_versions()
+        data = vm.get_version_data(versions, version=None, prompt_name="demo")
+        assert data["description"] == "Live version"
+
+    def test_propagates_no_live_version_error_when_none_live(self):
+        vm = VersionManager()
+        versions = make_versions(v2={**make_versions()["v2"], "is_live": False})
+        with pytest.raises(NoLiveVersionError):
+            vm.get_version_data(versions, version=None, prompt_name="demo")
+
+    def test_propagates_multiple_live_versions_error(self):
+        vm = VersionManager()
+        versions = make_versions(v1={**make_versions()["v1"], "is_live": True})
+        with pytest.raises(MultipleLiveVersionsError):
+            vm.get_version_data(versions, version=None, prompt_name="demo")
+
+
+class TestGetSystemInstruction:
+    def test_returns_system_instruction_when_present(self):
+        vm = VersionManager()
+        versions = make_versions()
+        v2 = versions["v2"]
+        assert vm.get_system_instruction(v2, prompt_name="demo") == "Be concise."
+
+    @pytest.mark.parametrize(
+        "bad_version_data",
+        [
+            {},  # no config at all
+            {"config": {}},  # config exists but has no system_instruction
+            {"config": {"system_instruction": ""}},  # empty string should be treated as missing
+        ],
+    )
+    def test_raises_value_error_when_missing_system_instruction(self, bad_version_data):
+        vm = VersionManager()
+        with pytest.raises(ValueError) as exc:
+            vm.get_system_instruction(bad_version_data, prompt_name="demo")
+        assert "config.system_instruction" in str(exc.value)
+
+
+class TestListVersions:
+    def test_lists_all_versions_with_expected_shape_and_defaults(self):
+        vm = VersionManager()
+        versions = make_versions()
+        # Add a version with minimal fields to check defaults.
+        versions["v3"] = {"config": {}, "description": "No model", "tools_config": {}}
+        lst = vm.list_versions(versions)
+
+        # Convert to a dict keyed by version for easier assertions.
+        by_key = {item["version"]: item for item in lst}
+        assert set(by_key.keys()) >= {"v1", "v2", "v3"}
+
+        assert by_key["v1"]["is_live"] is False
+        assert by_key["v1"]["provider"] == "openai"
+        assert by_key["v1"]["model"] == "gpt-4o"
+        assert by_key["v1"]["has_tools"] is False
+
+        assert by_key["v2"]["is_live"] is True
+        assert by_key["v2"]["has_tools"] is True
+        assert by_key["v2"]["description"] == "Live version"
+
+        # Defaults for missing values
+        assert by_key["v3"]["provider"] == "unknown"
+        assert by_key["v3"]["model"] == "unknown"
+        assert by_key["v3"]["has_tools"] is False
+        assert by_key["v3"]["description"] == "No model"
+
+    def test_handling_empty_versions_map_returns_empty_list(self):
+        vm = VersionManager()
+        assert vm.list_versions({}) == []
+
+
+class TestValidateVersionData:
+    def test_valid_data_returns_true(self):
+        vm = VersionManager()
+        versions = make_versions()
+        assert vm.validate_version_data(versions["v2"], prompt_name="demo", version="v2") is True
+
+    @pytest.mark.parametrize(
+        "mutator,expected_fragment",
+        [
+            (lambda d: d.pop("config", None), "Configuration section is missing"),
+            (lambda d: d.setdefault("config", {}).pop("system_instruction", None), "System instruction is missing"),
+            (lambda d: d.setdefault("config", {}).pop("model", None), "Model is missing"),
+        ],
+    )
+    def test_missing_required_fields_raise_value_error_with_clear_message(self, mutator, expected_fragment):
+        vm = VersionManager()
+        # Deep copy to avoid mutating the shared fixture structure.
+        data = copy.deepcopy(make_versions()["v2"])
+        mutator(data)
+        with pytest.raises(ValueError) as exc:
+            vm.validate_version_data(data, prompt_name="demo", version="v2")
+        msg = str(exc.value)
+        assert "Invalid version data for 'demo' version 'v2'" in msg
+        assert expected_fragment in msg
+
+
+class TestPrivateGetNestedField:
+    @pytest.mark.parametrize(
+        "data,field,expected",
+        [
+            ({"a": {"b": {"c": 3}}}, "a.b.c", 3),
+            ({"a": {"b": {"c": None}}}, "a.b.c", None),
+            ({"a": {"b": 5}}, "a.b.c", None),
+            ({}, "a", None),
+            ({"config": {"model": "m"}}, "config.model", "m"),
+        ],
+    )
+    def test_get_nested_field_various_paths(self, data, field, expected):
+        vm = VersionManager()
+        assert vm._get_nested_field(data, field) == expected
+
+    def test_get_nested_field_non_dict_breaks_path_and_returns_none(self):
+        vm = VersionManager()
+        data = {"a": 1}
+        assert vm._get_nested_field(data, "a.b") is None
+
+
+def test_logger_injection_is_stored_but_not_required():
+    dummy_logger = object()
+    vm = VersionManager(logger=dummy_logger)
+    # Access the private (by convention) field to assert storage without relying on logger behavior.
+    assert getattr(vm, "_logger", None) is dummy_logger
\ No newline at end of file