-
-
Notifications
You must be signed in to change notification settings - Fork 10.8k
Closed as not planned
Labels
bug (Something isn't working) · ci/build · stale (Over 90 days of inactivity) · v1
Description
Your current environment
...
🐛 Describe the bug
main commit 51d7c6a
Seen in #15894
FAILED v1/entrypoints/llm/test_struct_output_generate.py::test_structured_output[Qwen/Qwen2.5-1.5B-Instruct-guidance:disable-any-whitespace-auto] - json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
[2025-04-03T16:08:35Z] _ test_structured_output[Qwen/Qwen2.5-1.5B-Instruct-guidance:disable-any-whitespace-auto] _
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7f318d89eb40>
[2025-04-03T16:08:35Z] sample_json_schema = {'properties': {'age': {'type': 'integer'}, 'name': {'type': 'string'}, 'skills': {'items': {'type': 'string'}, 'type'...ition'], 'type': 'object'}, 'type': 'array'}}, 'required': ['name', 'age', 'skills', 'work_history'], 'type': 'object'}
[2025-04-03T16:08:35Z] unsupported_json_schema = {'properties': {'email': {'pattern': '^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$', 'type': 'string'}, 'grade': ...[a-z]{1,10}$', 'type': 'string'}, 'type': 'array'}}, 'required': ['score', 'grade', 'email', 'tags'], 'type': 'object'}
[2025-04-03T16:08:35Z] sample_sql_ebnf = '\nroot ::= select_statement\nselect_statement ::= "SELECT" column "from" table "where" condition\ncolumn ::= "col_1" | "col_2"\ntable ::= "table_1" | "table_2"\ncondition ::= column "=" number\nnumber ::= "1" | "2"\n'
[2025-04-03T16:08:35Z] sample_sql_lark = '\nstart: select_statement\nselect_statement: "SELECT" column "from" table "where" condition\ncolumn: "col_1" | "col_2"\ntable: "table_1" | "table_2"\ncondition: column "=" number\nnumber: "1" | "2"\n'
[2025-04-03T16:08:35Z] sample_regex = '((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.){3}(25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)'
[2025-04-03T16:08:35Z] sample_guided_choice = ['Python', 'Java', 'JavaScript', 'C++', 'C#', 'PHP', ...]
[2025-04-03T16:08:35Z] guided_decoding_backend = 'guidance:disable-any-whitespace'
[2025-04-03T16:08:35Z] tokenizer_mode = 'auto', model_name = 'Qwen/Qwen2.5-1.5B-Instruct'
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] @pytest.mark.skip_global_cleanup
[2025-04-03T16:08:35Z] @pytest.mark.parametrize("model_name, guided_decoding_backend, tokenizer_mode",
[2025-04-03T16:08:35Z] PARAMS_MODELS_BACKENDS_TOKENIZER_MODE)
[2025-04-03T16:08:35Z] def test_structured_output(
[2025-04-03T16:08:35Z] monkeypatch: pytest.MonkeyPatch,
[2025-04-03T16:08:35Z] sample_json_schema: dict[str, Any],
[2025-04-03T16:08:35Z] unsupported_json_schema: dict[str, Any],
[2025-04-03T16:08:35Z] sample_sql_ebnf: str,
[2025-04-03T16:08:35Z] sample_sql_lark: str,
[2025-04-03T16:08:35Z] sample_regex: str,
[2025-04-03T16:08:35Z] sample_guided_choice: str,
[2025-04-03T16:08:35Z] guided_decoding_backend: str,
[2025-04-03T16:08:35Z] tokenizer_mode: str,
[2025-04-03T16:08:35Z] model_name: str,
[2025-04-03T16:08:35Z] ):
[2025-04-03T16:08:35Z] monkeypatch.setenv("VLLM_USE_V1", "1")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] # Use a single LLM instance for several scenarios to
[2025-04-03T16:08:35Z] # speed up the test suite.
[2025-04-03T16:08:35Z] llm = LLM(model=model_name,
[2025-04-03T16:08:35Z] enforce_eager=True,
[2025-04-03T16:08:35Z] max_model_len=1024,
[2025-04-03T16:08:35Z] guided_decoding_backend=guided_decoding_backend,
[2025-04-03T16:08:35Z] tokenizer_mode=tokenizer_mode)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 1: Generate JSON output based on a provided schema
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=1.0,
[2025-04-03T16:08:35Z] max_tokens=1000,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(json=sample_json_schema))
[2025-04-03T16:08:35Z] outputs = llm.generate(prompts=[
[2025-04-03T16:08:35Z] f"Give an example JSON for an employee profile "
[2025-04-03T16:08:35Z] f"that fits this schema: {sample_json_schema}"
[2025-04-03T16:08:35Z] ] * 2,
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] prompt = output.prompt
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z] if 'disable-any-whitespace' in guided_decoding_backend:
[2025-04-03T16:08:35Z] assert "\n" not in generated_text
[2025-04-03T16:08:35Z] print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
[2025-04-03T16:08:35Z] output_json = json.loads(generated_text)
[2025-04-03T16:08:35Z] jsonschema.validate(instance=output_json, schema=sample_json_schema)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 2: Generate JSON object without a schema
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=1.0,
[2025-04-03T16:08:35Z] max_tokens=100,
[2025-04-03T16:08:35Z] n=2,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(json_object=True))
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts=("Generate a JSON object with curly braces for a person with "
[2025-04-03T16:08:35Z] "name and age fields for John Smith who is 31 years old."),
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] for i in range(2):
[2025-04-03T16:08:35Z] generated_text = output.outputs[i].text
[2025-04-03T16:08:35Z] print(generated_text)
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] # Parse to verify it is valid JSON
[2025-04-03T16:08:35Z] parsed_json = json.loads(generated_text)
[2025-04-03T16:08:35Z] allowed_types: tuple[type, ...] = (dict, )
[2025-04-03T16:08:35Z] if guided_decoding_backend.startswith("xgrammar"):
[2025-04-03T16:08:35Z] # TODO - we are currently too permissive with xgrammar and
[2025-04-03T16:08:35Z] # allow # any valid json (typically comes back as a list or
[2025-04-03T16:08:35Z] # object). We can fix this by specifying a jsonschema of
[2025-04-03T16:08:35Z] # {"type": "object"}, # but we need this fix in a release
[2025-04-03T16:08:35Z] # first: https://github.com/mlc-ai/xgrammar/pull/264
[2025-04-03T16:08:35Z] allowed_types = (dict, list)
[2025-04-03T16:08:35Z] assert isinstance(parsed_json, allowed_types)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 3: test a jsonschema incompatible with xgrammar
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=1.0,
[2025-04-03T16:08:35Z] max_tokens=1000,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(json=unsupported_json_schema))
[2025-04-03T16:08:35Z] if guided_decoding_backend.startswith("xgrammar"):
[2025-04-03T16:08:35Z] with pytest.raises(ValueError,
[2025-04-03T16:08:35Z] match="The provided JSON schema contains features "
[2025-04-03T16:08:35Z] "not supported by xgrammar."):
[2025-04-03T16:08:35Z] llm.generate(prompts=[
[2025-04-03T16:08:35Z] f"Give an example JSON for an employee profile "
[2025-04-03T16:08:35Z] f"that fits this schema: {unsupported_json_schema}"
[2025-04-03T16:08:35Z] ] * 2,
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True)
[2025-04-03T16:08:35Z] else:
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts=("Give an example JSON object for a grade "
[2025-04-03T16:08:35Z] "that fits this schema: "
[2025-04-03T16:08:35Z] f"{unsupported_json_schema}"),
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True)
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z] print(generated_text)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] # Parse to verify it is valid JSON
[2025-04-03T16:08:35Z] parsed_json = json.loads(generated_text)
[2025-04-03T16:08:35Z] assert isinstance(parsed_json, dict)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 4: Generate SQL statement using EBNF grammar
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=0.8,
[2025-04-03T16:08:35Z] top_p=0.95,
[2025-04-03T16:08:35Z] max_tokens=1000,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(grammar=sample_sql_ebnf))
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts=("Generate a sql statement that selects col_1 from "
[2025-04-03T16:08:35Z] "table_1 where it is equal to 1"),
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True,
[2025-04-03T16:08:35Z] )
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] prompt = output.prompt
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] # remove spaces for comparison b/c we removed them in the grammar
[2025-04-03T16:08:35Z] ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(
[2025-04-03T16:08:35Z] " ", "")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert generated_text.strip() == ground_truth
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 5: Generate SQL statement using Lark grammar
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=0.8,
[2025-04-03T16:08:35Z] top_p=0.95,
[2025-04-03T16:08:35Z] max_tokens=1000,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(grammar=sample_sql_lark))
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts=("Generate a sql statement that selects col_1 from "
[2025-04-03T16:08:35Z] "table_1 where it is equal to 1"),
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True,
[2025-04-03T16:08:35Z] )
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] prompt = output.prompt
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] # use Lark to parse the output, and make sure it's a valid parse tree
[2025-04-03T16:08:35Z] from lark import Lark
[2025-04-03T16:08:35Z] parser = Lark(sample_sql_lark)
[2025-04-03T16:08:35Z] parser.parse(generated_text)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] # remove spaces for comparison b/c we removed them in the grammar
[2025-04-03T16:08:35Z] ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(
[2025-04-03T16:08:35Z] " ", "")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert generated_text.strip() == ground_truth
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 6: Test invalid grammar input
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=0.8,
[2025-04-03T16:08:35Z] top_p=0.95,
[2025-04-03T16:08:35Z] max_tokens=1000,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(grammar="not a grammar"))
[2025-04-03T16:08:35Z] with pytest.raises(ValueError, match="Failed to convert the grammar "):
[2025-04-03T16:08:35Z] llm.generate(
[2025-04-03T16:08:35Z] prompts=("Generate a sql statement that selects col_1 from "
[2025-04-03T16:08:35Z] "table_1 where it is equal to 1"),
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True,
[2025-04-03T16:08:35Z] )
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 7: Generate text based on a regex pattern
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=0.8,
[2025-04-03T16:08:35Z] top_p=0.95,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(regex=sample_regex))
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts=[
[2025-04-03T16:08:35Z] f"Give an example IPv4 address with this regex: {sample_regex}"
[2025-04-03T16:08:35Z] ] * 2,
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True,
[2025-04-03T16:08:35Z] )
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] prompt = output.prompt
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] print(generated_text)
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z] assert re.fullmatch(sample_regex, generated_text) is not None
[2025-04-03T16:08:35Z] print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 8: Generate text based on a choices
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=0.8,
[2025-04-03T16:08:35Z] top_p=0.95,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(choice=sample_guided_choice))
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts="The best language for type-safe systems programming is ",
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True)
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] prompt = output.prompt
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] print(generated_text)
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z] assert generated_text in sample_guided_choice
[2025-04-03T16:08:35Z] print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] # Test 9: Generate structured output using a Pydantic model with an enum
[2025-04-03T16:08:35Z] #
[2025-04-03T16:08:35Z] json_schema = CarDescription.model_json_schema()
[2025-04-03T16:08:35Z] sampling_params = SamplingParams(
[2025-04-03T16:08:35Z] temperature=1.0,
[2025-04-03T16:08:35Z] max_tokens=1000,
[2025-04-03T16:08:35Z] guided_decoding=GuidedDecodingParams(json=json_schema))
[2025-04-03T16:08:35Z] outputs = llm.generate(
[2025-04-03T16:08:35Z] prompts="Generate a JSON with the brand, model and car_type of"
[2025-04-03T16:08:35Z] "the most iconic car from the 90's",
[2025-04-03T16:08:35Z] sampling_params=sampling_params,
[2025-04-03T16:08:35Z] use_tqdm=True)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] assert outputs is not None
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] for output in outputs:
[2025-04-03T16:08:35Z] assert output is not None
[2025-04-03T16:08:35Z] assert isinstance(output, RequestOutput)
[2025-04-03T16:08:35Z] prompt = output.prompt
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] generated_text = output.outputs[0].text
[2025-04-03T16:08:35Z] assert generated_text is not None
[2025-04-03T16:08:35Z] print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
[2025-04-03T16:08:35Z] > output_json = json.loads(generated_text)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] v1/entrypoints/llm/test_struct_output_generate.py:332:
[2025-04-03T16:08:35Z] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[2025-04-03T16:08:35Z] /usr/lib/python3.12/json/__init__.py:346: in loads
[2025-04-03T16:08:35Z] return _default_decoder.decode(s)
[2025-04-03T16:08:35Z] /usr/lib/python3.12/json/decoder.py:338: in decode
[2025-04-03T16:08:35Z] obj, end = self.raw_decode(s, idx=_w(s, 0).end())
[2025-04-03T16:08:35Z] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] self = <json.decoder.JSONDecoder object at 0x7f32ee3035c0>, s = '', idx = 0
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] def raw_decode(self, s, idx=0):
[2025-04-03T16:08:35Z] """Decode a JSON document from ``s`` (a ``str`` beginning with
[2025-04-03T16:08:35Z] a JSON document) and return a 2-tuple of the Python
[2025-04-03T16:08:35Z] representation and the index in ``s`` where the document ended.
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] This can be used to decode a JSON document from a string that may
[2025-04-03T16:08:35Z] have extraneous data at the end.
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] """
[2025-04-03T16:08:35Z] try:
[2025-04-03T16:08:35Z] obj, end = self.scan_once(s, idx)
[2025-04-03T16:08:35Z] except StopIteration as err:
[2025-04-03T16:08:35Z] > raise JSONDecodeError("Expecting value", s, err.value) from None
[2025-04-03T16:08:35Z] E json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
[2025-04-03T16:08:35Z]
[2025-04-03T16:08:35Z] /usr/lib/python3.12/json/decoder.py:356: JSONDecodeError
Before submitting a new issue...
- Make sure you already searched for relevant issues, and asked the chatbot living at the bottom right corner of the documentation page, which can answer lots of frequently asked questions.
Metadata
Metadata
Assignees
Labels
bug (Something isn't working) · ci/build · stale (Over 90 days of inactivity) · v1