diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index f5becb937..f737b9b00 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -1,7 +1,7 @@
---
# https://docs.readthedocs.io/en/stable/config-file/v2.html
version: 2
-# NOTE: not builing epub because epub does not know how to handle .ico files
+# NOTE: not building epub because epub does not know how to handle .ico files
# which results in a warning which causes the build to fail due to
# `sphinx.fail_on_warning`
# https://github.com/sphinx-doc/sphinx/issues/10350
diff --git a/admin/get_merged_prs.py b/admin/get_merged_prs.py
index 7e96d1d47..ddee02fb4 100644
--- a/admin/get_merged_prs.py
+++ b/admin/get_merged_prs.py
@@ -23,7 +23,11 @@
print(f"Getting {url}")
with urllib.request.urlopen(url) as response:
response_text = response.read()
- link_headers = response.info()["link"].split(",") if response.info()["link"] is not None else None
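+        # The GitHub API paginates results; the "link" response header, when
+        # present, points at further pages of results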
+ link_headers = (
+ response.info()["link"].split(",")
+ if response.info()["link"] is not None
+ else None
+ )
json_data = json.loads(response_text)
ITEMS.extend(json_data["items"])
diff --git a/devtools/requirements-poetry.in b/devtools/requirements-poetry.in
index 1bf7b707a..7ba51be53 100644
--- a/devtools/requirements-poetry.in
+++ b/devtools/requirements-poetry.in
@@ -1,3 +1,3 @@
# Fixing this here as readthedocs can't use the compiled requirements-poetry.txt
# due to conflicts.
-poetry==1.8.4
+poetry==1.8.5
diff --git a/docker/latest/requirements.in b/docker/latest/requirements.in
index 42fb39ae7..cc344d2a6 100644
--- a/docker/latest/requirements.in
+++ b/docker/latest/requirements.in
@@ -1,6 +1,4 @@
# This file is used for building a docker image of the latest rdflib release. It
# will be updated by dependabot when new releases are made.
-rdflib==7.1.0
+rdflib==7.1.1
html5rdf==1.2.0
-# html5lib-modern is required to allow the Dockerfile to build on with pre-RDFLib-7.1.1 releases.
-html5lib-modern==1.2.0
diff --git a/docker/latest/requirements.txt b/docker/latest/requirements.txt
index 570502462..4357e6d52 100644
--- a/docker/latest/requirements.txt
+++ b/docker/latest/requirements.txt
@@ -5,12 +5,8 @@
# pip-compile docker/latest/requirements.in
#
html5rdf==1.2
- # via
- # -r docker/latest/requirements.in
- # rdflib
-html5lib-modern==1.2
# via -r docker/latest/requirements.in
pyparsing==3.0.9
# via rdflib
-rdflib==7.1.0
+rdflib==7.1.1
# via -r docker/latest/requirements.in
diff --git a/docs/apidocs/examples.rst b/docs/apidocs/examples.rst
index 43b92c137..a8c3429bd 100644
--- a/docs/apidocs/examples.rst
+++ b/docs/apidocs/examples.rst
@@ -3,10 +3,18 @@ examples Package
These examples all live in ``./examples`` in the source-distribution of RDFLib.
-:mod:`~examples.conjunctive_graphs` Module
-------------------------------------------
+:mod:`~examples.datasets` Module
+--------------------------------
+
+.. automodule:: examples.datasets
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`~examples.jsonld_serialization` Module
+--------------------------------------------
-.. automodule:: examples.conjunctive_graphs
+.. automodule:: examples.jsonld_serialization
:members:
:undoc-members:
:show-inheritance:
diff --git a/docs/developers.rst b/docs/developers.rst
index 7ca914fca..e3593711e 100644
--- a/docs/developers.rst
+++ b/docs/developers.rst
@@ -231,20 +231,17 @@ our black.toml config file:
poetry run black .
-Check style and conventions with `flake8 <https://flake8.pycqa.org/>`_:
+Check style and conventions with `ruff <https://docs.astral.sh/ruff/>`_:
.. code-block:: bash
- poetry run flake8 rdflib
+ poetry run ruff check
-We also provide a `flakeheaven <https://github.com/flakeheaven/flakeheaven>`_
-baseline that ignores existing flake8 errors and only reports on newly
-introduced flake8 errors:
+Any issues that are found can potentially be fixed automatically using:
.. code-block:: bash
- poetry run flakeheaven
-
+ poetry run ruff check --fix
Check types with `mypy <http://mypy-lang.org/>`_:
diff --git a/examples/datasets.py b/examples/datasets.py
index d550775a1..eab3aa384 100644
--- a/examples/datasets.py
+++ b/examples/datasets.py
@@ -1,13 +1,23 @@
"""
-An RDFLib Dataset is a slight extension to ConjunctiveGraph: it uses simpler terminology
-and has a few additional convenience methods, for example add() can be used to
-add quads directly to a specific Graph within the Dataset.
+This module contains a number of common tasks using the RDFLib Dataset class.
-This example file shows how to declare a Dataset, add content to it, serialise it, query it
-and remove things from it.
+An RDFLib Dataset is an object that stores multiple Named Graphs - instances of RDFLib
+Graph identified by IRI - within it and allows whole-of-dataset or single Graph use.
+
+Dataset extends Graph's Subject, Predicate, Object structure to include Graph -
+archaically called Context - producing quads of s, p, o, g.
+
+There is an older implementation of a Dataset-like class in RDFLib < 7.x called
+ConjunctiveGraph that is now deprecated.
+
+Sections in this module:
+
+1. Creating & Growing Datasets
+2. Looping & Counting triples/quads in Datasets
+3. Manipulating Graphs with Datasets
"""
-from rdflib import Dataset, Literal, Namespace, URIRef
+from rdflib import Dataset, Graph, Literal, URIRef
# Note regarding `mypy: ignore_errors=true`:
#
@@ -19,41 +29,48 @@
# mypy: ignore_errors=true
-#
-# Create & Add
-#
+#######################################################################################
+# 1. Creating & Growing
+#######################################################################################
# Create an empty Dataset
d = Dataset()
+
# Add a namespace prefix to it, just like for Graph
-d.bind("ex", Namespace("http://example.com/"))
+d.bind("ex", "http://example.com/")
-# Declare a Graph URI to be used to identify a Graph
-graph_1 = URIRef("http://example.com/graph-1")
+# Declare a Graph identifier to be used to identify a Graph
+# A string or a URIRef may be used, but it is safer to always use a URIRef for consistency
+graph_1_id = URIRef("http://example.com/graph-1")
-# Add an empty Graph, identified by graph_1, to the Dataset
-d.graph(identifier=graph_1)
+# Add an empty Graph, identified by graph_1_id, to the Dataset
+d.graph(identifier=graph_1_id)
-# Add two quads to Graph graph_1 in the Dataset
+# Add two quads to the Dataset - triples + a graph ID
+# These insert the triples into the Graph specified by the ID
d.add(
(
URIRef("http://example.com/subject-x"),
URIRef("http://example.com/predicate-x"),
Literal("Triple X"),
- graph_1,
+ graph_1_id,
)
)
+
d.add(
(
URIRef("http://example.com/subject-z"),
URIRef("http://example.com/predicate-z"),
Literal("Triple Z"),
- graph_1,
+ graph_1_id,
)
)
-# Add another quad to the Dataset to a non-existent Graph:
-# the Graph is created automatically
+# We now have 2 distinct quads in the Dataset so the Dataset has a length of 2
+assert len(d) == 2
+
+# Add another quad to the Dataset specifying a non-existent Graph.
+# The Graph is created automatically
d.add(
(
URIRef("http://example.com/subject-y"),
@@ -63,8 +80,15 @@
)
)
-# printing the Dataset like this: print(d.serialize(format="trig"))
-# produces a result like this:
+assert len(d) == 3
+
+
+# You can print the Dataset like you do a Graph but you must specify a quads format like
+# 'trig' or 'trix', not 'turtle', unless the default_union parameter is set to True, in
+# which case you can print the entire Dataset as triples.
+# print(d.serialize(format="trig").strip())
+
+# you should see something like this:
"""
@prefix ex: <http://example.com/> .
@@ -78,85 +102,278 @@
ex:subject-y ex:predicate-y "Triple Y" .
}
"""
-print("Printing Serialised Dataset:")
-print("---")
-print(d.serialize(format="trig"))
-print("---")
-print()
-print()
-#
-# Use & Query
-#
-# print the length of the Dataset, i.e. the count of all triples in all Graphs
-# we should get
+# Print out one graph in the Dataset, using a standard Graph serialization format - longturtle
+print(d.get_graph(URIRef("http://example.com/graph-2")).serialize(format="longturtle"))
+
+# you should see something like this:
"""
-3
+PREFIX ex: <http://example.com/>
+
+ex:subject-y
+ ex:predicate-y "Triple Y" ;
+.
"""
-print("Printing Dataset Length:")
-print("---")
-print(len(d))
-print("---")
-print()
-print()
-# Query one graph in the Dataset for all its triples
-# we should get
+
+#######################################################################################
+# 2. Looping & Counting
+#######################################################################################
+
+# Loop through all quads in the dataset
+for s, p, o, g in d.quads((None, None, None, None)): # type: ignore[arg-type]
+ print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this:
"""
-(rdflib.term.URIRef('http://example.com/subject-z'), rdflib.term.URIRef('http://example.com/predicate-z'), rdflib.term.Literal('Triple Z'))
-(rdflib.term.URIRef('http://example.com/subject-x'), rdflib.term.URIRef('http://example.com/predicate-x'), rdflib.term.Literal('Triple X'))
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
"""
-print("Printing all triple from one Graph in the Dataset:")
-print("---")
-for triple in d.triples((None, None, None, graph_1)): # type: ignore[arg-type]
- print(triple)
-print("---")
-print()
-print()
-# Query the union of all graphs in the dataset for all triples
-# we should get nothing:
+# Loop through all the quads in one Graph - just constrain the Graph field
+for s, p, o, g in d.quads((None, None, None, graph_1_id)): # type: ignore[arg-type]
+ print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this:
"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
"""
-# A Dataset's default union graph does not exist by default (default_union property is False)
-print("Attempt #1 to print all triples in the Dataset:")
-print("---")
-for triple in d.triples((None, None, None, None)):
- print(triple)
-print("---")
-print()
-print()
-# Set the Dataset's default_union property to True and re-query
+# Looping through triples in one Graph still works too
+for s, p, o in d.triples((None, None, None, graph_1_id)): # type: ignore[arg-type]
+ print(f"{s}, {p}, {o}")
+
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+"""
+
+# Looping through triples across the whole Dataset will produce nothing
+# unless we set the default_union parameter to True, since each triple is in a Named Graph
+
+# Setting the default_union parameter to True essentially presents all triples in all
+# Graphs as a single Graph
d.default_union = True
-print("Attempt #2 to print all triples in the Dataset:")
-print("---")
-for triple in d.triples((None, None, None, None)):
- print(triple)
-print("---")
-print()
-print()
+for s, p, o in d.triples((None, None, None)):
+ print(f"{s}, {p}, {o}")
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y
+"""
-#
-# Remove
-#
+# You can still loop through all quads now with the default_union parameter set to True
+for s, p, o, g in d.quads((None, None, None)):
+ print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this:
+"""
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
+"""
+
+# Adding a triple in graph-1 to graph-2 increases the number of distinct quads in
+# the Dataset
+d.add(
+ (
+ URIRef("http://example.com/subject-z"),
+ URIRef("http://example.com/predicate-z"),
+ Literal("Triple Z"),
+ URIRef("http://example.com/graph-2"),
+ )
+)
+
+for s, p, o, g in d.quads((None, None, None, None)):
+ print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this, with the 'Z' triple in graph-1 and graph-2:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2
+"""
+
+# but the 'length' of the Dataset is still only 3 as only distinct triples are counted
+assert len(d) == 3
+
+
+# Looping through triples sees the 'Z' triple only once
+for s, p, o in d.triples((None, None, None)):
+ print(f"{s}, {p}, {o}")
+
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y
+"""
+
+#######################################################################################
+# 3. Manipulating Graphs
+#######################################################################################
+
+# List all the Graphs in the Dataset
+for x in d.graphs():
+ print(x)
+
+# this returns the graphs, something like:
+"""
+<urn:x-rdflib:default> a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory'].
+<http://example.com/graph-2> a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory'].
+<http://example.com/graph-1> a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory'].
+"""
+
+# To see just the Graph identifiers, try this
+for x in d.graphs():
+ print(x.identifier)
+
+# you should see something like this, noting the default, currently empty, graph:
+"""
+urn:x-rdflib:default
+http://example.com/graph-2
+http://example.com/graph-1
+"""
-# Remove Graph graph_1 from the Dataset
-d.remove_graph(graph_1)
+# To add to the default Graph, just add a triple, not a quad, to the Dataset directly
+d.add(
+ (
+ URIRef("http://example.com/subject-n"),
+ URIRef("http://example.com/predicate-n"),
+ Literal("Triple N"),
+ )
+)
+for s, p, o, g in d.quads((None, None, None, None)):
+ print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this, noting the triple in the default Graph:
+"""
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
+http://example.com/subject-n, http://example.com/predicate-n, Triple N, urn:x-rdflib:default
+"""
+
+# Loop through triples per graph
+for x in d.graphs():
+ print(x.identifier)
+ for s, p, o in x.triples((None, None, None)):
+ print(f"\t{s}, {p}, {o}")
-# printing the Dataset like this: print(d.serialize(format="trig"))
-# now produces a result like this:
+# you should see something like this:
+"""
+urn:x-rdflib:default
+ http://example.com/subject-n, http://example.com/predicate-n, Triple N
+http://example.com/graph-1
+ http://example.com/subject-x, http://example.com/predicate-x, Triple X
+ http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+http://example.com/graph-2
+ http://example.com/subject-y, http://example.com/predicate-y, Triple Y
+ http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+"""
+# The default_union parameter includes all triples in the Named Graphs and the Default Graph
+for s, p, o in d.triples((None, None, None)):
+ print(f"{s}, {p}, {o}")
+
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-n, http://example.com/predicate-n, Triple N
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y
"""
+
+# To remove a graph
+d.remove_graph(graph_1_id)
+
+# To remove the default graph
+d.remove_graph(URIRef("urn:x-rdflib:default"))
+
+# print what's left - one graph, graph-2
+print(d.serialize(format="trig"))
+
+# you should see something like this:
+"""
+@prefix ex: <http://example.com/> .
+
ex:graph-2 {
ex:subject-y ex:predicate-y "Triple Y" .
+
+ ex:subject-z ex:predicate-z "Triple Z" .
+}
+"""
+
+# When adding an existing Graph to the Dataset, give it an identifier, or else it
+# will be assigned a Blank Node ID
+g_with_id = Graph(identifier=URIRef("http://example.com/graph-3"))
+g_with_id.bind("ex", "http://example.com/")
+
+# Add a distinct triple to the existing Graph, using Namespace IRI shortcuts
+g_with_id.add(
+ (
+ URIRef("http://example.com/subject-k"),
+ URIRef("http://example.com/predicate-k"),
+ Literal("Triple K"),
+ )
+)
+d.add_graph(g_with_id)
+print(d.serialize(format="trig"))
+
+# you should see something like this:
+"""
+@prefix ex: <http://example.com/> .
+
+ex:graph-3 {
+ ex:subject-k ex:predicate-k "Triple K" .
+}
+
+ex:graph-2 {
+ ex:subject-y ex:predicate-y "Triple Y" .
+
+ ex:subject-z ex:predicate-z "Triple Z" .
+}
+"""
+
+# If you add a Graph with no specified identifier...
+g_no_id = Graph()
+g_no_id.bind("ex", "http://example.com/")
+
+g_no_id.add(
+ (
+ URIRef("http://example.com/subject-l"),
+ URIRef("http://example.com/predicate-l"),
+ Literal("Triple L"),
+ )
+)
+d.add_graph(g_no_id)
+
+# now when we print it, we will see a Graph with a Blank Node id:
+print(d.serialize(format="trig"))
+
+# you should see something like this, but with a different Blank Node ID, as this is
+# rebuilt on each execution
+"""
+@prefix ex: <http://example.com/> .
+
+ex:graph-3 {
+ ex:subject-k ex:predicate-k "Triple K" .
+}
+
+ex:graph-2 {
+ ex:subject-y ex:predicate-y "Triple Y" .
+
+ ex:subject-z ex:predicate-z "Triple Z" .
+}
+
+_:N9cc8b54c91724e31896da5ce41e0c937 {
+ ex:subject-l ex:predicate-l "Triple L" .
}
"""
-print("Printing Serialised Dataset after graph_1 removal:")
-print("---")
-print(d.serialize(format="trig").strip())
-print("---")
-print()
-print()
diff --git a/examples/jsonld_serialization.py b/examples/jsonld_serialization.py
index 5bee1a614..dd83d6a5d 100644
--- a/examples/jsonld_serialization.py
+++ b/examples/jsonld_serialization.py
@@ -1,24 +1,35 @@
"""
-JSON-LD is "A JSON-based Serialization for Linked Data" (https://www.w3.org/TR/json-ld/) that RDFLib implements for RDF serialization.
+JSON-LD is "A JSON-based Serialization for Linked Data" (https://www.w3.org/TR/json-ld/)
+that RDFLib implements for RDF serialization.
-This file demonstrated some of the JSON-LD things you can do with RDFLib. Parsing & serializing so far. More to be added later.
+This file demonstrates some of the JSON-LD things you can do with RDFLib. Parsing &
+serializing so far. More to be added later.
Parsing
-------
-There are a number of "flavours" of JSON-LD - compact and verbose etc. RDFLib can parse all of these in a normal RDFLib way.
+
+There are a number of "flavours" of JSON-LD - compact and verbose etc. RDFLib can parse
+all of these in a normal RDFLib way.
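+
+For example, a JSON-LD document can be parsed like any other format (a sketch; the
+inline document here is purely illustrative):
+
+.. code-block:: python
+
+    from rdflib import Graph
+
+    g = Graph().parse(
+        data='{"@id": "http://example.com/x", "http://example.com/p": "A value"}',
+        format="json-ld",
+    )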
Serialization
-------------
-JSON-LD has a number of options for serialization - more than other RDF formats. For example, IRIs within JSON-LD can be compacted down to CURIES when a "context" statment is added to the JSON-LD data that maps identifiers - short codes - to IRIs and namespace IRIs like this:
-# here the short code "dcterms" is mapped to the IRI http://purl.org/dc/terms/ and "schema" to https://schema.org/, as per RDFLib's in-build namespace prefixes
+JSON-LD has a number of options for serialization - more than other RDF formats. For
+example, IRIs within JSON-LD can be compacted down to CURIES when a "context" statement
+is added to the JSON-LD data that maps identifiers - short codes - to IRIs and namespace
+IRIs like this:
-"@context": {
- "dct": "http://purl.org/dc/terms/",
- "schema": "https://schema.org/"
-}
+.. code-block:: json
+
+ "@context": {
+ "dcterms": "http://purl.org/dc/terms/",
+ "schema": "https://schema.org/"
+ }
+
+Here the short code "dcterms" is mapped to the IRI http://purl.org/dc/terms/ and
+"schema" to https://schema.org/, as per RDFLib's in-build namespace prefixes.
"""
# import RDFLib and other things
diff --git a/poetry.lock b/poetry.lock
index c2d3eb897..2072d2c5c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "alabaster"
@@ -830,69 +830,86 @@ test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"]
[[package]]
name = "orjson"
-version = "3.10.10"
+version = "3.10.13"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = true
python-versions = ">=3.8"
files = [
- {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"},
- {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"},
- {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"},
- {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"},
- {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"},
- {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"},
- {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"},
- {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"},
- {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"},
- {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"},
- {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"},
- {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"},
- {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"},
- {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"},
- {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"},
- {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"},
- {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"},
- {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"},
- {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"},
- {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"},
- {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"},
- {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"},
- {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"},
- {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"},
- {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"},
- {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"},
- {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"},
- {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"},
- {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"},
- {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"},
- {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"},
- {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"},
- {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"},
+ {file = "orjson-3.10.13-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1232c5e873a4d1638ef957c5564b4b0d6f2a6ab9e207a9b3de9de05a09d1d920"},
+ {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26a0eca3035619fa366cbaf49af704c7cb1d4a0e6c79eced9f6a3f2437964b6"},
+ {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d4b6acd7c9c829895e50d385a357d4b8c3fafc19c5989da2bae11783b0fd4977"},
+ {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1884e53c6818686891cc6fc5a3a2540f2f35e8c76eac8dc3b40480fb59660b00"},
+ {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a428afb5720f12892f64920acd2eeb4d996595bf168a26dd9190115dbf1130d"},
+ {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba5b13b8739ce5b630c65cb1c85aedbd257bcc2b9c256b06ab2605209af75a2e"},
+ {file = "orjson-3.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cab83e67f6aabda1b45882254b2598b48b80ecc112968fc6483fa6dae609e9f0"},
+ {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:62c3cc00c7e776c71c6b7b9c48c5d2701d4c04e7d1d7cdee3572998ee6dc57cc"},
+ {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:dc03db4922e75bbc870b03fc49734cefbd50fe975e0878327d200022210b82d8"},
+ {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22f1c9a30b43d14a041a6ea190d9eca8a6b80c4beb0e8b67602c82d30d6eec3e"},
+ {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b42f56821c29e697c68d7d421410d7c1d8f064ae288b525af6a50cf99a4b1200"},
+ {file = "orjson-3.10.13-cp310-cp310-win32.whl", hash = "sha256:0dbf3b97e52e093d7c3e93eb5eb5b31dc7535b33c2ad56872c83f0160f943487"},
+ {file = "orjson-3.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:46c249b4e934453be4ff2e518cd1adcd90467da7391c7a79eaf2fbb79c51e8c7"},
+ {file = "orjson-3.10.13-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a36c0d48d2f084c800763473020a12976996f1109e2fcb66cfea442fdf88047f"},
+ {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0065896f85d9497990731dfd4a9991a45b0a524baec42ef0a63c34630ee26fd6"},
+ {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92b4ec30d6025a9dcdfe0df77063cbce238c08d0404471ed7a79f309364a3d19"},
+ {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a94542d12271c30044dadad1125ee060e7a2048b6c7034e432e116077e1d13d2"},
+ {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3723e137772639af8adb68230f2aa4bcb27c48b3335b1b1e2d49328fed5e244c"},
+ {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f00c7fb18843bad2ac42dc1ce6dd214a083c53f1e324a0fd1c8137c6436269b"},
+ {file = "orjson-3.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0e2759d3172300b2f892dee85500b22fca5ac49e0c42cfff101aaf9c12ac9617"},
+ {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee948c6c01f6b337589c88f8e0bb11e78d32a15848b8b53d3f3b6fea48842c12"},
+ {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:aa6fe68f0981fba0d4bf9cdc666d297a7cdba0f1b380dcd075a9a3dd5649a69e"},
+ {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dbcd7aad6bcff258f6896abfbc177d54d9b18149c4c561114f47ebfe74ae6bfd"},
+ {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2149e2fcd084c3fd584881c7f9d7f9e5ad1e2e006609d8b80649655e0d52cd02"},
+ {file = "orjson-3.10.13-cp311-cp311-win32.whl", hash = "sha256:89367767ed27b33c25c026696507c76e3d01958406f51d3a2239fe9e91959df2"},
+ {file = "orjson-3.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:dca1d20f1af0daff511f6e26a27354a424f0b5cf00e04280279316df0f604a6f"},
+ {file = "orjson-3.10.13-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a3614b00621c77f3f6487792238f9ed1dd8a42f2ec0e6540ee34c2d4e6db813a"},
+ {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c976bad3996aa027cd3aef78aa57873f3c959b6c38719de9724b71bdc7bd14b"},
+ {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f74d878d1efb97a930b8a9f9898890067707d683eb5c7e20730030ecb3fb930"},
+ {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33ef84f7e9513fb13b3999c2a64b9ca9c8143f3da9722fbf9c9ce51ce0d8076e"},
+ {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2bcde107221bb9c2fa0c4aaba735a537225104173d7e19cf73f70b3126c993"},
+ {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:064b9dbb0217fd64a8d016a8929f2fae6f3312d55ab3036b00b1d17399ab2f3e"},
+ {file = "orjson-3.10.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0044b0b8c85a565e7c3ce0a72acc5d35cda60793edf871ed94711e712cb637d"},
+ {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7184f608ad563032e398f311910bc536e62b9fbdca2041be889afcbc39500de8"},
+ {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d36f689e7e1b9b6fb39dbdebc16a6f07cbe994d3644fb1c22953020fc575935f"},
+ {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54433e421618cd5873e51c0e9d0b9fb35f7bf76eb31c8eab20b3595bb713cd3d"},
+ {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1ba0c5857dd743438acecc1cd0e1adf83f0a81fee558e32b2b36f89e40cee8b"},
+ {file = "orjson-3.10.13-cp312-cp312-win32.whl", hash = "sha256:a42b9fe4b0114b51eb5cdf9887d8c94447bc59df6dbb9c5884434eab947888d8"},
+ {file = "orjson-3.10.13-cp312-cp312-win_amd64.whl", hash = "sha256:3a7df63076435f39ec024bdfeb4c9767ebe7b49abc4949068d61cf4857fa6d6c"},
+ {file = "orjson-3.10.13-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2cdaf8b028a976ebab837a2c27b82810f7fc76ed9fb243755ba650cc83d07730"},
+ {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a946796e390cbb803e069472de37f192b7a80f4ac82e16d6eb9909d9e39d56"},
+ {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d64f1db5ecbc21eb83097e5236d6ab7e86092c1cd4c216c02533332951afc"},
+ {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:711878da48f89df194edd2ba603ad42e7afed74abcd2bac164685e7ec15f96de"},
+ {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cf16f06cb77ce8baf844bc222dbcb03838f61d0abda2c3341400c2b7604e436e"},
+ {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8257c3fb8dd7b0b446b5e87bf85a28e4071ac50f8c04b6ce2d38cb4abd7dff57"},
+ {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9c3a87abe6f849a4a7ac8a8a1dede6320a4303d5304006b90da7a3cd2b70d2c"},
+ {file = "orjson-3.10.13-cp313-cp313-win32.whl", hash = "sha256:527afb6ddb0fa3fe02f5d9fba4920d9d95da58917826a9be93e0242da8abe94a"},
+ {file = "orjson-3.10.13-cp313-cp313-win_amd64.whl", hash = "sha256:b5f7c298d4b935b222f52d6c7f2ba5eafb59d690d9a3840b7b5c5cda97f6ec5c"},
+ {file = "orjson-3.10.13-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e49333d1038bc03a25fdfe11c86360df9b890354bfe04215f1f54d030f33c342"},
+ {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:003721c72930dbb973f25c5d8e68d0f023d6ed138b14830cc94e57c6805a2eab"},
+ {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63664bf12addb318dc8f032160e0f5dc17eb8471c93601e8f5e0d07f95003784"},
+ {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6066729cf9552d70de297b56556d14b4f49c8f638803ee3c90fd212fa43cc6af"},
+ {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a1152e2761025c5d13b5e1908d4b1c57f3797ba662e485ae6f26e4e0c466388"},
+ {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69b21d91c5c5ef8a201036d207b1adf3aa596b930b6ca3c71484dd11386cf6c3"},
+ {file = "orjson-3.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b12a63f48bb53dba8453d36ca2661f2330126d54e26c1661e550b32864b28ce3"},
+ {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a5a7624ab4d121c7e035708c8dd1f99c15ff155b69a1c0affc4d9d8b551281ba"},
+ {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0fee076134398d4e6cb827002468679ad402b22269510cf228301b787fdff5ae"},
+ {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ae537fcf330b3947e82c6ae4271e092e6cf16b9bc2cef68b14ffd0df1fa8832a"},
+ {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f81b26c03f5fb5f0d0ee48d83cea4d7bc5e67e420d209cc1a990f5d1c62f9be0"},
+ {file = "orjson-3.10.13-cp38-cp38-win32.whl", hash = "sha256:0bc858086088b39dc622bc8219e73d3f246fb2bce70a6104abd04b3a080a66a8"},
+ {file = "orjson-3.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:3ca6f17467ebbd763f8862f1d89384a5051b461bb0e41074f583a0ebd7120e8e"},
+ {file = "orjson-3.10.13-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4a11532cbfc2f5752c37e84863ef8435b68b0e6d459b329933294f65fa4bda1a"},
+ {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96d2fb80467d1d0dfc4d037b4e1c0f84f1fe6229aa7fea3f070083acef7f3d7"},
+ {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dda4ba4d3e6f6c53b6b9c35266788053b61656a716a7fef5c884629c2a52e7aa"},
+ {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f998bbf300690be881772ee9c5281eb9c0044e295bcd4722504f5b5c6092ff"},
+ {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1cc42ed75b585c0c4dc5eb53a90a34ccb493c09a10750d1a1f9b9eff2bd12"},
+ {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b0f29d485411e3c13d79604b740b14e4e5fb58811743f6f4f9693ee6480a8f"},
+ {file = "orjson-3.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:233aae4474078d82f425134bb6a10fb2b3fc5a1a1b3420c6463ddd1b6a97eda8"},
+ {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e384e330a67cf52b3597ee2646de63407da6f8fc9e9beec3eaaaef5514c7a1c9"},
+ {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4222881d0aab76224d7b003a8e5fdae4082e32c86768e0e8652de8afd6c4e2c1"},
+ {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e400436950ba42110a20c50c80dff4946c8e3ec09abc1c9cf5473467e83fd1c5"},
+ {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f47c9e7d224b86ffb086059cdcf634f4b3f32480f9838864aa09022fe2617ce2"},
+ {file = "orjson-3.10.13-cp39-cp39-win32.whl", hash = "sha256:a9ecea472f3eb653e1c0a3d68085f031f18fc501ea392b98dcca3e87c24f9ebe"},
+ {file = "orjson-3.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:5385935a73adce85cc7faac9d396683fd813566d3857fa95a0b521ef84a5b588"},
+ {file = "orjson-3.10.13.tar.gz", hash = "sha256:eb9bfb14ab8f68d9d9492d4817ae497788a15fd7da72e14dfabc289c3bb088ec"},
]
[[package]]
@@ -1034,13 +1051,13 @@ files = [
[[package]]
name = "pytest"
-version = "8.3.3"
+version = "8.3.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
- {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
+ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"},
+ {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"},
]
[package.dependencies]
@@ -1108,6 +1125,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -1165,29 +1183,29 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "ruff"
-version = "0.7.0"
+version = "0.8.6"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628"},
- {file = "ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737"},
- {file = "ruff-0.7.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:214b88498684e20b6b2b8852c01d50f0651f3cc6118dfa113b4def9f14faaf06"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630fce3fefe9844e91ea5bbf7ceadab4f9981f42b704fae011bb8efcaf5d84be"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:211d877674e9373d4bb0f1c80f97a0201c61bcd1e9d045b6e9726adc42c156aa"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194d6c46c98c73949a106425ed40a576f52291c12bc21399eb8f13a0f7073495"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:82c2579b82b9973a110fab281860403b397c08c403de92de19568f32f7178598"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9af971fe85dcd5eaed8f585ddbc6bdbe8c217fb8fcf510ea6bca5bdfff56040e"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b641c7f16939b7d24b7bfc0be4102c56562a18281f84f635604e8a6989948914"},
- {file = "ruff-0.7.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d71672336e46b34e0c90a790afeac8a31954fd42872c1f6adaea1dff76fd44f9"},
- {file = "ruff-0.7.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ab7d98c7eed355166f367597e513a6c82408df4181a937628dbec79abb2a1fe4"},
- {file = "ruff-0.7.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1eb54986f770f49edb14f71d33312d79e00e629a57387382200b1ef12d6a4ef9"},
- {file = "ruff-0.7.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:dc452ba6f2bb9cf8726a84aa877061a2462afe9ae0ea1d411c53d226661c601d"},
- {file = "ruff-0.7.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4b406c2dce5be9bad59f2de26139a86017a517e6bcd2688da515481c05a2cb11"},
- {file = "ruff-0.7.0-py3-none-win32.whl", hash = "sha256:f6c968509f767776f524a8430426539587d5ec5c662f6addb6aa25bc2e8195ec"},
- {file = "ruff-0.7.0-py3-none-win_amd64.whl", hash = "sha256:ff4aabfbaaba880e85d394603b9e75d32b0693152e16fa659a3064a85df7fce2"},
- {file = "ruff-0.7.0-py3-none-win_arm64.whl", hash = "sha256:10842f69c245e78d6adec7e1db0a7d9ddc2fff0621d730e61657b64fa36f207e"},
- {file = "ruff-0.7.0.tar.gz", hash = "sha256:47a86360cf62d9cd53ebfb0b5eb0e882193fc191c6d717e8bef4462bc3b9ea2b"},
+ {file = "ruff-0.8.6-py3-none-linux_armv6l.whl", hash = "sha256:defed167955d42c68b407e8f2e6f56ba52520e790aba4ca707a9c88619e580e3"},
+ {file = "ruff-0.8.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:54799ca3d67ae5e0b7a7ac234baa657a9c1784b48ec954a094da7c206e0365b1"},
+ {file = "ruff-0.8.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e88b8f6d901477c41559ba540beeb5a671e14cd29ebd5683903572f4b40a9807"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0509e8da430228236a18a677fcdb0c1f102dd26d5520f71f79b094963322ed25"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a7ddb221779871cf226100e677b5ea38c2d54e9e2c8ed847450ebbdf99b32d"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:248b1fb3f739d01d528cc50b35ee9c4812aa58cc5935998e776bf8ed5b251e75"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bc3c083c50390cf69e7e1b5a5a7303898966be973664ec0c4a4acea82c1d4315"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52d587092ab8df308635762386f45f4638badb0866355b2b86760f6d3c076188"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:61323159cf21bc3897674e5adb27cd9e7700bab6b84de40d7be28c3d46dc67cf"},
+ {file = "ruff-0.8.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ae4478b1471fc0c44ed52a6fb787e641a2ac58b1c1f91763bafbc2faddc5117"},
+ {file = "ruff-0.8.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0c000a471d519b3e6cfc9c6680025d923b4ca140ce3e4612d1a2ef58e11f11fe"},
+ {file = "ruff-0.8.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9257aa841e9e8d9b727423086f0fa9a86b6b420fbf4bf9e1465d1250ce8e4d8d"},
+ {file = "ruff-0.8.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45a56f61b24682f6f6709636949ae8cc82ae229d8d773b4c76c09ec83964a95a"},
+ {file = "ruff-0.8.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:496dd38a53aa173481a7d8866bcd6451bd934d06976a2505028a50583e001b76"},
+ {file = "ruff-0.8.6-py3-none-win32.whl", hash = "sha256:e169ea1b9eae61c99b257dc83b9ee6c76f89042752cb2d83486a7d6e48e8f764"},
+ {file = "ruff-0.8.6-py3-none-win_amd64.whl", hash = "sha256:f1d70bef3d16fdc897ee290d7d20da3cbe4e26349f62e8a0274e7a3f4ce7a905"},
+ {file = "ruff-0.8.6-py3-none-win_arm64.whl", hash = "sha256:7d7fc2377a04b6e04ffe588caad613d0c460eb2ecba4c0ccbbfe2bc973cbc162"},
+ {file = "ruff-0.8.6.tar.gz", hash = "sha256:dcad24b81b62650b0eb8814f576fc65cfee8674772a6e24c9b747911801eeaa5"},
]
[[package]]
@@ -1464,4 +1482,4 @@ orjson = ["orjson"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.1"
-content-hash = "71704ba175e33528872fab8121cb609041bd97b6a99f8f04022a26904941b27c"
+content-hash = "3d9605c7f277f69e5c732d2edf25ed10fde6af31b791bb787229eb92be962af6"
diff --git a/pyproject.toml b/pyproject.toml
index 1e15fe569..971e229d7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "rdflib"
-version = "7.1.1"
+version = "7.1.3a0"
description = """RDFLib is a Python library for working with RDF, \
a simple yet powerful language for representing information."""
authors = ["Daniel 'eikeon' Krech "]
@@ -69,7 +69,7 @@ sphinx-autodoc-typehints = ">=1.25.3,<=2.0.1"
typing-extensions = "^4.5.0"
[tool.poetry.group.lint.dependencies]
-ruff = ">=0.0.286,<0.8.0"
+ruff = ">=0.0.286,<0.10.0"
[tool.poetry.extras]
berkeleydb = ["berkeleydb"]
@@ -166,7 +166,7 @@ ignore = [
]
[tool.black]
-line-length = "88"
+line-length = 88
target-version = ['py38']
required-version = "24.4.2"
include = '\.pyi?$'
diff --git a/rdflib/__init__.py b/rdflib/__init__.py
index 0c40cd7a4..843b614e4 100644
--- a/rdflib/__init__.py
+++ b/rdflib/__init__.py
@@ -59,6 +59,7 @@
"BNode",
"IdentifiedNode",
"Literal",
+ "Node",
"Variable",
"Namespace",
"Dataset",
@@ -195,7 +196,7 @@
XSD,
Namespace,
)
-from rdflib.term import BNode, IdentifiedNode, Literal, URIRef, Variable
+from rdflib.term import BNode, IdentifiedNode, Literal, Node, URIRef, Variable
from rdflib import plugin, query, util # isort:skip
from rdflib.container import * # isort:skip # noqa: F403
diff --git a/rdflib/extras/shacl.py b/rdflib/extras/shacl.py
index 30fdab07b..1a5094ce3 100644
--- a/rdflib/extras/shacl.py
+++ b/rdflib/extras/shacl.py
@@ -4,18 +4,30 @@
from __future__ import annotations
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Optional, Union
-from rdflib import Graph, Literal, URIRef, paths
+from rdflib import BNode, Graph, Literal, URIRef, paths
+from rdflib.collection import Collection
from rdflib.namespace import RDF, SH
from rdflib.paths import Path
from rdflib.term import Node
+if TYPE_CHECKING:
+ from rdflib.term import IdentifiedNode
+
class SHACLPathError(Exception):
pass
+# Map the variable length path operators to the corresponding SHACL path predicates
+_PATH_MOD_TO_PRED = {
+ paths.ZeroOrMore: SH.zeroOrMorePath,
+ paths.OneOrMore: SH.oneOrMorePath,
+ paths.ZeroOrOne: SH.zeroOrOnePath,
+}
+
+
# This implementation is roughly based on
# pyshacl.helper.sparql_query_helper::SPARQLQueryHelper._shacl_path_to_sparql_path
def parse_shacl_path(
@@ -91,3 +103,110 @@ def parse_shacl_path(
raise SHACLPathError(f"Cannot parse {repr(path_identifier)} as a SHACL Path.")
return path
+
+
+def _build_path_component(
+ graph: Graph, path_component: URIRef | Path
+) -> IdentifiedNode:
+ """
+ Helper method that implements the recursive component of SHACL path
+ triple construction.
+
+ :param graph: A :class:`~rdflib.graph.Graph` into which to insert triples
+ :param path_component: A :class:`~rdflib.term.URIRef` or
+ :class:`~rdflib.paths.Path` that is part of a path expression
+ :return: The :class:`~rdflib.term.IdentifiedNode` of the resource in the
+ graph that corresponds to the provided path_component
+ """
+ # Literals or other types are not allowed
+ if not isinstance(path_component, (URIRef, Path)):
+ raise TypeError(
+ f"Objects of type {type(path_component)} are not valid "
+ + "components of a SHACL path."
+ )
+
+ # If the path component is a URI, return it
+ elif isinstance(path_component, URIRef):
+ return path_component
+ # Otherwise, the path component is represented as a blank node
+ bnode = BNode()
+
+ # Handle Sequence Paths
+ if isinstance(path_component, paths.SequencePath):
+ # Sequence paths are represented directly as a Collection, with at least two items
+ if len(path_component.args) < 2:
+ raise SHACLPathError(
+ "A list of SHACL Sequence Paths must contain at least two path items."
+ )
+ Collection(
+ graph,
+ bnode,
+ [_build_path_component(graph, arg) for arg in path_component.args],
+ )
+
+ # Handle Inverse Paths
+ elif isinstance(path_component, paths.InvPath):
+ graph.add(
+ (bnode, SH.inversePath, _build_path_component(graph, path_component.arg))
+ )
+
+ # Handle Alternative Paths
+ elif isinstance(path_component, paths.AlternativePath):
+ # Alternative paths are a Collection but referenced by sh:alternativePath
+ # with at least two items
+ if len(path_component.args) < 2:
+ raise SHACLPathError(
+ "List of SHACL alternate paths must have at least two path items."
+ )
+ coll = Collection(
+ graph,
+ BNode(),
+ [_build_path_component(graph, arg) for arg in path_component.args],
+ )
+ graph.add((bnode, SH.alternativePath, coll.uri))
+
+ # Handle Variable Length Paths
+ elif isinstance(path_component, paths.MulPath):
+ # Get the predicate corresponding to the path modifier
+ pred = _PATH_MOD_TO_PRED.get(path_component.mod)
+ if pred is None:
+ raise SHACLPathError(f"Unknown path modifier {path_component.mod}")
+ graph.add((bnode, pred, _build_path_component(graph, path_component.path)))
+
+ # Return the blank node created for the provided path_component
+ return bnode
+
+
+def build_shacl_path(
+ path: URIRef | Path, target_graph: Graph | None = None
+) -> tuple[IdentifiedNode, Graph | None]:
+ """
+ Build the SHACL Path triples for a path given by a :class:`~rdflib.term.URIRef` for
+ simple paths or a :class:`~rdflib.paths.Path` for complex paths.
+
+ Returns an :class:`~rdflib.term.IdentifiedNode` for the path (which should be
+ the object of a triple with predicate sh:path) and the graph into which any
+ new triples were added.
+
+ :param path: A :class:`~rdflib.term.URIRef` or a :class:`~rdflib.paths.Path`
+ :param target_graph: Optionally, a :class:`~rdflib.graph.Graph` into which to put
+ constructed triples. If not provided, a new graph will be created
+ :return: A (path_identifier, graph) tuple where:
+ - path_identifier: If path is a :class:`~rdflib.term.URIRef`, this is simply
+ the provided path. If path is a :class:`~rdflib.paths.Path`, this is
+ the :class:`~rdflib.term.BNode` corresponding to the root of the SHACL
+ path expression added to the graph.
+ - graph: None if path is a :class:`~rdflib.term.URIRef` (as no new triples
+ are constructed). If path is a :class:`~rdflib.paths.Path`, this is either the
+ target_graph provided or a new graph into which the path triples were added.
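+
+ A small usage sketch (the IRI here is purely illustrative):
+
+ >>> from rdflib import URIRef
+ >>> from rdflib.paths import ZeroOrMore
+ >>> path_id, g = build_shacl_path(URIRef("http://example.com/p") * ZeroOrMore)
+ >>> (path_id, SH.zeroOrMorePath, URIRef("http://example.com/p")) in g
+ True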
+ """
+ # If a path is a URI, that's the whole path. No graph needs to be constructed.
+ if isinstance(path, URIRef):
+ return path, None
+
+ # Create a graph if one was not provided
+ if target_graph is None:
+ target_graph = Graph()
+
+ # Recurse through the path to build the graph representation
+ return _build_path_component(target_graph, path), target_graph
diff --git a/rdflib/graph.py b/rdflib/graph.py
index 80ccc3fa8..d74dd85cf 100644
--- a/rdflib/graph.py
+++ b/rdflib/graph.py
@@ -411,11 +411,50 @@
class Graph(Node):
- """An RDF Graph
+ """An RDF Graph: a Python object containing nodes and relations between them as
+ RDF 'triples'.
- The constructor accepts one argument, the "store"
- that will be used to store the graph data (see the "store"
- package for stores currently shipped with rdflib).
+ This is the central RDFLib object class and Graph objects are almost always present
+ in all uses of RDFLib.
+
+ The basic use is to create a Graph and iterate through or query its content, e.g.:
+
+ >>> from rdflib import Graph, URIRef
+ >>> g = Graph()
+
+ >>> g.add((
+ ... URIRef("http://example.com/s1"), # subject
+ ... URIRef("http://example.com/p1"), # predicate
+ ... URIRef("http://example.com/o1"), # object
+ ... )) # doctest: +ELLIPSIS
+ <Graph identifier=... (<class 'rdflib.graph.Graph'>)>
+
+ >>> g.add((
+ ... URIRef("http://example.com/s2"), # subject
+ ... URIRef("http://example.com/p2"), # predicate
+ ... URIRef("http://example.com/o2"), # object
+ ... )) # doctest: +ELLIPSIS
+ <Graph identifier=... (<class 'rdflib.graph.Graph'>)>
+
+ >>> for triple in sorted(g): # simple looping
+ ... print(triple)
+ (rdflib.term.URIRef('http://example.com/s1'), rdflib.term.URIRef('http://example.com/p1'), rdflib.term.URIRef('http://example.com/o1'))
+ (rdflib.term.URIRef('http://example.com/s2'), rdflib.term.URIRef('http://example.com/p2'), rdflib.term.URIRef('http://example.com/o2'))
+
+ >>> # get the object of the triple with subject s1 and predicate p1
+ >>> o = g.value(
+ ... subject=URIRef("http://example.com/s1"),
+ ... predicate=URIRef("http://example.com/p1")
+ ... )
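+ >>> print(o)
+ http://example.com/o1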
+
+
+ The constructor accepts one argument, the "store" that will be used to store the
+ graph data with the default being the `Memory`
+ (in memory) Store. Other Stores that persist content to disk using various file
+ databases or Stores that use remote servers (SPARQL systems) are supported. See
+ the :doc:`rdflib.plugins.stores` package for Stores currently shipped with RDFLib.
+ Other Stores not shipped with RDFLib can be added, such as
+ `HDT <https://github.com/RDFLib/rdflib-hdt/>`_.
Stores can be context-aware or unaware. Unaware stores take up
(some) less space but cannot support features that require
@@ -423,14 +462,15 @@ class Graph(Node):
provenance.
Even if used with a context-aware store, Graph will only expose the quads which
- belong to the default graph. To access the rest of the data, `ConjunctiveGraph` or
- `Dataset` classes can be used instead.
+ belong to the default graph. To access the rest of the data the
+ `Dataset` class can be used instead.
The Graph constructor can take an identifier which identifies the Graph
by name. If none is given, the graph is assigned a BNode for its
identifier.
- For more on named graphs, see: http://www.w3.org/2004/03/trix/
+ For more on Named Graphs, see the RDFLib `Dataset` class and the TriG Specification,
+ https://www.w3.org/TR/trig/.
"""
context_aware: bool
@@ -1090,10 +1130,10 @@ def transitiveClosure( # noqa: N802
function against the graph
>>> from rdflib.collection import Collection
- >>> g=Graph()
- >>> a=BNode("foo")
- >>> b=BNode("bar")
- >>> c=BNode("baz")
+ >>> g = Graph()
+ >>> a = BNode("foo")
+ >>> b = BNode("bar")
+ >>> c = BNode("baz")
>>> g.add((a,RDF.first,RDF.type)) # doctest: +ELLIPSIS
    <Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((a,RDF.rest,b)) # doctest: +ELLIPSIS
@@ -1354,7 +1394,7 @@ def serialize(
else:
os_path = location
with open(os_path, "wb") as stream:
- serializer.serialize(stream, encoding=encoding, **args)
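+                # also forward base so it is honoured when serializing to a file location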
+ serializer.serialize(stream, base=base, encoding=encoding, **args)
return self
def print(
@@ -2297,21 +2337,49 @@ def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]]
class Dataset(ConjunctiveGraph):
"""
- RDF 1.1 Dataset. Small extension to the Conjunctive Graph:
- - the primary term is graphs in the datasets and not contexts with quads,
- so there is a separate method to set/retrieve a graph in a dataset and
- operate with graphs
- - graphs cannot be identified with blank nodes
- - added a method to directly add a single quad
+ An RDFLib Dataset is an object that stores multiple Named Graphs - instances of
+ RDFLib Graph identified by IRI - within it and allows whole-of-dataset or single
+ Graph use.
+
+ RDFLib's Dataset class is based on the `RDF 1.2 'Dataset' definition
+ <https://www.w3.org/TR/rdf12-concepts/#section-dataset>`_:
+
+ ..
+
+ An RDF dataset is a collection of RDF graphs, and comprises:
+
+ - Exactly one default graph, being an RDF graph. The default graph does not
+ have a name and MAY be empty.
+ - Zero or more named graphs. Each named graph is a pair consisting of an IRI or
+ a blank node (the graph name), and an RDF graph. Graph names are unique
+ within an RDF dataset.
- Examples of usage:
+ Accordingly, a Dataset allows for `Graph` objects to be added to it with
+ :class:`rdflib.term.URIRef` or :class:`rdflib.term.BNode` identifiers and always
+ creates a default graph with the :class:`rdflib.term.URIRef` identifier
+ :code:`urn:x-rdflib:default`.
+
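+ For example, a new Dataset's default graph is immediately present:
+
+ >>> ds = Dataset()
+ >>> ds.default_context.identifier
+ rdflib.term.URIRef('urn:x-rdflib:default')
+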
+ Dataset extends Graph's Subject, Predicate, Object (s, p, o) 'triple'
+ structure to include a graph identifier - archaically called Context - producing
+ 'quads' of s, p, o, g.
+
+ Triples, or quads, can be added to a Dataset. Triples, or quads with the graph
+ identifier :code:`urn:x-rdflib:default`, go into the default graph.
+
+ .. note:: Dataset builds on the `ConjunctiveGraph` class, but direct use of that
+ class is deprecated (since RDFLib 7.x) and it should not be used.
+ `ConjunctiveGraph` will be removed in a future RDFLib version.
+
+ Examples of usage (see also the ``examples/datasets.py`` file):
>>> # Create a new Dataset
>>> ds = Dataset()
    >>> # simple triples go to the default graph
- >>> ds.add((URIRef("http://example.org/a"),
- ... URIRef("http://www.example.org/b"),
- ... Literal("foo"))) # doctest: +ELLIPSIS
+ >>> ds.add((
+ ... URIRef("http://example.org/a"),
+ ... URIRef("http://www.example.org/b"),
+ ... Literal("foo")
+ ... )) # doctest: +ELLIPSIS
    <Graph identifier=... (<class 'rdflib.graph.Dataset'>)>
>>>
>>> # Create a graph in the dataset, if the graph name has already been
@@ -2320,16 +2388,19 @@ class Dataset(ConjunctiveGraph):
>>> g = ds.graph(URIRef("http://www.example.com/gr"))
>>>
>>> # add triples to the new graph as usual
- >>> g.add(
- ... (URIRef("http://example.org/x"),
+ >>> g.add((
+ ... URIRef("http://example.org/x"),
... URIRef("http://example.org/y"),
- ... Literal("bar")) ) # doctest: +ELLIPSIS
+ ... Literal("bar")
+ ... )) # doctest: +ELLIPSIS
    <Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> # alternatively: add a quad to the dataset -> goes to the graph
- >>> ds.add(
- ... (URIRef("http://example.org/x"),
+ >>> ds.add((
+ ... URIRef("http://example.org/x"),
... URIRef("http://example.org/z"),
- ... Literal("foo-bar"),g) ) # doctest: +ELLIPSIS
+ ... Literal("foo-bar"),
+ ... g
+ ... )) # doctest: +ELLIPSIS
    <Graph identifier=... (<class 'rdflib.graph.Dataset'>)>
>>>
>>> # querying triples return them all regardless of the graph
@@ -2395,8 +2466,8 @@ class Dataset(ConjunctiveGraph):
>>>
>>> # graph names in the dataset can be queried:
>>> for c in ds.graphs(): # doctest: +SKIP
- ... print(c) # doctest:
- DEFAULT
+ ... print(c.identifier) # doctest:
+ urn:x-rdflib:default
http://www.example.com/gr
>>> # A graph can be created without specifying a name; a skolemized genid
>>> # is created on the fly
@@ -2415,7 +2486,7 @@ class Dataset(ConjunctiveGraph):
>>>
>>> # a graph can also be removed from a dataset via ds.remove_graph(g)
- .. versionadded:: 4.0
+ ... versionadded:: 4.0
"""
def __init__(
diff --git a/rdflib/namespace/__init__.py b/rdflib/namespace/__init__.py
index 4077b0be3..eb8e2eeed 100644
--- a/rdflib/namespace/__init__.py
+++ b/rdflib/namespace/__init__.py
@@ -226,6 +226,7 @@ def __repr__(self) -> str:
# considered part of __dir__ results. These should be all annotations on
# `DefinedNamespaceMeta`.
_DFNS_RESERVED_ATTRS: Set[str] = {
+ "__slots__",
"_NS",
"_warn",
"_fail",
@@ -244,6 +245,8 @@ def __repr__(self) -> str:
class DefinedNamespaceMeta(type):
"""Utility metaclass for generating URIRefs with a common prefix."""
+ __slots__: Tuple[str, ...] = tuple()
+
_NS: Namespace
_warn: bool = True
_fail: bool = False # True means mimic ClosedNamespace
@@ -255,15 +258,11 @@ def __getitem__(cls, name: str, default=None) -> URIRef:
name = str(name)
if name in _DFNS_RESERVED_ATTRS:
- raise AttributeError(
- f"DefinedNamespace like object has no attribute {name!r}"
+ raise KeyError(
+ f"DefinedNamespace like object has no access item named {name!r}"
)
elif name in _IGNORED_ATTR_LOOKUP:
raise KeyError()
- if str(name).startswith("__"):
- # NOTE on type ignore: This seems to be a real bug, super() does not
- # implement this method, it will fail if it is ever reached.
- return super().__getitem__(name, default) # type: ignore[misc] # undefined in superclass
if (cls._warn or cls._fail) and name not in cls:
if cls._fail:
raise AttributeError(f"term '{name}' not in namespace '{cls._NS}'")
@@ -277,26 +276,39 @@ def __getitem__(cls, name: str, default=None) -> URIRef:
def __getattr__(cls, name: str):
if name in _IGNORED_ATTR_LOOKUP:
raise AttributeError()
+ elif name in _DFNS_RESERVED_ATTRS:
+ raise AttributeError(
+ f"DefinedNamespace like object has no attribute {name!r}"
+ )
+ elif name.startswith("__"):
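+            # let dunder lookups fall through to the normal type machinery
+            # rather than being minted as URIRefs in the namespace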
+ return super(DefinedNamespaceMeta, cls).__getattribute__(name)
return cls.__getitem__(name)
def __repr__(cls) -> str:
- return f"Namespace({str(cls._NS)!r})"
+ try:
+ ns_repr = repr(cls._NS)
+ except AttributeError:
+            ns_repr = "<DefinedNamespace>"
+ return f"Namespace({ns_repr})"
def __str__(cls) -> str:
- return str(cls._NS)
+ try:
+ return str(cls._NS)
+ except AttributeError:
+ return ""
def __add__(cls, other: str) -> URIRef:
return cls.__getitem__(other)
def __contains__(cls, item: str) -> bool:
"""Determine whether a URI or an individual item belongs to this namespace"""
+ try:
+ this_ns = cls._NS
+ except AttributeError:
+ return False
item_str = str(item)
- if item_str.startswith("__"):
- # NOTE on type ignore: This seems to be a real bug, super() does not
- # implement this method, it will fail if it is ever reached.
- return super().__contains__(item) # type: ignore[misc] # undefined in superclass
- if item_str.startswith(str(cls._NS)):
- item_str = item_str[len(str(cls._NS)) :]
+ if item_str.startswith(str(this_ns)):
+ item_str = item_str[len(str(this_ns)) :]
return any(
item_str in c.__annotations__
or item_str in c._extras
@@ -313,7 +325,7 @@ def __dir__(cls) -> Iterable[str]:
return values
def as_jsonld_context(self, pfx: str) -> dict: # noqa: N804
- """Returns this DefinedNamespace as a a JSON-LD 'context' object"""
+ """Returns this DefinedNamespace as a JSON-LD 'context' object"""
terms = {pfx: str(self._NS)}
for key, term in self.__annotations__.items():
if issubclass(term, URIRef):
@@ -328,6 +340,8 @@ class DefinedNamespace(metaclass=DefinedNamespaceMeta):
Warnings are emitted if unknown members are referenced if _warn is True
"""
+ __slots__: Tuple[str, ...] = tuple()
+
def __init__(self):
raise TypeError("namespace may not be instantiated")
diff --git a/rdflib/plugins/parsers/jsonld.py b/rdflib/plugins/parsers/jsonld.py
index 295a97126..e103e7033 100644
--- a/rdflib/plugins/parsers/jsonld.py
+++ b/rdflib/plugins/parsers/jsonld.py
@@ -34,6 +34,7 @@
# we should consider streaming the input to deal with arbitrarily large graphs.
from __future__ import annotations
+import secrets
import warnings
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
@@ -221,6 +222,7 @@ def __init__(
if allow_lists_of_lists is not None
else ALLOW_LISTS_OF_LISTS
)
+ self.invalid_uri_to_bnode: dict[str, BNode] = {}
def parse(self, data: Any, context: Context, dataset: Graph) -> Graph:
topcontext = False
@@ -629,7 +631,12 @@ def _to_rdf_id(self, context: Context, id_val: str) -> Optional[IdentifiedNode]:
uri = context.resolve(id_val)
if not self.generalized_rdf and ":" not in uri:
return None
- return URIRef(uri)
+ node: IdentifiedNode = URIRef(uri)
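+        # an @id that resolves to an empty IRI is invalid; map each distinct
+        # invalid @id to its own stable BNode so repeated references resolve
+        # to the same node and different invalid @ids are not merged together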
+ if not str(node):
+ if id_val not in self.invalid_uri_to_bnode:
+ self.invalid_uri_to_bnode[id_val] = BNode(secrets.token_urlsafe(20))
+ node = self.invalid_uri_to_bnode[id_val]
+ return node
def _get_bnodeid(self, ref: str) -> Optional[str]:
if not ref.startswith("_:"):
diff --git a/rdflib/plugins/serializers/hext.py b/rdflib/plugins/serializers/hext.py
index 9a8187c76..898308a09 100644
--- a/rdflib/plugins/serializers/hext.py
+++ b/rdflib/plugins/serializers/hext.py
@@ -77,8 +77,8 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = "utf-8",
- **kwargs,
- ):
+ **kwargs: Any,
+ ) -> None:
if base is not None:
warnings.warn(
"base has no meaning for Hextuples serialization. "
diff --git a/rdflib/plugins/serializers/jsonld.py b/rdflib/plugins/serializers/jsonld.py
index 15f307edf..0afe8305a 100644
--- a/rdflib/plugins/serializers/jsonld.py
+++ b/rdflib/plugins/serializers/jsonld.py
@@ -64,8 +64,8 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
- **kwargs,
- ):
+ **kwargs: Any,
+ ) -> None:
# TODO: docstring w. args and return value
encoding = encoding or "utf-8"
if encoding not in ("utf-8", "utf-16"):
diff --git a/rdflib/plugins/serializers/longturtle.py b/rdflib/plugins/serializers/longturtle.py
index e886574f3..8de1e52a2 100644
--- a/rdflib/plugins/serializers/longturtle.py
+++ b/rdflib/plugins/serializers/longturtle.py
@@ -16,7 +16,13 @@
- Nicholas Car, 2023
"""
+from __future__ import annotations
+
+from typing import IO, Any, Optional
+
+from rdflib.compare import to_canonical_graph
from rdflib.exceptions import Error
+from rdflib.graph import Graph
from rdflib.namespace import RDF
from rdflib.term import BNode, Literal, URIRef
@@ -38,11 +44,20 @@ class LongTurtleSerializer(RecursiveSerializer):
def __init__(self, store):
self._ns_rewrite = {}
- super(LongTurtleSerializer, self).__init__(store)
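+        # canonicalize the graph and sort its N-Triples form so that repeated
+        # serializations of graphs containing blank nodes are deterministic;
+        # skolemize for the round-trip parse, then de-skolemize to restore BNodes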
+ store = to_canonical_graph(store)
+ content = store.serialize(format="application/n-triples")
+ lines = content.split("\n")
+ lines.sort()
+ graph = Graph()
+ graph.parse(
+ data="\n".join(lines), format="application/n-triples", skolemize=True
+ )
+ graph = graph.de_skolemize()
+ super(LongTurtleSerializer, self).__init__(graph)
self.keywords = {RDF.type: "a"}
self.reset()
self.stream = None
- self._spacious = _SPACIOUS_OUTPUT
+ self._spacious: bool = _SPACIOUS_OUTPUT
def addNamespace(self, prefix, namespace):
# Turtle does not support prefixes that start with _
@@ -74,7 +89,14 @@ def reset(self):
self._started = False
self._ns_rewrite = {}
- def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
+ def serialize(
+ self,
+ stream: IO[bytes],
+ base: Optional[str] = None,
+ encoding: Optional[str] = None,
+ spacious: Optional[bool] = None,
+ **kwargs: Any,
+ ) -> None:
self.reset()
self.stream = stream
# if base is given here, use, if not and a base is set for the graph use that
@@ -175,7 +197,7 @@ def s_squared(self, subject):
return False
self.write("\n" + self.indent() + "[]")
self.predicateList(subject, newline=False)
- self.write(" ;\n.")
+ self.write("\n.")
return True
def path(self, node, position, newline=False):
@@ -292,6 +314,8 @@ def objectList(self, objects):
if count > 1:
if not isinstance(objects[0], BNode):
self.write("\n" + self.indent(1))
+ else:
+ self.write(" ")
first_nl = True
self.path(objects[0], OBJECT, newline=first_nl)
for obj in objects[1:]:
diff --git a/rdflib/plugins/serializers/nquads.py b/rdflib/plugins/serializers/nquads.py
index 3c8d02ccc..b74b9cab5 100644
--- a/rdflib/plugins/serializers/nquads.py
+++ b/rdflib/plugins/serializers/nquads.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import warnings
-from typing import IO, Optional
+from typing import IO, Any, Optional
from rdflib.graph import ConjunctiveGraph, Graph
from rdflib.plugins.serializers.nt import _quoteLiteral
@@ -26,8 +26,8 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
- **args,
- ):
+ **kwargs: Any,
+ ) -> None:
if base is not None:
warnings.warn("NQuadsSerializer does not support base.")
if encoding is not None and encoding.lower() != self.encoding.lower():
diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py
index e87f949e3..1b0343b5a 100644
--- a/rdflib/plugins/serializers/nt.py
+++ b/rdflib/plugins/serializers/nt.py
@@ -2,7 +2,7 @@
import codecs
import warnings
-from typing import IO, TYPE_CHECKING, Optional, Tuple, Union
+from typing import IO, TYPE_CHECKING, Any, Optional, Tuple, Union
from rdflib.graph import Graph
from rdflib.serializer import Serializer
@@ -33,7 +33,7 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = "utf-8",
- **args,
+ **kwargs: Any,
) -> None:
if base is not None:
warnings.warn("NTSerializer does not support base.")
diff --git a/rdflib/plugins/serializers/patch.py b/rdflib/plugins/serializers/patch.py
index 3a5d37215..1bc5ff41f 100644
--- a/rdflib/plugins/serializers/patch.py
+++ b/rdflib/plugins/serializers/patch.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import warnings
-from typing import IO, Optional
+from typing import IO, Any, Optional
from uuid import uuid4
from rdflib import Dataset
@@ -32,8 +32,8 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
- **kwargs,
- ):
+ **kwargs: Any,
+ ) -> None:
"""
Serialize the store to the given stream.
:param stream: The stream to serialize to.
diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py
index d6a2f6abb..8ae7d78cb 100644
--- a/rdflib/plugins/serializers/rdfxml.py
+++ b/rdflib/plugins/serializers/rdfxml.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import xml.dom.minidom
-from typing import IO, Dict, Generator, Optional, Set, Tuple
+from typing import IO, Any, Dict, Generator, Optional, Set, Tuple
from xml.sax.saxutils import escape, quoteattr
from rdflib.collection import Collection
@@ -47,7 +47,7 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
- **args,
+ **kwargs: Any,
) -> None:
# if base is given here, use that, if not and a base is set for the graph use that
if base is not None:
@@ -66,8 +66,8 @@ def serialize(
write(" None:
self.__serialized: Dict[Identifier, int] = {}
store = self.store
@@ -185,7 +185,7 @@ def serialize(
self.base = base
elif store.base is not None:
self.base = store.base
- self.max_depth = args.get("max_depth", 3)
+ self.max_depth = kwargs.get("max_depth", 3)
assert self.max_depth > 0, "max_depth must be greater than 0"
self.nm = nm = store.namespace_manager
@@ -205,8 +205,8 @@ def serialize(
writer.push(RDFVOC.RDF)
- if "xml_base" in args:
- writer.attribute(XMLBASE, args["xml_base"])
+ if "xml_base" in kwargs:
+ writer.attribute(XMLBASE, kwargs["xml_base"])
elif self.base:
writer.attribute(XMLBASE, self.base)
diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py
index 984f80c5a..95b5e42c0 100644
--- a/rdflib/plugins/serializers/trig.py
+++ b/rdflib/plugins/serializers/trig.py
@@ -5,7 +5,7 @@
from __future__ import annotations
-from typing import IO, TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+from typing import IO, TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from rdflib.graph import ConjunctiveGraph, Graph
from rdflib.plugins.serializers.turtle import TurtleSerializer
@@ -67,8 +67,8 @@ def serialize(
base: Optional[str] = None,
encoding: Optional[str] = None,
spacious: Optional[bool] = None,
- **args,
- ):
+ **kwargs: Any,
+ ) -> None:
self.reset()
self.stream = stream
# if base is given here, use that, if not and a base is set for the graph use that
diff --git a/rdflib/plugins/serializers/trix.py b/rdflib/plugins/serializers/trix.py
index 008360e6b..95730e8fb 100644
--- a/rdflib/plugins/serializers/trix.py
+++ b/rdflib/plugins/serializers/trix.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import IO, Optional
+from typing import IO, Any, Optional
from rdflib.graph import ConjunctiveGraph, Graph
from rdflib.namespace import Namespace
@@ -28,8 +28,8 @@ def serialize(
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
- **args,
- ):
+ **kwargs: Any,
+ ) -> None:
nm = self.store.namespace_manager
self.writer = XMLWriter(stream, nm, encoding, extra_ns={"": TRIXNS})
diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py
index a26df04a6..d1dfcf4a6 100644
--- a/rdflib/plugins/serializers/turtle.py
+++ b/rdflib/plugins/serializers/turtle.py
@@ -228,7 +228,7 @@ def serialize(
base: Optional[str] = None,
encoding: Optional[str] = None,
spacious: Optional[bool] = None,
- **args: Any,
+ **kwargs: Any,
) -> None:
self.reset()
self.stream = stream
diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py
index 3ee230f53..789d07610 100644
--- a/rdflib/plugins/sparql/parser.py
+++ b/rdflib/plugins/sparql/parser.py
@@ -1483,7 +1483,7 @@ def expandCollection(terms: ParseResults) -> List[List[Any]]:
AskQuery = Comp(
"AskQuery",
Keyword("ASK")
- + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
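+    # collect each DatasetClause as its own param so ASK honours multiple
+    # FROM / FROM NAMED clauses, as the other query forms already do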
+ WhereClause
+ SolutionModifier
+ ValuesClause,
diff --git a/rdflib/plugins/stores/auditable.py b/rdflib/plugins/stores/auditable.py
index 7a9748c69..b8fb53419 100644
--- a/rdflib/plugins/stores/auditable.py
+++ b/rdflib/plugins/stores/auditable.py
@@ -10,7 +10,7 @@
Calls to commit or rollback, flush the list of reverse operations This
provides thread-safe atomicity and isolation (assuming concurrent operations
occur with different store instances), but no durability (transactions are
-persisted in memory and wont be available to reverse operations after the
+persisted in memory and won't be available to reverse operations after the
system fails): A and I out of ACID.
"""
diff --git a/test/data/longturtle/longturtle-target.ttl b/test/data/longturtle/longturtle-target.ttl
new file mode 100644
index 000000000..54cf23e9f
--- /dev/null
+++ b/test/data/longturtle/longturtle-target.ttl
@@ -0,0 +1,72 @@
+PREFIX geo: <http://www.opengis.net/ont/geosparql#>
+PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+PREFIX schema: <https://schema.org/>
+PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
+
+
+ a schema:Person ;
+ schema:age 41 ;
+ schema:alternateName
+ [
+ schema:name "Dr N.J. Car" ;
+ ] ,
+ "N.J. Car" ,
+ "Nick Car" ;
+ schema:name
+ [
+ a ;
+ schema:hasPart
+ [
+ a ;
+ schema:hasPart
+ [
+ a ;
+ rdf:value "Car" ;
+ ] ,
+ [
+ a ;
+ rdf:value "Maxov" ;
+ ] ;
+ ] ,
+ [
+ a ;
+ rdf:value "Nicholas" ;
+ ] ,
+ [
+ a ;
+ rdf:value "John" ;
+ ] ;
+ ] ;
+ schema:worksFor ;
+.
+
+
+ a schema:Organization ;
+ schema:location ;
+.
+
+
+ a schema:Place ;
+ schema:address
+ [
+ a schema:PostalAddress ;
+ schema:addressCountry
+ [
+ schema:identifier "au" ;
+ schema:name "Australia" ;
+ ] ;
+ schema:addressLocality "Shorncliffe" ;
+ schema:addressRegion "QLD" ;
+ schema:postalCode 4017 ;
+ schema:streetAddress (
+ 72
+ "Yundah"
+ "Street"
+ ) ;
+ ] ;
+ schema:geo
+ [
+ schema:polygon "POLYGON((153.082403 -27.325801, 153.08241 -27.32582, 153.082943 -27.325612, 153.083010 -27.325742, 153.083543 -27.325521, 153.083456 -27.325365, 153.082403 -27.325801))"^^geo:wktLiteral ;
+ ] ;
+ schema:name "KurrawongAI HQ" ;
+.
diff --git a/test/jsonld/local-suite/manifest.jsonld b/test/jsonld/local-suite/manifest.jsonld
index b32fd059a..0150b44c7 100644
--- a/test/jsonld/local-suite/manifest.jsonld
+++ b/test/jsonld/local-suite/manifest.jsonld
@@ -27,6 +27,17 @@
"purpose": "Multiple @id aliases. Issue #2164",
"input": "toRdf-twoimports-in.jsonld",
"expect": "toRdf-twoimports-out.nq"
+ },
+ {
+ "@id": "#toRdf-two-invalid-ids",
+ "@type": ["jld:PositiveEvaluationTest", "jld:ToRDFTest"],
+ "name": "Two invalid identifiers",
+ "purpose": "Multiple nodes with invalid @ids are not merged together.",
+ "option": {
+ "produceGeneralizedRdf": true
+ },
+ "input": "toRdf-twoinvalidids-in.jsonld",
+ "expect": "toRdf-twoinvalidids-out.nq"
}
]
}
diff --git a/test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld b/test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld
new file mode 100644
index 000000000..67f62927c
--- /dev/null
+++ b/test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld
@@ -0,0 +1,20 @@
+{
+ "@id": "https://example.org/root-object",
+ "https://schema.org/author": [
+ {
+ "@id": "https://example.org/ invalid url 1",
+ "https://schema.org/name": "Jane Doe"
+ },
+ {
+ "@id": "https://example.org/ invalid url 1",
+ "https://schema.org/givenName": "Jane",
+ "https://schema.org/familyName": "Doe"
+ },
+ {
+ "@id": "https://example.org/ invalid url 2",
+ "https://schema.org/name": "John Doe",
+ "https://schema.org/givenName": "John",
+ "https://schema.org/familyName": "Doe"
+ }
+ ]
+}
diff --git a/test/jsonld/local-suite/toRdf-twoinvalidids-out.nq b/test/jsonld/local-suite/toRdf-twoinvalidids-out.nq
new file mode 100644
index 000000000..c6550560c
--- /dev/null
+++ b/test/jsonld/local-suite/toRdf-twoinvalidids-out.nq
@@ -0,0 +1,10 @@
+
+<https://example.org/root-object> <https://schema.org/author> _:b1.
+<https://example.org/root-object> <https://schema.org/author> _:b2.
+
+_:b1 <https://schema.org/name> "Jane Doe".
+_:b1 <https://schema.org/givenName> "Jane".
+_:b1 <https://schema.org/familyName> "Doe".
+_:b2 <https://schema.org/name> "John Doe".
+_:b2 <https://schema.org/givenName> "John".
+_:b2 <https://schema.org/familyName> "Doe".
diff --git a/test/test_dataset/test_dataset.py b/test/test_dataset/test_dataset.py
index 19b9fe830..9f9bc9c26 100644
--- a/test/test_dataset/test_dataset.py
+++ b/test/test_dataset/test_dataset.py
@@ -5,11 +5,10 @@
import pytest
-from rdflib import URIRef, plugin
+from rdflib import BNode, Namespace, URIRef, plugin
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Dataset, Graph
from rdflib.store import Store
from test.data import CONTEXT1, LIKES, PIZZA, TAREK
-from test.utils.namespace import EGSCHEME
# Will also run SPARQLUpdateStore tests against local SPARQL1.1 endpoint if
# available. This assumes SPARQL1.1 query/update endpoints running locally at
@@ -58,9 +57,9 @@ def get_dataset(request):
except ImportError:
pytest.skip("Dependencies for store '%s' not available!" % store)
- graph = Dataset(store=store)
+ d = Dataset(store=store)
- if not graph.store.graph_aware:
+ if not d.store.graph_aware:
return
if store in ["SQLiteLSM", "LevelDB"]:
@@ -75,31 +74,39 @@ def get_dataset(request):
else:
path = tempfile.mkdtemp()
- graph.open(path, create=True if store != "SPARQLUpdateStore" else False)
+ d.open(path, create=True if store != "SPARQLUpdateStore" else False)
if store == "SPARQLUpdateStore":
try:
- graph.store.update("CLEAR ALL")
+ d.graph()
+ d.add(
+ (
+ URIRef("http://example.com/s"),
+ URIRef("http://example.com/p"),
+ URIRef("http://example.com/o"),
+ )
+ )
+ d.store.update("CLEAR ALL")
except Exception as e:
if "SPARQLStore does not support BNodes! " in str(e):
pass
else:
raise Exception(e)
- yield store, graph
+ yield store, d
if store == "SPARQLUpdateStore":
try:
- graph.store.update("CLEAR ALL")
+ d.update("CLEAR ALL")
except Exception as e:
if "SPARQLStore does not support BNodes! " in str(e):
pass
else:
raise Exception(e)
- graph.close()
+ d.close()
else:
- graph.close()
- graph.destroy(path)
+ d.close()
+ d.destroy(path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
@@ -121,7 +128,7 @@ def test_graph_aware(get_dataset):
# empty named graphs
if store != "SPARQLUpdateStore":
# added graph exists
- assert set(x.identifier for x in dataset.contexts()) == set(
+ assert set(x.identifier for x in dataset.graphs()) == set(
[CONTEXT1, DATASET_DEFAULT_GRAPH_ID]
)
@@ -131,7 +138,7 @@ def test_graph_aware(get_dataset):
g1.add((TAREK, LIKES, PIZZA))
# added graph still exists
- assert set(x.identifier for x in dataset.contexts()) == set(
+ assert set(x.identifier for x in dataset.graphs()) == set(
[CONTEXT1, DATASET_DEFAULT_GRAPH_ID]
)
@@ -147,14 +154,14 @@ def test_graph_aware(get_dataset):
# empty named graphs
if store != "SPARQLUpdateStore":
# graph still exists, although empty
- assert set(x.identifier for x in dataset.contexts()) == set(
+ assert set(x.identifier for x in dataset.graphs()) == set(
[CONTEXT1, DATASET_DEFAULT_GRAPH_ID]
)
dataset.remove_graph(CONTEXT1)
# graph is gone
- assert set(x.identifier for x in dataset.contexts()) == set(
+ assert set(x.identifier for x in dataset.graphs()) == set(
[DATASET_DEFAULT_GRAPH_ID]
)
@@ -173,7 +180,7 @@ def test_default_graph(get_dataset):
dataset.add((TAREK, LIKES, PIZZA))
assert len(dataset) == 1
# only default exists
- assert list(dataset.contexts()) == [dataset.default_context]
+ assert list(dataset.graphs()) == [dataset.default_context]
# removing default graph removes triples but not actual graph
dataset.remove_graph(DATASET_DEFAULT_GRAPH_ID)
@@ -181,7 +188,7 @@ def test_default_graph(get_dataset):
assert len(dataset) == 0
# default still exists
- assert set(dataset.contexts()) == set([dataset.default_context])
+ assert set(dataset.graphs()) == set([dataset.default_context])
def test_not_union(get_dataset):
@@ -193,11 +200,11 @@ def test_not_union(get_dataset):
"its default graph as the union of the named graphs"
)
- subgraph1 = dataset.graph(CONTEXT1)
- subgraph1.add((TAREK, LIKES, PIZZA))
+ g1 = dataset.graph(CONTEXT1)
+ g1.add((TAREK, LIKES, PIZZA))
assert list(dataset.objects(TAREK, None)) == []
- assert list(subgraph1.objects(TAREK, None)) == [PIZZA]
+ assert list(g1.objects(TAREK, None)) == [PIZZA]
def test_iter(get_dataset):
@@ -208,16 +215,16 @@ def test_iter(get_dataset):
uri_c = URIRef("https://example.com/c")
uri_d = URIRef("https://example.com/d")
- d.graph(URIRef("https://example.com/subgraph1"))
- d.add((uri_a, uri_b, uri_c, URIRef("https://example.com/subgraph1")))
+ d.graph(URIRef("https://example.com/g1"))
+ d.add((uri_a, uri_b, uri_c, URIRef("https://example.com/g1")))
d.add(
- (uri_a, uri_b, uri_c, URIRef("https://example.com/subgraph1"))
+ (uri_a, uri_b, uri_c, URIRef("https://example.com/g1"))
) # pointless addition: duplicates above
d.graph(URIRef("https://example.com/g2"))
d.add((uri_a, uri_b, uri_c, URIRef("https://example.com/g2")))
- d.add((uri_a, uri_b, uri_d, URIRef("https://example.com/subgraph1")))
+ d.add((uri_a, uri_b, uri_d, URIRef("https://example.com/g1")))
# traditional iterator
i_trad = 0
@@ -232,7 +239,7 @@ def test_iter(get_dataset):
assert i_new == i_trad # both should be 3
-def test_subgraph_without_identifier() -> None:
+def test_graph_without_identifier() -> None:
"""
    Graphs with no identifiers assigned are identified by Skolem IRIs with a
prefix that is bound to `genid`.
@@ -241,9 +248,9 @@ def test_subgraph_without_identifier() -> None:
reviewed at some point.
"""
- dataset = Dataset()
+ d = Dataset()
- nman = dataset.namespace_manager
+ nman = d.namespace_manager
genid_prefix = URIRef("https://rdflib.github.io/.well-known/genid/rdflib/")
@@ -253,15 +260,36 @@ def test_subgraph_without_identifier() -> None:
is None
)
- subgraph: Graph = dataset.graph()
- subgraph.add((EGSCHEME["subject"], EGSCHEME["predicate"], EGSCHEME["object"]))
+ ex = Namespace("http://example.com/")
+ g1: Graph = d.graph()
+ g1.add((ex.subject, ex.predicate, ex.object))
namespaces = set(nman.namespaces())
assert next(
(namespace for namespace in namespaces if namespace[0] == "genid"), None
) == ("genid", genid_prefix)
- assert f"{subgraph.identifier}".startswith(genid_prefix)
+ assert f"{g1.identifier}".startswith(genid_prefix)
+
+ # now add a preexisting graph with no identifier
+ # i.e. not one created within this Dataset object
+ g2 = Graph()
+ g2.add((ex.subject, ex.predicate, ex.object))
+ d.add_graph(g2)
+
+ iris = 0
+ bns = 0
+ others = 0
+ for g in d.graphs():
+ if type(g.identifier) is URIRef:
+ iris += 1
+ elif type(g.identifier) is BNode:
+ bns += 1
+ else:
+ others += 1
+ assert iris == 2
+ assert bns == 1
+ assert others == 0
def test_not_deprecated():
diff --git a/test/test_extras/test_shacl_extras.py b/test/test_extras/test_shacl_extras.py
index 417e75b68..1144e9b9e 100644
--- a/test/test_extras/test_shacl_extras.py
+++ b/test/test_extras/test_shacl_extras.py
@@ -4,8 +4,9 @@
import pytest
-from rdflib import Graph, URIRef
-from rdflib.extras.shacl import SHACLPathError, parse_shacl_path
+from rdflib import Graph, Literal, URIRef, paths
+from rdflib.compare import graph_diff
+from rdflib.extras.shacl import SHACLPathError, build_shacl_path, parse_shacl_path
from rdflib.namespace import SH, Namespace
from rdflib.paths import Path
@@ -109,7 +110,32 @@ def path_source_data():
) ;
] ;
.
- ex:TestPropShape10
+ ex:TestPropShape10a
+ sh:path (
+ [
+ sh:zeroOrMorePath [
+ sh:inversePath ex:pred1 ;
+ ] ;
+ ]
+ [
+ sh:alternativePath (
+ [
+ sh:zeroOrMorePath [
+ sh:inversePath ex:pred1 ;
+ ] ;
+ ]
+ ex:pred1
+ [
+ sh:oneOrMorePath ex:pred2 ;
+ ]
+ [
+ sh:zeroOrMorePath ex:pred3 ;
+ ]
+ ) ;
+ ]
+ ) ;
+ .
+ ex:TestPropShape10b
sh:path (
[
sh:zeroOrMorePath [
@@ -192,7 +218,13 @@ def path_source_data():
~EX.pred1 | EX.pred1 / EX.pred2 | EX.pred1 | EX.pred2 | EX.pred3,
),
(
- EX.TestPropShape10,
+ EX.TestPropShape10a,
+ ~EX.pred1
+ * "*"
+ / (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator]
+ ),
+ (
+ EX.TestPropShape10b,
~EX.pred1
* "*"
/ (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator]
@@ -216,3 +248,49 @@ def test_parse_shacl_path(
parse_shacl_path(path_source_data, path_root) # type: ignore[arg-type]
else:
assert parse_shacl_path(path_source_data, path_root) == expected # type: ignore[arg-type]
+
+
+@pytest.mark.parametrize(
+ ("resource", "path"),
+ (
+ # Single SHACL Path
+ (EX.TestPropShape1, EX.pred1),
+ (EX.TestPropShape2a, EX.pred1 / EX.pred2 / EX.pred3),
+ (EX.TestPropShape3, ~EX.pred1),
+ (EX.TestPropShape4a, EX.pred1 | EX.pred2 | EX.pred3),
+ (EX.TestPropShape5, EX.pred1 * "*"), # type: ignore[operator]
+ (EX.TestPropShape6, EX.pred1 * "+"), # type: ignore[operator]
+ (EX.TestPropShape7, EX.pred1 * "?"), # type: ignore[operator]
+ # SHACL Path Combinations
+ (EX.TestPropShape8, ~EX.pred1 * "*"),
+ (
+ EX.TestPropShape10a,
+ ~EX.pred1
+ * "*"
+ / (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator]
+ ),
+ (TypeError, Literal("Not a valid path")),
+ (SHACLPathError, paths.SequencePath(SH.targetClass)),
+ (SHACLPathError, paths.AlternativePath(SH.targetClass)),
+ ),
+)
+def test_build_shacl_path(
+ path_source_data: Graph, resource: URIRef | type, path: Union[URIRef, Path]
+):
+ if isinstance(resource, type):
+ with pytest.raises(resource):
+ build_shacl_path(path)
+ else:
+ expected_path_root = path_source_data.value(resource, SH.path)
+ actual_path_root, actual_path_graph = build_shacl_path(path)
+ if isinstance(expected_path_root, URIRef):
+ assert actual_path_root == expected_path_root
+ assert actual_path_graph is None
+ else:
+ assert isinstance(actual_path_graph, Graph)
+ expected_path_graph = path_source_data.cbd(expected_path_root) # type: ignore[arg-type]
+ in_both, in_first, in_second = graph_diff(
+ expected_path_graph, actual_path_graph
+ )
+ assert len(in_first) == 0
+ assert len(in_second) == 0
diff --git a/test/test_namespace/test_definednamespace.py b/test/test_namespace/test_definednamespace.py
index ea8e12969..5860e8eb2 100644
--- a/test/test_namespace/test_definednamespace.py
+++ b/test/test_namespace/test_definednamespace.py
@@ -299,14 +299,9 @@ def test_repr(dfns: Type[DefinedNamespace]) -> None:
ns_uri = f"{prefix}{dfns_info.suffix}"
logging.debug("ns_uri = %s", ns_uri)
- repr_str: Optional[str] = None
-
- with ExitStack() as xstack:
- if dfns_info.suffix is None:
- xstack.enter_context(pytest.raises(AttributeError))
- repr_str = f"{dfns_info.dfns!r}"
+ repr_str: str = f"{dfns_info.dfns!r}"
if dfns_info.suffix is None:
- assert repr_str is None
+        assert "<DefinedNamespace>" in repr_str
else:
assert repr_str is not None
repro = eval(repr_str)
@@ -368,20 +363,15 @@ def test_contains(
dfns_info = get_dfns_info(dfns)
if dfns_info.suffix is not None:
logging.debug("dfns_info = %s", dfns_info)
- if dfns_info.has_attrs is False:
+ if dfns_info.has_attrs is False or dfns_info.suffix is None:
is_defined = False
- does_contain: Optional[bool] = None
- with ExitStack() as xstack:
- if dfns_info.suffix is None:
- xstack.enter_context(pytest.raises(AttributeError))
- does_contain = attr_name in dfns
- if dfns_info.suffix is not None:
- if is_defined:
- assert does_contain is True
- else:
- assert does_contain is False
+
+ does_contain: bool = attr_name in dfns
+
+ if is_defined:
+ assert does_contain is True
else:
- assert does_contain is None
+ assert does_contain is False
@pytest.mark.parametrize(
diff --git a/test/test_serializers/test_serializer_longturtle.py b/test/test_serializers/test_serializer_longturtle.py
index 847d506ab..c1761b6da 100644
--- a/test/test_serializers/test_serializer_longturtle.py
+++ b/test/test_serializers/test_serializer_longturtle.py
@@ -1,5 +1,5 @@
import difflib
-from textwrap import dedent
+from pathlib import Path
from rdflib import Graph, Namespace
from rdflib.namespace import GEO, SDO
@@ -170,83 +170,11 @@ def test_longturtle():
output = g.serialize(format="longturtle")
# fix the target
- target = dedent(
- """ PREFIX cn:
- PREFIX ex:
- PREFIX geo:
- PREFIX rdf:
- PREFIX sdo:
- PREFIX xsd:
+ current_dir = Path.cwd() # Get the current directory
+ target_file_path = current_dir / "test/data/longturtle" / "longturtle-target.ttl"
- ex:nicholas
- a sdo:Person ;
- sdo:age 41 ;
- sdo:alternateName
- [
- sdo:name "Dr N.J. Car" ;
- ] ,
- "N.J. Car" ,
- "Nick Car" ;
- sdo:name
- [
- a cn:CompoundName ;
- sdo:hasPart
- [
- a cn:CompoundName ;
- rdf:value "Nicholas" ;
- ] ,
- [
- a cn:CompoundName ;
- rdf:value "John" ;
- ] ,
- [
- a cn:CompoundName ;
- sdo:hasPart
- [
- a cn:CompoundName ;
- rdf:value "Car" ;
- ] ,
- [
- a cn:CompoundName ;
- rdf:value "Maxov" ;
- ] ;
- ] ;
- ] ;
- sdo:worksFor ;
- .
-
-
- a sdo:Organization ;
- sdo:location ;
- .
-
-
- a sdo:Place ;
- sdo:address
- [
- a sdo:PostalAddress ;
- sdo:addressCountry
- [
- sdo:identifier "au" ;
- sdo:name "Australia" ;
- ] ;
- sdo:addressLocality "Shorncliffe" ;
- sdo:addressRegion "QLD" ;
- sdo:postalCode 4017 ;
- sdo:streetAddress (
- 72
- "Yundah"
- "Street"
- ) ;
- ] ;
- sdo:geo
- [
- sdo:polygon "POLYGON((153.082403 -27.325801, 153.08241 -27.32582, 153.082943 -27.325612, 153.083010 -27.325742, 153.083543 -27.325521, 153.083456 -27.325365, 153.082403 -27.325801))"^^geo:wktLiteral ;
- ] ;
- sdo:name "KurrawongAI HQ" ;
- .
- """
- )
+ with open(target_file_path, encoding="utf-8") as file:
+ target = file.read()
# compare output to target
# - any differences will produce output
diff --git a/test/test_serializers/test_serializer_longturtle_sort.py b/test/test_serializers/test_serializer_longturtle_sort.py
new file mode 100644
index 000000000..0e397afaf
--- /dev/null
+++ b/test/test_serializers/test_serializer_longturtle_sort.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python3
+
+# Portions of this file contributed by NIST are governed by the
+# following statement:
+#
+# This software was developed at the National Institute of Standards
+# and Technology by employees of the Federal Government in the course
+# of their official duties. Pursuant to Title 17 Section 105 of the
+# United States Code, this software is not subject to copyright
+# protection within the United States. NIST assumes no responsibility
+# whatsoever for its use by other parties, and makes no guarantees,
+# expressed or implied, about its quality, reliability, or any other
+# characteristic.
+#
+# We would appreciate acknowledgement if the software is used.
+
+from __future__ import annotations
+
+import random
+from collections import defaultdict
+from typing import DefaultDict, List
+
+from rdflib import RDFS, BNode, Graph, Literal, Namespace, URIRef
+
+EX = Namespace("http://example.org/ex/")
+
+
+def test_sort_semiblank_graph() -> None:
+ """
+    This test reviews whether longturtle output is consistent across
+    repeated serializations of graphs involving blank nodes.
+ """
+
+ serialization_counter: DefaultDict[str, int] = defaultdict(int)
+
+ first_graph_text: str = ""
+
+ # Use a fixed sequence of once-but-no-longer random values for more
+ # consistent test results.
+ nonrandom_shuffler = random.Random(1234)
+ for x in range(1, 10):
+ graph = Graph()
+ graph.bind("ex", EX)
+ graph.bind("rdfs", RDFS)
+
+ graph.add((EX.A, RDFS.comment, Literal("Thing A")))
+ graph.add((EX.B, RDFS.comment, Literal("Thing B")))
+ graph.add((EX.C, RDFS.comment, Literal("Thing C")))
+
+ nodes: List[URIRef] = [EX.A, EX.B, EX.C, EX.B]
+ nonrandom_shuffler.shuffle(nodes)
+ for node in nodes:
+ # Instantiate one bnode per URIRef node.
+ graph.add((BNode(), RDFS.seeAlso, node))
+
+ nesteds: List[URIRef] = [EX.A, EX.B, EX.C]
+ nonrandom_shuffler.shuffle(nesteds)
+ for nested in nesteds:
+ # Instantiate a nested node reference.
+ outer_node = BNode()
+ inner_node = BNode()
+ graph.add((outer_node, EX.has, inner_node))
+ graph.add((inner_node, RDFS.seeAlso, nested))
+
+ graph_text = graph.serialize(format="longturtle", sort=True)
+ if first_graph_text == "":
+ first_graph_text = graph_text
+
+ serialization_counter[graph_text] += 1
+
+ expected_serialization = """\
+PREFIX ns1: <http://example.org/ex/>
+PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+
+ns1:A
+ rdfs:comment "Thing A" ;
+.
+
+ns1:C
+ rdfs:comment "Thing C" ;
+.
+
+ns1:B
+ rdfs:comment "Thing B" ;
+.
+
+[] ns1:has
+ [
+ rdfs:seeAlso ns1:A ;
+ ] ;
+.
+
+[] rdfs:seeAlso ns1:B ;
+.
+
+[] ns1:has
+ [
+ rdfs:seeAlso ns1:C ;
+ ] ;
+.
+
+[] rdfs:seeAlso ns1:A ;
+.
+
+[] rdfs:seeAlso ns1:C ;
+.
+
+[] rdfs:seeAlso ns1:B ;
+.
+
+[] ns1:has
+ [
+ rdfs:seeAlso ns1:B ;
+ ] ;
+.
+
+"""
+
+ assert expected_serialization.strip() == first_graph_text.strip()
+ assert 1 == len(serialization_counter)
diff --git a/test/test_sparql/test_dataset_exclusive.py b/test/test_sparql/test_dataset_exclusive.py
index 2ce23d52b..d867623c2 100644
--- a/test/test_sparql/test_dataset_exclusive.py
+++ b/test/test_sparql/test_dataset_exclusive.py
@@ -82,3 +82,13 @@ def test_from_and_from_named():
(None, URIRef("urn:s1"), URIRef("urn:p1"), URIRef("urn:o1")),
(URIRef("urn:g2"), URIRef("urn:s2"), URIRef("urn:p2"), URIRef("urn:o2")),
]
+
+
+def test_ask_from():
+ query = """
+ ASK
+    FROM <urn:g1>
+ WHERE {?s ?p ?o}
+ """
+ results = bool(dataset.query(query))
+ assert results
diff --git a/test_reports/rdflib_w3c_sparql10-HEAD.ttl b/test_reports/rdflib_w3c_sparql10-HEAD.ttl
index 78997b01c..b8369a94d 100644
--- a/test_reports/rdflib_w3c_sparql10-HEAD.ttl
+++ b/test_reports/rdflib_w3c_sparql10-HEAD.ttl
@@ -1795,7 +1795,7 @@
earl:assertedBy ;
earl:mode earl:automatic ;
earl:result [ a earl:TestResult ;
- earl:outcome earl:failed ] ;
+ earl:outcome earl:passed ] ;
earl:subject ;
earl:test .
@@ -1859,7 +1859,7 @@
earl:assertedBy ;
earl:mode earl:automatic ;
earl:result [ a earl:TestResult ;
- earl:outcome earl:failed ] ;
+ earl:outcome earl:passed ] ;
earl:subject ;
earl:test .
@@ -1907,7 +1907,7 @@
earl:assertedBy ;
earl:mode earl:automatic ;
earl:result [ a earl:TestResult ;
- earl:outcome earl:failed ] ;
+ earl:outcome earl:passed ] ;
earl:subject ;
earl:test .
@@ -2787,7 +2787,7 @@
earl:assertedBy ;
earl:mode earl:automatic ;
earl:result [ a earl:TestResult ;
- earl:outcome earl:failed ] ;
+ earl:outcome earl:passed ] ;
earl:subject ;
earl:test .