 from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi
 from groundlight_openapi_client.api.notes_api import NotesApi
 from groundlight_openapi_client.model.action_request import ActionRequest
+from groundlight_openapi_client.model.bounding_box_mode_configuration import BoundingBoxModeConfiguration
 from groundlight_openapi_client.model.channel_enum import ChannelEnum
 from groundlight_openapi_client.model.condition_request import ConditionRequest
 from groundlight_openapi_client.model.count_mode_configuration import CountModeConfiguration
@@ -902,10 +903,12 @@ def create_counting_detector( # noqa: PLR0913 # pylint: disable=too-many-argume
             metadata=metadata,
         )
         detector_creation_input.mode = ModeEnum.COUNT
-        # TODO: pull the BE defined default
+
         if max_count is None:
-            max_count = 10
-        mode_config = CountModeConfiguration(max_count=max_count, class_name=class_name)
+            mode_config = CountModeConfiguration(class_name=class_name)
+        else:
+            mode_config = CountModeConfiguration(max_count=max_count, class_name=class_name)
+
         detector_creation_input.mode_configuration = mode_config
         obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT)
         return Detector.parse_obj(obj.to_dict())
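
Reading the hunk above from the caller's side: leaving max_count unset no longer hard-codes a client-side cap of 10; the CountModeConfiguration is built without max_count, so the backend-defined default applies. A minimal usage sketch, assuming the top-level ExperimentalApi import path; detector names and queries are illustrative, not from this change:

from groundlight import ExperimentalApi  # assumed import path for the experimental client

gl = ExperimentalApi()

# Omitting max_count now defers to the backend-defined default instead of a
# client-side fallback of 10.
detector = gl.create_counting_detector(
    name="car_counter",  # illustrative name
    query="How many cars are in the parking lot?",
    class_name="car",
)

# Passing max_count explicitly still sets it on the CountModeConfiguration, as before.
capped_detector = gl.create_counting_detector(
    name="car_counter_capped",
    query="How many cars are in the parking lot?",
    class_name="car",
    max_count=20,
)
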
@@ -974,6 +977,81 @@ def create_multiclass_detector( # noqa: PLR0913 # pylint: disable=too-many-argu
         obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT)
         return Detector.parse_obj(obj.to_dict())
 
+    def create_bounding_box_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals
+        self,
+        name: str,
+        query: str,
+        class_name: str,
+        *,
+        max_num_bboxes: Optional[int] = None,
+        group_name: Optional[str] = None,
+        confidence_threshold: Optional[float] = None,
+        patience_time: Optional[float] = None,
+        pipeline_config: Optional[str] = None,
+        metadata: Union[dict, str, None] = None,
+    ) -> Detector:
+        """
+        Creates a bounding box detector that locates objects of the given class in an image, returning up to a
+        specified maximum number of bounding boxes.
+
+        **Example usage**::
+
+            gl = ExperimentalApi()
+
+            # Create a detector that finds up to 5 people per image
+            detector = gl.create_bounding_box_detector(
+                name="people_counter",
+                query="Draw a bounding box around each person in the image",
+                class_name="person",
+                max_num_bboxes=5,
+                confidence_threshold=0.9,
+                patience_time=30.0
+            )
+
+            # Use the detector to find people in an image
+            image_query = gl.ask_ml(detector, "path/to/image.jpg")
+            print(f"Confidence: {image_query.result.confidence}")
+            print(f"Bounding boxes: {image_query.result.rois}")
+
+        :param name: A short, descriptive name for the detector.
+        :param query: A question about the object to detect in the image.
+        :param class_name: The class name of the object to detect.
+        :param max_num_bboxes: The maximum number of bounding boxes to detect per image. If not set, the
+            backend-defined default is used.
+        :param group_name: Optional name of a group to organize related detectors together.
+        :param confidence_threshold: A value that sets the minimum confidence level required for the ML model's
+            predictions. If confidence is below this threshold, the query may be sent for human review.
+        :param patience_time: The maximum time in seconds that Groundlight will attempt to generate a
+            confident prediction before falling back to human review. Defaults to 30 seconds.
+        :param pipeline_config: Advanced usage only. Configuration string needed to instantiate a specific
+            prediction pipeline for this detector.
+        :param metadata: A dictionary or JSON string containing custom key/value pairs to associate with
+            the detector (limited to 1KB). This metadata can be used to store additional
+            information like location, purpose, or related system IDs. You can retrieve this
+            metadata later by calling `get_detector()`.
+
+        :return: The created Detector object.
+        """
+
+        detector_creation_input = self._prep_create_detector(
+            name=name,
+            query=query,
+            group_name=group_name,
+            confidence_threshold=confidence_threshold,
+            patience_time=patience_time,
+            pipeline_config=pipeline_config,
+            metadata=metadata,
+        )
+        detector_creation_input.mode = ModeEnum.BOUNDING_BOX
+
+        if max_num_bboxes is None:
+            mode_config = BoundingBoxModeConfiguration(class_name=class_name)
+        else:
+            mode_config = BoundingBoxModeConfiguration(max_num_bboxes=max_num_bboxes, class_name=class_name)
+
+        detector_creation_input.mode_configuration = mode_config
+        obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT)
+        return Detector.parse_obj(obj.to_dict())
+
     def _download_mlbinary_url(self, detector: Union[str, Detector]) -> EdgeModelInfo:
         """
         Gets a temporary presigned URL to download the model binaries for the given detector, along
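
An end-to-end sketch of the new bounding-box API added in this hunk, following the docstring's own example. The detector name and query are illustrative, the import path is assumed, and the ROI fields read at the end (label, score, geometry.left/top/right/bottom) are assumptions about the SDK's ROI model rather than something defined in this diff:

from groundlight import ExperimentalApi  # assumed import path

gl = ExperimentalApi()

# Omitting max_num_bboxes defers to the backend-defined default, mirroring the
# counting-detector change above.
detector = gl.create_bounding_box_detector(
    name="package_finder",  # illustrative name
    query="Draw a bounding box around each package on the porch",
    class_name="package",
    confidence_threshold=0.75,
)

image_query = gl.ask_ml(detector, "path/to/image.jpg")

# Assumed ROI shape: a label, a confidence score, and normalized box coordinates.
for roi in image_query.result.rois or []:
    box = roi.geometry
    print(f"{roi.label} ({roi.score:.2f}): left={box.left:.3f}, top={box.top:.3f}, "
          f"right={box.right:.3f}, bottom={box.bottom:.3f}")
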
|