@@ -599,9 +599,9 @@ def backend_cmake_args(images, components, be, install_dir, library_paths):
             "Warning: Detected docker build is used for Windows, backend utility 'device memory tracker' will be disabled due to missing library in CUDA Windows docker image."
         )
         cargs.append(cmake_backend_enable(be, "TRITON_ENABLE_MEMORY_TRACKER", False))
-    elif target_platform() == "jetpack":
+    elif target_platform() == "igpu":
         print(
-            "Warning: Detected Jetpack build, backend utility 'device memory tracker' will be disabled as Jetpack doesn't contain required version of the library."
+            "Warning: Detected iGPU build, backend utility 'device memory tracker' will be disabled as iGPU doesn't contain required version of the library."
         )
         cargs.append(cmake_backend_enable(be, "TRITON_ENABLE_MEMORY_TRACKER", False))
     elif FLAGS.enable_gpu:
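Note: `target_platform()` drives the branch above. A minimal sketch of its plausible behavior, assuming it prefers the explicit --target-platform flag and falls back to the host OS (the real helper is defined earlier in build.py and may differ):

import platform

class _Flags:  # stand-in for build.py's parsed command-line arguments
    target_platform = None  # set to "linux", "windows", or "igpu" to override

FLAGS = _Flags()

def target_platform():
    # Prefer the explicit --target-platform value; otherwise fall back to
    # the host OS name, e.g. "linux" or "windows".
    if FLAGS.target_platform is not None:
        return FLAGS.target_platform
    return platform.system().lower()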
@@ -613,44 +613,23 @@ def backend_cmake_args(images, components, be, install_dir, library_paths):
 
 
 def pytorch_cmake_args(images):
-    # If platform is jetpack do not use docker based build
-    if target_platform() == "jetpack":
-        if "pytorch" not in library_paths:
-            raise Exception(
-                "Must specify library path for pytorch using --library-paths=pytorch:<path_to_pytorch>"
-            )
-        pt_lib_path = library_paths["pytorch"] + "/lib"
-        pt_include_paths = ""
-        for suffix in [
-            "include/torch",
-            "include/torch/torch/csrc/api/include",
-            "include/torchvision",
-        ]:
-            pt_include_paths += library_paths["pytorch"] + "/" + suffix + ";"
-        cargs = [
-            cmake_backend_arg(
-                "pytorch", "TRITON_PYTORCH_INCLUDE_PATHS", None, pt_include_paths
-            ),
-            cmake_backend_arg("pytorch", "TRITON_PYTORCH_LIB_PATHS", None, pt_lib_path),
-        ]
+    if "pytorch" in images:
+        image = images["pytorch"]
     else:
-        if "pytorch" in images:
-            image = images["pytorch"]
-        else:
-            image = "nvcr.io/nvidia/pytorch:{}-py3".format(
-                FLAGS.upstream_container_version
-            )
-        cargs = [
-            cmake_backend_arg("pytorch", "TRITON_PYTORCH_DOCKER_IMAGE", None, image),
-        ]
+        image = "nvcr.io/nvidia/pytorch:{}-py3".format(
+            FLAGS.upstream_container_version
+        )
+    cargs = [
+        cmake_backend_arg("pytorch", "TRITON_PYTORCH_DOCKER_IMAGE", None, image),
+    ]
 
-    if FLAGS.enable_gpu:
-        cargs.append(
-            cmake_backend_enable("pytorch", "TRITON_PYTORCH_ENABLE_TORCHTRT", True)
-        )
+    if FLAGS.enable_gpu:
         cargs.append(
-            cmake_backend_enable("pytorch", "TRITON_ENABLE_NVTX", FLAGS.enable_nvtx)
+            cmake_backend_enable("pytorch", "TRITON_PYTORCH_ENABLE_TORCHTRT", True)
         )
+    cargs.append(
+        cmake_backend_enable("pytorch", "TRITON_ENABLE_NVTX", FLAGS.enable_nvtx)
+    )
     return cargs
 
 
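For context, `cmake_backend_arg` and `cmake_backend_enable` are build.py helpers that render CMake cache entries. A hedged sketch of the convention the calls above assume (the real implementations may validate the backend name or quote values differently):

def cmake_backend_enable(backend, name, flag):
    # Render a boolean CMake cache entry, e.g. "-DTRITON_ENABLE_NVTX:BOOL=ON".
    # `backend` is unused in this sketch; the real helper may use it for
    # validation or prefixing.
    return "-D{}:BOOL={}".format(name, "ON" if flag else "OFF")

def cmake_backend_arg(backend, name, arg_type, value):
    # Render a (possibly typed) CMake cache entry; a None type, as passed
    # above, yields an untyped "-DNAME=value".
    type_suffix = ":{}".format(arg_type) if arg_type is not None else ""
    return "-D{}{}={}".format(name, type_suffix, value)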
@@ -672,69 +651,57 @@ def onnxruntime_cmake_args(images, library_paths):
             )
         )
 
-    # If platform is jetpack do not use docker based build
-    if target_platform() == "jetpack":
-        if "onnxruntime" not in library_paths:
-            raise Exception(
-                "Must specify library path for onnxruntime using --library-paths=onnxruntime:<path_to_onnxruntime>"
+
+    if target_platform() == "windows":
+        if "base" in images:
+            cargs.append(
+                cmake_backend_arg(
+                    "onnxruntime", "TRITON_BUILD_CONTAINER", None, images["base"]
+                )
             )
-        ort_lib_path = library_paths["onnxruntime"] + "/lib"
-        ort_include_path = library_paths["onnxruntime"] + "/include"
-        cargs += [
-            cmake_backend_arg(
-                "onnxruntime",
-                "TRITON_ONNXRUNTIME_INCLUDE_PATHS",
-                None,
-                ort_include_path,
-            ),
-            cmake_backend_arg(
-                "onnxruntime", "TRITON_ONNXRUNTIME_LIB_PATHS", None, ort_lib_path
-            ),
-            cmake_backend_enable(
-                "onnxruntime", "TRITON_ENABLE_ONNXRUNTIME_OPENVINO", False
-            ),
-        ]
     else:
-        if target_platform() == "windows":
-            if "base" in images:
-                cargs.append(
-                    cmake_backend_arg(
-                        "onnxruntime", "TRITON_BUILD_CONTAINER", None, images["base"]
-                    )
+        if "base" in images:
+            cargs.append(
+                cmake_backend_arg(
+                    "onnxruntime", "TRITON_BUILD_CONTAINER", None, images["base"]
                 )
+            )
         else:
-            if "base" in images:
-                cargs.append(
-                    cmake_backend_arg(
-                        "onnxruntime", "TRITON_BUILD_CONTAINER", None, images["base"]
-                    )
-                )
-            else:
-                cargs.append(
-                    cmake_backend_arg(
-                        "onnxruntime",
-                        "TRITON_BUILD_CONTAINER_VERSION",
-                        None,
-                        TRITON_VERSION_MAP[FLAGS.version][1],
-                    )
+            cargs.append(
+                cmake_backend_arg(
+                    "onnxruntime",
+                    "TRITON_BUILD_CONTAINER_VERSION",
+                    None,
+                    TRITON_VERSION_MAP[FLAGS.version][1],
                 )
+            )
 
-        if (target_machine() != "aarch64") and (
-            TRITON_VERSION_MAP[FLAGS.version][3] is not None
-        ):
-            cargs.append(
-                cmake_backend_enable(
-                    "onnxruntime", "TRITON_ENABLE_ONNXRUNTIME_OPENVINO", True
-                )
+    if (target_machine() != "aarch64") and (
+        TRITON_VERSION_MAP[FLAGS.version][3] is not None
+    ):
+        cargs.append(
+            cmake_backend_enable(
+                "onnxruntime", "TRITON_ENABLE_ONNXRUNTIME_OPENVINO", True
             )
-            cargs.append(
-                cmake_backend_arg(
-                    "onnxruntime",
-                    "TRITON_BUILD_ONNXRUNTIME_OPENVINO_VERSION",
-                    None,
-                    TRITON_VERSION_MAP[FLAGS.version][3],
-                )
+        )
+        cargs.append(
+            cmake_backend_arg(
+                "onnxruntime",
+                "TRITON_BUILD_ONNXRUNTIME_OPENVINO_VERSION",
+                None,
+                TRITON_VERSION_MAP[FLAGS.version][3],
             )
+        )
+
+    if target_platform() == "igpu":
+        cargs.append(
+            cmake_backend_arg(
+                "onnxruntime",
+                "TRITON_BUILD_TARGET_PLATFORM",
+                None,
+                target_platform(),
+            )
+        )
 
     return cargs
 
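The indexing above reads `TRITON_VERSION_MAP[FLAGS.version][1]` as the upstream build-container version and `[3]` as the OpenVINO version, with None disabling OpenVINO. A hypothetical entry illustrating that assumed tuple layout; the values and the meaning of the other slots are invented for illustration only:

TRITON_VERSION_MAP = {
    "0.0.0dev": (
        "0.0.0",     # [0] assumed: core/container version
        "23.04",     # [1] upstream build-container version (invented value)
        "1.14.0",    # [2] assumed: ONNX Runtime version
        "2022.1.0",  # [3] OpenVINO version; None disables OpenVINO
    ),
}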
@@ -790,36 +757,20 @@ def tensorrt_cmake_args():
 
 def tensorflow_cmake_args(images, library_paths):
     backend_name = "tensorflow"
-
-    # If platform is jetpack do not use docker images
     extra_args = []
-    if target_platform() == "jetpack":
-        if backend_name in library_paths:
-            extra_args = [
-                cmake_backend_arg(
-                    backend_name,
-                    "TRITON_TENSORFLOW_LIB_PATHS",
-                    None,
-                    library_paths[backend_name],
-                )
-            ]
-        else:
-            raise Exception(
-                f"Must specify library path for {backend_name} using --library-paths={backend_name}:<path_to_{backend_name}>"
-            )
+
+    # If a specific TF image is specified use it, otherwise pull from NGC.
+    if backend_name in images:
+        image = images[backend_name]
     else:
-        # If a specific TF image is specified use it, otherwise pull from NGC.
-        if backend_name in images:
-            image = images[backend_name]
-        else:
-            image = "nvcr.io/nvidia/tensorflow:{}-tf2-py3".format(
-                FLAGS.upstream_container_version
-            )
-        extra_args = [
-            cmake_backend_arg(
-                backend_name, "TRITON_TENSORFLOW_DOCKER_IMAGE", None, image
-            )
-        ]
+        image = "nvcr.io/nvidia/tensorflow:{}-tf2-py3".format(
+            FLAGS.upstream_container_version
+        )
+    extra_args = [
+        cmake_backend_arg(
+            backend_name, "TRITON_TENSORFLOW_DOCKER_IMAGE", None, image
+        )
+    ]
     return extra_args
 
 
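The image-selection if/else above is equivalent to a `dict.get` with a computed default. A self-contained check (the version string is hypothetical):

images = {}
upstream_container_version = "23.04"  # stand-in for FLAGS.upstream_container_version
backend_name = "tensorflow"
image = images.get(
    backend_name,
    "nvcr.io/nvidia/tensorflow:{}-tf2-py3".format(upstream_container_version),
)
assert image == "nvcr.io/nvidia/tensorflow:23.04-tf2-py3"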
@@ -1639,7 +1590,7 @@ def create_docker_build_script(script_name, container_install_dir, container_ci_
         docker_script.cmd(["docker", "rm", "tritonserver_builder"])
     else:
         docker_script._file.write(
-            'if [ "$(docker ps -a | grep tritonserver_builder)" ]; then docker rm tritonserver_builder; fi\n'
+            'if [ "$(docker ps -a | grep tritonserver_builder)" ]; then docker rm -f tritonserver_builder; fi\n'
        )
 
     docker_script.cmd(runargs, check_exitcode=True)
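The added `-f` matters because plain `docker rm` fails on a container that is still running, while `docker rm -f` stops and removes it in one step, so a leftover builder container no longer aborts the script. A small sketch of the guard line the Python above writes into the generated shell script:

guard = (
    'if [ "$(docker ps -a | grep tritonserver_builder)" ]; '
    "then docker rm -f tritonserver_builder; fi\n"
)
print(guard, end="")  # in build.py this goes through docker_script._file.write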
@@ -2018,14 +1969,15 @@ def cibase_build(
     if "onnxruntime" in backends:
         ort_install_dir = os.path.join(build_dir, "onnxruntime", "install")
         cmake_script.mkdir(os.path.join(ci_dir, "qa", "L0_custom_ops"))
-        cmake_script.cp(
-            os.path.join(ort_install_dir, "test", "libcustom_op_library.so"),
-            os.path.join(ci_dir, "qa", "L0_custom_ops"),
-        )
-        cmake_script.cp(
-            os.path.join(ort_install_dir, "test", "custom_op_test.onnx"),
-            os.path.join(ci_dir, "qa", "L0_custom_ops"),
-        )
+        if target_platform() != "igpu":
+            cmake_script.cp(
+                os.path.join(ort_install_dir, "test", "libcustom_op_library.so"),
+                os.path.join(ci_dir, "qa", "L0_custom_ops"),
+            )
+            cmake_script.cp(
+                os.path.join(ort_install_dir, "test", "custom_op_test.onnx"),
+                os.path.join(ci_dir, "qa", "L0_custom_ops"),
+            )
         # [WIP] other way than wildcard?
         backend_tests = os.path.join(build_dir, "onnxruntime", "test", "*")
         cmake_script.cpdir(backend_tests, os.path.join(ci_dir, "qa"))
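At run time the guarded block amounts to skipping the ONNX Runtime custom-op test artifacts on iGPU. A plain-Python sketch of that effect (note: `cmake_script.cp` presumably emits the copy into a generated script rather than performing it directly, which is an assumption about build.py's helpers):

import os
import shutil

def copy_custom_op_artifacts(ort_install_dir, ci_dir, platform_name):
    # Mirrors the guard above: iGPU builds do not ship these test artifacts.
    if platform_name == "igpu":
        return
    dest = os.path.join(ci_dir, "qa", "L0_custom_ops")
    for name in ("libcustom_op_library.so", "custom_op_test.onnx"):
        shutil.copy(os.path.join(ort_install_dir, "test", name), dest)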
@@ -2187,7 +2139,7 @@ def enable_all():
         "--target-platform",
         required=False,
         default=None,
-        help='Target platform for build, can be "linux", "windows" or "jetpack". If not specified, build targets the current platform.',
+        help='Target platform for build, can be "linux", "windows" or "igpu". If not specified, build targets the current platform.',
     )
     parser.add_argument(
         "--target-machine",
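The updated help text can be exercised directly with argparse; a self-contained demo of how the new value parses (only --target-platform is taken from the diff, the rest is standard library):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--target-platform",
    required=False,
    default=None,
    help='Target platform for build, can be "linux", "windows" or "igpu".',
)
args = parser.parse_args(["--target-platform", "igpu"])
assert args.target_platform == "igpu"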