@@ -641,7 +641,7 @@ def test_broadcast_layout():
     graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512")


-def test_conv2d_int():
+def test_conv2d_int8():
     data = relay.var("data", shape=(1, 28, 28, 128), dtype='uint8')
     kernel = relay.var("w", shape=(3, 3, 128, 256), dtype='int8')
     conv = relay.nn.conv2d(
@@ -655,7 +655,12 @@ def test_conv2d_int():

     with relay.build_config(opt_level=0):
         params = {"w": np.zeros((3, 3, 128, 256)).astype("int8")}
-        graph, lib, params = relay.build(func, 'llvm', params=params)
+        # -mcpu should be specified to avoid the llvm jitting error here:
+        # https://discuss.tvm.ai/t/segfault-in-llvm/3567
+        # To use VNNI, we need to specify the micro-architecture that supports
+        # it, e.g. cascadelake.
+        graph, lib, params = relay.build(func, 'llvm -mcpu=core-avx2',
+                                         params=params)
     mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
     mod.set_input("data", np.zeros((1, 28, 28, 128)).astype("uint8"))
     mod.set_input(**params)
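
For context, a minimal sketch of how the -mcpu choice plays out, assuming the
same relay and graph_runtime APIs used in this test; the helper name
build_with_mcpu and the trivial relu workload are illustrative, not part of
the patch:

import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime

def build_with_mcpu(mcpu):
    # 'core-avx2' is a widely supported baseline that sidesteps the LLVM JIT
    # segfault; 'cascadelake' additionally enables the VNNI int8 instructions.
    x = relay.var("x", shape=(1, 8), dtype="int8")
    func = relay.Function([x], relay.nn.relu(x))
    with relay.build_config(opt_level=0):
        return relay.build(func, 'llvm -mcpu=%s' % mcpu)

graph, lib, params = build_with_mcpu('core-avx2')
mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
mod.set_input("x", np.zeros((1, 8)).astype("int8"))
mod.run()

Note that a module JIT-compiled with -mcpu=cascadelake will only run on hosts
whose CPU actually supports those instructions, which is presumably why the
test itself settles for core-avx2.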