 from .. import generic, tag
 from .. import nn
 from ..util import get_const_tuple, get_shape
-from ..nn.conv2d import conv2d, conv2d_NCHWc
-from ..nn.conv2d import conv2d_alter_layout, conv2d_infer_layout, conv2d_rewrite_op
-from ..nn.conv2d import _get_workload as _get_conv2d_workload
+from ..nn.conv2d import conv2d, conv2d_NCHWc, \
+    conv2d_alter_layout, conv2d_infer_layout, _get_workload as _get_conv2d_workload
 from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
 from ..nn.depthwise_conv2d import depthwise_conv2d_NCHWc, depthwise_conv2d_nchw
 from ..nn.pad import pad
 
 logger = logging.getLogger('topi')
 
-def _is_int8_hw_support(data_dtype, kernel_dtype, target, ignore_dtype=False):
+def _is_int8_hw_support(data_dtype, kernel_dtype, target):
     """
     Checks to ensure that we can use Intel DLBoost instructions
     1) The datatypes are correct.
@@ -59,8 +58,6 @@ def _is_int8_hw_support(data_dtype, kernel_dtype, target, ignore_dtype=False):
         if opt == '-mcpu=skylake-avx512':
             is_target_support = True
 
-    if ignore_dtype:
-        return is_llvm_support and is_target_support
     return is_dtype_support and is_llvm_support and is_target_support
 
 def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False,
@@ -412,54 +409,6 @@ def _topi_nn_conv2d_NCHWc(*args, **kwargs):
     s = _schedule_conv2d_NCHWc(cfg, [C])
     return s, [new_data, new_kernel, C]
 
-@conv2d_rewrite_op.register("cpu")
-def _conv2d_rewrite_op(attrs, inputs, arg_types, F):
-    if F.__name__ != 'tvm.relay.op':
-        return None
-    data_type, kernel_type = arg_types[0], arg_types[1]
-    target = tvm.target.current_target()
-    data_layout = attrs['data_layout']
-    kernel_layout = attrs['kernel_layout']
-    # Uncomment when this bug is resolved
-    # https://discuss.tvm.ai/t/segfault-in-llvm/3567
-    # if not ((data_layout == 'NCHW' and kernel_layout == 'OIHW')
-    #         or (data_layout == 'NHWC' and kernel_layout == 'HWIO')):
-    #     return None
-    if not (data_layout == 'NCHW' and kernel_layout == 'OIHW'):
-        return None
-
-    if not (data_type.dtype == 'int8' and kernel_type.dtype == 'int8'):
-        return None
-
-    if not _is_int8_hw_support(data_type.dtype, kernel_type.dtype,
-                               target,
-                               ignore_dtype=True):
-        return None
-
-    # Convert i8 x i8 to u8 x i8
-    # Intel has fast instructions for u8 x i8 conv. For i8 x i8 conv, we can
-    # convert the i8 tensor to u8 by adding 128 and use u8 x i8 conv. Since 128
-    # has been added, the output now has to be adjusted.
-    out_channel = attrs["channels"]
-    data_expr, kernel_expr = inputs
-    data_expr = F.cast(data_expr, "int32")
-    data_expr = F.add(data_expr, F.const(128, "int32"))
-    data_expr = F.clip(data_expr, a_min=0, a_max=255)
-    data_expr = F.cast(data_expr, "uint8")
-    conv = F.nn.conv2d(data_expr, kernel_expr, **attrs)
-    bias_adjust = F.cast(kernel_expr, "int32")
-    if kernel_layout == 'OIHW' and data_layout == 'NCHW':
-        bias_adjust = F.sum(bias_adjust, axis=(1, 2, 3))
-        bias_adjust = F.reshape(bias_adjust,
-                                newshape=(1, out_channel, 1, 1))
-    elif kernel_layout == 'HWIO' and data_layout == 'NHWC':
-        bias_adjust = F.sum(bias_adjust, axis=(0, 1, 2))
-        bias_adjust = F.reshape(bias_adjust,
-                                newshape=(1, 1, 1, out_channel))
-    bias_adjust = F.cast(bias_adjust, 'int32')
-    bias_adjust = F.multiply(bias_adjust, F.const(128, 'int32'))
-    return F.subtract(conv, bias_adjust)
-
 
 @conv2d_alter_layout.register("cpu")
 def _alter_conv2d_layout(attrs, inputs, tinfo, F):
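
For context, the i8 x i8 -> u8 x i8 rewrite deleted above rests on a simple identity: shifting every data element by +128 adds 128 * sum(w) (summed over the input-channel and spatial axes of each filter) to every output element, so that term must be subtracted back out after the u8 x i8 convolution. Below is a minimal NumPy sketch, not part of this patch, that checks the identity under the sketch's own assumptions: NCHW/OIHW layouts, stride 1, no padding.

import numpy as np

def conv2d_nchw(data, kernel):
    # Naive direct conv2d: stride 1, no padding, int32 accumulation.
    n, c, h, w = data.shape
    o, _, kh, kw = kernel.shape
    out = np.zeros((n, o, h - kh + 1, w - kw + 1), dtype=np.int32)
    for i in range(out.shape[2]):
        for j in range(out.shape[3]):
            patch = data[:, :, i:i + kh, j:j + kw].astype(np.int32)
            out[:, :, i, j] = np.tensordot(patch, kernel.astype(np.int32),
                                           axes=([1, 2, 3], [1, 2, 3]))
    return out

rng = np.random.RandomState(0)
x = rng.randint(-128, 128, size=(1, 3, 8, 8)).astype(np.int8)   # i8 data, NCHW
w = rng.randint(-128, 128, size=(4, 3, 3, 3)).astype(np.int8)   # i8 kernel, OIHW

# Shift the data into the u8 range; the clip is a no-op since x + 128 is in [0, 255].
u = np.clip(x.astype(np.int32) + 128, 0, 255).astype(np.uint8)

# Per-output-channel correction: 128 * sum of the kernel over (C, KH, KW).
adjust = 128 * w.astype(np.int32).sum(axis=(1, 2, 3)).reshape(1, -1, 1, 1)

assert np.array_equal(conv2d_nchw(x, w), conv2d_nchw(u, w) - adjust)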