 ACCEL_TYPES = ["ethos-u55-256", "ethos-u55-128", "ethos-u55-64", "ethos-u55-32", "ethos-u65-256"]
 
 
+def relu_n1_to_1(x):
+    """
+    This pattern is fused into a single RELU_N1_TO_1 operator by the TFLite converter.
+    """
+    return tf.math.maximum(-1.0, tf.math.minimum(x, 1.0))
+
+
+ACTIVATIONS = [None, tf.nn.relu, tf.nn.relu6, relu_n1_to_1]
+
+
 def is_u55_accel_type(accel_type):
     return "u55" in accel_type
 
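Note on the relu_n1_to_1 helper added above: the sketch below illustrates the conversion flow its docstring refers to. It is a hypothetical, stand-alone example and not part of the patch; whether the max/min pattern really ends up as a single RELU_N1_TO_1 operator is decided by the TFLite converter.

    import tensorflow as tf

    @tf.function
    def clamp_n1_to_1(x):
        # Same pattern as relu_n1_to_1 in the patch.
        return tf.math.maximum(-1.0, tf.math.minimum(x, 1.0))

    # Convert the function with the TFLite converter; the fused operator (if any)
    # can be inspected in the resulting flatbuffer.
    concrete = clamp_n1_to_1.get_concrete_function(tf.TensorSpec([1, 8], tf.float32))
    converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete])
    tflite_model = converter.convert()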
@@ -46,7 +56,7 @@ def is_u55_accel_type(accel_type):
 @pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
 @pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
 @pytest.mark.parametrize("padding", ["SAME", "VALID"])
-@pytest.mark.parametrize("activation", ["NONE", "RELU"])
+@pytest.mark.parametrize("activation", ACTIVATIONS)
 def test_ethosu_conv2d_single(
     ifm_shape,
     kernel_shape,
@@ -72,8 +82,8 @@ def conv2d(x):
             padding=padding,
             dilations=dilation,
         )
-        if activation == "RELU":
-            op = tf.nn.relu(op)
+        if activation:
+            op = activation(op)
         return op
 
     infra.compare_tvm_with_tflite(conv2d, [ifm_shape], accel_type)
@@ -114,7 +124,7 @@ def conv2d(x):
 @pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
 @pytest.mark.parametrize("padding", ["SAME", "VALID"])
 @pytest.mark.parametrize("accel_type", ACCEL_TYPES + ["ethos-u65-512"])
-@pytest.mark.parametrize("activation", ["NONE", "RELU"])
+@pytest.mark.parametrize("activation", ACTIVATIONS)
 def test_ethosu_conv2d_double(
     ifm_shape,
     kernel_shape,
@@ -150,22 +160,28 @@ def conv2d_double(x):
             padding=padding,
             dilations=dilation,
         )
-        if activation == "RELU":
-            op2 = tf.nn.relu(op2)
+        if activation:
+            op2 = activation(op2)
         return op2
 
     infra.compare_tvm_with_tflite(conv2d_double, [ifm_shape], accel_type)
 
 
 @pytest.mark.parametrize("weight_min, weight_max", [(0.0, 1e-11), (-1e10, 1e10)])
-def test_out_of_range_scaling(weight_min, weight_max):
+# relu6 and relu_n1_to_1 are excluded from the activations here because the TFLite results differ:
+# the TFLite model ends up with a rather large quantization scale, so in some cases TFLite produces -128 where the Ethos-U produces 127.
+@pytest.mark.parametrize("activation", [None, tf.nn.relu])
+def test_out_of_range_scaling(
+    weight_min,
+    weight_max,
+    activation,
+):
     np.random.seed(0)
     ifm_shape = (1, 6, 6, 2)
     strides = (1, 1)
     kernel_shape = (1, 1)
     dilation = (1, 1)
     padding = "SAME"
-    activation = "RELU"
     accel_type = "ethos-u55-128"
 
     @tf.function
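Regarding the relu6 / relu_n1_to_1 exclusion commented in the hunk above, here is a rough sketch of the weight-scale arithmetic, assuming symmetric int8 weight quantization with scale = max|w| / 127 (the converter's exact scheme may differ):

    # Hypothetical illustration: a weight range of (-1e10, 1e10) yields a huge scale.
    weight_abs_max = 1e10
    int8_max = 127
    weight_scale = weight_abs_max / int8_max  # roughly 7.9e7

With a scale this large, small rounding differences between the TFLite reference and the Ethos-U implementation can land on opposite ends of the int8 range, which presumably underlies the -128 vs 127 mismatch the comment describes.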
@@ -186,8 +202,8 @@ def conv_invalid_scale(x):
             padding=padding,
             dilations=dilation,
         )
-        if activation == "RELU":
-            op = tf.nn.relu(op)
+        if activation:
+            op = activation(op)
         return op
 
     infra.compare_tvm_with_tflite(conv_invalid_scale, [ifm_shape], accel_type)
@@ -196,19 +212,20 @@ def conv_invalid_scale(x):
 @pytest.mark.parametrize("accel_type", ACCEL_TYPES)
 @pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
 @pytest.mark.parametrize(
-    "kernel_shape, activation_function",
-    [((3, 3), "RELU"), ((1, 2), "NONE")],
+    "kernel_shape",
+    [(3, 3), (1, 2)],
 )
 @pytest.mark.parametrize("padding", ["SAME", "VALID"])
 @pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 2)), ((3, 2), (1, 1))])
+@pytest.mark.parametrize("activation", ACTIVATIONS)
 def test_tflite_depthwise_conv2d(
     accel_type,
     ifm_shape,
     kernel_shape,
     padding,
     strides,
     dilation,
-    activation_function,
+    activation,
 ):
     np.random.seed(0)
 
@@ -221,8 +238,8 @@ def depthwise_conv2d(x):
         op = tf.nn.depthwise_conv2d(
             x, weight, strides=tf_strides, padding=padding, dilations=dilation
         )
-        if activation_function == "RELU":
-            op = tf.nn.relu(op)
+        if activation:
+            op = activation(op)
         return op
 
     infra.compare_tvm_with_tflite(depthwise_conv2d, [ifm_shape], accel_type)
@@ -265,17 +282,18 @@ def depthwise_conv2d(x):
 @pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
 @pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
 @pytest.mark.parametrize(
-    "pool_shape, strides, activation_function, padding",
-    [([1, 2], [1, 2], "NONE", "SAME"), ([2, 3], [2, 3], "RELU", "VALID")],
+    "pool_shape, strides, padding",
+    [([1, 2], [1, 2], "SAME"), ([2, 3], [2, 3], "VALID")],
 )
+@pytest.mark.parametrize("activation", ACTIVATIONS)
 def test_ethosu_pooling(
     accel_type,
     ifm_shape,
     pooling_type,
     strides,
     pool_shape,
-    activation_function,
     padding,
+    activation,
 ):
     np.random.seed(0)
 
@@ -285,8 +303,8 @@ def pooling(x):
             op = tf.nn.max_pool(x, pool_shape, strides, padding)
         elif pooling_type == "AVG":
             op = tf.nn.avg_pool(x, pool_shape, strides, padding)
-        if activation_function == "RELU":
-            op = tf.nn.relu(op)
+        if activation:
+            op = activation(op)
         return op
 
     infra.compare_tvm_with_tflite(pooling, [ifm_shape], accel_type)
@@ -303,13 +321,13 @@ def pooling(x):
         ([1, 4, 4], [4, 1]),
     ],
 )
-@pytest.mark.parametrize("activation_function", ["NONE", "RELU"])
+@pytest.mark.parametrize("activation", ACTIVATIONS)
 def test_ethosu_binary_elementwise(
     accel_type,
     operator_type,
     ifm_shape,
     ifm2_shape,
-    activation_function,
+    activation,
 ):
     np.random.seed(0)
 
@@ -325,8 +343,8 @@ def binary_elementwise(lhs, rhs):
             op = tf.math.minimum(lhs, rhs)
         elif operator_type == "MAX":
             op = tf.math.maximum(lhs, rhs)
-        if activation_function == "RELU":
-            op = tf.nn.relu(op)
+        if activation:
+            op = activation(op)
         return op
 
     infra.compare_tvm_with_tflite(
@@ -1113,13 +1131,13 @@ def leaky_relu_func(x):
 @pytest.mark.parametrize("ifm_shape", [(1, 14), (1, 151)])
 @pytest.mark.parametrize("ofm_channels", [32, 64])
 @pytest.mark.parametrize("use_bias", [True, False])
-@pytest.mark.parametrize("activation_function", ["RELU", "NONE"])
+@pytest.mark.parametrize("activation", ACTIVATIONS)
 def test_tflite_fully_connected(
     accel_type,
     ifm_shape,
     ofm_channels,
     use_bias,
-    activation_function,
+    activation,
 ):
     np.random.seed(0)
 
@@ -1134,8 +1152,8 @@ def fully_connected(x):
         x = tf.matmul(x, w)
         if use_bias:
             x = tf.nn.bias_add(x, bias)
-        if activation_function:
-            x = tf.nn.relu(x)
+        if activation:
+            x = activation(x)
         return x
 
     infra.compare_tvm_with_tflite(