
List:       llvm-commits
Subject:    [PATCH] D140748: [MLIR][Tosa] Make Tosa_IntArrayAttr2 use DenseI64ArrayAttr
From:       Alexander Shaposhnikov via Phabricator via llvm-commits <llvm-commits@lists.llvm.org>
Date:       2022-12-31 15:53:11
Message-ID: d51zEp7DT8u81liTsoCMLg@geopod-ismtpd-1-1

alexander-shaposhnikov updated this revision to Diff 485776.

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D140748/new/

https://reviews.llvm.org/D140748

Files:
  mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
  mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
  mlir/include/mlir/IR/OpBase.td
  mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
  mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
  mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
  mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
  mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
  mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
  mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
  mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
  mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir
  mlir/test/Dialect/Tosa/canonicalize.mlir
  mlir/test/Dialect/Tosa/invalid.mlir
  mlir/test/Dialect/Tosa/ops.mlir
  mlir/test/Dialect/Tosa/quant-test.mlir
  mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
  mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
  mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
  mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
  mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
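
For reference, the substance of the change: attributes built from Tosa_IntArrayAttr2 (stride, dilation, kernel, offset, border) switch from ArrayAttr to DenseI64ArrayAttr, which prints as array<i64: ...> in the generic form. Before vs. after, taken verbatim from the updated tests below (pad, a four-element attribute, is untouched by this revision):

    %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
    %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>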


["D140748.485776.patch" (D140748.485776.patch)]

Index: mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
===================================================================
--- mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
+++ mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
@@ -148,7 +148,7 @@
   auto newTosaConv2DOp = rewriter.create<tosa::Conv2DOp>(
       op->getLoc(), newTosaConv2DOpType, tosaConv2DOp.getInput(),
       tosaConv2DOp.getWeight(), tosaConv2DOp.getBias(), tosaConv2DOp.getPad(),
-      tosaConv2DOp.getStride(), tosaConv2DOp.getDilation());
+      tosaConv2DOp.getStrideAttr(), tosaConv2DOp.getDilationAttr());
 
   // Create rescale to quantized type
   double inputScale = inputQType.getScale();
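
(Why this call site changes: with a DenseI64ArrayAttr attribute, the generated convenience getters return the decoded values rather than the attribute itself, so code forwarding the attribute to a builder uses the *Attr() accessors. A minimal sketch of the two accessor shapes, assuming the usual TableGen-generated names for tosa::Conv2DOp:)

    llvm::ArrayRef<int64_t> strideValues = tosaConv2DOp.getStride();    // decoded element values
    mlir::DenseI64ArrayAttr strideAttr = tosaConv2DOp.getStrideAttr();  // the attribute itself, which the builder expects here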
Index: mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
===================================================================
--- mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -669,10 +669,10 @@
 // CHECK-LABEL: @test_pool_static
 func.func @test_pool_static(%arg0: tensor<3x5x6x7xf32>) {
   // CHECK: -> tensor<3x2x4x7xf32>
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, \
1]} : (tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32> +  %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : \
(tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32>  
   // CHECK: -> tensor<3x2x4x7xf32>
-  %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, \
1]} : (tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32> +  %1 = "tosa.max_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : \
(tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32>  return
 }
 
@@ -681,7 +681,7 @@
 // CHECK-LABEL: @conv2d_static
 func.func @conv2d_static(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () {
   // CHECK: -> tensor<2x6x4x5xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -690,7 +690,7 @@
 // CHECK-LABEL: @conv2d_dynamic_input
 func.func @conv2d_dynamic_input(%input: tensor<?x?x?x?xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () {
   // CHECK: -> tensor<?x?x?x5xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<?x?x?x?xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<?x?x?x?xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -699,10 +699,10 @@
 // CHECK-LABEL: @test_pool_dynamic_input
 func.func @test_pool_dynamic_input(%arg0: tensor<?x?x?x?xf32>) {
   // CHECK: -> tensor<?x?x?x?xf32>
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, \
1]} : (tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> +  %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : \
(tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>  
   // CHECK: -> tensor<?x?x?x?xf32>
-  %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, \
1]} : (tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> +  %1 = "tosa.max_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : \
(tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>  return
 }
 
@@ -711,10 +711,10 @@
 // CHECK-LABEL: @test_pool_padded
 func.func @test_pool_padded(%arg0: tensor<3x5x6x7xf32>) {
   // CHECK: -> tensor<3x5x11x7xf32>
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [1, 2, 3, 4], stride = [1, \
1]} : (tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32> +  %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [1, 2, 3, 4], stride = array<i64: 1, 1>} : \
(tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32>  
   // CHECK: -> tensor<3x5x11x7xf32>
-  %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [1, 2, 3, 4], stride = [1, \
1]} : (tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32> +  %1 = "tosa.max_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [1, 2, 3, 4], stride = array<i64: 1, 1>} : \
(tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32>  return
 }
 
@@ -723,7 +723,7 @@
 // CHECK-LABEL: @conv2d_dynamic_weight
 func.func @conv2d_dynamic_weight(%input: tensor<2x8x9x3xf32>, %weights: tensor<?x?x?x?xf32>, %bias: tensor<5xf32>) -> () {
   // CHECK: -> tensor<2x?x?x5xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<?x?x?x?xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<?x?x?x?xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -732,7 +732,7 @@
 // CHECK-LABEL: @conv2d_dynamic_bias
 func.func @conv2d_dynamic_bias(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<?xf32>) -> () {
   // CHECK: -> tensor<2x6x4x5xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<?xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<?xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -741,10 +741,10 @@
 // CHECK-LABEL: @test_pool_stride
 func.func @test_pool_stride(%arg0: tensor<3x11x12x7xf32>) {
   // CHECK: -> tensor<3x4x4x7xf32>
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [2, \
3]} : (tensor<3x11x12x7xf32>) -> tensor<?x?x?x?xf32> +  %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [0, 0, 0, 0], stride = array<i64: 2, 3>} : \
(tensor<3x11x12x7xf32>) -> tensor<?x?x?x?xf32>  
   // CHECK: -> tensor<3x4x4x7xf32>
-  %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [2, \
3]} : (tensor<3x11x12x7xf32>) -> tensor<?x?x?x?xf32> +  %1 = "tosa.max_pool2d"(%arg0) \
{kernel = array<i64: 4, 3>, pad = [0, 0, 0, 0], stride = array<i64: 2, 3>} : \
(tensor<3x11x12x7xf32>) -> tensor<?x?x?x?xf32>  return
 }
 
@@ -753,7 +753,7 @@
 // CHECK-LABEL: @conv2d_padded
 func.func @conv2d_padded(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () {
   // CHECK: -> tensor<2x9x11x5xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [1, 2, 3, 4], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [1, 2, 3, 4], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -762,7 +762,7 @@
 // CHECK-LABEL: @conv2d_dilated
 func.func @conv2d_dilated(%input: tensor<2x12x14x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () {
   // CHECK: -> tensor<2x6x4x5xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [3, 2]} : (tensor<2x12x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 3, 2>} : (tensor<2x12x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -771,7 +771,7 @@
 // CHECK-LABEL: @conv2d_strided
 func.func @conv2d_strided(%input: tensor<1x13x14x1xf32>, %weights: tensor<1x1x1x1xf32>, %bias: tensor<1xf32>) -> () {
   // CHECK: -> tensor<1x5x7x1xf32>
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [3, 2], dilation = [1, 1]} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>)  -> (tensor<?x?x?x?xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 3, 2>, dilation = array<i64: 1, 1>} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>)  -> (tensor<?x?x?x?xf32>)
   return
 }
 
@@ -843,7 +843,7 @@
 // CHECK-LABEL: @depthwise_conv2d_static
 func.func @depthwise_conv2d_static(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) {
   // CHECK: -> tensor<2x6x4x15xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32>
   return
 }
 
@@ -852,7 +852,7 @@
 // CHECK-LABEL: @depthwise_conv2d_dynamic_input
 func.func @depthwise_conv2d_dynamic_input(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) {
   // CHECK: -> tensor<?x?x?x15xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<?x?x?x?xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<?x?x?x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<?x?x?x?xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<?x?x?x15xf32>
   return
 }
 
@@ -861,7 +861,7 @@
 // CHECK-LABEL: @depthwise_conv2d_dynamic_weight
 func.func @depthwise_conv2d_dynamic_weight(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<?x?x?x?xf32>, %arg2: tensor<15xf32>) {
   // CHECK: -> tensor<2x?x?x15xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<?x?x?x?xf32>, tensor<15xf32>) -> tensor<2x?x?x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<?x?x?x?xf32>, tensor<15xf32>) -> tensor<2x?x?x15xf32>
   return
 }
 
@@ -870,7 +870,7 @@
 // CHECK-LABEL: @depthwise_conv2d_dynamic_bias
 func.func @depthwise_conv2d_dynamic_bias(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<?xf32>) {
   // CHECK: -> tensor<2x6x4x15xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<?xf32>) -> tensor<2x6x4x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<?xf32>) -> tensor<2x6x4x15xf32>
   return
 }
 
@@ -879,7 +879,7 @@
 // CHECK-LABEL: @depthwise_conv2d_padded
 func.func @depthwise_conv2d_padded(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) {
   // CHECK: -> tensor<2x9x11x15xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [1, 2, 3, 4], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x9x11x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [1, 2, 3, 4], stride = array<i64: 1, 1>} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x9x11x15xf32>
   return
 }
 
@@ -888,7 +888,7 @@
 // CHECK-LABEL: @depthwise_conv2d_dilated
 func.func @depthwise_conv2d_dilated(%arg0: tensor<2x12x14x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) {
   // CHECK: -> tensor<2x6x4x15xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [3, 2], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x12x14x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 3, 2>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<2x12x14x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32>
   return
 }
 
@@ -897,7 +897,7 @@
 // CHECK-LABEL: @depthwise_conv2d_strided
 func.func @depthwise_conv2d_strided(%arg0: tensor<1x13x14x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) {
   // CHECK: -> tensor<1x5x7x1xf32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [3, 2]} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x5x7x1xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 3, 2>} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x5x7x1xf32>
   return
 }
 
@@ -906,7 +906,7 @@
 // CHECK-LABEL: @transpose_conv2d_out_shape
 func.func @transpose_conv2d_out_shape(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
   // CHECK: -> tensor<2x8x9x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, 8, 9, -1], stride = [1, 1]} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, 8, 9, -1], stride = array<i64: 1, 1>} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32>
   return
 }
 
@@ -915,7 +915,7 @@
 // CHECK-LABEL: @transpose_conv2d_static
 func.func @transpose_conv2d_static(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
   // CHECK: -> tensor<2x18x19x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
   return
 }
 
@@ -924,7 +924,7 @@
 // CHECK-LABEL: @transpose_conv2d_static_strided
 func.func @transpose_conv2d_static_strided(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
   // CHECK: -> tensor<2x33x45x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [2, 3]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 2, 3>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
   return
 }
 
@@ -933,7 +933,7 @@
 // CHECK-LABEL: @transpose_conv2d_dynamic_input
 func.func @transpose_conv2d_dynamic_input(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
   // CHECK: -> tensor<?x?x?x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<?x?x?x?xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<?x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<?x?x?x?xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<?x?x?x5xf32>
   return
 }
 
@@ -942,7 +942,7 @@
 // CHECK-LABEL: @transpose_conv2d_dynamic_weights
 func.func @transpose_conv2d_dynamic_weights(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor<?x?x?x?xf32>, %arg2: tensor<5xf32>) {
   // CHECK: -> tensor<2x?x?x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor<?x?x?x?xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x6x4x3xf32>, tensor<?x?x?x?xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
   return
 }
 
@@ -951,7 +951,7 @@
 // CHECK-LABEL: @transpose_conv2d_dynamic_bias
 func.func @transpose_conv2d_dynamic_bias(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<?xf32>) {
   // CHECK: -> tensor<2x8x9x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor<?xf32>) -> tensor<2x8x9x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor<?xf32>) -> tensor<2x8x9x5xf32>
   return
 }
 
@@ -960,14 +960,14 @@
 // CHECK-LABEL: @transpose_conv2d_padded
 func.func @transpose_conv2d_padded(%arg0: tensor<2x9x11x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
   // CHECK: -> tensor<2x10x13x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [1, 0, 3, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [1, 0, 3, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32>
   return
 }
 
 // CHECK-LABEL: @transpose_conv2d_strided
 func.func @transpose_conv2d_strided(%arg0: tensor<1x5x7x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) {
   // CHECK: -> tensor<1x13x13x1xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [3, 2]} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 3, 2>} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32>
   return
 }
 
@@ -976,7 +976,7 @@
 // CHECK-LABEL: @resize_int_horizontal
 func.func @resize_int_horizontal(%arg0: tensor<1x15x13x1xi8>) {
   // CHECK: -> tensor<1x23x179x1xi8>
-  %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [11, 7, 89, 6], \
offset = [0, 0], border = [0, 0]} : (tensor<1x15x13x1xi8>) -> tensor<?x?x?x?xi8> +  \
%0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [11, 7, 89, 6], offset \
= array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> \
tensor<?x?x?x?xi8>  return
 }
 
@@ -985,7 +985,7 @@
 // CHECK-LABEL: @resize_int_vertical
 func.func @resize_int_vertical(%arg0: tensor<1x49x42x1xi16>) {
   // CHECK: -> tensor<1x112x220x1xi16>
-  %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [37, 16, 219, 41], \
offset = [0, 0], border = [0, 0]} : (tensor<1x49x42x1xi16>) -> tensor<?x?x?x?xi16> +  \
%0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [37, 16, 219, 41], \
offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x49x42x1xi16>) -> \
tensor<?x?x?x?xi16>  return
 }
 
@@ -994,7 +994,7 @@
 // CHECK-LABEL: @resize_int_power_of_two_upscale
 func.func @resize_int_power_of_two_upscale(%arg0: tensor<1x23x19x1xi8>) {
   // CHECK: -> tensor<1x353x289x1xi32>
-  %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 1, 16, 1], offset = [0, \
0], border = [0, 0]} : (tensor<1x23x19x1xi8>) -> tensor<?x?x?x?xi32> +  %0 = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 1, 16, 1], offset = array<i64: \
0, 0>, border = array<i64: 0, 0>} : (tensor<1x23x19x1xi8>) -> tensor<?x?x?x?xi32>  \
return  }
 
@@ -1003,7 +1003,7 @@
 // CHECK-LABEL: @resize_int_power_of_two_upscale_offsetted
 func.func @resize_int_power_of_two_upscale_offsetted(%arg0: tensor<1x41x26x1xi16>) {
   // CHECK: -> tensor<1x328x208x1xi48>
-  %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 2, 16, 2], offset = \
[-7, -7], border = [7, 7]} : (tensor<1x41x26x1xi16>) -> tensor<?x?x?x?xi48> +  %0 = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 2, 16, 2], offset = array<i64: \
-7, -7>, border = array<i64: 7, 7>} : (tensor<1x41x26x1xi16>) -> tensor<?x?x?x?xi48>  \
return  }
 
@@ -1011,7 +1011,7 @@
 // CHECK-LABEL: @resize_fp_horizontal
 func.func @resize_fp_horizontal(%arg0: tensor<1x50x48x1xf32>) {
   // CHECK: -> tensor<1x106x85x1xf32>
-  %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [15, 7, 84, 47], offset = \
[0, 0], border = [0, 0]} : (tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32> +  %0 = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [15, 7, 84, 47], offset = array<i64: \
0, 0>, border = array<i64: 0, 0>} : (tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32>  \
return  }
 
@@ -1019,7 +1019,7 @@
 // CHECK-LABEL: @resize_fp_vertical
 func.func @resize_fp_vertical(%arg0: tensor<1x50x48x1xf32>) {
   // CHECK: -> tensor<1x128x13x1xf32>
-  %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [127, 49, 12, 47], \
offset = [0, 0], border = [0, 0]} : (tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32> +  \
%0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [127, 49, 12, 47], \
offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x50x48x1xf32>) -> \
tensor<?x?x?x?xf32>  return
 }
 
@@ -1028,7 +1028,7 @@
 // CHECK-LABEL: @resize_fp_power_of_two_upscale
 func.func @resize_fp_power_of_two_upscale(%arg0: tensor<1x23x23x1xf32>) {
   // CHECK: -> tensor<1x89x89x1xf32>
-  %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [4, 1, 4, 1], offset = [0, \
0], border = [0, 0]} : (tensor<1x23x23x1xf32>) -> tensor<?x?x?x?xf32> +  %0 = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [4, 1, 4, 1], offset = array<i64: 0, \
0>, border = array<i64: 0, 0>} : (tensor<1x23x23x1xf32>) -> tensor<?x?x?x?xf32>  \
return  }
 
@@ -1037,7 +1037,7 @@
 // CHECK-LABEL: @resize_fp_power_of_two_upscale_offsetted
 func.func @resize_fp_power_of_two_upscale_offsetted(%arg0: tensor<1x50x48x1xf32>) {
   // CHECK: -> tensor<1x1600x1536x1xf32>
-  %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [64, 2, 64, 2], \
offset = [-31, -31], border = [31, 31]} : (tensor<1x50x48x1xf32>) -> \
tensor<?x?x?x?xf32> +  %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = \
[64, 2, 64, 2], offset = array<i64: -31, -31>, border = array<i64: 31, 31>} : \
(tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32>  return
 }
 
Index: mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
===================================================================
--- mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
+++ mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
@@ -4,8 +4,8 @@
 func.func @transpose_conv2d(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x?x?x5xf32> {
   // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
   // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = [1, 1], pad = [2, 2, 5, 5], stride = [1, 1]}
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x18x19x5xf32>
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = array<i64: 1, 1>, pad = [2, 2, 5, 5], stride = array<i64: 1, 1>}
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x18x19x5xf32>
   %1 = tensor.cast %0 : tensor<2x18x19x5xf32> to tensor<2x?x?x5xf32>
   return %1 : tensor<2x?x?x5xf32>
 }
@@ -16,8 +16,8 @@
 func.func @transpose_conv2d_quantized(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor<5x3x6x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x18x19x5xi32>) {
   // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
   // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = [1, 1], pad = [2, 2, 5, 5], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = [1, 1]}
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32>
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = array<i64: 1, 1>, pad = [2, 2, 5, 5], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32>
   return %0 : tensor<2x18x19x5xi32>
 }
 
@@ -27,8 +27,8 @@
 func.func @transpose_conv2d_quantized_padded(%arg0: tensor<2x7x7x18xi8>, %arg1: tensor<12x3x5x18xi8>, %arg2: tensor<12xi32>) -> (tensor<2x7x7x12xi32>) {
   // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
   // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = [1, 1], pad = [1, 1, 2, 2], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = [1, 1]}
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [1, 1, 2, 2], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x7x7x18xi8>, tensor<12x3x5x18xi8>, tensor<12xi32>) -> tensor<2x7x7x12xi32>
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = array<i64: 1, 1>, pad = [1, 1, 2, 2], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [1, 1, 2, 2], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = [-1, -1, -1, -1], stride = array<i64: 1, 1>} : (tensor<2x7x7x18xi8>, tensor<12x3x5x18xi8>, tensor<12xi32>) -> tensor<2x7x7x12xi32>
   return %0 : tensor<2x7x7x12xi32>
 }
 
@@ -53,13 +53,13 @@
 
   // Manipulate the final shape.
   // CHECK-DAG: %[[BIAS:.+]]  = "tosa.const"() {value = dense<0.000000e+00> : tensor<30xf32>}
-  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>}
   // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = [2, 18, 16, 2, 3, 5]}
   // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]])
   // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) {new_shape = [2, 36, 48, 5]}
   // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) {size = [2, 35, 47, 5], start = [0, 0, 0, 0]}
   // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %arg2)
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [2, 3]} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = array<i64: 2, 3>} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32>
   %1 = tensor.cast %0 : tensor<2x35x47x5xf32> to tensor<2x?x?x5xf32>
   return %1 : tensor<2x?x?x5xf32>
 }
@@ -85,12 +85,12 @@
 
   // Manipulate the final shape.
   // CHECK-DAG: %[[BIAS:.+]]  = "tosa.const"() {value = dense<0> : tensor<30xi32>}
-  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = [1, 1]}
+  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
   // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = [2, 18, 16, 2, 3, 5]}
   // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]])
   // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) {new_shape = [2, 36, 48, 5]}
   // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) {size = [2, 35, 47, 5], start = [0, 0, 0, 0]}
   // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %arg2)
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = [-1, -1, -1, -1], stride = [2, 3]} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = [-1, -1, -1, -1], stride = array<i64: 2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32>
   return %0 : tensor<2x35x47x5xi32>
 }
Index: mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
===================================================================
--- mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
+++ mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
@@ -14,7 +14,7 @@
   // CHECK: %[[VAR4:.*]] = "tosa.add"(%[[VAR3]], %arg2)
   // CHECK-SAME: -> tensor<4x10x10x6xf32>
   // CHECK: return %[[VAR4]]
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = \
[1, 1], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, \
tensor<6xf32>) -> tensor<4x10x10x6xf32> +  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, \
%arg2) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : \
(tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>  \
return %0 : tensor<4x10x10x6xf32>  }
 
@@ -32,7 +32,7 @@
   // CHECK: %[[mul:.+]] = "tosa.mul"(%[[sIn]], %[[sWe]]) {shift = 0 : i32}
   // CHECK: %[[reO:.+]] = "tosa.reshape"(%[[mul]]) {new_shape = [4, 10, 10, 6]}
   // CHECK: %[[add:.+]] = "tosa.add"(%[[reO]], %arg2)
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = \
[1, 1], dilation = [1, 1], quantization_info = #tosa.conv_quant<input_zp = 7, \
weight_zp = 11>} : (tensor<4x10x10x2xi8>, tensor<1x1x2x3xi8>, tensor<6xi32>) -> \
tensor<4x10x10x6xi32> +  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, \
0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = \
#tosa.conv_quant<input_zp = 7, weight_zp = 11>} : (tensor<4x10x10x2xi8>, \
tensor<1x1x2x3xi8>, tensor<6xi32>) -> tensor<4x10x10x6xi32>  return %0 : \
tensor<4x10x10x6xi32>  }
 
@@ -47,6 +47,6 @@
   // CHECK: %[[mul:.+]] = "tosa.mul"(%3, %arg1) {shift = 0 : i32}
   // CHECK: %[[reOut:.+]] = "tosa.reshape"(%[[mul]]) {new_shape = [4, 12, 12, 6]}
   // CHECK: %[[add:.+]] = "tosa.add"(%[[reOut]], %arg2)
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 1, 1], stride = \
[1, 1], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, \
tensor<6xf32>) -> tensor<4x12x12x6xf32> +  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, \
%arg2) {pad = [1, 1, 1, 1], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : \
(tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32>  \
return %0 : tensor<4x12x12x6xf32>  }
Index: mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
===================================================================
--- mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
+++ mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
@@ -14,7 +14,7 @@
   // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = [4, 10, 10, 3]}
   // CHECK-SAME: -> tensor<4x10x10x3xf32>
   // CHECK: return %[[VAR3]]
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], \
dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> \
tensor<4x10x10x3xf32> +  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], \
stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, \
tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>  return %0 : \
tensor<4x10x10x3xf32>  }
 
@@ -33,7 +33,7 @@
   // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = [4, 10, 10, 3]}
   // CHECK-SAME: -> tensor<4x10x10x3xi32>
   // CHECK: return %[[VAR3]]
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], \
dilation = [1, 1], quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = \
24>} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> \
tensor<4x10x10x3xi32> +  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], \
stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = \
#tosa.conv_quant<input_zp = 42, weight_zp = 24>} : (tensor<4x10x10x2xi8>, \
tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x10x10x3xi32>  return %0 : \
tensor<4x10x10x3xi32>  }
 
@@ -50,7 +50,7 @@
 // CHECK:           %[[VAL_6:.*]] = "tosa.reshape"(%[[VAL_5]]) {new_shape = [-1, 14, 14, 384]} : (tensor<?x384xi32>) -> tensor<?x14x14x384xi32>
 // CHECK:           return %[[VAL_6]] : tensor<?x14x14x384xi32>
 // CHECK:         }
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -6, weight_zp = 11>, stride = [1, 1]} : (tensor<?x14x14x64xi8>, tensor<384x1x1x64xi8>, tensor<384xi32>) -> tensor<?x14x14x384xi32>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -6, weight_zp = 11>, stride = array<i64: 1, 1>} : (tensor<?x14x14x64xi8>, tensor<384x1x1x64xi8>, tensor<384xi32>) -> tensor<?x14x14x384xi32>
   return %0 : tensor<?x14x14x384xi32>
 }
 
@@ -65,6 +65,6 @@
   // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = "tosa.reshape"(%arg1) {new_shape = [3, 2]}
   // CHECK-DAG: %[[FULLY:.+]] = "tosa.fully_connected"(%[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2) {quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>}
   // CHECK: %[[RESHAPE:.+]] = "tosa.reshape"(%[[FULLY]]) {new_shape = [4, 12, 12, 3]}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 1, 1], stride = [1, 1], dilation = [1, 1], quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x12x12x3xi32>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 1, 1], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x12x12x3xi32>
   return %0 : tensor<4x12x12x3xi32>
-}
\ No newline at end of file
+}
Index: mlir/test/Dialect/Tosa/quant-test.mlir
===================================================================
--- mlir/test/Dialect/Tosa/quant-test.mlir
+++ mlir/test/Dialect/Tosa/quant-test.mlir
@@ -12,7 +12,7 @@
 // CHECK-LABEL: test_build_mult_and_shift
 func.func @test_build_mult_and_shift(%arg0: tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, %arg1 : tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, %arg2 : tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform<i8:f32, 0.078431375324726104>> {
   // CHECK: tosa.conv2d
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 2, 2], dilation = [2, 1], stride = [1, 1], quantization_info = #tosa.conv_quant<input_zp = -1, weight_zp = 0>} : (tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform<i8:f32, 0.078431375324726104>>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 2, 2], dilation = array<i64: 2, 1>, stride = array<i64: 1, 1>, quantization_info = #tosa.conv_quant<input_zp = -1, weight_zp = 0>} : (tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform<i8:f32, 0.078431375324726104>>
   return %0 : tensor<1x32x32x16x!quant.uniform<i8:f32, 0.078431375324726104>>
 }
Index: mlir/test/Dialect/Tosa/ops.mlir
===================================================================
--- mlir/test/Dialect/Tosa/ops.mlir
+++ mlir/test/Dialect/Tosa/ops.mlir
@@ -12,42 +12,42 @@
 // -----
 // CHECK-LABEL: avg_pool2d_f32
 func.func @test_avg_pool2d_f32(%arg0: tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> {
-    %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, \
1]} : (tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> +    %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 2, 2>, pad = [0, 1, 0, 1], stride = array<i64: 1, 1>} : \
(tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32>  return %0 : tensor<1x7x7x9xf32>
 }
 
 // -----
 // CHECK-LABEL: avg_pool2d_i8
 func.func @test_avg_pool2d_i8(%arg0: tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> {
-    %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, \
1]} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> +    %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 2, 2>, pad = [0, 1, 0, 1], stride = array<i64: 1, 1>} : \
(tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8>  return %0 : tensor<1x7x7x9xi8>
 }
 
 // -----
 // CHECK-LABEL: avg_pool2d_i16
 func.func @test_avg_pool2d_i16(%arg0: tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> {
-    %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, \
1]} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> +    %0 = "tosa.avg_pool2d"(%arg0) \
{kernel = array<i64: 2, 2>, pad = [0, 1, 0, 1], stride = array<i64: 1, 1>} : \
(tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16>  return %0 : tensor<1x7x7x9xi16>
 }
 
 // -----
 // CHECK-LABEL: avg_pool2d_q8
 func.func @test_avg_pool2d_q8(%arg0: tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>> {
-    %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>
+    %0 = "tosa.avg_pool2d"(%arg0) {kernel = array<i64: 2, 2>, pad = [0, 1, 0, 1], stride = array<i64: 1, 1>} : (tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>
   return %0 : tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>
 }
 
 // -----
 // CHECK-LABEL: conv2d
 func.func @test_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> {
-    %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
+    %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
   return %0 : tensor<1x4x4x8xf32>
 }
 
 // -----
 // CHECK-LABEL: depthwise_conv2d
 func.func @test_depthwise_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<1x1x4x2xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> {
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
+  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
   return %2 : tensor<1x4x4x8xf32>
 }
 
@@ -68,14 +68,14 @@
 // -----
 // CHECK-LABEL: max_pool2d
 func.func @test_max_pool2d(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
-  %0 = "tosa.max_pool2d"(%arg0) {kernel = [1, 1], pad = [0, 0, 0, 0], stride = [1, \
1]} : (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> +  %0 = \
"tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = \
array<i64: 1, 1>} : (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>  return %0 : \
tensor<1x32x32x8xf32>  }
 
 // -----
 // CHECK-LABEL: transpose_conv2d
 func.func @test_transpose_conv2d(%arg0: tensor<1x32x32x8xf32>, %arg1: \
                tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>) -> \
                tensor<1x32x32x16xf32> {
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], \
out_shape = [1, 32, 32, 16], stride = [1, 1]} : (tensor<1x32x32x8xf32>, \
tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32> +  %0 = \
"tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [1, \
32, 32, 16], stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, \
tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>  return %0 : \
tensor<1x32x32x16xf32>  }
 
@@ -448,7 +448,7 @@
 // -----
 // CHECK-LABEL: resize
 func.func @test_resize(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> {
-  %1 = "tosa.resize"(%arg0) { scale = [4, 2, 4, 2], offset = [-1, -1], border = [1, \
1], mode = "BILINEAR"} : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> +  %1 = \
"tosa.resize"(%arg0) { scale = [4, 2, 4, 2], offset = array<i64: -1, -1>, border = \
array<i64: 1, 1>, mode = "BILINEAR"} : (tensor<1x32x32x8xf32>) -> \
tensor<1x64x64x8xf32>  return %1 : tensor<1x64x64x8xf32>
 }
 
Index: mlir/test/Dialect/Tosa/invalid.mlir
===================================================================
--- mlir/test/Dialect/Tosa/invalid.mlir
+++ mlir/test/Dialect/Tosa/invalid.mlir
@@ -3,7 +3,7 @@
 
 func.func @test_conv2d(%arg0: tensor<1x29x29x4xf32>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{expect both input and weight to be float or not together, got 'f32' and 'i8'}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>}
            : (tensor<1x29x29x4xf32>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
@@ -12,7 +12,7 @@
 
 func.func @test_conv2d(%arg0: tensor<*xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{expect a ranked tensor for input, got <block argument> of type 'tensor<*xi8>' at index: 0}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>}
            : (tensor<*xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
@@ -21,7 +21,7 @@
 
 func.func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<*xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{expect a ranked tensor for weight, got <block argument> of type 'tensor<*xi8>' at index: 1}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>}
            : (tensor<1x29x29x4xi8>, tensor<*xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
@@ -31,7 +31,7 @@
 
 func.func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{'tosa.conv2d' op quantizationattr is required for quantized type, and not allowed for float type}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = array<i64: 1, 1>}
            : (tensor<1x29x29x4xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
Index: mlir/test/Dialect/Tosa/canonicalize.mlir
===================================================================
--- mlir/test/Dialect/Tosa/canonicalize.mlir
+++ mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -106,7 +106,7 @@
   // CHECK: "tosa.conv2d"
   %weight = "tosa.const"() {value = dense<[[[[1.0, 1.0]]], [[[1.0, 1.0]]], [[[1.0, \
1.0]]]]> : tensor<3x1x1x2xf32>} : ()-> tensor<3x1x1x2xf32>  %bias = "tosa.const"() \
                {value = dense<0.0> : tensor<3xf32>} : ()-> tensor<3xf32>
-  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = [0, 0, 0, 0], stride = [2, 2], \
dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> \
tensor<4x10x10x3xf32> +  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = [0, 0, 0, \
0], stride = array<i64: 2, 2>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, \
tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>  return %0 : \
tensor<4x10x10x3xf32>  }
 
@@ -115,21 +115,21 @@
   // CHECK: "tosa.conv2d"
   %weight = "tosa.const"() {value = dense<[[[[1.0], [1.0]], [[1.0], [1.0]]]]> : \
tensor<1x2x2x1xf32>} : ()-> tensor<1x2x2x1xf32>  %bias = "tosa.const"() {value = \
                dense<0.0> : tensor<1xf32>} : ()-> tensor<1xf32>
-  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], \
dilation = [1, 1]} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> \
tensor<4x10x10x1xf32> +  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = [0, 0, 0, \
0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x1xf32>, \
tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x10x10x1xf32>  return %0 : \
tensor<4x10x10x1xf32>  }
 
 // CHECK-LABEL: @depthwise_conv2d_stride_2
 func.func @depthwise_conv2d_stride_2(%arg0: tensor<4x10x10x2xf32>, %arg1: \
tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> {  // CHECK: \
                "tosa.depthwise_conv2d"
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = \
[2, 2], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, \
tensor<6xf32>) -> tensor<4x10x10x6xf32> +  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, \
%arg2) {pad = [0, 0, 0, 0], stride = array<i64: 2, 2>, dilation = array<i64: 1, 1>} : \
(tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>  \
return %0 : tensor<4x10x10x6xf32>  }
 
 // CHECK-LABEL: @depthwise_conv2d_weight_2x2
 func.func @depthwise_conv2d_weight_2x2(%arg0: tensor<4x10x10x2xf32>, %arg1: \
tensor<2x2x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> {  // CHECK: \
                "tosa.depthwise_conv2d"
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = \
[1, 1], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<2x2x2x3xf32>, \
tensor<6xf32>) -> tensor<4x10x10x6xf32> +  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, \
%arg2) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : \
(tensor<4x10x10x2xf32>, tensor<2x2x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>  \
return %0 : tensor<4x10x10x6xf32>  }
 
@@ -137,7 +137,7 @@
 func.func @max_pool2d_is_noop(%arg0: tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32> {
   // CHECK-NOT: "tosa.max_pool2d"
   // CHECK: return %arg0
-  %0 = "tosa.max_pool2d"(%arg0) {kernel = [1, 1], pad = [0, 0, 0, 0], stride = [1, \
1], dilation = [1, 1]} : (tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32> +  %0 = \
"tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = [0, 0, 0, 0], stride = \
array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<10x1x1x3xf32>) -> \
tensor<10x1x1x3xf32>  return %0 : tensor<10x1x1x3xf32>
 }
 
@@ -414,7 +414,7 @@
 // CHECK-LABEL: @fold_resize_nearest
 func.func @fold_resize_nearest(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> {
   // CHECK: return %arg0
-  %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], offset = [0, 0], border = [0, 0]} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8>
+  %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8>
   return %resize : tensor<1x15x13x1xi8>
 }
 
@@ -423,6 +423,6 @@
 // CHECK-LABEL: @fold_resize_bilinear
 func.func @fold_resize_bilinear(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> {
   // CHECK: return %arg0
-  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 2, 1, 1], offset = [0, 0], border = [0, 0]} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8>
+  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 2, 1, 1], offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8>
   return %resize : tensor<1x15x13x1xi8>
 }
Index: mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir
===================================================================
--- mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir
+++ mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir
@@ -14,7 +14,7 @@
   // CHECK-SAME: outs(%[[EMPTY]] : tensor<3x15x13x7xf32>)
   // CHECK-NEXT: ^bb0(%[[IN:.+]]: f32, %[[OUT:.+]]: f32):
   // CHECK:   linalg.yield %[[IN]]
-  %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], \
offset = [0, 0], border = [0, 0]} : (tensor<3x1x1x7xf32>) -> tensor<3x15x13x7xf32> +  \
%resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], \
offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x1x7xf32>) -> \
tensor<3x15x13x7xf32>  
   // CHECK: return %[[GENERIC]]
   return %resize : tensor<3x15x13x7xf32>
@@ -36,7 +36,7 @@
   // CHECK-SAME: outs(%[[EMPTY]] : tensor<3x15x13x7xf32>)
   // CHECK-NEXT: ^bb0(%[[IN:.+]]: f32, %[[OUT:.+]]: f32):
   // CHECK:   linalg.yield %[[IN]]
-  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 2, 1, 1], offset = \
[0, 0], border = [0, 0]} : (tensor<3x1x1x7xf32>) -> tensor<3x15x13x7xf32> +  %resize \
= "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 2, 1, 1], offset = array<i64: \
0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x1x7xf32>) -> tensor<3x15x13x7xf32>  
   // CHECK: return %[[GENERIC]]
   return %resize : tensor<3x15x13x7xf32>
@@ -58,7 +58,7 @@
   // CHECK-SAME: outs(%[[EMPTY]] : tensor<3x15x13x7xi8>)
   // CHECK-NEXT: ^bb0(%[[IN:.+]]: i8, %[[OUT:.+]]: i8):
   // CHECK:   linalg.yield %[[IN]]
-  %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], \
offset = [0, 0], border = [0, 0]} : (tensor<3x1x1x7xi8>) -> tensor<3x15x13x7xi8> +  \
%resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], \
offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x1x7xi8>) -> \
tensor<3x15x13x7xi8>  
   // CHECK: return %[[GENERIC]]
   return %resize : tensor<3x15x13x7xi8>
@@ -81,7 +81,7 @@
   // CHECK-NEXT: ^bb0(%[[IN:.+]]: i8, %[[OUT:.+]]: i32):
   // CHECK:   %[[EXT:.+]] = arith.extsi %[[IN]] : i8 to i32
   // CHECK:   linalg.yield %[[EXT]]
-  %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], \
offset = [0, 0], border = [0, 0]} : (tensor<3x1x1x7xi8>) -> tensor<3x15x13x7xi32> +  \
%resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [2, 2, 1, 1], \
offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x1x7xi8>) -> \
tensor<3x15x13x7xi32>  
   // CHECK: return %[[GENERIC]]
   return %resize : tensor<3x15x13x7xi32>
@@ -108,7 +108,7 @@
   // CHECK-DAG: %[[C1:.+]] = arith.constant 1 : i32
   // CHECK: %[[MUL2:.+]] = arith.muli %[[MUL1]], %[[C1]] : i32
   // CHECK: linalg.yield %[[MUL2]]
-  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 2, 1, 1], offset = \
[0, 0], border = [0, 0]} : (tensor<3x1x1x7xi8>) -> tensor<3x15x13x7xi32> +  %resize = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 2, 1, 1], offset = array<i64: 0, \
0>, border = array<i64: 0, 0>} : (tensor<3x1x1x7xi8>) -> tensor<3x15x13x7xi32>  
   // CHECK: return %[[GENERIC]]
   return %resize : tensor<3x15x13x7xi32>
@@ -180,7 +180,7 @@
   // CHECK: linalg.yield %[[EXTRACT]]
 
   // Round to the nearest index.
-  %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [11, 7, 89, 6], \
offset = [0, 0], border = [0, 0]} : (tensor<1x15x13x1xi8>) -> tensor<1x23x179x1xi8> + \
%0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = [11, 7, 89, 6], offset \
= array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> \
tensor<1x23x179x1xi8>  return
 }
 
@@ -289,7 +289,7 @@
   // CHECK: linalg.yield %[[RESULT]]
 
   // Round to the nearest index.
-  %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 1, 16, 1], offset = [0, \
0], border = [0, 0]} : (tensor<1x19x20x1xi8>) -> tensor<1x304x320x1xi48> +  %0 = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 1, 16, 1], offset = array<i64: \
0, 0>, border = array<i64: 0, 0>} : (tensor<1x19x20x1xi8>) -> tensor<1x304x320x1xi48> \
return  }
 
@@ -363,7 +363,7 @@
   // CHECK: %[[EXTRACT:.+]] = tensor.extract %arg0[%[[IDX0]], %[[IDY]], %[[IDX]], %[[IDX3]]]
   // CHECK: linalg.yield %[[EXTRACT]]
 
-  %output = "tosa.resize"(%input) {mode = "NEAREST_NEIGHBOR", scale = [64, 2, 64, 2], offset = [-31, -31], border = [31, 31]} : (tensor<1x50x48x1xf32>) -> tensor<1x1600x1536x1xf32>
+  %output = "tosa.resize"(%input) {mode = "NEAREST_NEIGHBOR", scale = [64, 2, 64, 2], offset = array<i64: -31, -31>, border = array<i64: 31, 31>} : (tensor<1x50x48x1xf32>) -> tensor<1x1600x1536x1xf32>
   return
 }
 
@@ -468,7 +468,7 @@
   // CHECK: linalg.yield %[[RESULT]]
 
   // Round by bilinear interpolation
-  %output = "tosa.resize"(%input) {mode = "BILINEAR", scale = [4, 1, 4, 1], offset = [0, 0], border = [0, 0]} : (tensor<1x23x24x1xf32>) -> tensor<1x92x96x1xf32>
+  %output = "tosa.resize"(%input) {mode = "BILINEAR", scale = [4, 1, 4, 1], offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x23x24x1xf32>) -> tensor<1x92x96x1xf32>

   return
 }
@@ -482,7 +482,7 @@
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = tensor.empty(%[[BATCH]]) : tensor<?x4x4x1xi32>
   // CHECK: %[[GENERIC:.+]] = linalg.generic
-  %output = "tosa.resize"(%input) { scale = [4, 2, 4, 2], offset = [-1, -1], border = [1, 1], mode = "BILINEAR" } : (tensor<?x2x2x1xi8>)  -> (tensor<?x4x4x1xi32>)
+  %output = "tosa.resize"(%input) { scale = [4, 2, 4, 2], offset = array<i64: -1, -1>, border = array<i64: 1, 1>, mode = "BILINEAR" } : (tensor<?x2x2x1xi8>)  -> (tensor<?x4x4x1xi32>)
   return
 }
 
@@ -490,7 +490,7 @@
 
 // CHECK-LABEL: @resize_bilinear_int48
 func.func @resize_bilinear_int48(%arg0: tensor<1x19x19x1xi16>) {
-  %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 1, 16, 1], offset = [0, \
0], border = [0, 0]} : (tensor<1x19x19x1xi16>) -> tensor<1x289x289x1xi48> +  %0 = \
"tosa.resize"(%arg0) {mode = "BILINEAR", scale = [16, 1, 16, 1], offset = array<i64: \
0, 0>, border = array<i64: 0, 0>} : (tensor<1x19x19x1xi16>) -> \
tensor<1x289x289x1xi48>  return
 }
 
@@ -513,7 +513,7 @@
   // CHECK:    %[[ADD:.+]] = arith.addi %[[MUL0]], %[[MUL1]]
   // CHECK:    %[[RES:.+]] = arith.muli %[[ADD]], %[[C2]]
   // CHECK:    linalg.yield %[[RES]]
-  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 1, 3, 1], offset = [0, 0], border = [0, 0]} : (tensor<3x1x2x7xi8>) -> tensor<3x1x5x7xi32>
+  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 1, 3, 1], offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x2x7xi8>) -> tensor<3x1x5x7xi32>

   // CHECK:  return %[[GENERIC]]
   return %resize : tensor<3x1x5x7xi32>
@@ -532,7 +532,7 @@
   // CHECK:    %[[MUL1:.+]] = arith.mulf %[[EXTRACT1]], %[[DX]]
   // CHECK:    %[[ADD:.+]] = arith.addf %[[MUL0]], %[[MUL1]]
   // CHECK:    linalg.yield %[[ADD]]
-  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 1, 3, 1], offset = [0, 0], border = [0, 0]} : (tensor<3x1x2x7xf32>) -> tensor<3x1x5x7xf32>
+  %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = [2, 1, 3, 1], offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x2x7xf32>) -> tensor<3x1x5x7xf32>

   // CHECK:  return %[[GENERIC]]
   return %resize : tensor<3x1x5x7xf32>
Index: mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
===================================================================
--- mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -146,7 +146,7 @@
   // CHECK-DAG: [[FILL:%.+]] = linalg.fill ins([[CONST]]{{.*}}outs([[INIT]]
   // CHECK-DAG: [[KERNEL:%.+]] = tensor.empty()
   // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg0, [[KERNEL]] : tensor<1x6x34x62xf32>, tensor<3x3xf32>) outs([[FILL]] : tensor<1x4x32x62xf32>)
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xf32>)  -> (tensor<1x4x32x62xf32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = array<i64: 3, 3>, stride = array<i64: 1, 1>} : (tensor<1x6x34x62xf32>)  -> (tensor<1x4x32x62xf32>)
   return
 }
 
@@ -160,7 +160,7 @@
   // CHECK-DAG: [[FILL:%.+]] = linalg.fill ins([[INITVAL]]{{.*}}outs([[INIT]]
   // CHECK-DAG: [[KERNEL:%.+]] = tensor.empty()
   // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins([[PAD]], [[KERNEL]] : tensor<1x6x35x62xf32>, tensor<3x3xf32>) outs([[FILL]] : tensor<1x4x33x62xf32>)
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 1], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xf32>)  -> (tensor<1x4x33x62xf32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 1], kernel = array<i64: 3, 3>, stride = array<i64: 1, 1>} : (tensor<1x6x34x62xf32>)  -> (tensor<1x4x33x62xf32>)
   return
 }
 
@@ -173,7 +173,7 @@
   // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CONST]]{{.*}}outs(%[[INIT]]
   // CHECK: %[[KERNEL:.+]] = tensor.empty()
   // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg0, %[[KERNEL]] : tensor<?x6x34x62xf32>, tensor<3x3xf32>) outs(%[[FILL]] : tensor<?x4x32x62xf32>)
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<?x6x34x62xf32>)  -> (tensor<?x4x32x62xf32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = array<i64: 3, 3>, stride = array<i64: 1, 1>} : (tensor<?x6x34x62xf32>)  -> (tensor<?x4x32x62xf32>)
   return
 }
 
@@ -181,7 +181,7 @@
 func.func @max_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> () {
   // CHECK: arith.constant -128
   // CHECK: linalg.pooling_nhwc_max
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, \
1]} : (tensor<1x6x34x62xi8>)  -> (tensor<1x4x32x62xi8>) +  %0 = \
"tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = array<i64: 3, 3>, stride = \
array<i64: 1, 1>} : (tensor<1x6x34x62xi8>)  -> (tensor<1x4x32x62xi8>)  return
 }
 
@@ -189,7 +189,7 @@
 func.func @max_pool_i16(%arg0: tensor<1x6x34x62xi16>) -> () {
   // CHECK: arith.constant -32768
   // CHECK: linalg.pooling_nhwc_max
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, \
1]} : (tensor<1x6x34x62xi16>)  -> (tensor<1x4x32x62xi16>) +  %0 = \
"tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = array<i64: 3, 3>, stride = \
array<i64: 1, 1>} : (tensor<1x6x34x62xi16>)  -> (tensor<1x4x32x62xi16>)  return
 }
 
@@ -197,7 +197,7 @@
 func.func @max_pool_i32(%arg0: tensor<1x6x34x62xi32>) -> () {
   // CHECK: arith.constant -2147483648
   // CHECK: linalg.pooling_nhwc_max
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, \
1]} : (tensor<1x6x34x62xi32>)  -> (tensor<1x4x32x62xi32>) +  %0 = \
"tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = array<i64: 3, 3>, stride = \
array<i64: 1, 1>} : (tensor<1x6x34x62xi32>)  -> (tensor<1x4x32x62xi32>)  return
 }
 // -----
@@ -258,7 +258,7 @@
   // CHECK:   [[CF:%.+]] = arith.sitofp [[CI]]
   // CHECK:   [[RESULT:%.+]] = arith.divf %[[BBARG1]], [[CF]]
   // CHECK:   linalg.yield [[RESULT]]
-  %0 = "tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = [4, 4], stride = [1, \
1]} : (tensor<1x6x34x62xf32>)  -> (tensor<1x5x33x62xf32>) +  %0 = \
"tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = array<i64: 4, 4>, stride = \
array<i64: 1, 1>} : (tensor<1x6x34x62xf32>)  -> (tensor<1x5x33x62xf32>)  return %0 : \
tensor<1x5x33x62xf32>  }
 
@@ -276,7 +276,7 @@
   // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%[[PAD]], %[[KERNEL]] : tensor<?x8x36x62xf32>, tensor<4x4xf32>) outs(%[[FILL]] : tensor<?x5x33x62xf32>)
   // CHECK: %[[INIT:.+]] = tensor.empty(%[[BATCH]])
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[POOL]] : tensor<?x5x33x62xf32>) outs(%[[INIT]] : tensor<?x5x33x62xf32>)
-  %0 = "tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = [4, 4], stride = [1, 1]} : (tensor<?x6x34x62xf32>)  -> (tensor<?x5x33x62xf32>)
+  %0 = "tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = array<i64: 4, 4>, stride = array<i64: 1, 1>} : (tensor<?x6x34x62xf32>)  -> (tensor<?x5x33x62xf32>)
   return %0 : tensor<?x5x33x62xf32>
 }
 
@@ -306,7 +306,7 @@
   // CHECK: %[[CLMP_MAX:.+]] = arith.select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
   // CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
   // CHECK: linalg.yield %[[TRUNC]]
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], \
quantization_info = #tosa.unary_quant<input_zp = -128, output_zp = -128>, stride = \
[4, 4]} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi8> +  %0 = \
"tosa.avg_pool2d"(%arg0) {kernel = array<i64: 4, 4>, pad = [0, 0, 0, 0], \
quantization_info = #tosa.unary_quant<input_zp = -128, output_zp = -128>, stride = \
array<i64: 4, 4>} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi8>  return
 }
 
@@ -336,7 +336,7 @@
   // CHECK: %[[CLMP_MAX:.+]] = arith.select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
   // CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
   // CHECK: linalg.yield %[[TRUNC]]
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], \
quantization_info = #tosa.unary_quant<input_zp = -128, output_zp = -128>, stride = \
[4, 4]} : (tensor<1x128x128x2xi16>) -> tensor<1x32x32x2xi16> +  %0 = \
"tosa.avg_pool2d"(%arg0) {kernel = array<i64: 4, 4>, pad = [0, 0, 0, 0], \
quantization_info = #tosa.unary_quant<input_zp = -128, output_zp = -128>, stride = \
array<i64: 4, 4>} : (tensor<1x128x128x2xi16>) -> tensor<1x32x32x2xi16>  return
 }
 
@@ -357,7 +357,7 @@
   // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor<1x45x40x28xf32>) outs(%[[B_IN]] : tensor<1x45x40x28xf32>)
   // CHECK:   arith.addf
   // CHECK:   linalg.yield
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [2, 1]} : (tensor<1x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>)  -> (tensor<1x45x40x28xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 2, 1>} : (tensor<1x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>)  -> (tensor<1x45x40x28xf32>)
   return
 }
 
@@ -380,7 +380,7 @@
   // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor<?x45x40x28xf32>) outs(%[[B_IN]] : tensor<?x45x40x28xf32>)
   // CHECK:   %[[ADD:.+]] = arith.addf
   // CHECK:   linalg.yield %[[ADD]] : f32
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [2, 1]} : (tensor<?x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>)  -> (tensor<?x45x40x28xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 2, 1>} : (tensor<?x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>)  -> (tensor<?x45x40x28xf32>)
   return
 }
 
@@ -440,7 +440,7 @@
   // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor<1x?x?x28xf32>) outs(%[[B_IN]] : tensor<1x?x?x28xf32>)
   // CHECK:   %[[ADD:.+]] = arith.addf
   // CHECK:   linalg.yield %[[ADD]] : f32
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [2, 1]} : (tensor<1x?x?x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>)  -> (tensor<1x?x?x28xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: 2, 1>} : (tensor<1x?x?x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>)  -> (tensor<1x?x?x28xf32>)
   return
 }
 
@@ -452,7 +452,7 @@
   // CHECK: tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK:   tensor.yield %[[C0]]
   // CHECK: linalg.conv_2d_nhwc_hwcf
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [1, 1, 1, 1], stride = [1, 1], \
dilation = [2, 1]} : (tensor<1x47x40x28xf32>, tensor<28x3x3x28xf32>, tensor<28xf32>)  \
-> (tensor<1x45x40x28xf32>) +  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [1, \
1, 1, 1], stride = array<i64: 1, 1>, dilation = array<i64: 2, 1>} : \
(tensor<1x47x40x28xf32>, tensor<28x3x3x28xf32>, tensor<28xf32>)  -> \
(tensor<1x45x40x28xf32>)  return
 }
 
@@ -464,7 +464,7 @@
   // CHECK: tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK:   tensor.yield %[[C22]]
   // CHECK: linalg.conv_2d_nhwc_hwcf_q
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [1, 1, 1, 1], \
quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = [1, \
1]} : (tensor<1x12x12x1xi8>, tensor<1024x3x3x1xi8>, tensor<1024xi32>) -> \
tensor<1x12x12x1024xi32> +  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = \
array<i64: 1, 1>, pad = [1, 1, 1, 1], quantization_info = #tosa.conv_quant<input_zp = \
-22, weight_zp = 42>, stride = array<i64: 1, 1>} : (tensor<1x12x12x1xi8>, \
tensor<1024x3x3x1xi8>, tensor<1024xi32>) -> tensor<1x12x12x1024xi32>  return
 }
 
@@ -486,7 +486,7 @@
   // CHECK:   [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
   // CHECK:   linalg.yield [[ADD]] : f32
   // CHECK: } -> tensor<1x5x5x33xf32>
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = \
[1, 1], dilation = [1, 1] } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, \
tensor<33xf32>)  -> (tensor<1x5x5x33xf32>) +  %2 = "tosa.depthwise_conv2d"(%arg0, \
%arg1, %arg2) { pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: \
1, 1> } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>)  -> \
(tensor<1x5x5x33xf32>)  return
 }
 
@@ -510,7 +510,7 @@
   // CHECK:   %[[ADD:.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
   // CHECK:   linalg.yield %[[ADD]] : f32
   // CHECK: } -> tensor<?x5x5x33xf32>
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = \
[1, 1], dilation = [1, 1] } : (tensor<?x7x5x3xf32>, tensor<3x1x3x11xf32>, \
tensor<33xf32>)  -> (tensor<?x5x5x33xf32>) +  %2 = "tosa.depthwise_conv2d"(%arg0, \
%arg1, %arg2) { pad = [0, 0, 0, 0], stride = array<i64: 1, 1>, dilation = array<i64: \
1, 1> } : (tensor<?x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>)  -> \
(tensor<?x5x5x33xf32>)  return
 }
 
@@ -532,7 +532,7 @@
   // CHECK:   [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
   // CHECK:   linalg.yield [[ADD]] : f32
   // CHECK: } -> tensor<1x5x5x33xf32>
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = \
[2, 2], dilation = [1, 1] } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, \
tensor<33xf32>)  -> (tensor<1x5x5x33xf32>) +  %2 = "tosa.depthwise_conv2d"(%arg0, \
%arg1, %arg2) { pad = [0, 0, 0, 0], stride = array<i64: 2, 2>, dilation = array<i64: \
1, 1> } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>)  -> \
(tensor<1x5x5x33xf32>)  return
 }
 
@@ -560,7 +560,7 @@
   // CHECK:   [[ADD:%.+]] = arith.addi %[[ARG3]], %[[ARG4]] : i32
   // CHECK:   linalg.yield [[ADD]] : i32
   // CHECK: } -> tensor<1x12x12x512xi32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 1, 1], \
quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 42>, stride = [1, \
1], dilation = [1, 1] } : (tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, \
tensor<512xi32>)  -> tensor<1x12x12x512xi32> +  %0 = "tosa.depthwise_conv2d"(%arg0, \
%arg1, %arg2) {pad = [1, 1, 1, 1], quantization_info = #tosa.conv_quant<input_zp = \
-128, weight_zp = 42>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1> } : \
(tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>)  -> \
tensor<1x12x12x512xi32>  return
 }
 
@@ -584,7 +584,7 @@
   // CHECK:   [[ADD:%.+]] = arith.addi %[[ARG3]], %[[ARG4]] : i32
   // CHECK:   linalg.yield [[ADD]] : i32
   // CHECK: } -> tensor<1x10x10x512xi32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], \
quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 42>, stride = [1, \
1], dilation = [2, 2] } : (tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, \
tensor<512xi32>)  -> tensor<1x10x10x512xi32> +  %0 = "tosa.depthwise_conv2d"(%arg0, \
%arg1, %arg2) {pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = \
-128, weight_zp = 42>, stride = array<i64: 1, 1>, dilation = array<i64: 2, 2> } : \
(tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>)  -> \
tensor<1x10x10x512xi32>  return
 }
 
@@ -600,6 +600,6 @@
   // CHECK:  } : tensor<2x?x?x3xf32> to tensor<2x?x?x3xf32>
   // CHECK: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<[1, 2]> : tensor<2xi64>} ins(%[[PADDED]], %arg1 : tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>) outs(%{{.*}} : tensor<2x?x?x3x5xf32>) -> tensor<2x?x?x3x5xf32>
   // CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[CONV]] {{\[}}[0], [1], [2], [3, 4]]
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [1, 2, 3, 4], dilation = [2, 1], stride = [1, 2]} : (tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x?x?x15xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [1, 2, 3, 4], dilation = array<i64: 2, 1>, stride = array<i64: 1, 2>} : (tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x?x?x15xf32>
   return
 }
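
Note on the test updates above: they are all the same mechanical substitution, a plain [a, b] (an ArrayAttr of IntegerAttrs) becoming array<i64: a, b> (a DenseI64ArrayAttr). On the C++ side this removes a layer of attribute boxing; a minimal illustrative sketch (not part of the patch) of the before/after element access:

    // Before: each element is boxed in an IntegerAttr and must be unwrapped.
    SmallVector<int64_t> stride;
    for (Attribute a : op->getAttr("stride").cast<ArrayAttr>())
      stride.push_back(a.cast<IntegerAttr>().getInt());

    // After: DenseI64ArrayAttr stores raw i64 values and converts directly
    // to ArrayRef<int64_t>, no per-element unwrapping needed.
    ArrayRef<int64_t> stride2 = op->getAttr("stride").cast<DenseI64ArrayAttr>();
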
Index: mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
===================================================================
--- mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -96,10 +96,8 @@
     ShapedType resultTy = op->getResult(0).getType().cast<ShapedType>();
 
     llvm::SmallVector<int64_t> pad;
-    llvm::SmallVector<int64_t> stride;
-
+    llvm::ArrayRef<int64_t> stride = op.getStride();
     getValuesFromIntArrayAttribute(op.getOutPad().cast<ArrayAttr>(), pad);
-    getValuesFromIntArrayAttribute(op.getStride().cast<ArrayAttr>(), stride);
 
     // If striding is all 1 we can modify padding and reverse the kernel along
     // the x/y direction to make it a regular convolution. This is much simpler
@@ -129,13 +127,15 @@
     if (op.getQuantizationInfo()) {
       conv2d = rewriter.create<tosa::Conv2DOp>(
           loc, resultTy, input, reverse2, bias,
-          rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
-          rewriter.getI64ArrayAttr({1, 1}), *op.getQuantizationInfo());
+          rewriter.getI64ArrayAttr(convPad),
+          rewriter.getDenseI64ArrayAttr(stride),
+          rewriter.getDenseI64ArrayAttr({1, 1}), *op.getQuantizationInfo());
     } else {
       conv2d = rewriter.create<tosa::Conv2DOp>(
           loc, resultTy, input, reverse2, bias,
-          rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
-          rewriter.getI64ArrayAttr({1, 1}));
+          rewriter.getI64ArrayAttr(convPad),
+          rewriter.getDenseI64ArrayAttr(stride),
+          rewriter.getDenseI64ArrayAttr({1, 1}));
     }
 
     rewriter.replaceOp(op, conv2d);
@@ -165,10 +165,8 @@
     Type resultETy = resultTy.getElementType();
 
     llvm::SmallVector<int64_t> pad;
-    llvm::SmallVector<int64_t> stride;
-
+    llvm::ArrayRef<int64_t> stride = op.getStride();
     getValuesFromIntArrayAttribute(op.getOutPad().cast<ArrayAttr>(), pad);
-    getValuesFromIntArrayAttribute(op.getStride().cast<ArrayAttr>(), stride);
 
     // If striding is all 1 we can modify padding and reverse the kernel along
     // the x/y direction to make it a regular convolution. This is much simpler
@@ -293,8 +291,8 @@
                    rewriter, loc, UnrankedTensorType::get(resultETy), input,
                    weight, zeroBias,
                    /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
-                   /*stride=*/rewriter.getI64ArrayAttr({1, 1}),
-                   /*dilation=*/rewriter.getI64ArrayAttr({1, 1}),
+                   /*stride=*/rewriter.getDenseI64ArrayAttr({1, 1}),
+                   /*dilation=*/rewriter.getDenseI64ArrayAttr({1, 1}),
                    *op.getQuantizationInfo())
                    .getResult();
     } else {
@@ -302,8 +300,8 @@
                    rewriter, loc, UnrankedTensorType::get(resultETy), input,
                    weight, zeroBias,
                    /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
-                   /*stride=*/rewriter.getI64ArrayAttr({1, 1}),
-                   /*dilation=*/rewriter.getI64ArrayAttr({1, 1}))
+                   /*stride=*/rewriter.getDenseI64ArrayAttr({1, 1}),
+                   /*dilation=*/rewriter.getDenseI64ArrayAttr({1, 1}))
                    .getResult();
     }
 
Index: mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
===================================================================
--- mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -37,12 +37,8 @@
       return failure();
     }
 
-    // Stride must be 1 for this optimization.
-    for (Attribute stride : op.getStride().getValue()) {
-      if (!stride.cast<IntegerAttr>().getValue().isOne()) {
-        return failure();
-      }
-    }
+    if (!llvm::all_of(op.getStride(), [](int64_t v) { return v == 1; }))
+      return failure();
 
     // Only works for a 1x1 kernel.
     ArrayRef<int64_t> weightShape = weightType.getShape();
Index: mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
===================================================================
--- mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -46,11 +46,8 @@
     if (!weightType.hasRank())
       return rewriter.notifyMatchFailure(op, "unranked weight input");
 
-    // Stride must be 1 for this optimization.
-    for (APInt stride : op.getStride().getAsValueRange<IntegerAttr>()) {
-      if (!stride.isOne())
-        return failure();
-    }
+    if (!llvm::all_of(op.getStride(), [](int64_t v) { return v == 1; }))
+      return failure();
 
     // Only works for a 1x1 kernel.
     ArrayRef<int64_t> weightShape = weightType.getShape();
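
Both decompose passes now express the unit-stride precondition over raw integers. A short round-trip sketch (illustrative only; assumes an OpBuilder named rewriter is in scope, as in the surrounding patterns):

    // Build the attribute, read it back as ArrayRef via implicit conversion,
    // and test the precondition with llvm::all_of.
    DenseI64ArrayAttr strideAttr = rewriter.getDenseI64ArrayAttr({2, 2});
    ArrayRef<int64_t> stride = strideAttr;
    bool unitStride = llvm::all_of(stride, [](int64_t v) { return v == 1; });
    // unitStride is false here, so the rewrite would return failure().
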
Index: mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
===================================================================
--- mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -99,8 +99,7 @@
 // TOSA Operator Verifiers.
 //===----------------------------------------------------------------------===//
 
-template <typename T>
-static LogicalResult verifyConvOp(T op) {
+template <typename T> static LogicalResult verifyConvOp(T op) {
   // All TOSA conv ops have an input() and weight().
   auto inputType =
       op.getInput().getType().template dyn_cast<RankedTensorType>();
@@ -173,7 +172,8 @@
 static void buildConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
                                      Type outputType, Value input, Value weight,
                                      Value bias, ArrayAttr pad,
-                                     ArrayAttr stride, ArrayAttr dilation) {
+                                     DenseI64ArrayAttr stride,
+                                     DenseI64ArrayAttr dilation) {
 
   result.addOperands({input, weight, bias});
   result.addAttribute("pad", pad);
@@ -191,12 +191,11 @@
 }
 
 /// Handles tosa.transpose_conv2d which has outpad and output shape attributes.
-static void buildTransConvOpWithQuantInfo(OpBuilder &builder,
-                                          OperationState &result,
-                                          Type outputType, Value input,
-                                          Value weight, Value bias,
-                                          ArrayAttr outpad, ArrayAttr stride,
-                                          ArrayAttr outputShape) {
+static void
+buildTransConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+                              Type outputType, Value input, Value weight,
+                              Value bias, ArrayAttr outpad,
+                              DenseI64ArrayAttr stride, ArrayAttr outputShape) {
   result.addOperands({input, weight, bias});
   result.addAttribute("out_pad", outpad);
   result.addAttribute("stride", stride);
@@ -269,11 +268,9 @@
 /// Both the tosa.avg_pool2d and unary ops use the same UnaruOpQuantizationAttr
 /// but avg_pool operator has its own builder as it has additional parameters
 /// not part of the unary ops.
-static void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder,
-                                          OperationState &result,
-                                          Type outputType, Value input,
-                                          ArrayAttr kernel, ArrayAttr stride,
-                                          ArrayAttr pad) {
+static void buildAvgPool2dOpWithQuantInfo(
+    OpBuilder &builder, OperationState &result, Type outputType, Value input,
+    DenseArrayAttr kernel, DenseArrayAttr stride, ArrayAttr pad) {
   result.addOperands(input);
   result.addAttribute("kernel", kernel);
   result.addAttribute("stride", stride);
@@ -814,11 +811,9 @@
     return failure();
 
   llvm::SmallVector<int64_t> scaleInt;
-  llvm::SmallVector<int64_t> offsetInt;
-  llvm::SmallVector<int64_t> borderInt;
+  llvm::ArrayRef<int64_t> offsetInt = adaptor.getOffset();
+  llvm::ArrayRef<int64_t> borderInt = adaptor.getBorder();
   getI64Values(adaptor.getScale(), scaleInt);
-  getI64Values(adaptor.getOffset(), offsetInt);
-  getI64Values(adaptor.getBorder(), borderInt);
 
   // Compute the output shape based on attributes: scale, offset, and border.
   outputShape[1] =
@@ -982,12 +977,9 @@
   int64_t height = inputShape.getDimSize(1);
   int64_t width = inputShape.getDimSize(2);
 
-  llvm::SmallVector<int64_t> kernel;
-  llvm::SmallVector<int64_t> stride;
+  ArrayRef<int64_t> kernel = attributes.get("kernel").cast<DenseI64ArrayAttr>();
+  ArrayRef<int64_t> stride = attributes.get("stride").cast<DenseI64ArrayAttr>();
   llvm::SmallVector<int64_t> pad;
-
-  getI64Values(attributes.get("kernel").cast<ArrayAttr>(), kernel);
-  getI64Values(attributes.get("stride").cast<ArrayAttr>(), stride);
   getI64Values(attributes.get("pad").cast<ArrayAttr>(), pad);
 
   if (!ShapedType::isDynamic(height)) {
@@ -1041,13 +1033,10 @@
                          : outputShape[3];
   }
 
-  llvm::SmallVector<int64_t> dilation;
+  llvm::ArrayRef<int64_t> dilation = adaptor.getDilation();
+  llvm::ArrayRef<int64_t> stride = adaptor.getStride();
   llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
-  getI64Values(adaptor.getDilation(), dilation);
   getI64Values(adaptor.getPad(), padding);
-  getI64Values(adaptor.getStride(), stride);
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
@@ -1212,13 +1201,10 @@
                          : outputShape[3];
   }
 
-  llvm::SmallVector<int64_t> dilation;
+  llvm::ArrayRef<int64_t> dilation = adaptor.getDilation();
   llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
-  getI64Values(adaptor.getDilation(), dilation);
+  llvm::ArrayRef<int64_t> stride = adaptor.getStride();
   getI64Values(adaptor.getPad(), padding);
-  getI64Values(adaptor.getStride(), stride);
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
@@ -1285,10 +1271,8 @@
   }
 
   llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
+  llvm::ArrayRef<int64_t> stride = adaptor.getStride();
   getI64Values(adaptor.getOutPad(), padding);
-  getI64Values(adaptor.getStride(), stride);
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
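
The shape-inference hunks above read kernel/stride/dilation straight off the new attributes and feed them into the usual sliding-window size rule. A self-contained summary of that rule (my paraphrase, not code from the patch):

    #include <cstdint>

    // out = (in + pad_before + pad_after - effective_kernel) / stride + 1,
    // where effective_kernel = dilation * (kernel - 1) + 1 (dilation is 1
    // for the pooling ops).
    static int64_t slidingWindowDim(int64_t in, int64_t padBefore,
                                    int64_t padAfter, int64_t kernel,
                                    int64_t stride, int64_t dilation = 1) {
      int64_t effectiveKernel = dilation * (kernel - 1) + 1;
      return (in + padBefore + padAfter - effectiveKernel) / stride + 1;
    }
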
Index: mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
===================================================================
--- mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -854,10 +854,10 @@
 // Fold away cases where a tosa.resize operation returns a copy
 // of the input image.
 OpFoldResult ResizeOp::fold(ArrayRef<Attribute> operands) {
-  SmallVector<int32_t> scale, offset, border;
+  ArrayRef<int64_t> offset = getOffset();
+  ArrayRef<int64_t> border = getBorder();
+  SmallVector<int32_t> scale;
   getValuesFromIntArrayAttribute(getScale(), scale);
-  getValuesFromIntArrayAttribute(getOffset(), offset);
-  getValuesFromIntArrayAttribute(getBorder(), border);
 
   // Check unit scaling.
   if (scale[0] != scale[1] || scale[2] != scale[3]) {
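
For context on the unit-scaling check: the TOSA resize scale attribute packs [scale_y_n, scale_y_d, scale_x_n, scale_x_d], so the fold can only fire when numerator equals denominator on each axis. An illustrative predicate:

    // Unit scaling holds when each axis scales by n/d == 1.
    static bool isUnitScale(llvm::ArrayRef<int32_t> scale) {
      return scale.size() == 4 && scale[0] == scale[1] && scale[2] == scale[3];
    }
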
Index: mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
===================================================================
--- mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -69,13 +69,22 @@
       builder.getIndexType(), builder.create<arith::ConstantOp>(attr));
 }
 
+static mlir::Value reifyConstantDim(int64_t attr,
+                                    ImplicitLocOpBuilder &builder) {
+  return builder.createOrFold<arith::IndexCastOp>(
+      builder.getIndexType(),
+      builder.create<arith::ConstantOp>(builder.getI64IntegerAttr(attr)));
+}
+
 // Calculating the output width/height using the formula:
 // H = ((IH+pad_top+pad_bottom-(dilation_y*(KH-1)+1))/stride_y)+1
 // W = ((IW+pad_left+pad_right-(dilation_x*(KW-1)+1))/stride_x)+1
-static mlir::Value
-getConvOutputDim(Location loc, Value inputDim, Attribute padBeforeAttr,
-                 Attribute padAfterAttr, Value kernelDim, Attribute strideAttr,
-                 Attribute dilationAttr, Type inputETy, OpBuilder &rewriter) {
+
+static mlir::Value getConvOutputDim(Location loc, Value inputDim,
+                                    Attribute padBeforeAttr,
+                                    Attribute padAfterAttr, Value kernelDim,
+                                    int64_t strideAttr, int64_t dilationAttr,
+                                    Type inputETy, OpBuilder &rewriter) {
   ImplicitLocOpBuilder builder(loc, rewriter);
   auto one = rewriter.create<arith::ConstantOp>(
       loc, IntegerAttr::get(inputDim.getType(), 1));
@@ -96,10 +105,12 @@
 }
 
 // Creates a vector of the dynamic output dims for Conv2D and Depthwise_Conv2D
-static SmallVector<Value> inferDynamicDimsForConv(
-    Location loc, Value input, Value weight, ShapedType resultTy,
-    ArrayAttr padAttr, ArrayAttr strideAttr, ArrayAttr dilationAttr,
-    int64_t weightHDim, int64_t weightWDim, OpBuilder &rewriter) {
+static SmallVector<Value>
+inferDynamicDimsForConv(Location loc, Value input, Value weight,
+                        ShapedType resultTy, ArrayAttr padAttr,
+                        DenseI64ArrayAttr strideAttr,
+                        DenseI64ArrayAttr dilationAttr, int64_t weightHDim,
+                        int64_t weightWDim, OpBuilder &rewriter) {
   ShapedType inputTy = input.getType().cast<ShapedType>();
   Type inputETy = inputTy.getElementType();
   int64_t inputRank = inputTy.getRank();
@@ -122,8 +133,7 @@
     // H = F(IH, pad_top, pad_bottom, dilation_y, KH, stride_y)
     dynDims[heightDim] = getConvOutputDim(
         loc, initHDim, padAttr.getValue()[0], padAttr.getValue()[1], kernelHDim,
-        strideAttr.getValue()[0], dilationAttr.getValue()[0], inputETy,
-        rewriter);
+        strideAttr[0], dilationAttr[0], inputETy, rewriter);
   }
 
   // Dynamic input weight
@@ -135,8 +145,7 @@
     // W = F(IW, pad_left, pad_right, dilation_x, KW, stride_x)
     dynDims[weightDim] = getConvOutputDim(
         loc, initWDim, padAttr.getValue()[2], padAttr.getValue()[3], kernelWDim,
-        strideAttr.getValue()[1], dilationAttr.getValue()[1], inputETy,
-        rewriter);
+        strideAttr[1], dilationAttr[1], inputETy, rewriter);
   }
 
   SmallVector<Value> filteredDims = condenseValues(dynDims);
@@ -177,9 +186,9 @@
     Type inputETy = inputTy.getElementType();
     Type resultETy = resultTy.getElementType();
 
-    auto padAttr = op->getAttr("pad").cast<ArrayAttr>();
-    auto strideTosaAttr = op->getAttr("stride").cast<ArrayAttr>();
-    auto dilationTosaAttr = op->getAttr("dilation").cast<ArrayAttr>();
+    ArrayAttr padAttr = op.getPadAttr();
+    DenseI64ArrayAttr strideTosaAttr = op.getStrideAttr();
+    DenseI64ArrayAttr dilationTosaAttr = op.getDilationAttr();
     bool isQuantized = op->hasAttr("quantization_info");
 
     if (!weightTy.hasStaticShape() || !biasTy.hasStaticShape())
@@ -249,9 +258,8 @@
                            .result();
 
     // Extract the attributes for convolution.
-    llvm::SmallVector<int64_t> stride, dilation;
-    getValuesFromIntArrayAttribute(strideTosaAttr, stride);
-    getValuesFromIntArrayAttribute(dilationTosaAttr, dilation);
+    ArrayRef<int64_t> stride = strideTosaAttr;
+    ArrayRef<int64_t> dilation = dilationTosaAttr;
 
     // Create the convolution op.
     auto strideAttr = DenseIntElementsAttr::get(
@@ -347,8 +355,8 @@
     Type resultETy = resultTy.getElementType();
 
     auto padAttr = op->getAttr("pad").cast<ArrayAttr>();
-    auto strideTosaAttr = op->getAttr("stride").cast<ArrayAttr>();
-    auto dilationTosaAttr = op->getAttr("dilation").cast<ArrayAttr>();
+    auto strideTosaAttr = op->getAttr("stride").cast<DenseI64ArrayAttr>();
+    auto dilationTosaAttr = op->getAttr("dilation").cast<DenseI64ArrayAttr>();
 
     if (!weightTy.hasStaticShape() || !biasTy.hasStaticShape())
       return rewriter.notifyMatchFailure(
@@ -402,15 +410,15 @@
     input = applyPad(loc, input, pad, zeroAttr, rewriter);
 
     // Extract the attributes for convolution.
-    llvm::SmallVector<int64_t> stride, dilation;
-    getValuesFromIntArrayAttribute(strideTosaAttr, stride);
-    getValuesFromIntArrayAttribute(dilationTosaAttr, dilation);
+    ArrayRef<int64_t> stride = strideTosaAttr;
+    ArrayRef<int64_t> dilation = dilationTosaAttr;
 
     // Create the convolution op.
     auto strideAttr = DenseIntElementsAttr::get(
         RankedTensorType::get({2}, rewriter.getI64Type()), stride);
     auto dilationAttr = DenseIntElementsAttr::get(
         RankedTensorType::get({2}, rewriter.getI64Type()), dilation);
+
     ShapedType linalgConvTy =
         RankedTensorType::get({resultShape[0], resultShape[1], resultShape[2],
                                weightShape[2], weightShape[3]},
@@ -723,9 +731,8 @@
 
     Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);
 
-    SmallVector<int64_t> kernel, stride;
-    getValuesFromIntArrayAttribute(op.getKernel(), kernel);
-    getValuesFromIntArrayAttribute(op.getStride(), stride);
+    ArrayRef<int64_t> kernel = op.getKernel();
+    ArrayRef<int64_t> stride = op.getStride();
 
     Attribute strideAttr = rewriter.getI64VectorAttr(stride);
     Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});
@@ -785,9 +792,8 @@
     Attribute initialAttr = rewriter.getZeroAttr(accETy);
     Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);
 
-    SmallVector<int64_t> kernel, stride;
-    getValuesFromIntArrayAttribute(op.getKernel(), kernel);
-    getValuesFromIntArrayAttribute(op.getStride(), stride);
+    ArrayRef<int64_t> kernel = op.getKernel();
+    ArrayRef<int64_t> stride = op.getStride();
 
     Attribute strideAttr = rewriter.getI64VectorAttr(stride);
     Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});
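
getConvOutputDim now threads plain int64_t stride/dilation values through the H/W formula quoted in the comment above. As a standalone sanity check (not part of the patch), evaluating that formula against the dilated conv2d shapes in the test diff earlier, where a 1x49x42x27 input with a 3x3 kernel, stride [1, 1], and dilation [2, 1] yields 1x45x40x28:

    #include <cassert>
    #include <cstdint>

    // H = ((IH + pad_top + pad_bottom - (dilation_y*(KH-1)+1)) / stride_y) + 1
    static int64_t convOutDim(int64_t in, int64_t padBefore, int64_t padAfter,
                              int64_t kernel, int64_t stride, int64_t dilation) {
      return (in + padBefore + padAfter - (dilation * (kernel - 1) + 1)) /
                 stride + 1;
    }

    int main() {
      assert(convOutDim(49, 0, 0, 3, 1, 2) == 45); // height, dilation 2
      assert(convOutDim(42, 0, 0, 3, 1, 1) == 40); // width, dilation 1
    }
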
Index: mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
===================================================================
--- mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1480,10 +1480,10 @@
 
       bool floatingPointMode = resultETy.isF32();
 
-      SmallVector<int32_t> scale, offset, border;
+      ArrayRef<int64_t> offset = op.getOffset();
+      ArrayRef<int64_t> border = op.getBorder();
+      SmallVector<int32_t> scale;
       getValuesFromIntArrayAttribute(op.getScale(), scale);
-      getValuesFromIntArrayAttribute(op.getOffset(), offset);
-      getValuesFromIntArrayAttribute(op.getBorder(), border);
 
       Value yScaleN, yScaleD, xScaleN, xScaleD;
       yScaleN = b.create<arith::ConstantOp>(b.getI32IntegerAttr(scale[0]));
Index: mlir/include/mlir/IR/OpBase.td
===================================================================
--- mlir/include/mlir/IR/OpBase.td
+++ mlir/include/mlir/IR/OpBase.td
@@ -1672,6 +1672,10 @@
     CPred<"$_self.cast<::mlir::ArrayAttr>().size() == " #n>,
     "with exactly " # n # " elements">;
 
+class DenseArrayCount<int n> : AttrConstraint<
+    CPred<"$_self.cast<::mlir::DenseArrayAttr>().size() == " #n>,
+    "with exactly " # n # " elements">;
+
 class DenseArrayStrictlyPositive<DenseArrayAttrBase arrayType> : AttrConstraint<
   CPred<"::llvm::all_of($_self.cast<" # arrayType #">().asArrayRef(), "
                         "[&](auto v) { return v > 0; })">,
Index: mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
===================================================================
--- mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
+++ mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
@@ -173,7 +173,7 @@
 def Tosa_Fp32ArrayAttr5 : ConfinedAttr<F32ArrayAttr, [ArrayCount<5>]>;
 def Tosa_Fp32ArrayAttr6 : ConfinedAttr<F32ArrayAttr, [ArrayCount<6>]>;
 
-def Tosa_IntArrayAttr2 : ConfinedAttr<I64ArrayAttr, [ArrayCount<2>]>;
+def Tosa_IntArrayAttr2 : ConfinedAttr<DenseI64ArrayAttr, [DenseArrayCount<2>]>;
 def Tosa_IntArrayAttr3 : ConfinedAttr<I64ArrayAttr, [ArrayCount<3>]>;
 def Tosa_IntArrayAttr4 : ConfinedAttr<I64ArrayAttr, [ArrayCount<4>]>;
 def Tosa_IntArrayAttr5 : ConfinedAttr<I64ArrayAttr, [ArrayCount<5>]>;
Index: mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
===================================================================
--- mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -123,8 +123,10 @@
 // which has specialized output shape semantics. The builder also defines the
 // bitwidth of the output given the bit width of the input & weight content.
 def Tosa_ConvOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "Value":$weight, "Value":$bias,
-       "ArrayAttr":$pad, "ArrayAttr":$stride, "ArrayAttr":$dilation),
+  (ins "::mlir::Type":$outputType, "::mlir::Value":$input,
+       "::mlir::Value":$weight, "::mlir::Value":$bias,
+       "::mlir::ArrayAttr":$pad, "::mlir::DenseI64ArrayAttr":$stride,
+       "::mlir::DenseI64ArrayAttr":$dilation),
   [{
     buildConvOpWithQuantInfo($_builder, $_state, outputType,
                              input, weight, bias,
@@ -133,8 +135,10 @@
 
 // Handles tosa.transpose_conv2d which has an outpad and output shape attribute.
 def Tosa_TransConvOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "Value":$weight, "Value":$bias,
-       "ArrayAttr":$outpad, "ArrayAttr":$stride, "ArrayAttr":$outputShape),
+  (ins "::mlir::Type":$outputType, "::mlir::Value":$input,
+       "::mlir::Value":$weight, "mlir::Value":$bias,
+       "::mlir::ArrayAttr":$outpad, "::mlir::DenseI64ArrayAttr":$stride,
+       "::mlir::ArrayAttr":$outputShape),
   [{
     buildTransConvOpWithQuantInfo($_builder, $_state, outputType,
                                   input, weight, bias,
@@ -166,8 +170,9 @@
 // UnaruOpQuantizationAttr but the avg_pool operator has its own builder as it
 // has additional parameters not part of the unary ops.
 def Tosa_AvgPool2dOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "ArrayAttr":$kernel,
-       "ArrayAttr":$stride, "ArrayAttr":$pad),
+  (ins "::mlir::Type":$outputType, "::mlir::Value":$input,
+       "::mlir::DenseI64ArrayAttr":$kernel, "::mlir::DenseI64ArrayAttr":$stride,
+       "::mlir::ArrayAttr":$pad),
   [{
     buildAvgPool2dOpWithQuantInfo($_builder, $_state, outputType,
                                   input, kernel, stride, pad);
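
With these builder signatures, call sites mix attribute kinds: pad stays an ArrayAttr while stride, dilation, and kernel become DenseI64ArrayAttr. A hypothetical call site (sketch only; loc, outputType, and the operands are assumed to be in scope):

    auto conv = builder.create<tosa::Conv2DOp>(
        loc, outputType, input, weight, bias,
        /*pad=*/builder.getI64ArrayAttr({0, 0, 0, 0}),
        /*stride=*/builder.getDenseI64ArrayAttr({1, 1}),
        /*dilation=*/builder.getDenseI64ArrayAttr({1, 1}));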

