Skip to content

Commit eb17780

Browse files
[mlir][sparse] Change sparse_tensor.print format (#91528)
1. Remove the trailing comma after the last element of a printed memref and add the closing parenthesis. 2. Change the integration tests to use the new format.
1 parent df21ee4 commit eb17780

File tree

73 files changed

+1068
-1059
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

73 files changed

+1068
-1059
lines changed

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp

+9-3
Original file line numberDiff line numberDiff line change
@@ -830,11 +830,17 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
830830
vector::PrintPunctuation::Comma);
831831
rewriter.create<vector::PrintOp>(loc, imag,
832832
vector::PrintPunctuation::Close);
833-
rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
834833
} else {
835-
rewriter.create<vector::PrintOp>(loc, val,
836-
vector::PrintPunctuation::Comma);
834+
rewriter.create<vector::PrintOp>(
835+
loc, val, vector::PrintPunctuation::NoPunctuation);
837836
}
837+
// Terminating comma (except at end).
838+
auto bound = rewriter.create<arith::AddIOp>(loc, idxs.back(), step);
839+
Value cond = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne,
840+
bound, size);
841+
scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ false);
842+
rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
843+
rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
838844
}
839845
idxs.pop_back();
840846
rewriter.setInsertionPointAfter(forOp);

mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir

+9-9
Original file line numberDiff line numberDiff line change
@@ -93,19 +93,19 @@ module {
9393
// CHECK-NEXT: nse = 12
9494
// CHECK-NEXT: dim = ( 4, 6 )
9595
// CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
96-
// CHECK-NEXT: pos[1] : ( 0, 2, 3,
97-
// CHECK-NEXT: crd[1] : ( 0, 2, 1,
98-
// CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
96+
// CHECK-NEXT: pos[1] : ( 0, 2, 3 )
97+
// CHECK-NEXT: crd[1] : ( 0, 2, 1 )
98+
// CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
9999
// CHECK-NEXT: ----
100100
sparse_tensor.print %A : tensor<?x?xf64, #BSR>
101101

102102
// CHECK-NEXT: ---- Sparse Tensor ----
103103
// CHECK-NEXT: nse = 12
104104
// CHECK-NEXT: dim = ( 2, 3, 2, 2 )
105105
// CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
106-
// CHECK-NEXT: pos[1] : ( 0, 2, 3,
107-
// CHECK-NEXT: crd[1] : ( 0, 2, 1
108-
// CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
106+
// CHECK-NEXT: pos[1] : ( 0, 2, 3 )
107+
// CHECK-NEXT: crd[1] : ( 0, 2, 1 )
108+
// CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
109109
// CHECK-NEXT: ----
110110
%t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
111111
to tensor<?x?x2x2xf64, #DSDD>
@@ -115,9 +115,9 @@ module {
115115
// CHECK-NEXT: nse = 12
116116
// CHECK-NEXT: dim = ( 4, 6 )
117117
// CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
118-
// CHECK-NEXT: pos[1] : ( 0, 2, 3,
119-
// CHECK-NEXT: crd[1] : ( 0, 2, 1,
120-
// CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
118+
// CHECK-NEXT: pos[1] : ( 0, 2, 3 )
119+
// CHECK-NEXT: crd[1] : ( 0, 2, 1 )
120+
// CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
121121
// CHECK-NEXT: ----
122122
%As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
123123
sparse_tensor.print %As : tensor<?x?xf64, #BSR>

mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir

+12-12
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,9 @@ module {
108108
// CHECK-NEXT: nse = 24
109109
// CHECK-NEXT: dim = ( 6, 16 )
110110
// CHECK-NEXT: lvl = ( 2, 4, 3, 4 )
111-
// CHECK-NEXT: pos[1] : ( 0, 1, 2,
112-
// CHECK-NEXT: crd[1] : ( 0, 2,
113-
// CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
111+
// CHECK-NEXT: pos[1] : ( 0, 1, 2 )
112+
// CHECK-NEXT: crd[1] : ( 0, 2 )
113+
// CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
114114
// CHECK-NEXT: ----
115115
//
116116
func.func @foo1() {
@@ -134,9 +134,9 @@ module {
134134
// CHECK-NEXT: nse = 24
135135
// CHECK-NEXT: dim = ( 6, 16 )
136136
// CHECK-NEXT: lvl = ( 2, 4, 4, 3 )
137-
// CHECK-NEXT: pos[1] : ( 0, 1, 2,
138-
// CHECK-NEXT: crd[1] : ( 0, 2,
139-
// CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
137+
// CHECK-NEXT: pos[1] : ( 0, 1, 2 )
138+
// CHECK-NEXT: crd[1] : ( 0, 2 )
139+
// CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
140140
// CHECK-NEXT: ----
141141
//
142142
func.func @foo2() {
@@ -160,9 +160,9 @@ module {
160160
// CHECK-NEXT: nse = 24
161161
// CHECK-NEXT: dim = ( 6, 16 )
162162
// CHECK-NEXT: lvl = ( 4, 2, 3, 4 )
163-
// CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
164-
// CHECK-NEXT: crd[1] : ( 0, 1,
165-
// CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
163+
// CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2 )
164+
// CHECK-NEXT: crd[1] : ( 0, 1 )
165+
// CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
166166
// CHECK-NEXT: ----
167167
//
168168
func.func @foo3() {
@@ -186,9 +186,9 @@ module {
186186
// CHECK-NEXT: nse = 24
187187
// CHECK-NEXT: dim = ( 6, 16 )
188188
// CHECK-NEXT: lvl = ( 4, 2, 4, 3 )
189-
// CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
190-
// CHECK-NEXT: crd[1] : ( 0, 1,
191-
// CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
189+
// CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2 )
190+
// CHECK-NEXT: crd[1] : ( 0, 1 )
191+
// CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
192192
// CHECK-NEXT: ----
193193
//
194194
func.func @foo4() {

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir

+10-10
Original file line numberDiff line numberDiff line change
@@ -111,11 +111,11 @@ module {
111111
// CHECK-NEXT: nse = 18
112112
// CHECK-NEXT: dim = ( 9, 4 )
113113
// CHECK-NEXT: lvl = ( 9, 4 )
114-
// CHECK-NEXT: pos[0] : ( 0, 9,
115-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
116-
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
117-
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
118-
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
114+
// CHECK-NEXT: pos[0] : ( 0, 9 )
115+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
116+
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
117+
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
118+
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
119119
// CHECK-NEXT: ----
120120
//
121121
%0 = call @concat_sparse_sparse(%sm24cc, %sm34cd, %sm44dc)
@@ -142,11 +142,11 @@ module {
142142
// CHECK-NEXT: nse = 18
143143
// CHECK-NEXT: dim = ( 9, 4 )
144144
// CHECK-NEXT: lvl = ( 9, 4 )
145-
// CHECK-NEXT: pos[0] : ( 0, 9,
146-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
147-
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
148-
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
149-
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
145+
// CHECK-NEXT: pos[0] : ( 0, 9 )
146+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
147+
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
148+
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
149+
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
150150
// CHECK-NEXT: ----
151151
//
152152
%2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir

+10-10
Original file line numberDiff line numberDiff line change
@@ -144,11 +144,11 @@ module {
144144
// CHECK-NEXT: nse = 18
145145
// CHECK-NEXT: dim = ( 9, 4 )
146146
// CHECK-NEXT: lvl = ( 4, 9 )
147-
// CHECK-NEXT: pos[0] : ( 0, 4
148-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
149-
// CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18
150-
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5
151-
// CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
147+
// CHECK-NEXT: pos[0] : ( 0, 4 )
148+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
149+
// CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18 )
150+
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5 )
151+
// CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1 )
152152
// CHECK-NEXT: ----
153153
//
154154
%4 = call @concat_sparse_sparse_perm(%sm24ccp, %sm34cd, %sm44dc)
@@ -173,11 +173,11 @@ module {
173173
// CHECK-NEXT: nse = 18
174174
// CHECK-NEXT: dim = ( 9, 4 )
175175
// CHECK-NEXT: lvl = ( 9, 4 )
176-
// CHECK-NEXT: pos[0] : ( 0, 9
177-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
178-
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18
179-
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1
180-
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
176+
// CHECK-NEXT: pos[0] : ( 0, 9 )
177+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
178+
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
179+
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
180+
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
181181
// CHECK-NEXT: ----
182182
//
183183
%6 = call @concat_mix_sparse_perm(%m24, %sm34cdp, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir

+10-10
Original file line numberDiff line numberDiff line change
@@ -116,11 +116,11 @@ module {
116116
// CHECK-NEXT: nse = 18
117117
// CHECK-NEXT: dim = ( 4, 9 )
118118
// CHECK-NEXT: lvl = ( 4, 9 )
119-
// CHECK-NEXT: pos[0] : ( 0, 4
120-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
121-
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
122-
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
123-
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
119+
// CHECK-NEXT: pos[0] : ( 0, 4 )
120+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
121+
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
122+
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
123+
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
124124
// CHECK-NEXT: ----
125125
//
126126
%8 = call @concat_sparse_sparse_dim1(%sm42cc, %sm43cd, %sm44dc)
@@ -140,11 +140,11 @@ module {
140140
// CHECK-NEXT: nse = 18
141141
// CHECK-NEXT: dim = ( 4, 9 )
142142
// CHECK-NEXT: lvl = ( 4, 9 )
143-
// CHECK-NEXT: pos[0] : ( 0, 4
144-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
145-
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
146-
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
147-
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
143+
// CHECK-NEXT: pos[0] : ( 0, 4 )
144+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
145+
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
146+
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
147+
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
148148
// CHECK-NEXT: ----
149149
//
150150
%10 = call @concat_mix_sparse_dim1(%m42, %sm43cd, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir

+10-10
Original file line numberDiff line numberDiff line change
@@ -130,11 +130,11 @@ module {
130130
// CHECK-NEXT: nse = 18
131131
// CHECK-NEXT: dim = ( 4, 9 )
132132
// CHECK-NEXT: lvl = ( 9, 4 )
133-
// CHECK-NEXT: pos[0] : ( 0, 9
134-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
135-
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18
136-
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0
137-
// CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
133+
// CHECK-NEXT: pos[0] : ( 0, 9 )
134+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
135+
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18 )
136+
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0 )
137+
// CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1 )
138138
// CHECK-NEXT: ----
139139
//
140140
%12 = call @concat_sparse_sparse_perm_dim1(%sm42ccp, %sm43cd, %sm44dc)
@@ -154,11 +154,11 @@ module {
154154
// CHECK-NEXT: nse = 18
155155
// CHECK-NEXT: dim = ( 4, 9 )
156156
// CHECK-NEXT: lvl = ( 4, 9 )
157-
// CHECK-NEXT: pos[0] : ( 0, 4
158-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
159-
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
160-
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
161-
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
157+
// CHECK-NEXT: pos[0] : ( 0, 4 )
158+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
159+
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
160+
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
161+
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
162162
// CHECK-NEXT: ----
163163
//
164164
%14 = call @concat_mix_sparse_perm_dim1(%m42, %sm43cdp, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir

+1-1
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ module {
108108
// CHECK-NEXT: nse = 25
109109
// CHECK-NEXT: dim = ( 5, 5 )
110110
// CHECK-NEXT: lvl = ( 5, 5 )
111-
// CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
111+
// CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10 )
112112
// CHECK-NEXT: ----
113113
//
114114
sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir

+1-1
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ module {
9595
// CHECK-NEXT: nse = 32
9696
// CHECK-NEXT: dim = ( 32 )
9797
// CHECK-NEXT: lvl = ( 32 )
98-
// CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
98+
// CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
9999
// CHECK-NEXT: ----
100100
//
101101
sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir

+1-1
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ module {
9696
// CHECK-NEXT: nse = 32
9797
// CHECK-NEXT: dim = ( 32 )
9898
// CHECK-NEXT: lvl = ( 32 )
99-
// CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
99+
// CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
100100
// CHECK-NEXT: ----
101101
//
102102
sparse_tensor.print %0 : tensor<?xf16, #DenseVector>

mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir

+14-14
Original file line numberDiff line numberDiff line change
@@ -161,11 +161,11 @@ module {
161161
// CHECK-NEXT: nse = 36
162162
// CHECK-NEXT: dim = ( 6, 6 )
163163
// CHECK-NEXT: lvl = ( 6, 6 )
164-
// CHECK-NEXT: pos[0] : ( 0, 6
165-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
166-
// CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
167-
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
168-
// CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
164+
// CHECK-NEXT: pos[0] : ( 0, 6 )
165+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5 )
166+
// CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36 )
167+
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5 )
168+
// CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0 )
169169
// CHECK-NEXT: ----
170170
//
171171
sparse_tensor.print %2 : tensor<6x6xi32, #DCSR>
@@ -177,9 +177,9 @@ module {
177177
// CHECK-NEXT: nse = 36
178178
// CHECK-NEXT: dim = ( 6, 6 )
179179
// CHECK-NEXT: lvl = ( 6, 6 )
180-
// CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
181-
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
182-
// CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
180+
// CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36 )
181+
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5 )
182+
// CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0 )
183183
// CHECK-NEXT: ----
184184
//
185185
sparse_tensor.print %3 : tensor<6x6xi32, #CSR>
@@ -191,9 +191,9 @@ module {
191191
// CHECK-NEXT: nse = 36
192192
// CHECK-NEXT: dim = ( 6, 6 )
193193
// CHECK-NEXT: lvl = ( 6, 6 )
194-
// CHECK-NEXT: pos[0] : ( 0, 6
195-
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
196-
// CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
194+
// CHECK-NEXT: pos[0] : ( 0, 6 )
195+
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5 )
196+
// CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0 )
197197
// CHECK-NEXT: ----
198198
//
199199
sparse_tensor.print %4 : tensor<6x6xi32, #CDR>
@@ -205,9 +205,9 @@ module {
205205
// CHECK-NEXT: nse = 36
206206
// CHECK-NEXT: dim = ( 6, 6 )
207207
// CHECK-NEXT: lvl = ( 6, 6 )
208-
// CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
209-
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
210-
// CHECK-NEXT: values : ( 0, -1, 0, -1, 0, 2, 0, 0, -1, 0, 0, -1, -1, 1, 1, 0, 3, 3, -6, 0, 0, 0, 6, 0, -1, 1, 0, 0, -3, -3, 6, 0, 0, 0, -6, 0
208+
// CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36 )
209+
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5 )
210+
// CHECK-NEXT: values : ( 0, -1, 0, -1, 0, 2, 0, 0, -1, 0, 0, -1, -1, 1, 1, 0, 3, 3, -6, 0, 0, 0, 6, 0, -1, 1, 0, 0, -3, -3, 6, 0, 0, 0, -6, 0 )
211211
// CHECK-NEXT: ----
212212
//
213213
sparse_tensor.print %5 : tensor<6x6xi32, #CSC>

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir

+6-6
Original file line numberDiff line numberDiff line change
@@ -120,18 +120,18 @@ module {
120120
// CHECK-NEXT: nse = 12
121121
// CHECK-NEXT: dim = ( 32 )
122122
// CHECK-NEXT: lvl = ( 32 )
123-
// CHECK-NEXT: pos[0] : ( 0, 12,
124-
// CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 20, 21, 28, 29, 31,
125-
// CHECK-NEXT: values : ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0,
123+
// CHECK-NEXT: pos[0] : ( 0, 12 )
124+
// CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 20, 21, 28, 29, 31 )
125+
// CHECK-NEXT: values : ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0 )
126126
// CHECK-NEXT: ----
127127
//
128128
// CHECK-NEXT: ---- Sparse Tensor ----
129129
// CHECK-NEXT: nse = 9
130130
// CHECK-NEXT: dim = ( 32 )
131131
// CHECK-NEXT: lvl = ( 32 )
132-
// CHECK-NEXT: pos[0] : ( 0, 9,
133-
// CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 21, 31,
134-
// CHECK-NEXT: values : ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647,
132+
// CHECK-NEXT: pos[0] : ( 0, 9 )
133+
// CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 21, 31 )
134+
// CHECK-NEXT: values : ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647 )
135135
// CHECK-NEXT: ----
136136
//
137137
sparse_tensor.print %0 : tensor<?xf64, #SparseVector>

0 commit comments

Comments (0)