@@ -12,7 +12,7 @@
 
 rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoswap.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -23,7 +23,7 @@ rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoadd.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -34,7 +34,7 @@ rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
     val = -val;
 #if __riscv_xlen == 32
     asm volatile ("amoadd.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
@@ -46,7 +46,7 @@ rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoxor.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -57,7 +57,7 @@ rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoand.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -68,7 +68,7 @@ rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoor.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -79,7 +79,7 @@ rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoxor.w %0, x0, (%1)" : "=r" (result) : "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -90,7 +90,7 @@ rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
 
 void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoswap.w %0, %1, (%2)" : "=r" (result) : "r" (val), "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -100,7 +100,7 @@ void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
     rt_atomic_t temp = 1;
 #if __riscv_xlen == 32
     asm volatile ("amoor.w %0, %1, (%2)" : "=r" (result) : "r" (temp), "r" (ptr) : "memory");
@@ -112,47 +112,47 @@ rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
 
 void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoand.w %0, x0, (%1)" : "=r" (result) : "r" (ptr) : "memory");
 #elif __riscv_xlen == 64
     asm volatile ("amoand.d %0, x0, (%1)" : "=r" (result) : "r" (ptr) : "memory");
 #endif
 }
 
-rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, volatile rt_atomic_t *old, rt_atomic_t new)
+rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
 {
     rt_atomic_t tmp = *old;
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile (
         " fence iorw, ow\n"
         "1: lr.w.aq %[result], (%[ptr])\n"
        
 " bne %[result], %[tmp], 2f\n"
-        " sc.w.rl %[tmp], %[new], (%[ptr])\n"
+        " sc.w.rl %[tmp], %[desired], (%[ptr])\n"
        
 " bnez %[tmp], 1b\n"
        
 " li %[result], 1\n"
        
 " j 3f\n"
        
 " 2:sw %[result], (%[old])\n"
        
 " li %[result], 0\n"
        
 " 3:\n"
        
 : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
-        : [new]"r" (new), [old]"r" (old)
+        : [desired]"r" (desired), [old]"r" (old)
        
 : "memory");
 #elif __riscv_xlen == 64
     asm volatile (
        
 " fence iorw, ow\n"
        
 "1: lr.d.aq %[result], (%[ptr])\n"
        
 " bne %[result], %[tmp], 2f\n"
-        " sc.d.rl %[tmp], %[new], (%[ptr])\n"
+        " sc.d.rl %[tmp], %[desired], (%[ptr])\n"
        
 " bnez %[tmp], 1b\n"
        
 " li %[result], 1\n"
        
 " j 3f\n"
        
 " 2:sd %[result], (%[old])\n"
        
 " li %[result], 0\n"
        
 " 3:\n"
        
 : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
-        : [new]"r" (new), [old]"r" (old)
+        : [desired]"r" (desired), [old]"r" (old)
        
 : "memory");
 #endif
     return result;
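Similarly, the flag pair above follows C11 atomic_flag semantics: rt_hw_atomic_flag_test_and_set returns the previous value (amoor.w/d yields the old memory contents), so a nonzero return means the flag was already held, and rt_hw_atomic_flag_clear atomically zeroes it with amoand against x0. A hedged sketch of a busy-wait lock on top of this, with illustrative names (demo_lock and the two helpers are not RT-Thread API):

static volatile rt_atomic_t demo_lock = 0;

static void demo_lock_acquire(void)
{
    /* Spin while the previous value was nonzero, i.e. another hart
     * already set the flag before us. */
    while (rt_hw_atomic_flag_test_and_set(&demo_lock) != 0)
    {
    }
}

static void demo_lock_release(void)
{
    /* Atomically clear the flag so a waiting hart can take it. */
    rt_hw_atomic_flag_clear(&demo_lock);
}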