36 //
37 // Used constraint modifiers:
38 // = write-only access: Value on entry to inline-assembler code irrelevant.
39 // + read/write access: Value on entry is used; on exit value is changed.
40 // read-only access: Value on entry is used and never changed.
41 // & early-clobber access: Might be modified before all read-only operands
42 // have been used.
43 // a address register operand (not GR0).
44 // d general register operand (including GR0)
45 // Q memory operand w/o index register.
46 // 0..9 operand reference (by operand position).
47 // Used for operands that fill multiple roles. One example would be a
48 // write-only operand receiving its initial value from a read-only operand.
49 // Refer to cmpxchg(..) operand #0 and variable cmp_val for a real-life example.
50 //
51
52 // On System z, all store operations are atomic if the address where the data is stored into
53 // is an integer multiple of the data length. Furthermore, all stores are ordered:
54 // a store which occurs conceptually before another store becomes visible to other CPUs
55 // before the other store becomes visible.
// Plain (non-volatile) destinations. Each body is a single aligned store,
// which s390 performs atomically for naturally aligned data (see note above).
inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
// void* flavor: cast the destination so the pointer value itself is stored.
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
62
// Volatile-destination variants; identical semantics, the qualifier only
// prevents the compiler from caching/reordering the store.
inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
69
70
71 //------------
72 // Atomic::add
73 //------------
74 // These methods force the value in memory to be augmented by the passed increment.
75 // Both, memory value and increment, are treated as 32bit signed binary integers.
76 // No overflow exceptions are recognized, and the condition code does not hold
77 // information about the value in memory.
78 //
79 // The value in memory is updated by using a compare-and-swap instruction. The
80 // instruction is retried as often as required.
81 //
82 // The return value of the method is the value that was successfully stored. At the
83 // time the caller receives back control, the value in memory may have changed already.
84
85 inline jint Atomic::add(jint inc, volatile jint*dest) {
86 unsigned int old, upd;
87
88 if (VM_Version::has_LoadAndALUAtomicV1()) {
89 __asm__ __volatile__ (
90 " LGFR 0,%[inc] \n\t" // save increment
91 " LA 3,%[mem] \n\t" // force data address into ARG2
92 // " LAA %[upd],%[inc],%[mem] \n\t" // increment and get old value
93 // " LAA 2,0,0(3) \n\t" // actually coded instruction
94 " .byte 0xeb \n\t" // LAA main opcode
95 " .byte 0x20 \n\t" // R1,R3
96 " .byte 0x30 \n\t" // R2,disp1
97 " .byte 0x00 \n\t" // disp2,disp3
98 " .byte 0x00 \n\t" // disp4,disp5
99 " .byte 0xf8 \n\t" // LAA minor opcode
100 " AR 2,0 \n\t" // calc new value in register
101 " LR %[upd],2 \n\t" // move to result register
102 //---< outputs >---
103 : [upd] "=&d" (upd) // write-only, updated counter value
104 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
105 //---< inputs >---
107 //---< clobbered >---
108 : "cc", "r0", "r2", "r3"
109 );
110 } else {
111 __asm__ __volatile__ (
112 " LLGF %[old],%[mem] \n\t" // get old value
113 "0: LA %[upd],0(%[inc],%[old]) \n\t" // calc result
114 " CS %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
115 " JNE 0b \n\t" // no success? -> retry
116 //---< outputs >---
117 : [old] "=&a" (old) // write-only, old counter value
118 , [upd] "=&d" (upd) // write-only, updated counter value
119 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
120 //---< inputs >---
121 : [inc] "a" (inc) // read-only.
122 //---< clobbered >---
123 : "cc"
124 );
125 }
126
127 return (jint)upd;
128 }
129
130
131 inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
132 unsigned long old, upd;
133
134 if (VM_Version::has_LoadAndALUAtomicV1()) {
135 __asm__ __volatile__ (
136 " LGR 0,%[inc] \n\t" // save increment
137 " LA 3,%[mem] \n\t" // force data address into ARG2
138 // " LAAG %[upd],%[inc],%[mem] \n\t" // increment and get old value
139 // " LAAG 2,0,0(3) \n\t" // actually coded instruction
140 " .byte 0xeb \n\t" // LAA main opcode
141 " .byte 0x20 \n\t" // R1,R3
142 " .byte 0x30 \n\t" // R2,disp1
143 " .byte 0x00 \n\t" // disp2,disp3
144 " .byte 0x00 \n\t" // disp4,disp5
145 " .byte 0xe8 \n\t" // LAA minor opcode
146 " AGR 2,0 \n\t" // calc new value in register
147 " LGR %[upd],2 \n\t" // move to result register
148 //---< outputs >---
149 : [upd] "=&d" (upd) // write-only, updated counter value
150 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
151 //---< inputs >---
153 //---< clobbered >---
154 : "cc", "r0", "r2", "r3"
155 );
156 } else {
157 __asm__ __volatile__ (
158 " LG %[old],%[mem] \n\t" // get old value
159 "0: LA %[upd],0(%[inc],%[old]) \n\t" // calc result
160 " CSG %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
161 " JNE 0b \n\t" // no success? -> retry
162 //---< outputs >---
163 : [old] "=&a" (old) // write-only, old counter value
164 , [upd] "=&d" (upd) // write-only, updated counter value
165 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
166 //---< inputs >---
167 : [inc] "a" (inc) // read-only.
168 //---< clobbered >---
169 : "cc"
170 );
171 }
172
173 return (intptr_t)upd;
174 }
175
176 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
177 return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
178 }
179
180
181 //------------
182 // Atomic::inc
183 //------------
184 // These methods force the value in memory to be incremented (augmented by 1).
185 // Both, memory value and increment, are treated as 32bit signed binary integers.
186 // No overflow exceptions are recognized, and the condition code does not hold
187 // information about the value in memory.
188 //
189 // The value in memory is updated by using a compare-and-swap instruction. The
190 // instruction is retried as often as required.
191
192 inline void Atomic::inc(volatile jint* dest) {
193 unsigned int old, upd;
194
195 if (VM_Version::has_LoadAndALUAtomicV1()) {
196 // tty->print_cr("Atomic::inc called... dest @%p", dest);
197 __asm__ __volatile__ (
198 " LGHI 2,1 \n\t" // load increment
199 " LA 3,%[mem] \n\t" // force data address into ARG2
200 // " LAA %[upd],%[inc],%[mem] \n\t" // increment and get old value
201 // " LAA 2,2,0(3) \n\t" // actually coded instruction
202 " .byte 0xeb \n\t" // LAA main opcode
203 " .byte 0x22 \n\t" // R1,R3
204 " .byte 0x30 \n\t" // R2,disp1
205 " .byte 0x00 \n\t" // disp2,disp3
206 " .byte 0x00 \n\t" // disp4,disp5
207 " .byte 0xf8 \n\t" // LAA minor opcode
208 " AGHI 2,1 \n\t" // calc new value in register
209 " LR %[upd],2 \n\t" // move to result register
210 //---< outputs >---
211 : [upd] "=&d" (upd) // write-only, updated counter value
212 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
217 : "cc", "r2", "r3"
218 );
219 } else {
220 __asm__ __volatile__ (
221 " LLGF %[old],%[mem] \n\t" // get old value
222 "0: LA %[upd],1(,%[old]) \n\t" // calc result
223 " CS %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
224 " JNE 0b \n\t" // no success? -> retry
225 //---< outputs >---
226 : [old] "=&a" (old) // write-only, old counter value
227 , [upd] "=&d" (upd) // write-only, updated counter value
228 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
229 //---< inputs >---
230 :
231 //---< clobbered >---
232 : "cc"
233 );
234 }
235 }
236
237 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
238 unsigned long old, upd;
239
240 if (VM_Version::has_LoadAndALUAtomicV1()) {
241 __asm__ __volatile__ (
242 " LGHI 2,1 \n\t" // load increment
243 " LA 3,%[mem] \n\t" // force data address into ARG2
244 // " LAAG %[upd],%[inc],%[mem] \n\t" // increment and get old value
245 // " LAAG 2,2,0(3) \n\t" // actually coded instruction
246 " .byte 0xeb \n\t" // LAA main opcode
247 " .byte 0x22 \n\t" // R1,R3
248 " .byte 0x30 \n\t" // R2,disp1
249 " .byte 0x00 \n\t" // disp2,disp3
250 " .byte 0x00 \n\t" // disp4,disp5
251 " .byte 0xe8 \n\t" // LAA minor opcode
252 " AGHI 2,1 \n\t" // calc new value in register
253 " LR %[upd],2 \n\t" // move to result register
254 //---< outputs >---
255 : [upd] "=&d" (upd) // write-only, updated counter value
256 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
257 //---< inputs >---
261 : "cc", "r2", "r3"
262 );
263 } else {
264 __asm__ __volatile__ (
265 " LG %[old],%[mem] \n\t" // get old value
266 "0: LA %[upd],1(,%[old]) \n\t" // calc result
267 " CSG %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
268 " JNE 0b \n\t" // no success? -> retry
269 //---< outputs >---
270 : [old] "=&a" (old) // write-only, old counter value
271 , [upd] "=&d" (upd) // write-only, updated counter value
272 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
273 //---< inputs >---
274 :
275 //---< clobbered >---
276 : "cc"
277 );
278 }
279 }
280
281 inline void Atomic::inc_ptr(volatile void* dest) {
282 inc_ptr((volatile intptr_t*)dest);
283 }
284
285 //------------
286 // Atomic::dec
287 //------------
288 // These methods force the value in memory to be decremented (augmented by -1).
289 // Both, memory value and decrement, are treated as 32bit signed binary integers.
290 // No overflow exceptions are recognized, and the condition code does not hold
291 // information about the value in memory.
292 //
293 // The value in memory is updated by using a compare-and-swap instruction. The
294 // instruction is retried as often as required.
295
296 inline void Atomic::dec(volatile jint* dest) {
297 unsigned int old, upd;
298
299 if (VM_Version::has_LoadAndALUAtomicV1()) {
300 __asm__ __volatile__ (
301 " LGHI 2,-1 \n\t" // load increment
302 " LA 3,%[mem] \n\t" // force data address into ARG2
303 // " LAA %[upd],%[inc],%[mem] \n\t" // increment and get old value
304 // " LAA 2,2,0(3) \n\t" // actually coded instruction
305 " .byte 0xeb \n\t" // LAA main opcode
306 " .byte 0x22 \n\t" // R1,R3
307 " .byte 0x30 \n\t" // R2,disp1
308 " .byte 0x00 \n\t" // disp2,disp3
309 " .byte 0x00 \n\t" // disp4,disp5
310 " .byte 0xf8 \n\t" // LAA minor opcode
311 " AGHI 2,-1 \n\t" // calc new value in register
312 " LR %[upd],2 \n\t" // move to result register
313 //---< outputs >---
314 : [upd] "=&d" (upd) // write-only, updated counter value
315 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
316 //---< inputs >---
323 __asm__ __volatile__ (
324 " LLGF %[old],%[mem] \n\t" // get old value
325 // LAY not supported by inline assembler
326 // "0: LAY %[upd],-1(,%[old]) \n\t" // calc result
327 "0: LR %[upd],%[old] \n\t" // calc result
328 " AHI %[upd],-1 \n\t"
329 " CS %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
330 " JNE 0b \n\t" // no success? -> retry
331 //---< outputs >---
332 : [old] "=&a" (old) // write-only, old counter value
333 , [upd] "=&d" (upd) // write-only, updated counter value
334 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
335 //---< inputs >---
336 :
337 //---< clobbered >---
338 : "cc"
339 );
340 }
341 }
342
343 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
344 unsigned long old, upd;
345
346 if (VM_Version::has_LoadAndALUAtomicV1()) {
347 __asm__ __volatile__ (
348 " LGHI 2,-1 \n\t" // load increment
349 " LA 3,%[mem] \n\t" // force data address into ARG2
350 // " LAAG %[upd],%[inc],%[mem] \n\t" // increment and get old value
351 // " LAAG 2,2,0(3) \n\t" // actually coded instruction
352 " .byte 0xeb \n\t" // LAA main opcode
353 " .byte 0x22 \n\t" // R1,R3
354 " .byte 0x30 \n\t" // R2,disp1
355 " .byte 0x00 \n\t" // disp2,disp3
356 " .byte 0x00 \n\t" // disp4,disp5
357 " .byte 0xe8 \n\t" // LAA minor opcode
358 " AGHI 2,-1 \n\t" // calc new value in register
359 " LR %[upd],2 \n\t" // move to result register
360 //---< outputs >---
361 : [upd] "=&d" (upd) // write-only, updated counter value
362 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
363 //---< inputs >---
370 __asm__ __volatile__ (
371 " LG %[old],%[mem] \n\t" // get old value
372 // LAY not supported by inline assembler
373 // "0: LAY %[upd],-1(,%[old]) \n\t" // calc result
374 "0: LGR %[upd],%[old] \n\t" // calc result
375 " AGHI %[upd],-1 \n\t"
376 " CSG %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
377 " JNE 0b \n\t" // no success? -> retry
378 //---< outputs >---
379 : [old] "=&a" (old) // write-only, old counter value
380 , [upd] "=&d" (upd) // write-only, updated counter value
381 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
382 //---< inputs >---
383 :
384 //---< clobbered >---
385 : "cc"
386 );
387 }
388 }
389
390 inline void Atomic::dec_ptr(volatile void* dest) {
391 dec_ptr((volatile intptr_t*)dest);
392 }
393
394 //-------------
395 // Atomic::xchg
396 //-------------
397 // These methods force the value in memory to be replaced by the new value passed
398 // in as argument.
399 //
400 // The value in memory is replaced by using a compare-and-swap instruction. The
401 // instruction is retried as often as required. This makes sure that the new
402 // value can be seen, at least for a very short period of time, by other CPUs.
403 //
404 // If we would use a normal "load(old value) store(new value)" sequence,
405 // the new value could be lost unnoticed, due to a store(new value) from
406 // another thread.
407 //
408 // The return value is the (unchanged) value from memory as it was when the
409 // replacement succeeded.
// Atomically replace *dest with xchg_val (32 bit) and return the value that
// was replaced. Implemented as a CS retry loop: reload on failure until the
// swap succeeds, so the returned 'old' is the value actually displaced.
inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
  unsigned int old;

  __asm__ __volatile__ (
    " LLGF %[old],%[mem] \n\t" // get old value
    "0: CS %[old],%[upd],%[mem] \n\t" // try to xchg upd with mem
    " JNE 0b \n\t" // no success? -> retry
    //---< outputs >---
    : [old] "=&d" (old) // write-only, prev value irrelevant
    , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
    //---< inputs >---
    : [upd] "d" (xchg_val) // read-only, value to be written to memory
    //---< clobbered >---
    : "cc" // CS sets the condition code
  );

  return (jint)old;
}
428
// Atomically replace *dest with xchg_val (64 bit) and return the value that
// was replaced. Same CSG retry-loop structure as the 32-bit xchg above.
inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
  unsigned long old;

  __asm__ __volatile__ (
    " LG %[old],%[mem] \n\t" // get old value
    "0: CSG %[old],%[upd],%[mem] \n\t" // try to xchg upd with mem
    " JNE 0b \n\t" // no success? -> retry
    //---< outputs >---
    : [old] "=&d" (old) // write-only, init from memory
    , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
    //---< inputs >---
    : [upd] "d" (xchg_val) // read-only, value to be written to memory
    //---< clobbered >---
    : "cc" // CSG sets the condition code
  );

  return (intptr_t)old;
}
447
448 inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
449 return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
450 }
451
452 //----------------
453 // Atomic::cmpxchg
454 //----------------
455 // These methods compare the value in memory with a given compare value.
456 // If both values compare equal, the value in memory is replaced with
457 // the exchange value.
458 //
459 // The value in memory is compared and replaced by using a compare-and-swap
460 // instruction. The instruction is NOT retried (one shot only).
461 //
462 // The return value is the (unchanged) value from memory as it was when the
463 // compare-and-swap instruction completed. A successful exchange operation
464 // is indicated by (return value == compare_value). If unsuccessful, a new
465 // exchange value can be calculated based on the return value which is the
466 // latest contents of the memory location.
467 //
468 // Inspecting the return value is the only way for the caller to determine
469 // if the compare-and-swap instruction was successful:
470 // - If return value and compare value compare equal, the compare-and-swap
471 // instruction was successful and the value in memory was replaced by the
472 // exchange value.
473 // - If return value and compare value compare unequal, the compare-and-swap
474 // instruction was not successful. The value in memory was left unchanged.
475 //
476 // The s390 processors always fence before and after the csg instructions.
477 // Thus we ignore the memory ordering argument. The docu says: "A serialization
478 // function is performed before the operand is fetched and again after the
479 // operation is completed."
480
481 jint Atomic::cmpxchg(jint xchg_val, volatile jint* dest, jint cmp_val, cmpxchg_memory_order unused) {
482 unsigned long old;
483
484 __asm__ __volatile__ (
485 " CS %[old],%[upd],%[mem] \n\t" // Try to xchg upd with mem.
486 // outputs
487 : [old] "=&d" (old) // Write-only, prev value irrelevant.
488 , [mem] "+Q" (*dest) // Read/write, memory to be updated atomically.
489 // inputs
490 : [upd] "d" (xchg_val)
491 , "0" (cmp_val) // Read-only, initial value for [old] (operand #0).
492 // clobbered
493 : "cc"
494 );
495
496 return (jint)old;
497 }
498
499 jlong Atomic::cmpxchg(jlong xchg_val, volatile jlong* dest, jlong cmp_val, cmpxchg_memory_order unused) {
500 unsigned long old;
501
502 __asm__ __volatile__ (
503 " CSG %[old],%[upd],%[mem] \n\t" // Try to xchg upd with mem.
504 // outputs
505 : [old] "=&d" (old) // Write-only, prev value irrelevant.
506 , [mem] "+Q" (*dest) // Read/write, memory to be updated atomically.
507 // inputs
508 : [upd] "d" (xchg_val)
509 , "0" (cmp_val) // Read-only, initial value for [old] (operand #0).
510 // clobbered
511 : "cc"
512 );
513
514 return (jlong)old;
515 }
516
517 void* Atomic::cmpxchg_ptr(void *xchg_val, volatile void* dest, void* cmp_val, cmpxchg_memory_order unused) {
518 return (void*)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
519 }
520
521 intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_t cmp_val, cmpxchg_memory_order unused) {
522 return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
523 }
524
// Plain volatile read; relies on naturally aligned 8-byte accesses being
// atomic on this platform (analogous to the store note at the top of the file).
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
526
527 #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
|
36 //
37 // Used constraint modifiers:
38 // = write-only access: Value on entry to inline-assembler code irrelevant.
39 // + read/write access: Value on entry is used; on exit value is changed.
40 // read-only access: Value on entry is used and never changed.
41 // & early-clobber access: Might be modified before all read-only operands
42 // have been used.
43 // a address register operand (not GR0).
44 // d general register operand (including GR0)
45 // Q memory operand w/o index register.
46 // 0..9 operand reference (by operand position).
47 // Used for operands that fill multiple roles. One example would be a
48 // write-only operand receiving its initial value from a read-only operand.
49 // Refer to cmpxchg(..) operand #0 and variable cmp_val for a real-life example.
50 //
51
52 // On System z, all store operations are atomic if the address where the data is stored into
53 // is an integer multiple of the data length. Furthermore, all stores are ordered:
54 // a store which occurs conceptually before another store becomes visible to other CPUs
55 // before the other store becomes visible.
56
57
58 //------------
59 // Atomic::add
60 //------------
61 // These methods force the value in memory to be augmented by the passed increment.
62 // Both, memory value and increment, are treated as 32bit signed binary integers.
63 // No overflow exceptions are recognized, and the condition code does not hold
64 // information about the value in memory.
65 //
66 // The value in memory is updated by using a compare-and-swap instruction. The
67 // instruction is retried as often as required.
68 //
69 // The return value of the method is the value that was successfully stored. At the
70 // time the caller receives back control, the value in memory may have changed already.
71
72 template <>
73 inline int32_t Atomic::specialized_add<int32_t>(int32_t inc, volatile int32_t* dest) {
74 unsigned int old, upd;
75
76 if (VM_Version::has_LoadAndALUAtomicV1()) {
77 __asm__ __volatile__ (
78 " LGFR 0,%[inc] \n\t" // save increment
79 " LA 3,%[mem] \n\t" // force data address into ARG2
80 // " LAA %[upd],%[inc],%[mem] \n\t" // increment and get old value
81 // " LAA 2,0,0(3) \n\t" // actually coded instruction
82 " .byte 0xeb \n\t" // LAA main opcode
83 " .byte 0x20 \n\t" // R1,R3
84 " .byte 0x30 \n\t" // R2,disp1
85 " .byte 0x00 \n\t" // disp2,disp3
86 " .byte 0x00 \n\t" // disp4,disp5
87 " .byte 0xf8 \n\t" // LAA minor opcode
88 " AR 2,0 \n\t" // calc new value in register
89 " LR %[upd],2 \n\t" // move to result register
90 //---< outputs >---
91 : [upd] "=&d" (upd) // write-only, updated counter value
92 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
93 //---< inputs >---
95 //---< clobbered >---
96 : "cc", "r0", "r2", "r3"
97 );
98 } else {
99 __asm__ __volatile__ (
100 " LLGF %[old],%[mem] \n\t" // get old value
101 "0: LA %[upd],0(%[inc],%[old]) \n\t" // calc result
102 " CS %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
103 " JNE 0b \n\t" // no success? -> retry
104 //---< outputs >---
105 : [old] "=&a" (old) // write-only, old counter value
106 , [upd] "=&d" (upd) // write-only, updated counter value
107 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
108 //---< inputs >---
109 : [inc] "a" (inc) // read-only.
110 //---< clobbered >---
111 : "cc"
112 );
113 }
114
115 return (int32_t)upd;
116 }
117
118
119 template <>
120 inline int64_t Atomic::specialized_add<int64_t>(int64_t inc, volatile int64_t* dest) {
121 unsigned long old, upd;
122
123 if (VM_Version::has_LoadAndALUAtomicV1()) {
124 __asm__ __volatile__ (
125 " LGR 0,%[inc] \n\t" // save increment
126 " LA 3,%[mem] \n\t" // force data address into ARG2
127 // " LAAG %[upd],%[inc],%[mem] \n\t" // increment and get old value
128 // " LAAG 2,0,0(3) \n\t" // actually coded instruction
129 " .byte 0xeb \n\t" // LAA main opcode
130 " .byte 0x20 \n\t" // R1,R3
131 " .byte 0x30 \n\t" // R2,disp1
132 " .byte 0x00 \n\t" // disp2,disp3
133 " .byte 0x00 \n\t" // disp4,disp5
134 " .byte 0xe8 \n\t" // LAA minor opcode
135 " AGR 2,0 \n\t" // calc new value in register
136 " LGR %[upd],2 \n\t" // move to result register
137 //---< outputs >---
138 : [upd] "=&d" (upd) // write-only, updated counter value
139 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
140 //---< inputs >---
142 //---< clobbered >---
143 : "cc", "r0", "r2", "r3"
144 );
145 } else {
146 __asm__ __volatile__ (
147 " LG %[old],%[mem] \n\t" // get old value
148 "0: LA %[upd],0(%[inc],%[old]) \n\t" // calc result
149 " CSG %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
150 " JNE 0b \n\t" // no success? -> retry
151 //---< outputs >---
152 : [old] "=&a" (old) // write-only, old counter value
153 , [upd] "=&d" (upd) // write-only, updated counter value
154 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
155 //---< inputs >---
156 : [inc] "a" (inc) // read-only.
157 //---< clobbered >---
158 : "cc"
159 );
160 }
161
162 return (int64_t)upd;
163 }
164
//------------
// Atomic::inc
//------------
169 // These methods force the value in memory to be incremented (augmented by 1).
170 // Both, memory value and increment, are treated as 32bit signed binary integers.
171 // No overflow exceptions are recognized, and the condition code does not hold
172 // information about the value in memory.
173 //
174 // The value in memory is updated by using a compare-and-swap instruction. The
175 // instruction is retried as often as required.
176
177 template <>
178 inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
179 unsigned int old, upd;
180
181 if (VM_Version::has_LoadAndALUAtomicV1()) {
182 // tty->print_cr("Atomic::inc called... dest @%p", dest);
183 __asm__ __volatile__ (
184 " LGHI 2,1 \n\t" // load increment
185 " LA 3,%[mem] \n\t" // force data address into ARG2
186 // " LAA %[upd],%[inc],%[mem] \n\t" // increment and get old value
187 // " LAA 2,2,0(3) \n\t" // actually coded instruction
188 " .byte 0xeb \n\t" // LAA main opcode
189 " .byte 0x22 \n\t" // R1,R3
190 " .byte 0x30 \n\t" // R2,disp1
191 " .byte 0x00 \n\t" // disp2,disp3
192 " .byte 0x00 \n\t" // disp4,disp5
193 " .byte 0xf8 \n\t" // LAA minor opcode
194 " AGHI 2,1 \n\t" // calc new value in register
195 " LR %[upd],2 \n\t" // move to result register
196 //---< outputs >---
197 : [upd] "=&d" (upd) // write-only, updated counter value
198 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
203 : "cc", "r2", "r3"
204 );
205 } else {
206 __asm__ __volatile__ (
207 " LLGF %[old],%[mem] \n\t" // get old value
208 "0: LA %[upd],1(,%[old]) \n\t" // calc result
209 " CS %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
210 " JNE 0b \n\t" // no success? -> retry
211 //---< outputs >---
212 : [old] "=&a" (old) // write-only, old counter value
213 , [upd] "=&d" (upd) // write-only, updated counter value
214 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
215 //---< inputs >---
216 :
217 //---< clobbered >---
218 : "cc"
219 );
220 }
221 }
222
223 template <>
224 inline void Atomic::specialized_inc<int64_t>(volatile int64_t* dest) {
225 unsigned long old, upd;
226
227 if (VM_Version::has_LoadAndALUAtomicV1()) {
228 __asm__ __volatile__ (
229 " LGHI 2,1 \n\t" // load increment
230 " LA 3,%[mem] \n\t" // force data address into ARG2
231 // " LAAG %[upd],%[inc],%[mem] \n\t" // increment and get old value
232 // " LAAG 2,2,0(3) \n\t" // actually coded instruction
233 " .byte 0xeb \n\t" // LAA main opcode
234 " .byte 0x22 \n\t" // R1,R3
235 " .byte 0x30 \n\t" // R2,disp1
236 " .byte 0x00 \n\t" // disp2,disp3
237 " .byte 0x00 \n\t" // disp4,disp5
238 " .byte 0xe8 \n\t" // LAA minor opcode
239 " AGHI 2,1 \n\t" // calc new value in register
240 " LR %[upd],2 \n\t" // move to result register
241 //---< outputs >---
242 : [upd] "=&d" (upd) // write-only, updated counter value
243 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
244 //---< inputs >---
248 : "cc", "r2", "r3"
249 );
250 } else {
251 __asm__ __volatile__ (
252 " LG %[old],%[mem] \n\t" // get old value
253 "0: LA %[upd],1(,%[old]) \n\t" // calc result
254 " CSG %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
255 " JNE 0b \n\t" // no success? -> retry
256 //---< outputs >---
257 : [old] "=&a" (old) // write-only, old counter value
258 , [upd] "=&d" (upd) // write-only, updated counter value
259 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
260 //---< inputs >---
261 :
262 //---< clobbered >---
263 : "cc"
264 );
265 }
266 }
267
268
269 //------------
270 // Atomic::dec
271 //------------
272 // These methods force the value in memory to be decremented (augmented by -1).
273 // Both, memory value and decrement, are treated as 32bit signed binary integers.
274 // No overflow exceptions are recognized, and the condition code does not hold
275 // information about the value in memory.
276 //
277 // The value in memory is updated by using a compare-and-swap instruction. The
278 // instruction is retried as often as required.
279
280 template <>
281 inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
282 unsigned int old, upd;
283
284 if (VM_Version::has_LoadAndALUAtomicV1()) {
285 __asm__ __volatile__ (
286 " LGHI 2,-1 \n\t" // load increment
287 " LA 3,%[mem] \n\t" // force data address into ARG2
288 // " LAA %[upd],%[inc],%[mem] \n\t" // increment and get old value
289 // " LAA 2,2,0(3) \n\t" // actually coded instruction
290 " .byte 0xeb \n\t" // LAA main opcode
291 " .byte 0x22 \n\t" // R1,R3
292 " .byte 0x30 \n\t" // R2,disp1
293 " .byte 0x00 \n\t" // disp2,disp3
294 " .byte 0x00 \n\t" // disp4,disp5
295 " .byte 0xf8 \n\t" // LAA minor opcode
296 " AGHI 2,-1 \n\t" // calc new value in register
297 " LR %[upd],2 \n\t" // move to result register
298 //---< outputs >---
299 : [upd] "=&d" (upd) // write-only, updated counter value
300 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
301 //---< inputs >---
308 __asm__ __volatile__ (
309 " LLGF %[old],%[mem] \n\t" // get old value
310 // LAY not supported by inline assembler
311 // "0: LAY %[upd],-1(,%[old]) \n\t" // calc result
312 "0: LR %[upd],%[old] \n\t" // calc result
313 " AHI %[upd],-1 \n\t"
314 " CS %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
315 " JNE 0b \n\t" // no success? -> retry
316 //---< outputs >---
317 : [old] "=&a" (old) // write-only, old counter value
318 , [upd] "=&d" (upd) // write-only, updated counter value
319 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
320 //---< inputs >---
321 :
322 //---< clobbered >---
323 : "cc"
324 );
325 }
326 }
327
328 template <>
329 inline void Atomic::specialized_dec<int64_t>(volatile int64_t* dest) {
330 unsigned long old, upd;
331
332 if (VM_Version::has_LoadAndALUAtomicV1()) {
333 __asm__ __volatile__ (
334 " LGHI 2,-1 \n\t" // load increment
335 " LA 3,%[mem] \n\t" // force data address into ARG2
336 // " LAAG %[upd],%[inc],%[mem] \n\t" // increment and get old value
337 // " LAAG 2,2,0(3) \n\t" // actually coded instruction
338 " .byte 0xeb \n\t" // LAA main opcode
339 " .byte 0x22 \n\t" // R1,R3
340 " .byte 0x30 \n\t" // R2,disp1
341 " .byte 0x00 \n\t" // disp2,disp3
342 " .byte 0x00 \n\t" // disp4,disp5
343 " .byte 0xe8 \n\t" // LAA minor opcode
344 " AGHI 2,-1 \n\t" // calc new value in register
345 " LR %[upd],2 \n\t" // move to result register
346 //---< outputs >---
347 : [upd] "=&d" (upd) // write-only, updated counter value
348 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
349 //---< inputs >---
356 __asm__ __volatile__ (
357 " LG %[old],%[mem] \n\t" // get old value
358 // LAY not supported by inline assembler
359 // "0: LAY %[upd],-1(,%[old]) \n\t" // calc result
360 "0: LGR %[upd],%[old] \n\t" // calc result
361 " AGHI %[upd],-1 \n\t"
362 " CSG %[old],%[upd],%[mem] \n\t" // try to xchg res with mem
363 " JNE 0b \n\t" // no success? -> retry
364 //---< outputs >---
365 : [old] "=&a" (old) // write-only, old counter value
366 , [upd] "=&d" (upd) // write-only, updated counter value
367 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
368 //---< inputs >---
369 :
370 //---< clobbered >---
371 : "cc"
372 );
373 }
374 }
375
376
377 //-------------
378 // Atomic::xchg
379 //-------------
380 // These methods force the value in memory to be replaced by the new value passed
381 // in as argument.
382 //
383 // The value in memory is replaced by using a compare-and-swap instruction. The
384 // instruction is retried as often as required. This makes sure that the new
385 // value can be seen, at least for a very short period of time, by other CPUs.
386 //
387 // If we would use a normal "load(old value) store(new value)" sequence,
388 // the new value could be lost unnoticed, due to a store(new value) from
389 // another thread.
390 //
391 // The return value is the (unchanged) value from memory as it was when the
392 // replacement succeeded.
// Atomically replace *dest with xchg_val (32 bit) and return the displaced
// value. CS retry loop: reload on failure until the swap succeeds.
template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t xchg_val, volatile int32_t* dest) {
  unsigned int old;

  __asm__ __volatile__ (
    " LLGF %[old],%[mem] \n\t" // get old value
    "0: CS %[old],%[upd],%[mem] \n\t" // try to xchg upd with mem
    " JNE 0b \n\t" // no success? -> retry
    //---< outputs >---
    : [old] "=&d" (old) // write-only, prev value irrelevant
    , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
    //---< inputs >---
    : [upd] "d" (xchg_val) // read-only, value to be written to memory
    //---< clobbered >---
    : "cc" // CS sets the condition code
  );

  return (int32_t)old;
}
412
413 template <>
414 inline int64_t Atomic::specialized_xchg<int64_t>(int64_t xchg_val, volatile int64_t* dest) {
415 unsigned long old;
416
417 __asm__ __volatile__ (
418 " LG %[old],%[mem] \n\t" // get old value
419 "0: CSG %[old],%[upd],%[mem] \n\t" // try to xchg upd with mem
420 " JNE 0b \n\t" // no success? -> retry
421 //---< outputs >---
422 : [old] "=&d" (old) // write-only, init from memory
423 , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
424 //---< inputs >---
425 : [upd] "d" (xchg_val) // read-only, value to be written to memory
426 //---< clobbered >---
427 : "cc"
428 );
429
430 return (intptr_t)old;
431 }
432
433
434 //----------------
435 // Atomic::cmpxchg
436 //----------------
437 // These methods compare the value in memory with a given compare value.
438 // If both values compare equal, the value in memory is replaced with
439 // the exchange value.
440 //
441 // The value in memory is compared and replaced by using a compare-and-swap
442 // instruction. The instruction is NOT retried (one shot only).
443 //
444 // The return value is the (unchanged) value from memory as it was when the
445 // compare-and-swap instruction completed. A successful exchange operation
446 // is indicated by (return value == compare_value). If unsuccessful, a new
447 // exchange value can be calculated based on the return value which is the
448 // latest contents of the memory location.
449 //
450 // Inspecting the return value is the only way for the caller to determine
451 // if the compare-and-swap instruction was successful:
452 // - If return value and compare value compare equal, the compare-and-swap
453 // instruction was successful and the value in memory was replaced by the
454 // exchange value.
455 // - If return value and compare value compare unequal, the compare-and-swap
456 // instruction was not successful. The value in memory was left unchanged.
457 //
// The s390 processors always fence before and after the CS/CSG instructions.
// Thus we ignore the memory ordering argument. The architecture documentation
// states: "A serialization function is performed before the operand is fetched
// and again after the operation is completed."
462
// 32-bit compare-and-swap (one shot, no retry; see section comment above).
// Atomically: if (*dest == cmp_val) { *dest = xchg_val; }
// Returns the memory contents observed by CS: equal to cmp_val on success,
// otherwise the (unchanged) current value at 'dest'.
// The 'order' argument is ignored (CS serializes before and after, see above).
template <>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t xchg_val, volatile int32_t* dest, int32_t cmp_val, cmpxchg_memory_order order) {
  unsigned long old;   // Only the low-order 32 bits are meaningful; see cast below.

  __asm__ __volatile__ (
    "   CS   %[old],%[upd],%[mem]   \n\t" // Try to xchg upd with mem.
    // outputs
    : [old] "=&d" (old)      // Early-clobber; initialized from cmp_val via the "0" tie below.
    , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
    // inputs
    : [upd] "d"   (xchg_val) // Read-only, replacement value.
    , "0"   (cmp_val)        // Read-only, initial value for [old] (operand #0).
    // clobbered
    : "cc"                   // CS sets the condition code.
  );

  // Truncating cast: CS operates on 32-bit operands only.
  return (int32_t)old;
}
481
// 64-bit compare-and-swap (one shot, no retry; see section comment above).
// Atomically: if (*dest == cmp_val) { *dest = xchg_val; }
// Returns the memory contents observed by CSG: equal to cmp_val on success,
// otherwise the (unchanged) current value at 'dest'.
// The 'order' argument is ignored (CSG serializes before and after, see above).
template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t xchg_val, volatile int64_t* dest, int64_t cmp_val, cmpxchg_memory_order order) {
  unsigned long old;   // Previous memory contents; returned (cast) below.

  __asm__ __volatile__ (
    "   CSG  %[old],%[upd],%[mem]   \n\t" // Try to xchg upd with mem.
    // outputs
    : [old] "=&d" (old)      // Early-clobber; initialized from cmp_val via the "0" tie below.
    , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
    // inputs
    : [upd] "d"   (xchg_val) // Read-only, replacement value.
    , "0"   (cmp_val)        // Read-only, initial value for [old] (operand #0).
    // clobbered
    : "cc"                   // CSG sets the condition code.
  );

  return (int64_t)old;
}
500
501 #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
|