420
421
422 // As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
423 // <fence>; <op>; <membar StoreLoad|StoreStore>
424 // But for load-linked/store-conditional based systems a fence here simply means
425 // no load/store can be reordered with respect to the initial load-linked, so we have:
426 // <membar StoreLoad|LoadLoad> ; load-linked; <op>; store-conditional; <membar StoreLoad|StoreStore>
427 // There are no memory actions in <op> so nothing further is needed.
428 //
429 // So we define the following for convenience:
430 #define MEMBAR_ATOMIC_OP_PRE \
431 MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::LoadLoad)
432 #define MEMBAR_ATOMIC_OP_POST \
433 MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::StoreStore)
434
435 // Note: JDK 9 only supports ARMv7+ so we always have ldrexd available even though the
436 // code below allows for it to be otherwise. The else clause indicates an ARMv5 system
437 // for which we do not support MP and so membars are not necessary. This ARMv5 code will
438 // be removed in the future.
439
440 // Support for jint Atomic::add(jint add_value, volatile jint *dest)
441 //
442 // Arguments :
443 //
444 // add_value: R0
445 // dest: R1
446 //
447 // Results:
448 //
449 // R0: the new value stored in dest
450 //
451 // Overwrites:
452 //
453 // R1, R2, R3
454 //
455 address generate_atomic_add() {
456 address start;
457
458 StubCodeMark mark(this, "StubRoutines", "atomic_add");
459 Label retry;
460 start = __ pc();
|
420
421
422 // As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
423 // <fence>; <op>; <membar StoreLoad|StoreStore>
424 // But for load-linked/store-conditional based systems a fence here simply means
425 // no load/store can be reordered with respect to the initial load-linked, so we have:
426 // <membar StoreLoad|LoadLoad> ; load-linked; <op>; store-conditional; <membar StoreLoad|StoreStore>
427 // There are no memory actions in <op> so nothing further is needed.
428 //
429 // So we define the following for convenience:
430 #define MEMBAR_ATOMIC_OP_PRE \
431 MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::LoadLoad)
432 #define MEMBAR_ATOMIC_OP_POST \
433 MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::StoreStore)
434
435 // Note: JDK 9 only supports ARMv7+ so we always have ldrexd available even though the
436 // code below allows for it to be otherwise. The else clause indicates an ARMv5 system
437 // for which we do not support MP and so membars are not necessary. This ARMv5 code will
438 // be removed in the future.
439
440 // Support for jint Atomic::add(volatile jint *dest, jint add_value)
441 //
442 // Arguments :
443 //
444 // add_value: R0
445 // dest: R1
446 //
447 // Results:
448 //
449 // R0: the new value stored in dest
450 //
451 // Overwrites:
452 //
453 // R1, R2, R3
454 //
455 address generate_atomic_add() {
456 address start;
457
458 StubCodeMark mark(this, "StubRoutines", "atomic_add");
459 Label retry;
460 start = __ pc();
|