// As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
// <fence>; <op>; <membar StoreLoad|StoreStore>
// But for load-linked/store-conditional based systems a fence here simply means
// no load/store can be reordered with respect to the initial load-linked, so we have:
// <membar StoreLoad|LoadLoad>; load-linked; <op>; store-conditional; <membar StoreLoad|StoreStore>
// There are no memory actions in <op> so nothing further is needed.
//
// So we define the following for convenience:
#define MEMBAR_ATOMIC_OP_PRE \
  MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::LoadLoad)
#define MEMBAR_ATOMIC_OP_POST \
  MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::StoreStore)
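//
// For illustration only, a sketch of what the ldrex/strex path of the Atomic::add stub
// below boils down to at the instruction level (assuming the membar masks are emitted
// as plain dmb instructions; the real encoding is chosen by MacroAssembler::membar):
//
//            dmb                       // MEMBAR_ATOMIC_OP_PRE
//   retry:   ldrex   r3, [r1]          // load-linked the current value
//            add     r3, r0, r3        // <op>: add_value + old value
//            strex   r2, r3, [r1]      // store-conditional the new value
//            cmp     r2, #0
//            bne     retry             // lost the reservation, try again
//            mov     r0, r3            // return the new value
//            dmb                       // MEMBAR_ATOMIC_OP_POST
//            bx      lr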
// Note: JDK 9 only supports ARMv7+ so we always have ldrexd available even though the
// code below allows for it to be otherwise. The else clause indicates an ARMv5 system
// for which we do not support MP and so membars are not necessary. This ARMv5 code will
// be removed in the future.

// Support for jint Atomic::add(jint add_value, volatile jint *dest)
//
// Arguments :
//
//      add_value:      R0
//      dest:           R1
//
// Results:
//
//     R0: the new value stored in dest
//
// Overwrites:
//
//     R1, R2, R3
//
address generate_atomic_add() {
  address start;

  StubCodeMark mark(this, "StubRoutines", "atomic_add");
  Label retry;
  start = __ pc();
  Register addval    = R0;
  Register dest      = R1;
  Register prev      = R2;
  Register ok        = R2;
  Register newval    = R3;

  if (VM_Version::supports_ldrex()) {
    // ldrex/strex retry loop bracketed by the barriers described above.
    __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
    __ bind(retry);
    __ ldrex(newval, Address(dest));
    __ add(newval, addval, newval);
    __ strex(ok, newval, Address(dest));
    __ cmp(ok, 0);
    __ b(retry, ne);                      // store-conditional failed, retry
    __ mov (R0, newval);
    __ membar(MEMBAR_ATOMIC_OP_POST, prev);
  } else {
    // ARMv5 (no MP support): CAS-based fallback, no membars needed.
    __ bind(retry);
    __ ldr (prev, Address(dest));
    __ add(newval, addval, prev);
    __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
    __ b(retry, ne);                      // CAS failed, retry
    __ mov (R0, newval);
  }
  __ bx(LR);

  return start;
}
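// Usage note: the address returned above is typically installed during stub
// generation, e.g. (assuming the usual HotSpot wiring, which lives elsewhere):
//   StubRoutines::_atomic_add_entry = generate_atomic_add();
// so that the shared Atomic::add implementation can call through the stub.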
// Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
//
// Arguments :
//
//      exchange_value: R0
//      dest:           R1
//
// Results:
//
//     R0: the value previously stored in dest
//
// Overwrites:
//
//     R1, R2, R3
//
address generate_atomic_xchg() {
  address start;

  StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
  start = __ pc();
  Register newval    = R0;
  Register dest      = R1;
  Register prev      = R2;

  Label retry;

  if (VM_Version::supports_ldrex()) {
    Register ok = R3;
    // ldrex/strex retry loop bracketed by the barriers described above.
    __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
    __ bind(retry);
    __ ldrex(prev, Address(dest));
    __ strex(ok, newval, Address(dest));
    __ cmp(ok, 0);
    __ b(retry, ne);                      // store-conditional failed, retry
    __ mov (R0, prev);
    __ membar(MEMBAR_ATOMIC_OP_POST, prev);
  } else {
    // ARMv5 (no MP support): CAS-based fallback, no membars needed.
    __ bind(retry);
    __ ldr (prev, Address(dest));
    __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
    __ b(retry, ne);                      // CAS failed, retry
    __ mov (R0, prev);
  }
  __ bx(LR);

  return start;
}
// Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
//
// Arguments :
//
//      compare_value:  R0
//      exchange_value: R1
//      dest:           R2
//
// Results:
//
//     R0: the value previously stored in dest
//
// Overwrites:
//
//     R0, R1, R2, R3, Rtemp
//
address generate_atomic_cmpxchg() {
  address start;

  StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
  start = __ pc();