268 default:
269 fatal("BarrierSet AccessBarrier resolving not implemented");
270 return NULL;
271 };
272 }
273
274 static FunctionPointerT resolve_barrier_rt() {
275 if (UseCompressedOops) {
276 const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
277 return resolve_barrier_gc<expanded_decorators>();
278 } else {
279 return resolve_barrier_gc<decorators>();
280 }
281 }
282
  // Entry point for accessor resolution: produces the function pointer that
  // the runtime dispatch points are patched with on first invocation.
  static FunctionPointerT resolve_barrier() {
    return resolve_barrier_rt();
  }
286 };
287
288 // Step 4: Runtime dispatch
289 // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
290 // accessor. This is required when the access either depends on whether compressed oops
291 // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
292 // barriers). The way it works is that a function pointer initially pointing to an
293 // accessor resolution function gets called for each access. Upon first invocation,
294 // it resolves which accessor to be used in future invocations and patches the
295 // function pointer to this new accessor.
296
// Primary template; never used directly — only the per-barrier-type
// specializations below are instantiated.
template <DecoratorSet decorators, typename T, BarrierType type>
struct RuntimeDispatch: AllStatic {};
299
300 template <DecoratorSet decorators, typename T>
301 struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
302 typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
303 static func_t _store_func;
304
305 static void store_init(void* addr, T value) {
306 func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
307 _store_func = function;
308 function(addr, value);
309 }
310
311 static inline void store(void* addr, T value) {
312 _store_func(addr, value);
313 }
314 };
315
316 template <DecoratorSet decorators, typename T>
317 struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
318 typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
319 static func_t _store_at_func;
320
321 static void store_at_init(oop base, ptrdiff_t offset, T value) {
322 func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
323 _store_at_func = function;
324 function(base, offset, value);
325 }
326
327 static inline void store_at(oop base, ptrdiff_t offset, T value) {
328 _store_at_func(base, offset, value);
329 }
330 };
331
332 template <DecoratorSet decorators, typename T>
333 struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
334 typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
335 static func_t _load_func;
336
337 static T load_init(void* addr) {
338 func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
339 _load_func = function;
340 return function(addr);
341 }
342
343 static inline T load(void* addr) {
344 return _load_func(addr);
345 }
346 };
347
348 template <DecoratorSet decorators, typename T>
349 struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
350 typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
351 static func_t _load_at_func;
352
353 static T load_at_init(oop base, ptrdiff_t offset) {
354 func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
355 _load_at_func = function;
356 return function(base, offset);
357 }
358
359 static inline T load_at(oop base, ptrdiff_t offset) {
360 return _load_at_func(base, offset);
361 }
362 };
363
364 template <DecoratorSet decorators, typename T>
365 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
366 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
367 static func_t _atomic_cmpxchg_func;
368
369 static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
370 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
371 _atomic_cmpxchg_func = function;
372 return function(new_value, addr, compare_value);
373 }
374
375 static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
376 return _atomic_cmpxchg_func(new_value, addr, compare_value);
377 }
378 };
379
380 template <DecoratorSet decorators, typename T>
381 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
382 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
383 static func_t _atomic_cmpxchg_at_func;
384
385 static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
386 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
387 _atomic_cmpxchg_at_func = function;
388 return function(new_value, base, offset, compare_value);
389 }
390
391 static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
392 return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
393 }
394 };
395
396 template <DecoratorSet decorators, typename T>
397 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
398 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
399 static func_t _atomic_xchg_func;
400
401 static T atomic_xchg_init(T new_value, void* addr) {
402 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
403 _atomic_xchg_func = function;
404 return function(new_value, addr);
405 }
406
407 static inline T atomic_xchg(T new_value, void* addr) {
408 return _atomic_xchg_func(new_value, addr);
409 }
410 };
411
412 template <DecoratorSet decorators, typename T>
413 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
414 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
415 static func_t _atomic_xchg_at_func;
416
417 static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
418 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
419 _atomic_xchg_at_func = function;
420 return function(new_value, base, offset);
421 }
422
423 static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
424 return _atomic_xchg_at_func(new_value, base, offset);
425 }
426 };
427
428 template <DecoratorSet decorators, typename T>
429 struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
430 typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
431 static func_t _arraycopy_func;
432
433 static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
434 func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
435 _arraycopy_func = function;
436 return function(src_obj, dst_obj, src, dst, length);
437 }
438
439 static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
440 return _arraycopy_func(src_obj, dst_obj, src, dst, length);
441 }
442 };
443
444 template <DecoratorSet decorators, typename T>
445 struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
446 typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
447 static func_t _clone_func;
448
449 static void clone_init(oop src, oop dst, size_t size) {
450 func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
451 _clone_func = function;
452 function(src, dst, size);
453 }
454
455 static inline void clone(oop src, oop dst, size_t size) {
456 _clone_func(src, dst, size);
457 }
458 };
459
460 template <DecoratorSet decorators, typename T>
461 struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
462 typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
463 static func_t _resolve_func;
464
465 static oop resolve_init(oop obj) {
466 func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
467 _resolve_func = function;
468 return function(obj);
469 }
470
471 static inline oop resolve(oop obj) {
472 return _resolve_func(obj);
473 }
474 };
475
476 template <DecoratorSet decorators, typename T>
477 struct RuntimeDispatch<decorators, T, BARRIER_EQUALS>: AllStatic {
478 typedef typename AccessFunction<decorators, T, BARRIER_EQUALS>::type func_t;
479 static func_t _equals_func;
480
481 static bool equals_init(oop o1, oop o2) {
482 func_t function = BarrierResolver<decorators, func_t, BARRIER_EQUALS>::resolve_barrier();
483 _equals_func = function;
484 return function(o1, o2);
485 }
486
487 static inline bool equals(oop o1, oop o2) {
488 return _equals_func(o1, o2);
489 }
490 };
491
// Initialize each dispatch function pointer to its resolving (_init)
// function, so the first access through a dispatch point resolves the real
// accessor and patches the pointer for all subsequent accesses.
template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_STORE>::type
RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_LOAD>::type
RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_CLONE>::type
RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_EQUALS>::type
RuntimeDispatch<decorators, T, BARRIER_EQUALS>::_equals_func = &equals_init;
540
541 // Step 3: Pre-runtime dispatching.
542 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
543 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
544 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
545 // not possible.
546 struct PreRuntimeDispatch: AllStatic {
547 template<DecoratorSet decorators>
548 struct CanHardwireRaw: public IntegralConstant<
549 bool,
550 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
551 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
552 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
553 {};
554
555 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
556
557 template<DecoratorSet decorators>
558 static bool is_hardwired_primitive() {
559 return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
560 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
561 }
562
563 template <DecoratorSet decorators, typename T>
564 inline static typename EnableIf<
565 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
566 store(void* addr, T value) {
567 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
568 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
569 Raw::oop_store(addr, value);
570 } else {
571 Raw::store(addr, value);
572 }
573 }
574
575 template <DecoratorSet decorators, typename T>
576 inline static typename EnableIf<
577 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
578 store(void* addr, T value) {
579 if (UseCompressedOops) {
580 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
581 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
582 } else {
583 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
584 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
585 }
586 }
587
588 template <DecoratorSet decorators, typename T>
589 inline static typename EnableIf<
590 !HasDecorator<decorators, AS_RAW>::value>::type
591 store(void* addr, T value) {
592 if (is_hardwired_primitive<decorators>()) {
593 const DecoratorSet expanded_decorators = decorators | AS_RAW;
594 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
595 } else {
596 RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
597 }
598 }
599
600 template <DecoratorSet decorators, typename T>
601 inline static typename EnableIf<
602 HasDecorator<decorators, AS_RAW>::value>::type
603 store_at(oop base, ptrdiff_t offset, T value) {
604 store<decorators>(field_addr(base, offset), value);
605 }
606
607 template <DecoratorSet decorators, typename T>
608 inline static typename EnableIf<
609 !HasDecorator<decorators, AS_RAW>::value>::type
610 store_at(oop base, ptrdiff_t offset, T value) {
611 if (is_hardwired_primitive<decorators>()) {
612 const DecoratorSet expanded_decorators = decorators | AS_RAW;
613 PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
614 } else {
615 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
616 }
617 }
618
619 template <DecoratorSet decorators, typename T>
620 inline static typename EnableIf<
621 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
622 load(void* addr) {
623 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
624 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
625 return Raw::template oop_load<T>(addr);
626 } else {
627 return Raw::template load<T>(addr);
628 }
629 }
630
631 template <DecoratorSet decorators, typename T>
632 inline static typename EnableIf<
633 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
634 load(void* addr) {
635 if (UseCompressedOops) {
636 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
637 return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
638 } else {
639 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
640 return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
641 }
642 }
643
644 template <DecoratorSet decorators, typename T>
645 inline static typename EnableIf<
646 !HasDecorator<decorators, AS_RAW>::value, T>::type
647 load(void* addr) {
648 if (is_hardwired_primitive<decorators>()) {
649 const DecoratorSet expanded_decorators = decorators | AS_RAW;
650 return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
651 } else {
652 return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
653 }
654 }
655
656 template <DecoratorSet decorators, typename T>
657 inline static typename EnableIf<
658 HasDecorator<decorators, AS_RAW>::value, T>::type
659 load_at(oop base, ptrdiff_t offset) {
660 return load<decorators, T>(field_addr(base, offset));
661 }
662
663 template <DecoratorSet decorators, typename T>
664 inline static typename EnableIf<
665 !HasDecorator<decorators, AS_RAW>::value, T>::type
666 load_at(oop base, ptrdiff_t offset) {
667 if (is_hardwired_primitive<decorators>()) {
668 const DecoratorSet expanded_decorators = decorators | AS_RAW;
669 return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
670 } else {
671 return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
672 }
673 }
674
675 template <DecoratorSet decorators, typename T>
676 inline static typename EnableIf<
677 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
678 atomic_cmpxchg(T new_value, void* addr, T compare_value) {
679 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
680 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
681 return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
682 } else {
683 return Raw::atomic_cmpxchg(new_value, addr, compare_value);
684 }
685 }
686
687 template <DecoratorSet decorators, typename T>
688 inline static typename EnableIf<
689 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
690 atomic_cmpxchg(T new_value, void* addr, T compare_value) {
691 if (UseCompressedOops) {
692 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
693 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
694 } else {
695 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
696 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
697 }
698 }
699
700 template <DecoratorSet decorators, typename T>
701 inline static typename EnableIf<
702 !HasDecorator<decorators, AS_RAW>::value, T>::type
703 atomic_cmpxchg(T new_value, void* addr, T compare_value) {
704 if (is_hardwired_primitive<decorators>()) {
705 const DecoratorSet expanded_decorators = decorators | AS_RAW;
706 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
707 } else {
708 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
709 }
710 }
711
712 template <DecoratorSet decorators, typename T>
713 inline static typename EnableIf<
714 HasDecorator<decorators, AS_RAW>::value, T>::type
715 atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
716 return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
717 }
718
719 template <DecoratorSet decorators, typename T>
720 inline static typename EnableIf<
721 !HasDecorator<decorators, AS_RAW>::value, T>::type
722 atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
723 if (is_hardwired_primitive<decorators>()) {
724 const DecoratorSet expanded_decorators = decorators | AS_RAW;
725 return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
726 } else {
727 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
728 }
729 }
730
731 template <DecoratorSet decorators, typename T>
732 inline static typename EnableIf<
733 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
734 atomic_xchg(T new_value, void* addr) {
735 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
736 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
737 return Raw::oop_atomic_xchg(new_value, addr);
738 } else {
739 return Raw::atomic_xchg(new_value, addr);
740 }
741 }
742
743 template <DecoratorSet decorators, typename T>
744 inline static typename EnableIf<
745 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
746 atomic_xchg(T new_value, void* addr) {
747 if (UseCompressedOops) {
748 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
749 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
750 } else {
751 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
752 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
753 }
754 }
755
756 template <DecoratorSet decorators, typename T>
757 inline static typename EnableIf<
758 !HasDecorator<decorators, AS_RAW>::value, T>::type
759 atomic_xchg(T new_value, void* addr) {
760 if (is_hardwired_primitive<decorators>()) {
761 const DecoratorSet expanded_decorators = decorators | AS_RAW;
762 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
763 } else {
764 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
765 }
766 }
767
768 template <DecoratorSet decorators, typename T>
769 inline static typename EnableIf<
770 HasDecorator<decorators, AS_RAW>::value, T>::type
771 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
772 return atomic_xchg<decorators>(new_value, field_addr(base, offset));
773 }
774
775 template <DecoratorSet decorators, typename T>
776 inline static typename EnableIf<
777 !HasDecorator<decorators, AS_RAW>::value, T>::type
778 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
779 if (is_hardwired_primitive<decorators>()) {
780 const DecoratorSet expanded_decorators = decorators | AS_RAW;
781 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset);
782 } else {
783 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
784 }
785 }
786
787 template <DecoratorSet decorators, typename T>
788 inline static typename EnableIf<
789 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
790 arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
791 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
792 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
793 return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
794 } else {
795 return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
796 }
797 }
798
799 template <DecoratorSet decorators, typename T>
800 inline static typename EnableIf<
801 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
802 arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
803 if (UseCompressedOops) {
804 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
805 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
806 } else {
807 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
808 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
809 }
810 }
811
812 template <DecoratorSet decorators, typename T>
813 inline static typename EnableIf<
814 !HasDecorator<decorators, AS_RAW>::value, bool>::type
815 arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
816 if (is_hardwired_primitive<decorators>()) {
817 const DecoratorSet expanded_decorators = decorators | AS_RAW;
818 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
819 } else {
820 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
821 }
822 }
823
824 template <DecoratorSet decorators>
825 inline static typename EnableIf<
826 HasDecorator<decorators, AS_RAW>::value>::type
827 clone(oop src, oop dst, size_t size) {
828 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
829 Raw::clone(src, dst, size);
830 }
831
832 template <DecoratorSet decorators>
833 inline static typename EnableIf<
834 !HasDecorator<decorators, AS_RAW>::value>::type
835 clone(oop src, oop dst, size_t size) {
836 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
837 }
838
839 template <DecoratorSet decorators>
840 inline static typename EnableIf<
841 HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
842 resolve(oop obj) {
843 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
844 return Raw::resolve(obj);
845 }
846
847 template <DecoratorSet decorators>
848 inline static typename EnableIf<
849 !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
850 resolve(oop obj) {
851 return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
852 }
853
854 template <DecoratorSet decorators>
855 inline static typename EnableIf<
856 HasDecorator<decorators, AS_RAW>::value, bool>::type
857 equals(oop o1, oop o2) {
858 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
859 return Raw::equals(o1, o2);
860 }
861
862 template <DecoratorSet decorators>
863 inline static typename EnableIf<
864 !HasDecorator<decorators, AS_RAW>::value, bool>::type
865 equals(oop o1, oop o2) {
866 return RuntimeDispatch<decorators, oop, BARRIER_EQUALS>::equals(o1, o2);
867 }
868 };
869
// This class adds implied decorators that follow according to decorator rules.
// For example adding default reference strength and default memory ordering
// semantics. Each member builds on the previous one, so the chain must be
// evaluated in order; `value` is the fully expanded decorator set.
template <DecoratorSet input_decorators>
struct DecoratorFixup: AllStatic {
  // If no reference strength has been picked, then strong will be picked
  // (reference strength only applies to oop values)
  static const DecoratorSet ref_strength_default = input_decorators |
    (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
     ON_STRONG_OOP_REF : INTERNAL_EMPTY);
  // If no memory ordering has been picked, unordered will be picked
  static const DecoratorSet memory_ordering_default = ref_strength_default |
    ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
  // If no barrier strength has been picked, normal will be used
  static const DecoratorSet barrier_strength_default = memory_ordering_default |
    ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
  // Heap array accesses imply it is a heap access
  static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
    ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
  // Concurrent-root accesses imply a root access
  static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
    ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
  // Archive-root accesses imply a root access
  static const DecoratorSet archive_root_is_root = conc_root_is_root |
    ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
  // The final decorator set, including build-time decorators
  static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
};
894
895 // Step 2: Reduce types.
896 // Enforce that for non-oop types, T and P have to be strictly the same.
897 // P is the type of the address and T is the type of the values.
// As for oop types, it is allowed to send T in {narrowOop, oop} and
899 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
900 // the subsequent table. (columns are P, rows are T)
901 // | | HeapWord | oop | narrowOop |
902 // | oop | rt-comp | hw-none | hw-comp |
903 // | narrowOop | x | x | hw-none |
904 //
905 // x means not allowed
906 // rt-comp means it must be checked at runtime whether the oop is compressed.
907 // hw-none means it is statically known the oop will not be compressed.
908 // hw-comp means it is statically known the oop will be compressed.
909
910 template <DecoratorSet decorators, typename T>
911 inline void store_reduce_types(T* addr, T value) {
912 PreRuntimeDispatch::store<decorators>(addr, value);
913 }
914
915 template <DecoratorSet decorators>
916 inline void store_reduce_types(narrowOop* addr, oop value) {
917 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
918 INTERNAL_RT_USE_COMPRESSED_OOPS;
919 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
920 }
921
922 template <DecoratorSet decorators>
923 inline void store_reduce_types(narrowOop* addr, narrowOop value) {
924 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
925 INTERNAL_RT_USE_COMPRESSED_OOPS;
926 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
927 }
928
929 template <DecoratorSet decorators>
930 inline void store_reduce_types(HeapWord* addr, oop value) {
931 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
932 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
933 }
934
935 template <DecoratorSet decorators, typename T>
936 inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
937 return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
938 }
939
940 template <DecoratorSet decorators>
941 inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
942 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
943 INTERNAL_RT_USE_COMPRESSED_OOPS;
944 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
945 }
946
947 template <DecoratorSet decorators>
948 inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
949 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
950 INTERNAL_RT_USE_COMPRESSED_OOPS;
951 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
952 }
953
954 template <DecoratorSet decorators>
955 inline oop atomic_cmpxchg_reduce_types(oop new_value,
956 HeapWord* addr,
957 oop compare_value) {
958 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
959 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
960 }
961
962 template <DecoratorSet decorators, typename T>
963 inline T atomic_xchg_reduce_types(T new_value, T* addr) {
964 const DecoratorSet expanded_decorators = decorators;
965 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
966 }
967
968 template <DecoratorSet decorators>
969 inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
970 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
971 INTERNAL_RT_USE_COMPRESSED_OOPS;
972 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
973 }
974
975 template <DecoratorSet decorators>
976 inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
977 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
978 INTERNAL_RT_USE_COMPRESSED_OOPS;
979 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
980 }
981
982 template <DecoratorSet decorators>
983 inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
984 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
985 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
986 }
987
988 template <DecoratorSet decorators, typename T>
989 inline T load_reduce_types(T* addr) {
990 return PreRuntimeDispatch::load<decorators, T>(addr);
991 }
992
993 template <DecoratorSet decorators, typename T>
994 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
995 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
996 INTERNAL_RT_USE_COMPRESSED_OOPS;
997 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
998 }
999
1000 template <DecoratorSet decorators, typename T>
1001 inline oop load_reduce_types(HeapWord* addr) {
1002 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1003 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1004 }
1005
// Type-reduction overload for primitive array copies: no oop conversion is
// involved, so forward to the pre-runtime dispatch layer unchanged.
template <DecoratorSet decorators, typename T>
inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
}
1010
1011 template <DecoratorSet decorators>
1012 inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
1013 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1014 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
1015 }
1016
1017 template <DecoratorSet decorators>
1018 inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
1019 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1020 INTERNAL_RT_USE_COMPRESSED_OOPS;
1021 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
1022 }
1023
1024 // Step 1: Set default decorators. This step remembers if a type was volatile
1025 // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
1026 // memory ordering is set for the access, and the implied decorator rules
1027 // are applied to select sensible defaults for decorators that have not been
1028 // explicitly set. For example, default object referent strength is set to strong.
1029 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
// and references from the types). This step also performs some type verification
1031 // that the passed in types make sense.
1032
1033 template <DecoratorSet decorators, typename T>
1034 static void verify_types(){
1035 // If this fails to compile, then you have sent in something that is
1036 // not recognized as a valid primitive type to a primitive Access function.
1037 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
1038 (IsPointer<T>::value || IsIntegral<T>::value) ||
1039 IsFloatingPoint<T>::value)); // not allowed primitive type
1040 }
1041
1042 template <DecoratorSet decorators, typename P, typename T>
1043 inline void store(P* addr, T value) {
1044 verify_types<decorators, T>();
1045 typedef typename Decay<P>::type DecayedP;
1046 typedef typename Decay<T>::type DecayedT;
1047 DecayedT decayed_value = value;
1048 // If a volatile address is passed in but no memory ordering decorator,
1049 // set the memory ordering to MO_VOLATILE by default.
1050 const DecoratorSet expanded_decorators = DecoratorFixup<
1051 (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1052 (MO_VOLATILE | decorators) : decorators>::value;
1053 store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
1054 }
1055
1056 template <DecoratorSet decorators, typename T>
1057 inline void store_at(oop base, ptrdiff_t offset, T value) {
1058 verify_types<decorators, T>();
1059 typedef typename Decay<T>::type DecayedT;
1060 DecayedT decayed_value = value;
1061 const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
1062 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1063 INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
1064 PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
1065 }
1066
1067 template <DecoratorSet decorators, typename P, typename T>
1068 inline T load(P* addr) {
1069 verify_types<decorators, T>();
1070 typedef typename Decay<P>::type DecayedP;
1071 typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
1072 typename OopOrNarrowOop<T>::type,
1073 typename Decay<T>::type>::type DecayedT;
1074 // If a volatile address is passed in but no memory ordering decorator,
1075 // set the memory ordering to MO_VOLATILE by default.
1076 const DecoratorSet expanded_decorators = DecoratorFixup<
1077 (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1078 (MO_VOLATILE | decorators) : decorators>::value;
1079 return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
1080 }
1081
1082 template <DecoratorSet decorators, typename T>
1083 inline T load_at(oop base, ptrdiff_t offset) {
1084 verify_types<decorators, T>();
1085 typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
1086 typename OopOrNarrowOop<T>::type,
1087 typename Decay<T>::type>::type DecayedT;
1088 // Expand the decorators (figure out sensible defaults)
1089 // Potentially remember if we need compressed oop awareness
1090 const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
1091 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1092 INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
1093 return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
1094 }
1095
1096 template <DecoratorSet decorators, typename P, typename T>
1097 inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
1098 verify_types<decorators, T>();
1099 typedef typename Decay<P>::type DecayedP;
1100 typedef typename Decay<T>::type DecayedT;
1101 DecayedT new_decayed_value = new_value;
1102 DecayedT compare_decayed_value = compare_value;
1103 const DecoratorSet expanded_decorators = DecoratorFixup<
1104 (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1105 (MO_SEQ_CST | decorators) : decorators>::value;
1106 return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
1107 const_cast<DecayedP*>(addr),
1108 compare_decayed_value);
1109 }
1110
1111 template <DecoratorSet decorators, typename T>
1112 inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
1113 verify_types<decorators, T>();
1114 typedef typename Decay<T>::type DecayedT;
1115 DecayedT new_decayed_value = new_value;
1116 DecayedT compare_decayed_value = compare_value;
1117 // Determine default memory ordering
1118 const DecoratorSet expanded_decorators = DecoratorFixup<
1119 (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1120 (MO_SEQ_CST | decorators) : decorators>::value;
1121 // Potentially remember that we need compressed oop awareness
1122 const DecoratorSet final_decorators = expanded_decorators |
1123 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1124 INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
1125 return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
1126 offset, compare_decayed_value);
1127 }
1128
1129 template <DecoratorSet decorators, typename P, typename T>
1130 inline T atomic_xchg(T new_value, P* addr) {
1131 verify_types<decorators, T>();
1132 typedef typename Decay<P>::type DecayedP;
1133 typedef typename Decay<T>::type DecayedT;
1134 DecayedT new_decayed_value = new_value;
1135 // atomic_xchg is only available in SEQ_CST flavour.
1136 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1137 return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
1138 const_cast<DecayedP*>(addr));
1139 }
1140
1141 template <DecoratorSet decorators, typename T>
1142 inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
1143 verify_types<decorators, T>();
1144 typedef typename Decay<T>::type DecayedT;
1145 DecayedT new_decayed_value = new_value;
1146 // atomic_xchg is only available in SEQ_CST flavour.
1147 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1148 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1149 INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
1150 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
1151 }
1152
1153 template <DecoratorSet decorators, typename T>
1154 inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
1155 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1156 (IsSame<T, void>::value || IsIntegral<T>::value) ||
1157 IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
1158 typedef typename Decay<T>::type DecayedT;
1159 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
1160 return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
1161 const_cast<DecayedT*>(src),
1162 const_cast<DecayedT*>(dst),
1163 length);
1164 }
1165
// Frontend object clone: applies the implied-decorator fixup and forwards
// to the pre-runtime dispatch layer.
template <DecoratorSet decorators>
inline void clone(oop src, oop dst, size_t size) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
}
1171
// Frontend oop resolve: applies the implied-decorator fixup and forwards
// to the pre-runtime dispatch layer.
template <DecoratorSet decorators>
inline oop resolve(oop obj) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
}
1177
// Frontend oop equality: applies the implied-decorator fixup and forwards
// to the pre-runtime dispatch layer.
template <DecoratorSet decorators>
inline bool equals(oop o1, oop o2) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  return PreRuntimeDispatch::equals<expanded_decorators>(o1, o2);
}
1183 }
1184
1185 template <DecoratorSet decorators>
1186 template <DecoratorSet expected_decorators>
1187 void Access<decorators>::verify_decorators() {
1188 STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
1189 const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
1190 STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
1191 (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
1192 (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
1193 (barrier_strength_decorators ^ AS_RAW) == 0 ||
1194 (barrier_strength_decorators ^ AS_NORMAL) == 0
1195 ));
1196 const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
1197 STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
1198 (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
1199 (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
1200 (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
1201 (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
1202 ));
1203 const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
1204 STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
1205 (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
1206 (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
1207 (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
1208 (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
1209 (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
1210 (memory_ordering_decorators ^ MO_SEQ_CST) == 0
1211 ));
1212 const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
1213 STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
1214 (location_decorators ^ IN_ROOT) == 0 ||
1215 (location_decorators ^ IN_HEAP) == 0 ||
1216 (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
1217 (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
1218 (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
1219 ));
1220 }
1221
1222 #endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
|
253 default:
254 fatal("BarrierSet AccessBarrier resolving not implemented");
255 return NULL;
256 };
257 }
258
259 static FunctionPointerT resolve_barrier_rt() {
260 if (UseCompressedOops) {
261 const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
262 return resolve_barrier_gc<expanded_decorators>();
263 } else {
264 return resolve_barrier_gc<decorators>();
265 }
266 }
267
  // Entry point for barrier resolution: dispatch on the runtime flags first.
  static FunctionPointerT resolve_barrier() {
    return resolve_barrier_rt();
  }
271 };
272
// Step 5: Runtime dispatch implementation
// Out-of-line definitions of the RuntimeDispatch *_init resolution functions.
// Runtime dispatch is required when the access either depends on whether compressed oops
// is being used, or it depends on which GC implementation was chosen (e.g. requires GC
// barriers). The way it works is that a function pointer initially pointing to an
// accessor resolution function gets called for each access. Upon first invocation,
// it resolves which accessor to be used in future invocations and patches the
// function pointer to this new accessor.
282 template <DecoratorSet decorators, typename T>
283 void RuntimeDispatch<decorators, T, BARRIER_STORE>::store_init(void* addr, T value) {
284 func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
285 _store_func = function;
286 function(addr, value);
287 }
288
289 template <DecoratorSet decorators, typename T>
290 void RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at_init(oop base, ptrdiff_t offset, T value) {
291 func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
292 _store_at_func = function;
293 function(base, offset, value);
294 }
295
296 template <DecoratorSet decorators, typename T>
297 T RuntimeDispatch<decorators, T, BARRIER_LOAD>::load_init(void* addr) {
298 func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
299 _load_func = function;
300 return function(addr);
301 }
302
303 template <DecoratorSet decorators, typename T>
304 T RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at_init(oop base, ptrdiff_t offset) {
305 func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
306 _load_at_func = function;
307 return function(base, offset);
308 }
309
310 template <DecoratorSet decorators, typename T>
311 T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
312 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
313 _atomic_cmpxchg_func = function;
314 return function(new_value, addr, compare_value);
315 }
316
317 template <DecoratorSet decorators, typename T>
318 T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T comp
319 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
320 _atomic_cmpxchg_at_func = function;
321 return function(new_value, base, offset, compare_value);
322 }
323
324 template <DecoratorSet decorators, typename T>
325 T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
326 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
327 _atomic_xchg_func = function;
328 return function(new_value, addr);
329 }
330
331 template <DecoratorSet decorators, typename T>
332 T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
333 func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
334 _atomic_xchg_at_func = function;
335 return function(new_value, base, offset);
336 }
337
338 template <DecoratorSet decorators, typename T>
339 bool RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t l
340 func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
341 _arraycopy_func = function;
342 return function(src_obj, dst_obj, src, dst, length);
343 }
344
345 template <DecoratorSet decorators, typename T>
346 void RuntimeDispatch<decorators, T, BARRIER_CLONE>::clone_init(oop src, oop dst, size_t size) {
347 func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
348 _clone_func = function;
349 function(src, dst, size);
350 }
351
352 template <DecoratorSet decorators, typename T>
353 oop RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::resolve_init(oop obj) {
354 func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
355 _resolve_func = function;
356 return function(obj);
357 }
358
359 template <DecoratorSet decorators, typename T>
360 bool RuntimeDispatch<decorators, T, BARRIER_EQUALS>::equals_init(oop o1, oop o2) {
361 func_t function = BarrierResolver<decorators, func_t, BARRIER_EQUALS>::resolve_barrier();
362 _equals_func = function;
363 return function(o1, o2);
364 }
365 }
366
367 #endif // SHARE_OOPS_ACCESS_INLINE_HPP
|