  // The sharpened class might be unloaded if there is no class loader
  // constraint in place.
  if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
    const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);

#ifndef PRODUCT
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print(" from base type: ");  adr_type->dump();
      tty->print(" sharpened value: ");  tjp->dump();
    }
#endif
    // Sharpen the value type.
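    // (For instance, a getObject from an instance field declared as
    // String can be sharpened from a plain Object oop to an instance
    // pointer of java/lang/String, provided that class is loaded.)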
    return tjp;
  }
  return NULL;
}

bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
  if (callee()->is_static())  return false;  // caller must have the capability!
  assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");

#ifndef PRODUCT
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
#ifdef ASSERT
    if (!is_store) {
      // Object getObject(Object base, int/long offset), etc.
      BasicType rtype = sig->return_type()->basic_type();
      if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
          rtype = T_ADDRESS;  // it is really a C void*
      assert(rtype == type, "getter must return the expected value");
      if (!is_native_ptr) {
        assert(sig->count() == 2, "oop getter has 2 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
      } else {
        assert(sig->count() == 1, "native getter has 1 argument");
        assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
  // ...
    offset = argument(2);  // type: long
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_base.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");
    // 32-bit machines ignore the high half!
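    // (ConvL2X is a no-op on LP64, where a machine word holds the whole
    // long; on LP32 it degenerates to ConvL2I, which is why the high
    // half of the offset is ignored there.)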
    offset = ConvL2X(offset);
    adr = make_unsafe_address(base, offset);
    heap_base_oop = base;
    val = is_store ? argument(4) : NULL;
  } else {
    Node* ptr = argument(1);  // type: long
    ptr = ConvL2X(ptr);  // adjust Java long to machine word
    adr = make_unsafe_address(NULL, ptr);
    val = is_store ? argument(3) : NULL;
  }

  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();

  // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
  // there was not enough information to nail it down.
  Compile::AliasType* alias_type = C->alias_type(adr_type);
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");

  assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
         alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
  bool mismatched = false;
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL) {
    if (bt == T_BYTE && adr_type->isa_aryptr()) {
      // Alias type doesn't differentiate between byte[] and boolean[].
      // Use the address type to get the element type.
      bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
    }
    if (bt == T_ARRAY || bt == T_NARROWOOP) {
      // Accessing an array field with getObject is not a mismatch.
      bt = T_OBJECT;
    }
    if ((bt == T_OBJECT) != (type == T_OBJECT)) {
      // Don't intrinsify mismatched object accesses.
      return false;
    }
    mismatched = (bt != type);
  }
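  // For example: getBoolean on a boolean[] would look mismatched if bt
  // stayed T_BYTE, but taking the element type from the address type
  // yields T_BOOLEAN and the access matches; getLong on an int[] leaves
  // bt == T_INT != T_LONG, so it is treated as mismatched.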

  // First guess at the value type.
  const Type *value_type = Type::get_const_basic_type(type);

  // We will need memory barriers unless we can determine a unique
  // alias category for this reference.  (Note:  If for some reason
  // the barriers get omitted and the unsafe reference begins to "pollute"
  // the alias analysis of the rest of the graph, either Compile::can_alias
  // or Compile::must_alias will throw a diagnostic assert.)
  bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection),
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // We also need to add a memory barrier to prevent commoning of reads
  // from this field across a safepoint, since GC can change its value.
  bool need_read_barrier = !is_native_ptr && !is_store &&
                           offset != top() && heap_base_oop != top();
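  // For example, a reflective Reference.get() that reaches this code as
  // Unsafe.getObject(ref, referent_offset) must not let a concurrent GC
  // lose track of the referent; insert_pre_barrier() below handles that.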

  if (!is_store && type == T_OBJECT) {
    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
    if (tjp != NULL) {
      value_type = tjp;
  // ...
    // rough approximation of type.
    need_mem_bar = true;
    // For Stores, place a memory ordering barrier now.
    if (is_store) {
      insert_mem_bar(Op_MemBarRelease);
    } else {
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
        insert_mem_bar(Op_MemBarVolatile);
      }
    }
  }
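
  // The volatile scheme here mirrors the usual one: a volatile store is
  // preceded by MemBarRelease, and on CPUs that are not multiple-copy
  // atomic (e.g. PPC64) a volatile load is preceded by MemBarVolatile to
  // preserve IRIW ordering; the matching trailing barriers are emitted
  // after the access itself.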

  // Memory barrier to prevent normal and 'unsafe' accesses from
  // bypassing each other.  Happens after null checks, so the
  // exception paths do not take memory state from the memory barrier,
  // so there are no problems making a strong assert about mixing users
  // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
  // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

  if (!is_store) {
    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
    // To be valid, unsafe loads may depend on conditions other than
    // the one that guards them: pin the Load node.
    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
    // load value
    switch (type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_LONG:
    case T_FLOAT:
    case T_DOUBLE:
      break;
    case T_OBJECT:
      if (need_read_barrier) {
        insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
      }
  // ...
    oldval = NULL;
    newval = argument(4);  // type: oop, int, or long
  }

  // Null check receiver.
  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }

  // Build field offset expression.
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_base.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half of long offsets
  offset = ConvL2X(offset);
  Node* adr = make_unsafe_address(base, offset);
  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();

  Compile::AliasType* alias_type = C->alias_type(adr_type);
  assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
         alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL &&
      ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
    // Don't intrinsify mismatched object accesses.
    return false;
  }
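  // For example, a compareAndSwapObject aimed at an int field would take
  // this early return: bt would be T_INT while type is T_OBJECT.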

  // For CAS, unlike inline_unsafe_access, there seems to be no point in
  // trying to refine types.  Just use the coarse types here.
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  const Type *value_type = Type::get_const_basic_type(type);

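  // The exception is an object xchg, whose result is an oop flowing back
  // into the graph and therefore worth typing as sharply as possible: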
  if (kind == LS_xchg && type == T_OBJECT) {
    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
    if (tjp != NULL) {
      value_type = tjp;
    }
  }

  int alias_idx = C->get_alias_index(adr_type);

  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so it needs barriers on each side.  These don't translate
  // into actual barriers on most machines, but we still need the rest
  // of the compiler to respect ordering.

  insert_mem_bar(Op_MemBarRelease);
  insert_mem_bar(Op_MemBarCPUOrder);
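
  // The resulting shape around the LoadStore node is:
  //   MemBarRelease
  //   MemBarCPUOrder
  //   LoadStore (e.g. CompareAndSwap*, GetAndSet*)
  //   MemBarCPUOrder
  //   MemBarAcquire
  // with the trailing pair emitted once the operation's result is
  // plumbed back into the graph.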

  // 4984716: MemBars must be inserted before this
  //          memory node in order to avoid a false