src/share/vm/opto/library_call.cpp

--- old version

2399     offset = argument(2);  // type: long
2400     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2401     // to be plain byte offsets, which are also the same as those accepted
2402     // by oopDesc::field_base.
2403     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2404            "fieldOffset must be byte-scaled");
2405     // 32-bit machines ignore the high half!
2406     offset = ConvL2X(offset);
2407     adr = make_unsafe_address(base, offset);
2408     heap_base_oop = base;
2409     val = is_store ? argument(4) : NULL;
2410   } else {
2411     Node* ptr = argument(1);  // type: long
2412     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2413     adr = make_unsafe_address(NULL, ptr);
2414     val = is_store ? argument(3) : NULL;
2415   }
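
The byte-offset "cookie" contract asserted above amounts to plain pointer
arithmetic: oopDesc::field_base just adds the raw byte offset to the oop's
base address, and make_unsafe_address models the same computation in the ideal
graph. A minimal sketch of the assumption, for reference (field_base_sketch is
a hypothetical name; the real field_base lives in oop.inline.hpp):

    #include <stdint.h>

    // Illustration only: "plain byte offsets" means the Unsafe offset
    // cookie can be added directly to the base oop, with no scaling or
    // decoding step in between.
    static void* field_base_sketch(void* base_oop, intptr_t byte_offset) {
      return (void*)((char*)base_oop + byte_offset);
    }
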
2416 
2417   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2418 
2419   // First guess at the value type.
2420   const Type *value_type = Type::get_const_basic_type(type);
2421 
2422   // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
2423   // there was not enough information to nail it down.
2424   Compile::AliasType* alias_type = C->alias_type(adr_type);
2425   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2426 
2427   // We will need memory barriers unless we can determine a unique
2428   // alias category for this reference.  (Note:  If for some reason
2429   // the barriers get omitted and the unsafe reference begins to "pollute"
2430   // the alias analysis of the rest of the graph, either Compile::can_alias
2431   // or Compile::must_alias will throw a diagnostic assert.)
2432   bool need_mem_bar;
2433   switch (kind) {
2434       case Relaxed:
2435           need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2436           break;
2437       case Opaque:
2438           // Opaque uses CPUOrder membars for protection against code movement.
2439       case Acquire:
2440       case Release:
2441       case Volatile:
2442           need_mem_bar = true;
2443           break;
2444       default:
2445           ShouldNotReachHere();
2446   }

...

2507     case Volatile:
2508       if (is_store) {
2509         insert_mem_bar(Op_MemBarRelease);
2510       } else {
2511         if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2512           insert_mem_bar(Op_MemBarVolatile);
2513         }
2514       }
2515       break;
2516     default:
2517       ShouldNotReachHere();
2518   }
2519 
2520   // Memory barrier to prevent normal and 'unsafe' accesses from
2521   // bypassing each other.  Happens after null checks, so the
2522   // exception paths do not take memory state from the memory barrier,
2523   // so there is no problem making a strong assert about mixing users
2524   // of safe & unsafe memory.
2525   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
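
Two decisions happen above: the need_mem_bar switch asks whether the access
needs bracketing CPUOrder membars because its alias category could not be
pinned down, and the Volatile case inserts a release barrier before a volatile
store, or a full barrier before a volatile load on CPUs that are not
multiple-copy atomic (the IRIW case). A self-contained sketch of the first
decision (the enum and helper names are hypothetical; the real code inserts
the barrier nodes inline as shown):

    // Mirrors the need_mem_bar switch: only a Relaxed access with a
    // known alias category can skip the CPUOrder barrier.
    enum AccessKindSketch { Relaxed, Opaque, Acquire, Release, Volatile };

    static bool needs_mem_bar_sketch(AccessKindSketch kind, bool alias_unknown) {
      switch (kind) {
        case Relaxed:  return alias_unknown;
        case Opaque:   // kept honest by CPUOrder membars against code motion
        case Acquire:
        case Release:
        case Volatile: return true;
      }
      return true;  // not reached
    }
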
2526 
2527   assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
2528          alias_type->field() != NULL || alias_type->element() != NULL, "field, array element or unknown");
2529   bool mismatched = false;
2530   if (alias_type->element() != NULL || alias_type->field() != NULL) {
2531     BasicType bt;
2532     if (alias_type->element() != NULL) {
2533       // Use address type to get the element type. Alias type doesn't provide
2534       // enough information (e.g., doesn't differentiate between byte[] and boolean[]).
2535       const Type* element = adr_type->is_aryptr()->elem();
2536       bt = element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
2537     } else {
2538       bt = alias_type->field()->layout_type();
2539     }
2540     if (bt == T_ARRAY) {
2541       // accessing an array field with getObject is not a mismatch
2542       bt = T_OBJECT;
2543     }
2544     if (bt != type) {
2545       mismatched = true;
2546     }
2547   }
2548   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
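
A concrete instance of the byte[]/boolean[] caveat in the comment above:
Unsafe.getByte applied to a boolean[] element yields bt == T_BOOLEAN from the
address type but type == T_BYTE from the caller, so the access is flagged as
mismatched. A hedged sketch of the classification (helper name hypothetical;
BasicType, T_ARRAY and T_OBJECT are the HotSpot enums used above):

    // Normalize array-typed fields to T_OBJECT first (reading an array
    // field with getObject is not a mismatch), then flag any remaining
    // disagreement between the declared and the accessed type.
    static bool is_mismatched_sketch(BasicType declared_bt, BasicType access_type) {
      if (declared_bt == T_ARRAY)  declared_bt = T_OBJECT;
      return declared_bt != access_type;
    }
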
2549 
2550   if (!is_store) {
2551     Node* p = NULL;
2552     // Try to constant fold a load from a constant field
2553     ciField* field = alias_type->field();
2554     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2555       // final or stable field
2556       p = make_constant_from_field(field, heap_base_oop);
2557     }
2558     if (p == NULL) {
2559       // To be valid, unsafe loads may depend on conditions other than
2560       // the one that guards them: pin the Load node
2561       p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
2562       // load value
2563       switch (type) {
2564       case T_BOOLEAN:
2565       case T_CHAR:
2566       case T_BYTE:
2567       case T_SHORT:
2568       case T_INT:
2569       case T_LONG:

...

2797     default:
2798       ShouldNotReachHere();
2799   }
2800 
2801   // Null check receiver.
2802   receiver = null_check(receiver);
2803   if (stopped()) {
2804     return true;
2805   }
2806 
2807   // Build field offset expression.
2808   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2809   // to be plain byte offsets, which are also the same as those accepted
2810   // by oopDesc::field_base.
2811   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2812   // 32-bit machines ignore the high half of long offsets
2813   offset = ConvL2X(offset);
2814   Node* adr = make_unsafe_address(base, offset);
2815   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2816 
2817   // For CAS, unlike inline_unsafe_access, there seems no point in
2818   // trying to refine types. Just use the coarse types here.
2819   const Type *value_type = Type::get_const_basic_type(type);
2820   Compile::AliasType* alias_type = C->alias_type(adr_type);
2821   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");

2822 
2823   switch (kind) {
2824     case LS_get_set:
2825     case LS_cmp_exchange: {
2826       if (type == T_OBJECT) {
2827         const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2828         if (tjp != NULL) {
2829           value_type = tjp;
2830         }
2831       }
2832       break;
2833     }
2834     case LS_cmp_swap:
2835     case LS_cmp_swap_weak:
2836     case LS_get_add:
2837       break;
2838     default:
2839       ShouldNotReachHere();
2840   }
2841 

+++ new version

2399     offset = argument(2);  // type: long
2400     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2401     // to be plain byte offsets, which are also the same as those accepted
2402     // by oopDesc::field_base.
2403     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2404            "fieldOffset must be byte-scaled");
2405     // 32-bit machines ignore the high half!
2406     offset = ConvL2X(offset);
2407     adr = make_unsafe_address(base, offset);
2408     heap_base_oop = base;
2409     val = is_store ? argument(4) : NULL;
2410   } else {
2411     Node* ptr = argument(1);  // type: long
2412     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2413     adr = make_unsafe_address(NULL, ptr);
2414     val = is_store ? argument(3) : NULL;
2415   }
2416 
2417   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2418 
2419   // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
2420   // there was not enough information to nail it down.
2421   Compile::AliasType* alias_type = C->alias_type(adr_type);
2422   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2423 
2424   assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
2425          alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
2426   bool mismatched = false;
2427   BasicType bt = alias_type->basic_type();
2428   if (bt != T_ILLEGAL) {
2429     if (bt == T_ARRAY) {
2430       // accessing an array field with getObject is not a mismatch
2431       bt = T_OBJECT;
2432     }
2433     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2434       // Don't intrinsify mismatched object accesses
2435       return false;
2436     }
2437     mismatched = (bt != type);
2438     assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2439   }
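
Note that the rewritten check goes further than flagging: when the declared
type and the access type disagree about being an object, the intrinsic bails
out entirely (return false) and the call is left to the regular bytecode path,
while primitive/primitive disagreements are merely marked mismatched, which
also excludes them from the constant folding done below. A sketch of the
two-level policy (helper name hypothetical):

    // Returns true if the intrinsic should not be used at all; sets
    // *mismatched for the milder primitive/primitive disagreement.
    static bool reject_access_sketch(BasicType bt, BasicType type, bool* mismatched) {
      if (bt == T_ARRAY)  bt = T_OBJECT;      // array field read as Object is fine
      if ((bt == T_OBJECT) != (type == T_OBJECT)) {
        return true;                          // e.g. Unsafe.getLong on an Object field
      }
      *mismatched = (bt != type);             // e.g. Unsafe.getByte on a boolean[] element
      return false;
    }
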
2440 
2441   // First guess at the value type.
2442   const Type *value_type = Type::get_const_basic_type(type);
2443 
2444   // We will need memory barriers unless we can determine a unique
2445   // alias category for this reference.  (Note:  If for some reason
2446   // the barriers get omitted and the unsafe reference begins to "pollute"
2447   // the alias analysis of the rest of the graph, either Compile::can_alias
2448   // or Compile::must_alias will throw a diagnostic assert.)
2449   bool need_mem_bar;
2450   switch (kind) {
2451       case Relaxed:
2452           need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2453           break;
2454       case Opaque:
2455           // Opaque uses CPUOrder membars for protection against code movement.
2456       case Acquire:
2457       case Release:
2458       case Volatile:
2459           need_mem_bar = true;
2460           break;
2461       default:
2462           ShouldNotReachHere();
2463   }

...

2524     case Volatile:
2525       if (is_store) {
2526         insert_mem_bar(Op_MemBarRelease);
2527       } else {
2528         if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2529           insert_mem_bar(Op_MemBarVolatile);
2530         }
2531       }
2532       break;
2533     default:
2534       ShouldNotReachHere();
2535   }
2536 
2537   // Memory barrier to prevent normal and 'unsafe' accesses from
2538   // bypassing each other.  Happens after null checks, so the
2539   // exception paths do not take memory state from the memory barrier,
2540   // so there is no problem making a strong assert about mixing users
2541   // of safe & unsafe memory.
2542   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2543 
2544   if (!is_store) {
2545     Node* p = NULL;
2546     // Try to constant fold a load from a constant field
2547     ciField* field = alias_type->field();
2548     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2549       // final or stable field
2550       p = make_constant_from_field(field, heap_base_oop);
2551     }
2552     if (p == NULL) {
2553       // To be valid, unsafe loads may depend on conditions other than
2554       // the one that guards them: pin the Load node
2555       p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
2556       // load value
2557       switch (type) {
2558       case T_BOOLEAN:
2559       case T_CHAR:
2560       case T_BYTE:
2561       case T_SHORT:
2562       case T_INT:
2563       case T_LONG:

...

2791     default:
2792       ShouldNotReachHere();
2793   }
2794 
2795   // Null check receiver.
2796   receiver = null_check(receiver);
2797   if (stopped()) {
2798     return true;
2799   }
2800 
2801   // Build field offset expression.
2802   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2803   // to be plain byte offsets, which are also the same as those accepted
2804   // by oopDesc::field_base.
2805   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2806   // 32-bit machines ignore the high half of long offsets
2807   offset = ConvL2X(offset);
2808   Node* adr = make_unsafe_address(base, offset);
2809   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2810 
2811   Compile::AliasType* alias_type = C->alias_type(adr_type);
2812   assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
2813          alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
2814   BasicType bt = alias_type->basic_type();
2815   if (bt != T_ILLEGAL &&
2816       ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
2817     // Don't intrinsify mismatched object accesses.
2818     return false;
2819   }
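
For the load-store (CAS) intrinsics the same object/primitive test appears in
a folded form, and there is no 'mismatched' fallback: a mismatched
compare-and-swap is simply not intrinsified. The condition is equivalent to
normalizing T_ARRAY to T_OBJECT first, as this quick sketch shows (helper name
hypothetical):

    static bool reject_cas_sketch(BasicType bt, BasicType type) {
      BasicType norm = (bt == T_ARRAY) ? T_OBJECT : bt;
      // same truth table as: (bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT)
      return (norm == T_OBJECT) != (type == T_OBJECT);
    }
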
2820 
2821   // For CAS, unlike inline_unsafe_access, there seems no point in
2822   // trying to refine types. Just use the coarse types here.
2823   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2824   const Type *value_type = Type::get_const_basic_type(type);
2825 
2826   switch (kind) {
2827     case LS_get_set:
2828     case LS_cmp_exchange: {
2829       if (type == T_OBJECT) {
2830         const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2831         if (tjp != NULL) {
2832           value_type = tjp;
2833         }
2834       }
2835       break;
2836     }
2837     case LS_cmp_swap:
2838     case LS_cmp_swap_weak:
2839     case LS_get_add:
2840       break;
2841     default:
2842       ShouldNotReachHere();
2843   }
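
The asymmetry in this switch follows from what each variant returns:
LS_get_set and LS_cmp_exchange produce the previous value itself, so for
T_OBJECT payloads a sharper oop type from sharpen_unsafe_type benefits every
downstream use, whereas LS_cmp_swap and LS_cmp_swap_weak return only a success
flag and LS_get_add is numeric, so the coarse type is already as precise as it
needs to be. A one-line restatement (a sketch; LoadStoreKind is the enum these
case labels come from):

    // Only the variants whose result is the previous value gain
    // anything from a sharpened value_type.
    static bool result_is_old_value_sketch(LoadStoreKind kind) {
      return kind == LS_get_set || kind == LS_cmp_exchange;
    }
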
2844 

