src/share/vm/opto/library_call.cpp

  30 #include "compiler/compileLog.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/convertnode.hpp"
  38 #include "opto/countbitsnode.hpp"
  39 #include "opto/intrinsicnode.hpp"
  40 #include "opto/idealKit.hpp"
  41 #include "opto/mathexactnode.hpp"
  42 #include "opto/movenode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/opaquenode.hpp"
  46 #include "opto/parse.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "opto/subnode.hpp"
  49 #include "prims/nativeLookup.hpp"

  50 #include "runtime/sharedRuntime.hpp"
  51 #include "trace/traceMacros.hpp"
  52 
  53 class LibraryIntrinsic : public InlineCallGenerator {
  54   // Extend the set of intrinsics known to the runtime:
  55  public:
  56  private:
  57   bool             _is_virtual;
  58   bool             _does_virtual_dispatch;
  59   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  60   int8_t           _last_predicate; // Last generated predicate
  61   vmIntrinsics::ID _intrinsic_id;
  62 
  63  public:
  64   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  65     : InlineCallGenerator(m),
  66       _is_virtual(is_virtual),
  67       _does_virtual_dispatch(does_virtual_dispatch),
  68       _predicates_count((int8_t)predicates_count),
  69       _last_predicate((int8_t)-1),


 210   bool inline_math_native(vmIntrinsics::ID id);
 211   bool inline_trig(vmIntrinsics::ID id);
 212   bool inline_math(vmIntrinsics::ID id);
 213   template <typename OverflowOp>
 214   bool inline_math_overflow(Node* arg1, Node* arg2);
 215   void inline_math_mathExact(Node* math, Node* test);
 216   bool inline_math_addExactI(bool is_increment);
 217   bool inline_math_addExactL(bool is_increment);
 218   bool inline_math_multiplyExactI();
 219   bool inline_math_multiplyExactL();
 220   bool inline_math_negateExactI();
 221   bool inline_math_negateExactL();
 222   bool inline_math_subtractExactI(bool is_decrement);
 223   bool inline_math_subtractExactL(bool is_decrement);
 224   bool inline_exp();
 225   bool inline_pow();
 226   Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
 227   bool inline_min_max(vmIntrinsics::ID id);
 228   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 229   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 230   int classify_unsafe_addr(Node* &base, Node* &offset);
 231   Node* make_unsafe_address(Node* base, Node* offset);
 232   // Helper for inline_unsafe_access.
 233   // Generates the guards that check whether the result of
 234   // Unsafe.getObject should be recorded in an SATB log buffer.
 235   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
 236   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
 237   static bool klass_needs_init_guard(Node* kls);
 238   bool inline_unsafe_allocate();
 239   bool inline_unsafe_copyMemory();
 240   bool inline_native_currentThread();
 241 #ifdef TRACE_HAVE_INTRINSICS
 242   bool inline_native_classID();
 243   bool inline_native_threadID();
 244 #endif
 245   bool inline_native_time_funcs(address method, const char* funcName);
 246   bool inline_native_isInterrupted();
 247   bool inline_native_Class_query(vmIntrinsics::ID id);
 248   bool inline_native_subtype_check();
 249 
 250   bool inline_native_newArray();
 251   bool inline_native_getLength();
 252   bool inline_array_copyOf(bool is_copyOfRange);
 253   bool inline_array_equals();
 254   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
 255   bool inline_native_clone(bool is_virtual);
 256   bool inline_native_Reflection_getCallerClass();


 807   case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
 808   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
 809   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
 810   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
 811   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
 812   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
 813   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
 814   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
 815   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);
 816 
 817   case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
 818   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
 819   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
 820   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
 821   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
 822   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
 823   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
 824   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
 825   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);
 826 
 827   case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
 828   case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
 829   case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
 830   case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
 831 
 832   case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
 833   case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
 834   case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
 835   case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
 836 
 837   case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
 838   case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
 839   case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
 840 
 841   case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
 842   case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
 843   case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
 844 
 845   case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
 846   case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
 847   case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
 848   case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
 849   case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
 850 
 851   case vmIntrinsics::_loadFence:
 852   case vmIntrinsics::_storeFence:
 853   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 854 
 855   case vmIntrinsics::_currentThread:            return inline_native_currentThread();


2263 
2264   return _gvn.transform(cmov);
2265 
2266   /*
2267   // This is not as desirable as it may seem, since Min and Max
2268   // nodes do not have a full set of optimizations.
2269   // And they would interfere, anyway, with 'if' optimizations
2270   // and with CMoveI canonical forms.
2271   switch (id) {
2272   case vmIntrinsics::_min:
2273     result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2274   case vmIntrinsics::_max:
2275     result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2276   default:
2277     ShouldNotReachHere();
2278   }
2279   */
2280 }
2281 
2282 inline int
2283 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
2284   const TypePtr* base_type = TypePtr::NULL_PTR;
2285   if (base != NULL)  base_type = _gvn.type(base)->isa_ptr();
2286   if (base_type == NULL) {
2287     // Unknown type.
2288     return Type::AnyPtr;
2289   } else if (base_type == TypePtr::NULL_PTR) {
2290     // Since this is a NULL+long form, we have to switch to a rawptr.
2291     base   = _gvn.transform(new CastX2PNode(offset));
2292     offset = MakeConX(0);
2293     return Type::RawPtr;
2294   } else if (base_type->base() == Type::RawPtr) {
2295     return Type::RawPtr;
2296   } else if (base_type->isa_oopptr()) {




2297     // Base is never null => always a heap address.
2298     if (base_type->ptr() == TypePtr::NotNull) {

2299       return Type::OopPtr;
2300     }
2301     // Offset is small => always a heap address.
2302     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2303     if (offset_type != NULL &&
2304         base_type->offset() == 0 &&     // (should always be?)
2305         offset_type->_lo >= 0 &&
2306         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {

2307       return Type::OopPtr;
2308     }
2309     // Otherwise, it might either be oop+off or NULL+addr.















2310     return Type::AnyPtr;
2311   } else {
2312     // No information:
2313     return Type::AnyPtr;
2314   }
2315 }
2316 
2317 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
2318   int kind = classify_unsafe_addr(base, offset);
2319   if (kind == Type::RawPtr) {
2320     return basic_plus_adr(top(), base, offset);
2321   } else {
2322     return basic_plus_adr(base, offset);
2323   }
2324 }
2325 
2326 //--------------------------inline_number_methods-----------------------------
2327 // inline int     Integer.numberOfLeadingZeros(int)
2328 // inline int        Long.numberOfLeadingZeros(long)
2329 //
2330 // inline int     Integer.numberOfTrailingZeros(int)
2331 // inline int        Long.numberOfTrailingZeros(long)
2332 //
2333 // inline int     Integer.bitCount(int)
2334 // inline int        Long.bitCount(long)
2335 //
2336 // inline char  Character.reverseBytes(char)
2337 // inline short     Short.reverseBytes(short)
2338 // inline int     Integer.reverseBytes(int)


2443                     __ ctrl(),
2444                     NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2445                     pre_val /* pre_val */,
2446                     T_OBJECT);
2447         if (need_mem_bar) {
2448           // Add memory barrier to prevent commoning reads from this field
2449           // across safepoint since GC can change its value.
2450           insert_mem_bar(Op_MemBarCPUOrder);
2451         }
2452         // Update IdealKit from graphKit.
2453         __ sync_kit(this);
2454 
2455       } __ end_if(); // _ref_type != ref_none
2456   } __ end_if(); // offset == referent_offset
2457 
2458   // Final sync IdealKit and GraphKit.
2459   final_sync(ideal);
2460 #undef __
2461 }
2462 
2463 
2464 // Interpret Unsafe.fieldOffset cookies correctly:
2465 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2466 
2467 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
2468   // Attempt to infer a sharper value type from the offset and base type.
2469   ciKlass* sharpened_klass = NULL;
2470 
2471   // See if it is an instance field, with an object type.
2472   if (alias_type->field() != NULL) {
2473     assert(!is_native_ptr, "native pointer op cannot use a java address");
2474     if (alias_type->field()->type()->is_klass()) {
2475       sharpened_klass = alias_type->field()->type()->as_klass();
2476     }
2477   }
2478 
2479   // See if it is a narrow oop array.
2480   if (adr_type->isa_aryptr()) {
2481     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2482       const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2483       if (elem_type != NULL) {
2484         sharpened_klass = elem_type->klass();
2485       }
2486     }
2487   }
2488 
2489   // The sharpened class might be unloaded if there is no class loader
2490   // constraint in place.
2491   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2492     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2493 
2494 #ifndef PRODUCT
2495     if (C->print_intrinsics() || C->print_inlining()) {
2496       tty->print("  from base type: ");  adr_type->dump();
2497       tty->print("  sharpened value: ");  tjp->dump();
2498     }
2499 #endif
2500     // Sharpen the value type.
2501     return tjp;
2502   }
2503   return NULL;
2504 }
2505 
2506 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2507   if (callee()->is_static())  return false;  // caller must have the capability!
2508 
2509 #ifndef PRODUCT
2510   {
2511     ResourceMark rm;
2512     // Check the signatures.
2513     ciSignature* sig = callee()->signature();
2514 #ifdef ASSERT
2515     if (!is_store) {
2516       // Object getObject(Object base, int/long offset), etc.
2517       BasicType rtype = sig->return_type()->basic_type();
2518       if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2519           rtype = T_ADDRESS;  // it is really a C void*
2520       assert(rtype == type, "getter must return the expected value");
2521       if (!is_native_ptr) {
2522         assert(sig->count() == 2, "oop getter has 2 arguments");
2523         assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2524         assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2525       } else {
2526         assert(sig->count() == 1, "native getter has 1 argument");


2544     }
2545 #endif // ASSERT
2546  }
2547 #endif //PRODUCT
2548 
2549   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2550 
2551   Node* receiver = argument(0);  // type: oop
2552 
2553   // Build address expression.
2554   Node* adr;
2555   Node* heap_base_oop = top();
2556   Node* offset = top();
2557   Node* val;
2558 
2559   if (!is_native_ptr) {
2560     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2561     Node* base = argument(1);  // type: oop
2562     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2563     offset = argument(2);  // type: long
2564     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2565     // to be plain byte offsets, which are also the same as those accepted
2566     // by oopDesc::field_base.
2567     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2568            "fieldOffset must be byte-scaled");
2569     // 32-bit machines ignore the high half!
2570     offset = ConvL2X(offset);
2571     adr = make_unsafe_address(base, offset);
2572     heap_base_oop = base;
2573     val = is_store ? argument(4) : NULL;
2574   } else {
2575     Node* ptr = argument(1);  // type: long
2576     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2577     adr = make_unsafe_address(NULL, ptr);
2578     val = is_store ? argument(3) : NULL;
2579   }
2580 
2581   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2582 
2583   // First guess at the value type.
2584   const Type *value_type = Type::get_const_basic_type(type);
2585 
2586   // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
2587   // there was not enough information to nail it down.
2588   Compile::AliasType* alias_type = C->alias_type(adr_type);
2589   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2590 
2591   // We will need memory barriers unless we can determine a unique
2592   // alias category for this reference.  (Note:  If for some reason
2593   // the barriers get omitted and the unsafe reference begins to "pollute"
2594   // the alias analysis of the rest of the graph, either Compile::can_alias
2595   // or Compile::must_alias will throw a diagnostic assert.)
2596   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2597 


2635       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2636         insert_mem_bar(Op_MemBarVolatile);
2637       }
2638     }
2639   }
2640 
2641   // Memory barrier to prevent normal and 'unsafe' accesses from
2642   // bypassing each other.  Happens after null checks, so the
2643   // exception paths do not take memory state from the memory barrier,
2644   // so there's no problems making a strong assert about mixing users
2645   // of safe & unsafe memory.
2646   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2647 
2648    if (!is_store) {
2649     Node* p = NULL;
2650     // Try to constant fold a load from a constant field
2651     ciField* field = alias_type->field();
2652     if (heap_base_oop != top() &&
2653         field != NULL && field->is_constant() && field->layout_type() == type) {
2654       // final or stable field
2655       const Type* con_type = Type::make_constant(alias_type->field(), heap_base_oop);

2656       if (con_type != NULL) {






2657         p = makecon(con_type);
2658       }
2659     }
2660     if (p == NULL) {
2661       MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2662       // To be valid, unsafe loads may depend on other conditions than
2663       // the one that guards them: pin the Load node
2664       p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
2665       // load value
2666       switch (type) {
2667       case T_BOOLEAN:
2668       case T_CHAR:
2669       case T_BYTE:
2670       case T_SHORT:
2671       case T_INT:
2672       case T_LONG:
2673       case T_FLOAT:
2674       case T_DOUBLE:
2675         break;
2676       case T_OBJECT:


2689       }
2690     }
2691     // The load node has the control of the preceding MemBarCPUOrder.  All
2692     // following nodes will have the control of the MemBarCPUOrder inserted at
2693     // the end of this method.  So, pushing the load onto the stack at a later
2694     // point is fine.
2695     set_result(p);
2696   } else {
2697     // place effect of store into memory
2698     switch (type) {
2699     case T_DOUBLE:
2700       val = dstore_rounding(val);
2701       break;
2702     case T_ADDRESS:
2703       // Repackage the long as a pointer.
2704       val = ConvL2X(val);
2705       val = _gvn.transform(new CastX2PNode(val));
2706       break;
2707     }
2708 











2709     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2710     if (type != T_OBJECT ) {
2711       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2712     } else {
2713       // Possibly an oop being stored to Java heap or native memory
2714       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2715         // oop to Java heap.
2716         (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2717       } else {
2718         // We can't tell at compile time if we are storing in the Java heap or outside
2719         // of it. So we need to emit code to conditionally do the proper type of
2720         // store.
2721 
2722         IdealKit ideal(this);
2723 #define __ ideal.
2724         // QQQ who knows what probability is here??
2725         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2726           // Sync IdealKit and graphKit.
2727           sync_kit(ideal);
2728           Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);


2823     receiver = argument(0);  // type: oop
2824     base     = argument(1);  // type: oop
2825     offset   = argument(2);  // type: long
2826     oldval   = argument(4);  // type: oop, int, or long
2827     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2828   } else if (kind == LS_xadd || kind == LS_xchg){
2829     receiver = argument(0);  // type: oop
2830     base     = argument(1);  // type: oop
2831     offset   = argument(2);  // type: long
2832     oldval   = NULL;
2833     newval   = argument(4);  // type: oop, int, or long
2834   }
2835 
2836   // Null check receiver.
2837   receiver = null_check(receiver);
2838   if (stopped()) {
2839     return true;
2840   }
2841 
2842   // Build field offset expression.
2843   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2844   // to be plain byte offsets, which are also the same as those accepted
2845   // by oopDesc::field_base.
2846   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2847   // 32-bit machines ignore the high half of long offsets
2848   offset = ConvL2X(offset);
2849   Node* adr = make_unsafe_address(base, offset);
2850   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2851 
2852   // For CAS, unlike inline_unsafe_access, there seems no point in
2853   // trying to refine types. Just use the coarse types here.
2854   const Type *value_type = Type::get_const_basic_type(type);
2855   Compile::AliasType* alias_type = C->alias_type(adr_type);
2856   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2857 
2858   if (kind == LS_xchg && type == T_OBJECT) {
2859     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2860     if (tjp != NULL) {
2861       value_type = tjp;
2862     }
2863   }
2864 
2865   int alias_idx = C->get_alias_index(adr_type);
2866 
2867   // Memory-model-wise, a LoadStore acts like a little synchronized
2868   // block, so needs barriers on each side.  These don't translate
2869   // into actual barriers on most machines, but we still need rest of


3018     assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
3019 #endif // ASSERT
3020   }
3021 #endif //PRODUCT
3022 
3023   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3024 
3025   // Get arguments:
3026   Node* receiver = argument(0);  // type: oop
3027   Node* base     = argument(1);  // type: oop
3028   Node* offset   = argument(2);  // type: long
3029   Node* val      = argument(4);  // type: oop, int, or long
3030 
3031   // Null check receiver.
3032   receiver = null_check(receiver);
3033   if (stopped()) {
3034     return true;
3035   }
3036 
3037   // Build field offset expression.
3038   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
3039   // 32-bit machines ignore the high half of long offsets
3040   offset = ConvL2X(offset);
3041   Node* adr = make_unsafe_address(base, offset);
3042   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3043   const Type *value_type = Type::get_const_basic_type(type);
3044   Compile::AliasType* alias_type = C->alias_type(adr_type);
3045 
3046   insert_mem_bar(Op_MemBarRelease);
3047   insert_mem_bar(Op_MemBarCPUOrder);
3048   // Ensure that the store is atomic for longs:
3049   const bool require_atomic_access = true;
3050   Node* store;
3051   if (type == T_OBJECT) // reference stores need a store barrier.
3052     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
3053   else {
3054     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
3055   }
3056   insert_mem_bar(Op_MemBarCPUOrder);
3057   return true;
3058 }
3059 
3060 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
3061   // Regardless of form, don't allow previous ld/st to move down,


4407 #define XTOP ,top() /*additional argument*/
4408 #else  //_LP64
4409 #define XTOP        /*no additional argument*/
4410 #endif //_LP64
4411 
4412 //----------------------inline_unsafe_copyMemory-------------------------
4413 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4414 bool LibraryCallKit::inline_unsafe_copyMemory() {
4415   if (callee()->is_static())  return false;  // caller must have the capability!
4416   null_check_receiver();  // null-check receiver
4417   if (stopped())  return true;
4418 
4419   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4420 
4421   Node* src_ptr =         argument(1);   // type: oop
4422   Node* src_off = ConvL2X(argument(2));  // type: long
4423   Node* dst_ptr =         argument(4);   // type: oop
4424   Node* dst_off = ConvL2X(argument(5));  // type: long
4425   Node* size    = ConvL2X(argument(7));  // type: long
4426 
4427   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4428          "fieldOffset must be byte-scaled");
4429 
4430   Node* src = make_unsafe_address(src_ptr, src_off);
4431   Node* dst = make_unsafe_address(dst_ptr, dst_off);
4432 
4433   // Conservatively insert a memory barrier on all memory slices.
4434   // Do not let writes of the copy source or destination float below the copy.
4435   insert_mem_bar(Op_MemBarCPUOrder);
4436 
4437   // Call it.  Note that the length argument is not scaled.
4438   make_runtime_call(RC_LEAF|RC_NO_FP,
4439                     OptoRuntime::fast_arraycopy_Type(),
4440                     StubRoutines::unsafe_arraycopy(),
4441                     "unsafe_arraycopy",
4442                     TypeRawPtr::BOTTOM,
4443                     src, dst, size XTOP);
4444 
4445   // Do not let reads of the copy destination float above the copy.
4446   insert_mem_bar(Op_MemBarCPUOrder);
4447 
4448   return true;
4449 }
4450 
4451 //------------------------clone_coping-----------------------------------




  30 #include "compiler/compileLog.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/convertnode.hpp"
  38 #include "opto/countbitsnode.hpp"
  39 #include "opto/intrinsicnode.hpp"
  40 #include "opto/idealKit.hpp"
  41 #include "opto/mathexactnode.hpp"
  42 #include "opto/movenode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/opaquenode.hpp"
  46 #include "opto/parse.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "opto/subnode.hpp"
  49 #include "prims/nativeLookup.hpp"
  50 #include "prims/unsafe.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "trace/traceMacros.hpp"
  53 
  54 class LibraryIntrinsic : public InlineCallGenerator {
  55   // Extend the set of intrinsics known to the runtime:
  56  public:
  57  private:
  58   bool             _is_virtual;
  59   bool             _does_virtual_dispatch;
  60   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  61   int8_t           _last_predicate; // Last generated predicate
  62   vmIntrinsics::ID _intrinsic_id;
  63 
  64  public:
  65   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  66     : InlineCallGenerator(m),
  67       _is_virtual(is_virtual),
  68       _does_virtual_dispatch(does_virtual_dispatch),
  69       _predicates_count((int8_t)predicates_count),
  70       _last_predicate((int8_t)-1),


 211   bool inline_math_native(vmIntrinsics::ID id);
 212   bool inline_trig(vmIntrinsics::ID id);
 213   bool inline_math(vmIntrinsics::ID id);
 214   template <typename OverflowOp>
 215   bool inline_math_overflow(Node* arg1, Node* arg2);
 216   void inline_math_mathExact(Node* math, Node* test);
 217   bool inline_math_addExactI(bool is_increment);
 218   bool inline_math_addExactL(bool is_increment);
 219   bool inline_math_multiplyExactI();
 220   bool inline_math_multiplyExactL();
 221   bool inline_math_negateExactI();
 222   bool inline_math_negateExactL();
 223   bool inline_math_subtractExactI(bool is_decrement);
 224   bool inline_math_subtractExactL(bool is_decrement);
 225   bool inline_exp();
 226   bool inline_pow();
 227   Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
 228   bool inline_min_max(vmIntrinsics::ID id);
 229   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 230   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 231   int classify_unsafe_addr(Node* &base, Node* &offset, bool decode_offset);
 232   Node* make_unsafe_address(Node* base, Node* offset, bool decode_offset);
 233   // Helper for inline_unsafe_access.
 234   // Generates the guards that check whether the result of
 235   // Unsafe.getObject should be recorded in an SATB log buffer.
 236   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
 237   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned = false);
 238   static bool klass_needs_init_guard(Node* kls);
 239   bool inline_unsafe_allocate();
 240   bool inline_unsafe_copyMemory();
 241   bool inline_native_currentThread();
 242 #ifdef TRACE_HAVE_INTRINSICS
 243   bool inline_native_classID();
 244   bool inline_native_threadID();
 245 #endif
 246   bool inline_native_time_funcs(address method, const char* funcName);
 247   bool inline_native_isInterrupted();
 248   bool inline_native_Class_query(vmIntrinsics::ID id);
 249   bool inline_native_subtype_check();
 250 
 251   bool inline_native_newArray();
 252   bool inline_native_getLength();
 253   bool inline_array_copyOf(bool is_copyOfRange);
 254   bool inline_array_equals();
 255   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
 256   bool inline_native_clone(bool is_virtual);
 257   bool inline_native_Reflection_getCallerClass();


 808   case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
 809   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
 810   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
 811   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
 812   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
 813   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
 814   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
 815   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
 816   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);
 817 
 818   case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
 819   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
 820   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
 821   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
 822   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
 823   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
 824   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
 825   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
 826   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);
 827 
 828   case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, /*unaligned=*/true);
 829   case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, /*unaligned=*/true);
 830   case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, /*unaligned=*/true);
 831   case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, /*unaligned=*/true);
 832 
 833   case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, /*unaligned=*/true);
 834   case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, /*unaligned=*/true);
 835   case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, /*unaligned=*/true);
 836   case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, /*unaligned=*/true);
 837 
 838   case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
 839   case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
 840   case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
 841 
 842   case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
 843   case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
 844   case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
 845 
 846   case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
 847   case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
 848   case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
 849   case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
 850   case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
 851 
 852   case vmIntrinsics::_loadFence:
 853   case vmIntrinsics::_storeFence:
 854   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 855 
 856   case vmIntrinsics::_currentThread:            return inline_native_currentThread();


2264 
2265   return _gvn.transform(cmov);
2266 
2267   /*
2268   // This is not as desirable as it may seem, since Min and Max
2269   // nodes do not have a full set of optimizations.
2270   // And they would interfere, anyway, with 'if' optimizations
2271   // and with CMoveI canonical forms.
2272   switch (id) {
2273   case vmIntrinsics::_min:
2274     result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2275   case vmIntrinsics::_max:
2276     result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2277   default:
2278     ShouldNotReachHere();
2279   }
2280   */
2281 }
2282 
2283 inline int
2284 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, bool decode_offset) {
2285   const TypePtr* base_type = TypePtr::NULL_PTR;
2286   if (base != NULL)  base_type = _gvn.type(base)->isa_ptr();
2287   if (base_type == NULL) {
2288     // Unknown type.
2289     return Type::AnyPtr;
2290   } else if (base_type == TypePtr::NULL_PTR) {
2291     // Since this is a NULL+long form, we have to switch to a rawptr.
2292     base   = _gvn.transform(new CastX2PNode(offset));
2293     offset = MakeConX(0);
2294     return Type::RawPtr;
2295   } else if (base_type->base() == Type::RawPtr) {
2296     return Type::RawPtr;
2297   } else if (base_type->isa_oopptr()) {
2298     Node* decoded_offset = offset;
2299     if (decode_offset) {
2300       decoded_offset = _gvn.transform(new RShiftXNode(offset, intcon(Unsafe::offset_shift)));
2301     }
2302     // Base is never null => always a heap address.
2303     if (base_type->ptr() == TypePtr::NotNull) {
2304       offset = decoded_offset;
2305       return Type::OopPtr;
2306     }
2307     // Offset is small => always a heap address.
2308     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2309     if (offset_type != NULL &&
2310         base_type->offset() == 0 &&     // (should always be?)
2311         offset_type->_lo >= 0 &&
2312         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2313       offset = decoded_offset;
2314       return Type::OopPtr;
2315     }
2316     // Otherwise, it might either be oop+off or NULL+addr.
2317     // For oop+off case the offset should be decoded first, but
2318     // NULL+addr can be used as is.
2319     IdealKit ideal(this);
2320 #define __ ideal.
2321     IdealVariable off(ideal);
2322     __ declarations_done();
2323     __ set(off, offset);
2324     __ if_then(base, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2325       __ set(off, decoded_offset);
2326     } __ end_if();
2327     // Final sync IdealKit and GraphKit.
2328     decoded_offset = __ value(off);
2329     final_sync(ideal);
2330 #undef __
2331     offset = decoded_offset;
2332     return Type::AnyPtr;
2333   } else {
2334     // No information:
2335     return Type::AnyPtr;
2336   }
2337 }
2338 
2339 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset, bool decode_offset) {
2340   int kind = classify_unsafe_addr(base, offset, decode_offset);
2341   if (kind == Type::RawPtr) {
2342     return basic_plus_adr(top(), base, offset);
2343   } else {
2344     return basic_plus_adr(base, offset);
2345   }
2346 }
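
The two helpers above are where this change lands: classify_unsafe_addr() now takes a decode_offset flag, and whenever the base turns out to be a heap oop it replaces the incoming long with offset >> Unsafe::offset_shift; when the base may still be NULL it builds an IdealKit if/then so only the non-NULL (heap) branch sees the decoded value, since a NULL+long raw address must be used as-is. In other words, the values handed out by Unsafe.objectFieldOffset/staticFieldOffset are no longer treated as plain byte offsets (the old assert that Unsafe_field_offset_to_byte_offset(11) == 11 is gone) but as encoded cookies. A minimal standalone sketch of such a cookie scheme, with assumed constants (the real offset_shift and final_mask live in prims/unsafe.hpp and are not shown in this webrev):

    // Illustrative only: the shift and mask values below are assumptions.
    #include <cstdint>
    #include <cstdio>

    const int      assumed_offset_shift = 1;   // assumption
    const intptr_t assumed_final_mask   = 1;   // assumption: low bit marks a final field

    // What Unsafe.objectFieldOffset would hand out under this scheme.
    intptr_t encode_offset(intptr_t byte_offset, bool is_final) {
      return (byte_offset << assumed_offset_shift) | (is_final ? assumed_final_mask : 0);
    }

    // What classify_unsafe_addr() recovers on the oop+offset path.
    intptr_t decode_offset(intptr_t cookie) {
      return cookie >> assumed_offset_shift;
    }

    int main() {
      intptr_t cookie = encode_offset(24, /*is_final=*/true);
      std::printf("cookie=%ld byte_offset=%ld final_bit=%d\n",
                  (long)cookie, (long)decode_offset(cookie),
                  (int)((cookie & assumed_final_mask) != 0));
      return 0;
    }

Note that the unaligned accessors (dispatch hunk around lines 828-836) end up calling make_unsafe_address() with decode=false via the unaligned flag (line 2584), which suggests those entry points are handed plain byte offsets rather than cookies.
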
2347 
2348 //--------------------------inline_number_methods-----------------------------
2349 // inline int     Integer.numberOfLeadingZeros(int)
2350 // inline int        Long.numberOfLeadingZeros(long)
2351 //
2352 // inline int     Integer.numberOfTrailingZeros(int)
2353 // inline int        Long.numberOfTrailingZeros(long)
2354 //
2355 // inline int     Integer.bitCount(int)
2356 // inline int        Long.bitCount(long)
2357 //
2358 // inline char  Character.reverseBytes(char)
2359 // inline short     Short.reverseBytes(short)
2360 // inline int     Integer.reverseBytes(int)


2465                     __ ctrl(),
2466                     NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2467                     pre_val /* pre_val */,
2468                     T_OBJECT);
2469         if (need_mem_bar) {
2470           // Add memory barrier to prevent commoning reads from this field
2471           // across safepoint since GC can change its value.
2472           insert_mem_bar(Op_MemBarCPUOrder);
2473         }
2474         // Update IdealKit from graphKit.
2475         __ sync_kit(this);
2476 
2477       } __ end_if(); // _ref_type != ref_none
2478   } __ end_if(); // offset == referent_offset
2479 
2480   // Final sync IdealKit and GraphKit.
2481   final_sync(ideal);
2482 #undef __
2483 }
2484 
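
For context, the hunk ending at line 2483 is the tail of insert_pre_barrier(): when an Unsafe.getObject might be reading Reference.referent, the loaded value is fed to the SATB pre-barrier so the concurrent marker still sees it, and a CPUOrder membar keeps the read from being commoned across a safepoint. A deliberately simplified, standalone sketch of the idea (not the real G1 barrier code; every name here is invented for illustration):

    #include <cstddef>
    #include <cstdio>

    // Toy SATB-style log: while concurrent marking is active, a value that is
    // about to escape to the application gets recorded so the marker visits it.
    struct ToyMarkingContext {
      bool   marking_active = false;
      void*  satb_log[64];
      size_t satb_top = 0;
    };

    void toy_pre_barrier(ToyMarkingContext& ctx, void* pre_val) {
      if (ctx.marking_active && pre_val != nullptr && ctx.satb_top < 64) {
        ctx.satb_log[ctx.satb_top++] = pre_val;   // enqueue for the concurrent marker
      }
    }

    int main() {
      ToyMarkingContext ctx;
      ctx.marking_active = true;
      int referent = 42;                          // stands in for a possibly-weak referent
      toy_pre_barrier(ctx, &referent);
      std::printf("logged %zu value(s)\n", ctx.satb_top);
      return 0;
    }
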




2485 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
2486   // Attempt to infer a sharper value type from the offset and base type.
2487   ciKlass* sharpened_klass = NULL;
2488 
2489   // See if it is an instance field, with an object type.
2490   if (alias_type->field() != NULL) {
2491     assert(!is_native_ptr, "native pointer op cannot use a java address");
2492     if (alias_type->field()->type()->is_klass()) {
2493       sharpened_klass = alias_type->field()->type()->as_klass();
2494     }
2495   }
2496 
2497   // See if it is a narrow oop array.
2498   if (adr_type->isa_aryptr()) {
2499     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2500       const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2501       if (elem_type != NULL) {
2502         sharpened_klass = elem_type->klass();
2503       }
2504     }
2505   }
2506 
2507   // The sharpened class might be unloaded if there is no class loader
2508   // constraint in place.
2509   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2510     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2511 
2512 #ifndef PRODUCT
2513     if (C->print_intrinsics() || C->print_inlining()) {
2514       tty->print("  from base type: ");  adr_type->dump();
2515       tty->print("  sharpened value: ");  tjp->dump();
2516     }
2517 #endif
2518     // Sharpen the value type.
2519     return tjp;
2520   }
2521   return NULL;
2522 }
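
sharpen_unsafe_type() itself is untouched by this change, but it is worth spelling out what it computes: starting from no sharpening, a declared instance-field type or an object-array element type (when the offset points past the array header) may supply a more precise oop type, and the result is used only if that class is already loaded. A toy, purely illustrative model of that decision (none of these types are HotSpot types):

    #include <iostream>
    #include <string>

    struct FieldInfo { std::string klass; };                        // declared field type, "" if none
    struct ArrayInfo { std::string elem_klass; bool past_header; }; // element type of an object array

    // Mirrors the shape of the function above: later, more specific sources win,
    // and an unloaded class yields no sharpening at all.
    std::string sharpen(const FieldInfo* field, const ArrayInfo* arr,
                        bool (*is_loaded)(const std::string&)) {
      std::string k;
      if (field != nullptr && !field->klass.empty())                      k = field->klass;
      if (arr != nullptr && arr->past_header && !arr->elem_klass.empty()) k = arr->elem_klass;
      return (!k.empty() && is_loaded(k)) ? k : std::string();
    }

    int main() {
      ArrayInfo a{"java/lang/String", /*past_header=*/true};
      std::string k = sharpen(nullptr, &a, [](const std::string&) { return true; });
      std::cout << (k.empty() ? "no sharpening" : k) << std::endl;
      return 0;
    }
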
2523 
2524 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
2525   if (callee()->is_static())  return false;  // caller must have the capability!
2526 
2527 #ifndef PRODUCT
2528   {
2529     ResourceMark rm;
2530     // Check the signatures.
2531     ciSignature* sig = callee()->signature();
2532 #ifdef ASSERT
2533     if (!is_store) {
2534       // Object getObject(Object base, int/long offset), etc.
2535       BasicType rtype = sig->return_type()->basic_type();
2536       if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2537           rtype = T_ADDRESS;  // it is really a C void*
2538       assert(rtype == type, "getter must return the expected value");
2539       if (!is_native_ptr) {
2540         assert(sig->count() == 2, "oop getter has 2 arguments");
2541         assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2542         assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2543       } else {
2544         assert(sig->count() == 1, "native getter has 1 argument");


2562     }
2563 #endif // ASSERT
2564  }
2565 #endif //PRODUCT
2566 
2567   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2568 
2569   Node* receiver = argument(0);  // type: oop
2570 
2571   // Build address expression.
2572   Node* adr;
2573   Node* heap_base_oop = top();
2574   Node* offset = top();
2575   Node* val;
2576 
2577   if (!is_native_ptr) {
2578     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2579     Node* base = argument(1);  // type: oop
2580     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2581     offset = argument(2);  // type: long





2582     // 32-bit machines ignore the high half!
2583     offset = ConvL2X(offset);
2584     adr = make_unsafe_address(base, offset, /*decode=*/!unaligned);
2585     heap_base_oop = base;
2586     val = is_store ? argument(4) : NULL;
2587   } else {
2588     Node* ptr = argument(1);  // type: long
2589     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2590     adr = make_unsafe_address(NULL, ptr, /*decode=*/false);
2591     val = is_store ? argument(3) : NULL;
2592   }
2593 
2594   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2595 
2596   // First guess at the value type.
2597   const Type *value_type = Type::get_const_basic_type(type);
2598 
2599   // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
2600   // there was not enough information to nail it down.
2601   Compile::AliasType* alias_type = C->alias_type(adr_type);
2602   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2603 
2604   // We will need memory barriers unless we can determine a unique
2605   // alias category for this reference.  (Note:  If for some reason
2606   // the barriers get omitted and the unsafe reference begins to "pollute"
2607   // the alias analysis of the rest of the graph, either Compile::can_alias
2608   // or Compile::must_alias will throw a diagnostic assert.)
2609   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2610 


2648       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2649         insert_mem_bar(Op_MemBarVolatile);
2650       }
2651     }
2652   }
2653 
2654   // Memory barrier to prevent normal and 'unsafe' accesses from
2655   // bypassing each other.  Happens after null checks, so the
2656   // exception paths do not take memory state from the memory barrier,
2657   // so there's no problems making a strong assert about mixing users
2658   // of safe & unsafe memory.
2659   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2660 
2661    if (!is_store) {
2662     Node* p = NULL;
2663     // Try to constant fold a load from a constant field
2664     ciField* field = alias_type->field();
2665     if (heap_base_oop != top() &&
2666         field != NULL && field->is_constant() && field->layout_type() == type) {
2667       // final or stable field
2668       ciField* field = alias_type->field();
2669       const Type* con_type = Type::make_constant(field, heap_base_oop);
2670       if (con_type != NULL) {
2671         if (TrustFinalNonStaticFields &&
2672             !field->is_static() && heap_base_oop->is_Con()) {
2673           const TypeOopPtr* oop_ptr = heap_base_oop->bottom_type()->isa_oopptr();
2674           ciObject* constant_oop = oop_ptr->const_oop();
2675           C->dependencies()->assert_constant_field_value_instance(field, constant_oop);
2676         }
2677         p = makecon(con_type);
2678       }
2679     }
2680     if (p == NULL) {
2681       MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2682       // To be valid, unsafe loads may depend on other conditions than
2683       // the one that guards them: pin the Load node
2684       p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
2685       // load value
2686       switch (type) {
2687       case T_BOOLEAN:
2688       case T_CHAR:
2689       case T_BYTE:
2690       case T_SHORT:
2691       case T_INT:
2692       case T_LONG:
2693       case T_FLOAT:
2694       case T_DOUBLE:
2695         break;
2696       case T_OBJECT:


2709       }
2710     }
2711     // The load node has the control of the preceding MemBarCPUOrder.  All
2712     // following nodes will have the control of the MemBarCPUOrder inserted at
2713     // the end of this method.  So, pushing the load onto the stack at a later
2714     // point is fine.
2715     set_result(p);
2716   } else {
2717     // place effect of store into memory
2718     switch (type) {
2719     case T_DOUBLE:
2720       val = dstore_rounding(val);
2721       break;
2722     case T_ADDRESS:
2723       // Repackage the long as a pointer.
2724       val = ConvL2X(val);
2725       val = _gvn.transform(new CastX2PNode(val));
2726       break;
2727     }
2728 
2729     { // Need to check all dependent nmethods when final field is updated through Unsafe.
2730       Node* final_bit = _gvn.transform(new AndXNode(/*offset*/argument(2), MakeConX(Unsafe::final_mask)));
2731       Node* cmp_final_bit = _gvn.transform(new CmpXNode(final_bit, MakeConX(0)));
2732       Node* bol_final_bit = _gvn.transform(new BoolNode(cmp_final_bit, BoolTest::eq));
2733 
2734       BuildCutout unless(this, bol_final_bit, PROB_MAX);
2735       uncommon_trap(Deoptimization::Reason_intrinsic,
2736                     Deoptimization::Action_none,
2737                     NULL, "final_field_unsafe_update");
2738     }
2739 
2740     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2741     if (type != T_OBJECT ) {
2742       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2743     } else {
2744       // Possibly an oop being stored to Java heap or native memory
2745       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2746         // oop to Java heap.
2747         (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2748       } else {
2749         // We can't tell at compile time if we are storing in the Java heap or outside
2750         // of it. So we need to emit code to conditionally do the proper type of
2751         // store.
2752 
2753         IdealKit ideal(this);
2754 #define __ ideal.
2755         // QQQ who knows what probability is here??
2756         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2757           // Sync IdealKit and graphKit.
2758           sync_kit(ideal);
2759           Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
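
Two additions inside inline_unsafe_access() deserve a note. On the load path (lines 2667-2678), a final or stable field is still constant-folded, but when TrustFinalNonStaticFields folds an instance field of a constant receiver the compiler now records a dependency (assert_constant_field_value_instance) on that particular oop. On the store path (lines 2729-2738), the still-encoded offset is tested against Unsafe::final_mask before any store, and a set final bit takes an uncommon trap ("final_field_unsafe_update") instead of storing, so the update goes through a path where dependent nmethods can be checked, per the comment at line 2729. A standalone sketch of that guard, reusing the assumed mask value from the earlier sketch:

    #include <cstdint>
    #include <cstdio>

    const intptr_t assumed_final_mask = 1;   // assumption; the real value is in prims/unsafe.hpp

    // Models the BuildCutout above: the fast path requires (cookie & final_mask) == 0;
    // a set bit diverts to the deoptimization path before the store happens.
    void unsafe_store_guard(intptr_t offset_cookie, void (*deoptimize)(const char*)) {
      if ((offset_cookie & assumed_final_mask) != 0) {
        deoptimize("final_field_unsafe_update");   // Reason_intrinsic, Action_none in the IR
        return;
      }
      // ... the ordinary store through the decoded byte offset would follow here ...
    }

    int main() {
      intptr_t final_field_cookie = (intptr_t(24) << 1) | 1;   // assumed encoding
      unsafe_store_guard(final_field_cookie,
                         [](const char* why) { std::printf("deopt: %s\n", why); });
      return 0;
    }
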


2854     receiver = argument(0);  // type: oop
2855     base     = argument(1);  // type: oop
2856     offset   = argument(2);  // type: long
2857     oldval   = argument(4);  // type: oop, int, or long
2858     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2859   } else if (kind == LS_xadd || kind == LS_xchg){
2860     receiver = argument(0);  // type: oop
2861     base     = argument(1);  // type: oop
2862     offset   = argument(2);  // type: long
2863     oldval   = NULL;
2864     newval   = argument(4);  // type: oop, int, or long
2865   }
2866 
2867   // Null check receiver.
2868   receiver = null_check(receiver);
2869   if (stopped()) {
2870     return true;
2871   }
2872 
2873   // Build field offset expression.




2874   // 32-bit machines ignore the high half of long offsets
2875   offset = ConvL2X(offset);
2876   Node* adr = make_unsafe_address(base, offset, /*decode=*/true);
2877   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2878 
2879   // For CAS, unlike inline_unsafe_access, there seems no point in
2880   // trying to refine types. Just use the coarse types here.
2881   const Type *value_type = Type::get_const_basic_type(type);
2882   Compile::AliasType* alias_type = C->alias_type(adr_type);
2883   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2884 
2885   if (kind == LS_xchg && type == T_OBJECT) {
2886     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2887     if (tjp != NULL) {
2888       value_type = tjp;
2889     }
2890   }
2891 
2892   int alias_idx = C->get_alias_index(adr_type);
2893 
2894   // Memory-model-wise, a LoadStore acts like a little synchronized
2895   // block, so needs barriers on each side.  These don't translate
2896   // into actual barriers on most machines, but we still need rest of


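The atomic load/store hunk above (inline_unsafe_load_store, feeding compareAndSwap*, getAndAdd* and getAndSet*) also decodes the offset cookie before forming the address (line 2876), and its comment explains why the LoadStore node is fenced on both sides: memory-model-wise it behaves like a little synchronized block. As a rough user-level analogy, not the C2 IR, the three LS_ kinds correspond to std::atomic operations carrying both acquire and release semantics:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> cell{0};

      // LS_cmpxchg: compareAndSwapInt
      int expected = 0;
      bool swapped = cell.compare_exchange_strong(expected, 42,
                                                  std::memory_order_acq_rel,
                                                  std::memory_order_acquire);
      // LS_xadd: getAndAddInt (returns the previous value)
      int before_add = cell.fetch_add(1, std::memory_order_acq_rel);

      // LS_xchg: getAndSetInt (returns the previous value)
      int before_set = cell.exchange(7, std::memory_order_acq_rel);

      std::printf("swapped=%d before_add=%d before_set=%d now=%d\n",
                  (int)swapped, before_add, before_set, cell.load());
      return 0;
    }
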
3045     assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
3046 #endif // ASSERT
3047   }
3048 #endif //PRODUCT
3049 
3050   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3051 
3052   // Get arguments:
3053   Node* receiver = argument(0);  // type: oop
3054   Node* base     = argument(1);  // type: oop
3055   Node* offset   = argument(2);  // type: long
3056   Node* val      = argument(4);  // type: oop, int, or long
3057 
3058   // Null check receiver.
3059   receiver = null_check(receiver);
3060   if (stopped()) {
3061     return true;
3062   }
3063 
3064   // Build field offset expression.

3065   // 32-bit machines ignore the high half of long offsets
3066   offset = ConvL2X(offset);
3067   Node* adr = make_unsafe_address(base, offset, /*decode=*/true);
3068   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3069   const Type *value_type = Type::get_const_basic_type(type);
3070   Compile::AliasType* alias_type = C->alias_type(adr_type);
3071 
3072   insert_mem_bar(Op_MemBarRelease);
3073   insert_mem_bar(Op_MemBarCPUOrder);
3074   // Ensure that the store is atomic for longs:
3075   const bool require_atomic_access = true;
3076   Node* store;
3077   if (type == T_OBJECT) // reference stores need a store barrier.
3078     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
3079   else {
3080     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
3081   }
3082   insert_mem_bar(Op_MemBarCPUOrder);
3083   return true;
3084 }
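
inline_unsafe_ordered_store() (unchanged apart from the decoded offset) shows the memory-ordering recipe for Unsafe.putOrdered*: a MemBarRelease plus MemBarCPUOrder before the access, a release-flavored store (store_oop_to_unknown for oops, an atomic store_to_memory for longs), and another CPUOrder barrier after it. As a rough user-level analogy only, not the C2 IR, the same publish/consume ordering looks like this with std::atomic:

    #include <atomic>
    #include <thread>
    #include <cstdio>

    std::atomic<long> ready{0};
    long payload = 0;

    // Writer: fill in the data, then release-store the flag, roughly what
    // putOrderedLong gives the Java programmer.
    void publish(long v) {
      payload = v;                                  // ordinary store
      ready.store(1, std::memory_order_release);    // earlier stores may not sink below this
    }

    // Reader: an acquire load pairs with the release store above.
    long consume() {
      while (ready.load(std::memory_order_acquire) == 0) { /* spin */ }
      return payload;
    }

    int main() {
      std::thread writer([] { publish(42); });
      std::printf("%ld\n", consume());
      writer.join();
      return 0;
    }
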
3085 
3086 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
3087   // Regardless of form, don't allow previous ld/st to move down,


4433 #define XTOP ,top() /*additional argument*/
4434 #else  //_LP64
4435 #define XTOP        /*no additional argument*/
4436 #endif //_LP64
4437 
4438 //----------------------inline_unsafe_copyMemory-------------------------
4439 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4440 bool LibraryCallKit::inline_unsafe_copyMemory() {
4441   if (callee()->is_static())  return false;  // caller must have the capability!
4442   null_check_receiver();  // null-check receiver
4443   if (stopped())  return true;
4444 
4445   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4446 
4447   Node* src_ptr =         argument(1);   // type: oop
4448   Node* src_off = ConvL2X(argument(2));  // type: long
4449   Node* dst_ptr =         argument(4);   // type: oop
4450   Node* dst_off = ConvL2X(argument(5));  // type: long
4451   Node* size    = ConvL2X(argument(7));  // type: long
4452 
4453   Node* src = make_unsafe_address(src_ptr, src_off, /*decode=*/true);
4454   Node* dst = make_unsafe_address(dst_ptr, dst_off, /*decode=*/true);



4455 
4456   // Conservatively insert a memory barrier on all memory slices.
4457   // Do not let writes of the copy source or destination float below the copy.
4458   insert_mem_bar(Op_MemBarCPUOrder);
4459 
4460   // Call it.  Note that the length argument is not scaled.
4461   make_runtime_call(RC_LEAF|RC_NO_FP,
4462                     OptoRuntime::fast_arraycopy_Type(),
4463                     StubRoutines::unsafe_arraycopy(),
4464                     "unsafe_arraycopy",
4465                     TypeRawPtr::BOTTOM,
4466                     src, dst, size XTOP);
4467 
4468   // Do not let reads of the copy destination float above the copy.
4469   insert_mem_bar(Op_MemBarCPUOrder);
4470 
4471   return true;
4472 }
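
inline_unsafe_copyMemory() now pushes both the source and destination offsets through make_unsafe_address() with decoding enabled, then emits a leaf call to the unsafe_arraycopy stub with the two raw addresses and the unscaled byte count, bracketed by CPUOrder barriers so surrounding reads and writes cannot float across the copy. Conceptually (a sketch of the effect, not the stub itself), the call boils down to an unchecked byte copy between absolute addresses:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // The leaf call amounts to this once the addresses have been formed:
    // a raw, unscaled, bounds-unchecked byte copy.
    void unsafe_copy(void* src_base, intptr_t src_off,
                     void* dst_base, intptr_t dst_off, size_t bytes) {
      std::memcpy(static_cast<char*>(dst_base) + dst_off,
                  static_cast<char*>(src_base) + src_off, bytes);
    }

    int main() {
      char src[8] = "abcdefg";
      char dst[8] = {0};
      unsafe_copy(src, 0, dst, 0, sizeof(src));
      std::printf("%s\n", dst);
      return 0;
    }
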
4473 
4474 //------------------------clone_coping-----------------------------------

