
src/share/vm/opto/library_call.cpp

rev 12906 : [mq]: gc_interface


 223   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
 224   bool inline_math_native(vmIntrinsics::ID id);
 225   bool inline_math(vmIntrinsics::ID id);
 226   template <typename OverflowOp>
 227   bool inline_math_overflow(Node* arg1, Node* arg2);
 228   void inline_math_mathExact(Node* math, Node* test);
 229   bool inline_math_addExactI(bool is_increment);
 230   bool inline_math_addExactL(bool is_increment);
 231   bool inline_math_multiplyExactI();
 232   bool inline_math_multiplyExactL();
 233   bool inline_math_negateExactI();
 234   bool inline_math_negateExactL();
 235   bool inline_math_subtractExactI(bool is_decrement);
 236   bool inline_math_subtractExactL(bool is_decrement);
 237   bool inline_min_max(vmIntrinsics::ID id);
 238   bool inline_notify(vmIntrinsics::ID id);
 239   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 240   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 241   int classify_unsafe_addr(Node* &base, Node* &offset);
 242   Node* make_unsafe_address(Node* base, Node* offset);
 243   // Helper for inline_unsafe_access.
 244   // Generates the guards that check whether the result of
 245   // Unsafe.getObject should be recorded in an SATB log buffer.
 246   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
 247 
 248   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
 249   bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
 250   static bool klass_needs_init_guard(Node* kls);
 251   bool inline_unsafe_allocate();
 252   bool inline_unsafe_newArray(bool uninitialized);
 253   bool inline_unsafe_copyMemory();
 254   bool inline_native_currentThread();
 255 
 256   bool inline_native_time_funcs(address method, const char* funcName);
 257 #ifdef TRACE_HAVE_INTRINSICS
 258   bool inline_native_classID();
 259   bool inline_native_getBufferWriter();
 260 #endif
 261   bool inline_native_isInterrupted();
 262   bool inline_native_Class_query(vmIntrinsics::ID id);
 263   bool inline_native_subtype_check();
 264   bool inline_native_getLength();
 265   bool inline_array_copyOf(bool is_copyOfRange);
 266   bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
 267   bool inline_preconditions_checkIndex();
 268   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
 269   bool inline_native_clone(bool is_virtual);
 270   bool inline_native_Reflection_getCallerClass();
 271   // Helper function for inlining native object hash method
 272   bool inline_native_hashcode(bool is_virtual, bool is_static);
 273   bool inline_native_getClass();
 274 
 275   // Helper functions for inlining arraycopy
 276   bool inline_arraycopy();
 277   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
 278                                                 RegionNode* slow_region);
 279   JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
 280   void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp,
 281                                       uint new_idx);
 282 
 283   typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
 284   MemNode::MemOrd access_kind_to_memord_LS(AccessKind access_kind, bool is_store);
 285   MemNode::MemOrd access_kind_to_memord(AccessKind access_kind);
 286   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind, AccessKind access_kind);
 287   bool inline_unsafe_fence(vmIntrinsics::ID id);
 288   bool inline_onspinwait();


2134   Node* n = NULL;
2135   switch (id) {
2136   case vmIntrinsics::_numberOfLeadingZeros_i:   n = new CountLeadingZerosINode( arg);  break;
2137   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2138   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2139   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2140   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2141   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2142   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2143   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2144   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2145   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2146   default:  fatal_unexpected_iid(id);  break;
2147   }
2148   set_result(_gvn.transform(n));
2149   return true;
2150 }
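
As a hedged aside (not part of library_call.cpp): the ideal-graph nodes selected above carry the usual bit-manipulation semantics, which can be stated as a minimal standalone C++ sketch for three of the 32-bit cases:

#include <cstdint>

// Illustrative reference semantics only; the intrinsic nodes above compute
// these directly in the ideal graph, typically as single machine instructions.
static int number_of_leading_zeros(uint32_t x) {
  int n = 0;
  for (uint32_t m = 0x80000000u; m != 0 && (x & m) == 0; m >>= 1) n++;
  return n;                               // 32 when x == 0, as in Java
}

static int bit_count(uint32_t x) {
  int n = 0;
  for (; x != 0; x &= x - 1) n++;         // clear the lowest set bit each iteration
  return n;
}

static uint16_t reverse_bytes_us(uint16_t x) {
  return (uint16_t)((x << 8) | (x >> 8)); // ReverseBytesUS: swap the two bytes of a char
}
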
2151 
2152 //----------------------------inline_unsafe_access----------------------------
2153 
2154 // Helper that guards and inserts a pre-barrier.
2155 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2156                                         Node* pre_val, bool need_mem_bar) {
2157   // We could be accessing the referent field of a reference object. If so, when G1
2158   // is enabled, we need to log the value in the referent field in an SATB buffer.
2159   // This routine applies some compile-time filters and generates suitable
2160   // runtime filters that guard the pre-barrier code.
2161   // It also adds a memory barrier for non-volatile loads from the referent field
2162   // to prevent commoning of loads across safepoints.
2163   if (!UseG1GC && !need_mem_bar)
2164     return;
2165 
2166   // Some compile time checks.
2167 
2168   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2169   const TypeX* otype = offset->find_intptr_t_type();
2170   if (otype != NULL && otype->is_con() &&
2171       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2172     // Constant offset but not the reference_offset so just return
2173     return;
2174   }
2175 
2176   // We only need to generate the runtime guards for instances.
2177   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2178   if (btype != NULL) {
2179     if (btype->isa_aryptr()) {
2180       // Array type so nothing to do
2181       return;
2182     }
2183 
2184     const TypeInstPtr* itype = btype->isa_instptr();
2185     if (itype != NULL) {
2186       // Can the klass of base_oop be statically determined to be
2187       // _not_ a sub-class of Reference and _not_ Object?
2188       ciKlass* klass = itype->klass();
2189       if ( klass->is_loaded() &&
2190           !klass->is_subtype_of(env()->Reference_klass()) &&
2191           !env()->Object_klass()->is_subtype_of(klass)) {
2192         return;
2193       }
2194     }
2195   }
2196 
2197   // The compile time filters did not reject base_oop/offset so
2198   // we need to generate the following runtime filters
2199   //
2200   // if (offset == java_lang_ref_Reference::_reference_offset) {
2201   //   if (instance_of(base, java.lang.ref.Reference)) {
2202   //     pre_barrier(_, pre_val, ...);
2203   //   }
2204   // }
2205 
2206   float likely   = PROB_LIKELY(  0.999);
2207   float unlikely = PROB_UNLIKELY(0.999);
2208 
2209   IdealKit ideal(this);
2210 #define __ ideal.
2211 
2212   Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
2213 
2214   __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
2215       // Update graphKit memory and control from IdealKit.
2216       sync_kit(ideal);
2217 
2218       Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
2219       Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
2220 
2221       // Update IdealKit memory and control from graphKit.
2222       __ sync_kit(this);
2223 
2224       Node* one = __ ConI(1);
2225       // is_instof == 0 if base_oop == NULL
2226       __ if_then(is_instof, BoolTest::eq, one, unlikely); {
2227 
2228         // Update graphKit from IdealKit.
2229         sync_kit(ideal);
2230 
2231         // Use the pre-barrier to record the value in the referent field
2232         pre_barrier(false /* do_load */,
2233                     __ ctrl(),
2234                     NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2235                     pre_val /* pre_val */,
2236                     T_OBJECT);
2237         if (need_mem_bar) {
2238           // Add a memory barrier to prevent commoning reads of this field
2239           // across safepoints, since the GC can change its value.
2240           insert_mem_bar(Op_MemBarCPUOrder);
2241         }
2242         // Update IdealKit from graphKit.
2243         __ sync_kit(this);
2244 
2245       } __ end_if(); // _ref_type != ref_none
2246   } __ end_if(); // offset == referent_offset
2247 
2248   // Final sync IdealKit and GraphKit.
2249   final_sync(ideal);
2250 #undef __
2251 }
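
As a conceptual, hedged sketch (assumed shape, not the actual G1 code): the pre_barrier call above logs the value loaded from Reference.referent into a thread-local SATB buffer so concurrent marking does not lose it. At runtime the effect is roughly:

#include <cstddef>

// Assumed shape of an SATB enqueue, for illustration only; the names and the
// buffer layout are hypothetical, the real buffers live in G1's barrier-set code.
struct SatbBuffer {
  void** entries;
  size_t index;      // next free slot
  size_t capacity;
};

// Log a previously-reachable value so snapshot-at-the-beginning marking sees it.
static bool satb_log(SatbBuffer* buf, void* pre_val) {
  if (pre_val == nullptr || buf->index >= buf->capacity) {
    return false;    // real code would hand a full buffer over to the GC
  }
  buf->entries[buf->index++] = pre_val;
  return true;
}
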
2252 
2253 
2254 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2255   // Attempt to infer a sharper value type from the offset and base type.
2256   ciKlass* sharpened_klass = NULL;
2257 
2258   // See if it is an instance field, with an object type.
2259   if (alias_type->field() != NULL) {
2260     if (alias_type->field()->type()->is_klass()) {
2261       sharpened_klass = alias_type->field()->type()->as_klass();
2262     }
2263   }
2264 
2265   // See if it is a narrow oop array.
2266   if (adr_type->isa_aryptr()) {
2267     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2268       const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2269       if (elem_type != NULL) {
2270         sharpened_klass = elem_type->klass();
2271       }
2272     }
2273   }
2274 
2275   // The sharpened class might be unloaded if there is no class loader
 2276   // constraint in place.
2277   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2278     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2279 
2280 #ifndef PRODUCT
2281     if (C->print_intrinsics() || C->print_inlining()) {
2282       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2283       tty->print("  sharpened value: ");  tjp->dump();      tty->cr();
2284     }
2285 #endif
2286     // Sharpen the value type.
2287     return tjp;
2288   }
2289   return NULL;
2290 }
2291 
2292 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2293   if (callee()->is_static())  return false;  // caller must have the capability!

2294   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2295   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2296   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2297 




2298 #ifndef PRODUCT
2299   {
2300     ResourceMark rm;
2301     // Check the signatures.
2302     ciSignature* sig = callee()->signature();
2303 #ifdef ASSERT
2304     if (!is_store) {
2305       // Object getObject(Object base, int/long offset), etc.
2306       BasicType rtype = sig->return_type()->basic_type();
2307       assert(rtype == type, "getter must return the expected value");
2308       assert(sig->count() == 2, "oop getter has 2 arguments");
2309       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2310       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2311     } else {
2312       // void putObject(Object base, int/long offset, Object x), etc.
2313       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2314       assert(sig->count() == 3, "oop putter has 3 arguments");
2315       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2316       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2317       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();


2335   Node* base = argument(1);  // type: oop
2336   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2337   offset = argument(2);  // type: long
2338   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2339   // to be plain byte offsets, which are also the same as those accepted
2340   // by oopDesc::field_base.
2341   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2342          "fieldOffset must be byte-scaled");
2343   // 32-bit machines ignore the high half!
2344   offset = ConvL2X(offset);
2345   adr = make_unsafe_address(base, offset);
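
A minimal sketch (standalone, not VM code) of the address arithmetic relied on here: Unsafe field offsets are plain byte offsets, so the access address is simply base plus offset, with the 64-bit offset truncated to the machine word size on 32-bit targets, which is what ConvL2X expresses:

#include <cstdint>

// Illustrative only: compute the raw access address from a base oop address
// and a byte-scaled Unsafe offset.
static char* unsafe_address(char* base, int64_t byte_offset) {
  return base + (intptr_t)byte_offset;   // high half ignored on 32-bit targets
}
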
2346   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2347     heap_base_oop = base;
2348   } else if (type == T_OBJECT) {
2349     return false; // off-heap oop accesses are not supported
2350   }
2351 
2352   // Can base be NULL? Otherwise, always on-heap access.
2353   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2354 




2355   val = is_store ? argument(4) : NULL;
2356 
2357   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2358 
2359   // Try to categorize the address.
2360   Compile::AliasType* alias_type = C->alias_type(adr_type);
2361   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2362 
2363   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2364       alias_type->adr_type() == TypeAryPtr::RANGE) {
2365     return false; // not supported
2366   }
2367 
2368   bool mismatched = false;
2369   BasicType bt = alias_type->basic_type();
2370   if (bt != T_ILLEGAL) {
2371     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2372     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2373     // Alias type doesn't differentiate between byte[] and boolean[].
2374       // Use address type to get the element type.
2375       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2376     }
2377     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2378       // accessing an array field with getObject is not a mismatch
2379       bt = T_OBJECT;
2380     }
2381     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2382       // Don't intrinsify mismatched object accesses
2383       return false;
2384     }
2385     mismatched = (bt != type);
2386   } else if (alias_type->adr_type()->isa_oopptr()) {
2387     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2388   }
2389 
2390   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2391 
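
As a hedged illustration of what a "mismatched" access means here (standalone example, not VM code): the declared type of the location and the width or kind of the access differ, for instance reading only four bytes of an eight-byte field, so alias analysis has to stay conservative:

#include <cstdint>
#include <cstring>

// Read 4 bytes out of an 8-byte slot; the access type (int) does not match the
// declared field type (long), which is the situation flagged as mismatched above.
static uint32_t read_first_word(const uint64_t* field) {
  uint32_t w;
  std::memcpy(&w, field, sizeof(w));   // which half this is depends on endianness
  return w;
}
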




2392   // First guess at the value type.
2393   const Type *value_type = Type::get_const_basic_type(type);
2394 
2395   // We will need memory barriers unless we can determine a unique
2396   // alias category for this reference.  (Note:  If for some reason
2397   // the barriers get omitted and the unsafe reference begins to "pollute"
2398   // the alias analysis of the rest of the graph, either Compile::can_alias
2399   // or Compile::must_alias will throw a diagnostic assert.)
2400   bool need_mem_bar = false;
2401   switch (kind) {
2402       case Relaxed:
2403           need_mem_bar = mismatched && !adr_type->isa_aryptr();
2404           break;
2405       case Opaque:
2406           // Opaque uses CPUOrder membars for protection against code movement.
2407       case Acquire:
2408       case Release:
2409       case Volatile:
2410           need_mem_bar = true;
2411           break;
2412       default:
2413           ShouldNotReachHere();
2414   }
2415 
2416   // Some accesses require access atomicity for all types, notably longs and doubles.
2417   // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
2418   bool requires_atomic_access = false;
2419   switch (kind) {
2420       case Relaxed:
2421           requires_atomic_access = AlwaysAtomicAccesses;
2422           break;
2423       case Opaque:
2424           // Opaque accesses are atomic.
2425       case Acquire:


2426       case Release:


2427       case Volatile:
2428           requires_atomic_access = true;
2429           break;
2430       default:
2431           ShouldNotReachHere();
2432   }
2433 
2434   // Figure out the memory ordering.
2435   // Acquire/Release/Volatile accesses require marking the loads/stores with MemOrd
2436   MemNode::MemOrd mo = access_kind_to_memord_LS(kind, is_store);
2437 
2438   // If we are reading the value of the referent field of a Reference
2439   // object (either by using Unsafe directly or through reflection)
2440   // then, if G1 is enabled, we need to record the referent in an
2441   // SATB log buffer using the pre-barrier mechanism.
2442   // Also, we need to add a memory barrier to prevent commoning reads
2443   // of this field across safepoints, since the GC can change its value.
2444   bool need_read_barrier = !is_store &&
2445                            offset != top() && heap_base_oop != top();
2446 
2447   if (!is_store && type == T_OBJECT) {
2448     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2449     if (tjp != NULL) {
2450       value_type = tjp;
2451     }



















2452   }
2453 
2454   receiver = null_check(receiver);
2455   if (stopped()) {
2456     return true;
2457   }
2458   // Heap pointers get a null-check from the interpreter,
2459   // as a courtesy.  However, this is not guaranteed by Unsafe,
2460   // and it is not possible to fully distinguish unintended nulls
2461   // from intended ones in this API.
2462 
2463   // We need to emit leading and trailing CPU membars (see below) in
2464   // addition to memory membars for special access modes. This is a little
2465   // too strong, but avoids the need to insert per-alias-type
2466   // volatile membars (for stores; compare Parse::do_put_xxx), which
2467   // we cannot do effectively here because we probably only have a
2468   // rough approximation of type.
2469 
2470   switch(kind) {
2471     case Relaxed:
2472     case Opaque:
2473     case Acquire:
2474       break;
2475     case Release:
2476     case Volatile:
2477       if (is_store) {
2478         insert_mem_bar(Op_MemBarRelease);
2479       } else {
2480         if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2481           insert_mem_bar(Op_MemBarVolatile);
2482         }
2483       }
2484       break;
2485     default:
2486       ShouldNotReachHere();
2487   }
2488 
2489   // Memory barrier to prevent normal and 'unsafe' accesses from
2490   // bypassing each other.  Happens after null checks, so the
2491   // exception paths do not take memory state from the memory barrier,
2492   // so there is no problem making a strong assert about mixing users
2493   // of safe & unsafe memory.
2494   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2495 
2496   if (!is_store) {
2497     Node* p = NULL;
2498     // Try to constant fold a load from a constant field
2499     ciField* field = alias_type->field();
2500     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2501       // final or stable field
2502       p = make_constant_from_field(field, heap_base_oop);
2503     }
2504     if (p == NULL) {
2505       // To be valid, unsafe loads may depend on other conditions than
2506       // the one that guards them: pin the Load node
2507       p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
2508       // load value
2509       switch (type) {
2510       case T_BOOLEAN:
2511       {
2512         // Normalize the value returned by getBoolean in the following cases
2513         if (mismatched ||

2514             heap_base_oop == top() ||                            // - heap_base_oop is NULL or
2515             (can_access_non_heap && alias_type->field() == NULL) // - heap_base_oop is potentially NULL
2516                                                                  //   and the unsafe access is made to large offset
2517                                                                  //   (i.e., larger than the maximum offset necessary for any
2518                                                                  //   field access)
2519             ) {
2520           IdealKit ideal = IdealKit(this);
2521 #define __ ideal.
2522           IdealVariable normalized_result(ideal);
2523           __ declarations_done();
2524           __ set(normalized_result, p);
2525           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2526           __ set(normalized_result, ideal.ConI(1));
2527           ideal.end_if();
2528           final_sync(ideal);
2529           p = __ value(normalized_result);
2530 #undef __
2531         }
2532       }
2533       case T_CHAR:
2534       case T_BYTE:
2535       case T_SHORT:
2536       case T_INT:
2537       case T_LONG:
2538       case T_FLOAT:
2539       case T_DOUBLE:
2540         break;
2541       case T_OBJECT:
2542         if (need_read_barrier) {
2543           // We do not require a mem bar inside pre_barrier if need_mem_bar
2544           // is set: we emit the barriers ourselves.
2545           insert_pre_barrier(heap_base_oop, offset, p, !need_mem_bar);
2546         }
2547         break;
2548       case T_ADDRESS:
2549         // Cast to an int type.
2550         p = _gvn.transform(new CastP2XNode(NULL, p));
2551         p = ConvX2UL(p);
2552         break;
2553       default:
2554         fatal("unexpected type %d: %s", type, type2name(type));
2555         break;
2556       }
2557     }
2558     // The load node has the control of the preceding MemBarCPUOrder.  All
2559     // following nodes will have the control of the MemBarCPUOrder inserted at
2560     // the end of this method.  So, pushing the load onto the stack at a later
2561     // point is fine.
2562     set_result(p);
2563   } else {
2564     // place effect of store into memory
2565     switch (type) {
2566     case T_DOUBLE:
2567       val = dstore_rounding(val);
2568       break;
2569     case T_ADDRESS:
2570       // Repackage the long as a pointer.
2571       val = ConvL2X(val);
2572       val = _gvn.transform(new CastX2PNode(val));
2573       break;
2574     }
2575 
2576     if (type == T_OBJECT) {
2577       store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2578     } else {
2579       store_to_memory(control(), adr, val, type, adr_type, mo, requires_atomic_access, unaligned, mismatched);
2580     }
2581   }
2582 
2583   switch(kind) {
2584     case Relaxed:
2585     case Opaque:
2586     case Release:
2587       break;
2588     case Acquire:
2589     case Volatile:
2590       if (!is_store) {
2591         insert_mem_bar(Op_MemBarAcquire);
2592       } else {
2593         if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2594           insert_mem_bar(Op_MemBarVolatile);
2595         }
2596       }
2597       break;
2598     default:
2599       ShouldNotReachHere();
2600   }
2601 
2602   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2603 
2604   return true;
2605 }
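
One detail worth spelling out from the load path above: for T_BOOLEAN, and only when the access is mismatched or possibly off-heap per the guard shown there, the IdealKit block canonicalizes the loaded byte. As a plain, illustrative C++ sketch (not VM code), it computes:

#include <cstdint>

// Illustrative only: the normalization built with IdealKit above maps any
// non-zero byte read by getBoolean to 1 and leaves 0 as 0.
static int32_t normalize_boolean(int32_t raw) {
  return (raw != 0) ? 1 : 0;
}
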
2606 
2607 //----------------------------inline_unsafe_load_store----------------------------
2608 // This method serves a couple of different customers (depending on LoadStoreKind):
2609 //
2610 // LS_cmp_swap:
2611 //
2612 //   boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
2613 //   boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
2614 //   boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
2615 //
2616 // LS_cmp_swap_weak:
2617 //
2618 //   boolean weakCompareAndSwapObject(       Object o, long offset, Object expected, Object x);
2619 //   boolean weakCompareAndSwapObjectAcquire(Object o, long offset, Object expected, Object x);
2620 //   boolean weakCompareAndSwapObjectRelease(Object o, long offset, Object expected, Object x);
2621 //
2622 //   boolean weakCompareAndSwapInt(          Object o, long offset, int    expected, int    x);
2623 //   boolean weakCompareAndSwapIntAcquire(   Object o, long offset, int    expected, int    x);


2645 //
2646 //   int  getAndAddInt( Object o, long offset, int  delta)
2647 //   long getAndAddLong(Object o, long offset, long delta)
2648 //
2649 // LS_get_set:
2650 //
2651 //   int    getAndSet(Object o, long offset, int    newValue)
2652 //   long   getAndSet(Object o, long offset, long   newValue)
2653 //   Object getAndSet(Object o, long offset, Object newValue)
2654 //
2655 bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
2656   // This basic scheme here is the same as inline_unsafe_access, but
2657   // differs in enough details that combining them would make the code
2658   // overly confusing.  (This is a true fact! I originally combined
2659   // them, but even I was confused by it!) As much code/comments as
2660   // possible are retained from inline_unsafe_access though to make
2661   // the correspondences clearer. - dl
2662 
2663   if (callee()->is_static())  return false;  // caller must have the capability!
2664 












2665 #ifndef PRODUCT
2666   BasicType rtype;
2667   {
2668     ResourceMark rm;
2669     // Check the signatures.
2670     ciSignature* sig = callee()->signature();
2671     rtype = sig->return_type()->basic_type();
2672     switch(kind) {
2673       case LS_get_add:
2674       case LS_get_set: {
2675       // Check the signatures.
2676 #ifdef ASSERT
2677       assert(rtype == type, "get and set must return the expected type");
2678       assert(sig->count() == 3, "get and set has 3 arguments");
2679       assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2680       assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2681       assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2682       assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
2683 #endif // ASSERT
2684         break;


2776         }
2777       }
2778       break;
2779     }
2780     case LS_cmp_swap:
2781     case LS_cmp_swap_weak:
2782     case LS_get_add:
2783       break;
2784     default:
2785       ShouldNotReachHere();
2786   }
2787 
2788   // Null check receiver.
2789   receiver = null_check(receiver);
2790   if (stopped()) {
2791     return true;
2792   }
2793 
2794   int alias_idx = C->get_alias_index(adr_type);
2795 
2796   // Memory-model-wise, a LoadStore acts like a little synchronized
2797   // block, so needs barriers on each side.  These don't translate
2798   // into actual barriers on most machines, but we still need the rest of
2799   // the compiler to respect ordering.
2800 
2801   switch (access_kind) {
2802     case Relaxed:
2803     case Acquire:
2804       break;
2805     case Release:
2806       insert_mem_bar(Op_MemBarRelease);
2807       break;
2808     case Volatile:
2809       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2810         insert_mem_bar(Op_MemBarVolatile);
2811       } else {
2812         insert_mem_bar(Op_MemBarRelease);
2813       }
2814       break;
2815     default:
2816       ShouldNotReachHere();
2817   }
2818   insert_mem_bar(Op_MemBarCPUOrder);
2819 
2820   // Figure out the memory ordering.
2821   MemNode::MemOrd mo = access_kind_to_memord(access_kind);
2822 
2823   // 4984716: MemBars must be inserted before this
2824   //          memory node in order to avoid a false
2825   //          dependency which will confuse the scheduler.
2826   Node *mem = memory(alias_idx);
2827 
2828   // For now, we handle only those cases that actually exist: bytes, shorts,
2829   // ints, longs, and Object. Adding others should be straightforward.
2830   Node* load_store = NULL;
2831   switch(type) {
2832   case T_BYTE:
2833     switch(kind) {
2834       case LS_get_add:
2835         load_store = _gvn.transform(new GetAndAddBNode(control(), mem, adr, newval, adr_type));
2836         break;
2837       case LS_get_set:
2838         load_store = _gvn.transform(new GetAndSetBNode(control(), mem, adr, newval, adr_type));
2839         break;
2840       case LS_cmp_swap_weak:
2841         load_store = _gvn.transform(new WeakCompareAndSwapBNode(control(), mem, adr, newval, oldval, mo));
2842         break;
2843       case LS_cmp_swap:
2844         load_store = _gvn.transform(new CompareAndSwapBNode(control(), mem, adr, newval, oldval, mo));
2845         break;
2846       case LS_cmp_exchange:
2847         load_store = _gvn.transform(new CompareAndExchangeBNode(control(), mem, adr, newval, oldval, adr_type, mo));
2848         break;
2849       default:
2850         ShouldNotReachHere();
2851     }
2852     break;
2853   case T_SHORT:
2854     switch(kind) {
2855       case LS_get_add:
2856         load_store = _gvn.transform(new GetAndAddSNode(control(), mem, adr, newval, adr_type));
2857         break;
2858       case LS_get_set:
2859         load_store = _gvn.transform(new GetAndSetSNode(control(), mem, adr, newval, adr_type));
2860         break;
2861       case LS_cmp_swap_weak:
2862         load_store = _gvn.transform(new WeakCompareAndSwapSNode(control(), mem, adr, newval, oldval, mo));
2863         break;
2864       case LS_cmp_swap:
2865         load_store = _gvn.transform(new CompareAndSwapSNode(control(), mem, adr, newval, oldval, mo));
2866         break;
2867       case LS_cmp_exchange:
2868         load_store = _gvn.transform(new CompareAndExchangeSNode(control(), mem, adr, newval, oldval, adr_type, mo));
2869         break;
2870       default:
2871         ShouldNotReachHere();
2872     }
2873     break;
2874   case T_INT:
2875     switch(kind) {
2876       case LS_get_add:
2877         load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2878         break;
2879       case LS_get_set:
2880         load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2881         break;
2882       case LS_cmp_swap_weak:
2883         load_store = _gvn.transform(new WeakCompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
2884         break;
2885       case LS_cmp_swap:
2886         load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
2887         break;
2888       case LS_cmp_exchange:
2889         load_store = _gvn.transform(new CompareAndExchangeINode(control(), mem, adr, newval, oldval, adr_type, mo));
2890         break;
2891       default:
2892         ShouldNotReachHere();
2893     }
2894     break;
2895   case T_LONG:
2896     switch(kind) {
2897       case LS_get_add:
2898         load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2899         break;
2900       case LS_get_set:
2901         load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2902         break;
2903       case LS_cmp_swap_weak:
2904         load_store = _gvn.transform(new WeakCompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
2905         break;
2906       case LS_cmp_swap:
2907         load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
2908         break;
2909       case LS_cmp_exchange:
2910         load_store = _gvn.transform(new CompareAndExchangeLNode(control(), mem, adr, newval, oldval, adr_type, mo));
2911         break;
2912       default:
2913         ShouldNotReachHere();
2914     }
2915     break;
2916   case T_OBJECT:
2917     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2918     // could be delayed during Parse (for example, in adjust_map_after_if()).
2919     // Execute transformation here to avoid barrier generation in such case.
2920     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2921       newval = _gvn.makecon(TypePtr::NULL_PTR);
2922 
2923     // Reference stores need a store barrier.
2924     switch(kind) {
2925       case LS_get_set: {
2926         // If the pre-barrier must execute before the oop store, the old value requires a do_load here.
2927         if (!can_move_pre_barrier()) {
2928           pre_barrier(true /* do_load*/,
2929                       control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2930                       NULL /* pre_val*/,
2931                       T_OBJECT);
2932         } // Else move pre_barrier to use load_store value, see below.
2933         break;
2934       }
2935       case LS_cmp_swap_weak:
2936       case LS_cmp_swap:
2937       case LS_cmp_exchange: {
2938         // Same as for newval above:
2939         if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2940           oldval = _gvn.makecon(TypePtr::NULL_PTR);
2941         }
2942         // The only known value which might get overwritten is oldval.
2943         pre_barrier(false /* do_load */,
2944                     control(), NULL, NULL, max_juint, NULL, NULL,
2945                     oldval /* pre_val */,
2946                     T_OBJECT);
2947         break;
2948       }
2949       default:
2950         ShouldNotReachHere();
2951     }
2952 
2953 #ifdef _LP64
2954     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2955       Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2956 
2957       switch(kind) {
2958         case LS_get_set:
2959           load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
2960           break;
2961         case LS_cmp_swap_weak: {
2962           Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2963           load_store = _gvn.transform(new WeakCompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
2964           break;
2965         }
2966         case LS_cmp_swap: {
2967           Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2968           load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
2969           break;
2970         }
2971         case LS_cmp_exchange: {
2972           Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2973           load_store = _gvn.transform(new CompareAndExchangeNNode(control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
2974           break;
2975         }
2976         default:
2977           ShouldNotReachHere();
2978       }
2979     } else
2980 #endif
2981     switch (kind) {
2982       case LS_get_set:
2983         load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2984         break;
2985       case LS_cmp_swap_weak:
2986         load_store = _gvn.transform(new WeakCompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
2987         break;
2988       case LS_cmp_swap:
2989         load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
2990         break;
2991       case LS_cmp_exchange:
2992         load_store = _gvn.transform(new CompareAndExchangePNode(control(), mem, adr, newval, oldval, adr_type, value_type->is_oopptr(), mo));
2993         break;
2994       default:
2995         ShouldNotReachHere();
2996     }
2997 
2998     // Emit the post barrier only when the actual store happened. Checking this
2999     // only makes sense for the LS_cmp_* kinds, which can fail to set the value.
3000     // LS_cmp_exchange does not produce any branches by default, so there is no
3001     // boolean result to piggyback on. TODO: When we merge CompareAndSwap with
3002     // CompareAndExchange and move branches here, it would make sense to conditionalize
3003     // post_barriers for LS_cmp_exchange as well.
3004     //
3005     // The CAS success path is marked as more likely since we anticipate it is the
3006     // performance-critical path; the CAS failure path can absorb the unlikely-path
3007     // penalty as backoff, which is still better than emitting a store barrier there.
3008     switch (kind) {
3009       case LS_get_set:
3010       case LS_cmp_exchange: {
3011         post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3012         break;
3013       }
3014       case LS_cmp_swap_weak:

3015       case LS_cmp_swap: {
3016         IdealKit ideal(this);
3017         ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
3018           sync_kit(ideal);
3019           post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3020           ideal.sync_kit(this);
3021         } ideal.end_if();
3022         final_sync(ideal);
3023         break;
3024       }
3025       default:
3026         ShouldNotReachHere();
3027     }
3028     break;
3029   default:
3030     fatal("unexpected type %d: %s", type, type2name(type));
3031     break;
3032   }
3033 
3034   // SCMemProjNodes represent the memory state of a LoadStore. Their
3035   // main role is to prevent LoadStore nodes from being optimized away
3036   // when their results aren't used.
3037   Node* proj = _gvn.transform(new SCMemProjNode(load_store));
3038   set_memory(proj, alias_idx);
3039 
3040   if (type == T_OBJECT && (kind == LS_get_set || kind == LS_cmp_exchange)) {
3041 #ifdef _LP64
3042     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3043       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
3044     }
3045 #endif
3046     if (can_move_pre_barrier() && kind == LS_get_set) {
3047       // Don't need to load pre_val. The old value is returned by load_store.
3048       // The pre_barrier can execute after the xchg as long as no safepoint
3049       // gets inserted between them.
3050       pre_barrier(false /* do_load */,
3051                   control(), NULL, NULL, max_juint, NULL, NULL,
3052                   load_store /* pre_val */,
3053                   T_OBJECT);
3054     }
3055   }
3056 
3057   // Add the trailing membar surrounding the access
3058   insert_mem_bar(Op_MemBarCPUOrder);
3059 
3060   switch (access_kind) {
3061     case Relaxed:
3062     case Release:
3063       break; // do nothing
3064     case Acquire:
3065     case Volatile:
3066       insert_mem_bar(Op_MemBarAcquire);
3067       // !support_IRIW_for_not_multiple_copy_atomic_cpu handled in platform code
3068       break;

3069     default:
3070       ShouldNotReachHere();
3071   }
3072 
3073   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3074   set_result(load_store);
3075   return true;
3076 }
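
For orientation, a hedged user-level analogy (illustrative only, not the VM's implementation): the five LoadStoreKind cases correspond roughly to the standard C++11 atomic read-modify-write operations:

#include <atomic>

// Rough analogues of the LoadStoreKind cases; the intrinsic builds the
// corresponding LoadStore ideal-graph nodes directly instead of calling these.
static void load_store_kinds(std::atomic<int>& a, int expected, int desired, int delta) {
  (void)a.fetch_add(delta);                            // LS_get_add
  (void)a.exchange(desired);                           // LS_get_set
  (void)a.compare_exchange_strong(expected, desired);  // LS_cmp_swap
  (void)a.compare_exchange_weak(expected, desired);    // LS_cmp_swap_weak (may fail spuriously)
  // LS_cmp_exchange returns the value witnessed at the location rather than a
  // boolean; compare_exchange_* reports that value back through 'expected'.
}
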
3077 
3078 MemNode::MemOrd LibraryCallKit::access_kind_to_memord_LS(AccessKind kind, bool is_store) {
3079   MemNode::MemOrd mo = MemNode::unset;
3080   switch(kind) {
3081     case Opaque:
3082     case Relaxed:  mo = MemNode::unordered; break;
3083     case Acquire:  mo = MemNode::acquire;   break;
3084     case Release:  mo = MemNode::release;   break;
3085     case Volatile: mo = is_store ? MemNode::release : MemNode::acquire; break;
3086     default:
3087       ShouldNotReachHere();
3088   }
3089   guarantee(mo != MemNode::unset, "Should select memory ordering");
3090   return mo;
3091 }
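
A short hedged analogy to C++11 memory orders may help read the mapping above (illustrative only; Java volatile semantics additionally rely on the MemBar nodes emitted around the access):

#include <atomic>

// Relaxed/Opaque map to MemNode::unordered, Acquire/Release to the matching
// one-sided orders, and Volatile picks release for stores and acquire for loads.
static int memord_analogy(std::atomic<int>& a, int v) {
  a.store(v, std::memory_order_relaxed);     // Relaxed, Opaque
  a.store(v, std::memory_order_release);     // Release, or a Volatile store
  return a.load(std::memory_order_acquire);  // Acquire, or a Volatile load
}
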
3092 
3093 MemNode::MemOrd LibraryCallKit::access_kind_to_memord(AccessKind kind) {
3094   MemNode::MemOrd mo = MemNode::unset;


3375     PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3376     PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
3377 
3378     result_rgn->init_req(slow_result_path, control());
3379     result_io ->init_req(slow_result_path, i_o());
3380     result_mem->init_req(slow_result_path, reset_memory());
3381     result_val->init_req(slow_result_path, slow_val);
3382 
3383     set_all_memory(_gvn.transform(result_mem));
3384     set_i_o(       _gvn.transform(result_io));
3385   }
3386 
3387   C->set_has_split_ifs(true); // Has chance for split-if optimization
3388   set_result(result_rgn, result_val);
3389   return true;
3390 }
3391 
3392 //---------------------------load_mirror_from_klass----------------------------
3393 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3394 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {

3395   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3396   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);

3397 }
3398 
3399 //-----------------------load_klass_from_mirror_common-------------------------
3400 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3401 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3402 // and branch to the given path on the region.
3403 // If never_see_null, take an uncommon trap on null, so we can optimistically
3404 // compile for the non-null case.
3405 // If the region is NULL, force never_see_null = true.
3406 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3407                                                     bool never_see_null,
3408                                                     RegionNode* region,
3409                                                     int null_path,
3410                                                     int offset) {
3411   if (region == NULL)  never_see_null = true;
3412   Node* p = basic_plus_adr(mirror, offset);
3413   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3414   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3415   Node* null_ctl = top();
3416   kls = null_check_oop(kls, &null_ctl, never_see_null);


4522   // Conservatively insert a memory barrier on all memory slices.
4523   // Do not let writes of the copy source or destination float below the copy.
4524   insert_mem_bar(Op_MemBarCPUOrder);
4525 
4526   // Call it.  Note that the length argument is not scaled.
4527   make_runtime_call(RC_LEAF|RC_NO_FP,
4528                     OptoRuntime::fast_arraycopy_Type(),
4529                     StubRoutines::unsafe_arraycopy(),
4530                     "unsafe_arraycopy",
4531                     TypeRawPtr::BOTTOM,
4532                     src, dst, size XTOP);
4533 
4534   // Do not let reads of the copy destination float above the copy.
4535   insert_mem_bar(Op_MemBarCPUOrder);
4536 
4537   return true;
4538 }
4539 
4540 //------------------------copy_to_clone----------------------------------
4541 // Helper function for inline_native_clone.
4542 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4543   assert(obj_size != NULL, "");
4544   Node* raw_obj = alloc_obj->in(1);
4545   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4546 
4547   AllocateNode* alloc = NULL;
4548   if (ReduceBulkZeroing) {
4549     // We will be completely responsible for initializing this object -
4550     // mark Initialize node as complete.
4551     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4552     // The object was just allocated - there should not be any stores!
4553     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4554     // Mark as complete_with_arraycopy so that on AllocateNode
4555     // expansion, we know this AllocateNode is initialized by an array
4556     // copy and a StoreStore barrier exists after the array copy.
4557     alloc->initialization()->set_complete_with_arraycopy();
4558   }
4559 
4560   // Copy the fastest available way.
4561   // TODO: generate field copies for small objects instead.
4562   Node* src  = obj;
4563   Node* dest = alloc_obj;
4564   Node* size = _gvn.transform(obj_size);
4565 
4566   // Exclude the header but include the array length, to copy by 8-byte words.
4567   // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4568   int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4569                             instanceOopDesc::base_offset_in_bytes();
4570   // base_off:
4571   // 8  - 32-bit VM
4572   // 12 - 64-bit VM, compressed klass
4573   // 16 - 64-bit VM, normal klass
4574   if (base_off % BytesPerLong != 0) {
4575     assert(UseCompressedClassPointers, "");
4576     if (is_array) {
4577       // Exclude the length to copy by 8-byte words.
4578       base_off += sizeof(int);
4579     } else {
4580       // Include the klass to copy by 8-byte words.
4581       base_off = instanceOopDesc::klass_offset_in_bytes();
4582     }
4583     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4584   }
4585   src  = basic_plus_adr(src,  base_off);
4586   dest = basic_plus_adr(dest, base_off);
4587 
4588   // Compute the length also, if needed:
4589   Node* countx = size;
4590   countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
4591   countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong) ));
4592 
4593   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4594 
4595   ArrayCopyNode* ac = ArrayCopyNode::make(this, false, src, NULL, dest, NULL, countx, false, false);
4596   ac->set_clonebasic();
4597   Node* n = _gvn.transform(ac);
4598   if (n == ac) {
4599     set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
4600   } else {
4601     set_all_memory(n);
4602   }
4603 
4604   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4605   if (card_mark) {
4606     assert(!is_array, "");
4607     // Put in store barrier for any and all oops we are sticking
4608     // into this object.  (We could avoid this if we could prove
4609     // that the object type contains no oop fields at all.)
4610     Node* no_particular_value = NULL;
4611     Node* no_particular_field = NULL;
4612     int raw_adr_idx = Compile::AliasIdxRaw;
4613     post_barrier(control(),
4614                  memory(raw_adr_type),
4615                  alloc_obj,
4616                  no_particular_field,
4617                  raw_adr_idx,
4618                  no_particular_value,
4619                  T_OBJECT,
4620                  false);
4621   }
4622 
4623   // Do not let reads from the cloned object float above the arraycopy.
4624   if (alloc != NULL) {
4625     // Do not let stores that initialize this object be reordered with
4626     // a subsequent store that would make this object accessible by
4627     // other threads.
4628     // Record what AllocateNode this StoreStore protects so that
4629     // escape analysis can go from the MemBarStoreStoreNode to the
4630     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4631     // based on the escape status of the AllocateNode.
4632     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
4633   } else {
4634     insert_mem_bar(Op_MemBarCPUOrder);
4635   }
4636 }
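
A small, hedged check of the size arithmetic used above (standalone sketch, not VM code): after base_off has been padded up to an 8-byte boundary, the number of 8-byte words copied is the object size minus base_off, shifted right by LogBytesPerLong:

#include <cstddef>

// Illustrative only: mirrors countx = (size - base_off) >> LogBytesPerLong.
static size_t words_to_copy(size_t obj_size_in_bytes, size_t base_off) {
  // base_off is already 8-byte aligned here, so the difference is a multiple of 8.
  return (obj_size_in_bytes - base_off) >> 3;   // e.g. (32 - 16) >> 3 == 2 words
}
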
4637 
4638 //------------------------inline_native_clone----------------------------
4639 // protected native Object java.lang.Object.clone();
4640 //
4641 // Here are the simple edge cases:


4691                                 : TypeInstPtr::NOTNULL);
4692 
4693     // Conservatively insert a memory barrier on all memory slices.
4694     // Do not let writes into the original float below the clone.
4695     insert_mem_bar(Op_MemBarCPUOrder);
4696 
4697     // paths into result_reg:
4698     enum {
4699       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4700       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4701       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4702       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4703       PATH_LIMIT
4704     };
4705     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4706     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4707     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4708     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4709     record_for_igvn(result_reg);
4710 
4711     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4712     int raw_adr_idx = Compile::AliasIdxRaw;
4713 
4714     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4715     if (array_ctl != NULL) {
4716       // It's an array.
4717       PreserveJVMState pjvms(this);
4718       set_control(array_ctl);
4719       Node* obj_length = load_array_length(obj);
4720       Node* obj_size  = NULL;
4721       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4722 
4723       if (!use_ReduceInitialCardMarks()) {

4724         // If it is an oop array, it requires very special treatment,
4725         // because card marking is required on each card of the array.
4726         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4727         if (is_obja != NULL) {
4728           PreserveJVMState pjvms2(this);
4729           set_control(is_obja);
4730           // Generate a direct call to the right arraycopy function(s).
4731           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4732           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
4733           ac->set_cloneoop();
4734           Node* n = _gvn.transform(ac);
4735           assert(n == ac, "cannot disappear");
4736           ac->connect_outputs(this);
4737 
4738           result_reg->init_req(_objArray_path, control());
4739           result_val->init_req(_objArray_path, alloc_obj);
4740           result_i_o ->set_req(_objArray_path, i_o());
4741           result_mem ->set_req(_objArray_path, reset_memory());
4742         }
4743       }
4744       // Otherwise, there are no card marks to worry about.
4745       // (We can dispense with card marks if we know the allocation
4746       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4747       //  causes the non-eden paths to take compensating steps to
4748       //  simulate a fresh allocation, so that no further
4749       //  card marks are required in compiled code to initialize
4750       //  the object.)
4751 
4752       if (!stopped()) {
4753         copy_to_clone(obj, alloc_obj, obj_size, true, false);
4754 
4755         // Present the results of the copy.
4756         result_reg->init_req(_array_path, control());
4757         result_val->init_req(_array_path, alloc_obj);
4758         result_i_o ->set_req(_array_path, i_o());
4759         result_mem ->set_req(_array_path, reset_memory());
4760       }
4761     }
4762 
4763     // We only go to the instance fast case code if we pass a number of guards.
4764     // The paths which do not pass are accumulated in the slow_region.
4765     RegionNode* slow_region = new RegionNode(1);
4766     record_for_igvn(slow_region);
4767     if (!stopped()) {
4768       // It's an instance (we did array above).  Make the slow-path tests.
4769       // If this is a virtual call, we generate a funny guard.  We grab
4770       // the vtable entry corresponding to clone() from the target object.
4771       // If the target method which we are calling happens to be the
4772       // Object clone() method, we pass the guard.  We do not need this
4773       // guard for non-virtual calls; the caller is known to be the native


4779       // The object must be easily cloneable and must not have a finalizer.
4780       // Both of these conditions may be checked in a single test.
4781       // We could optimize the test further, but we don't care.
4782       generate_access_flags_guard(obj_klass,
4783                                   // Test both conditions:
4784                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4785                                   // Must be cloneable but not finalizer:
4786                                   JVM_ACC_IS_CLONEABLE_FAST,
4787                                   slow_region);
4788     }
4789 
4790     if (!stopped()) {
4791       // It's an instance, and it passed the slow-path tests.
4792       PreserveJVMState pjvms(this);
4793       Node* obj_size  = NULL;
4794       // Need to deoptimize on exception from allocation since Object.clone intrinsic
4795       // is reexecuted if deoptimization occurs and there could be problems when merging
4796       // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
4797       Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
4798 
4799       copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
4800 
4801       // Present the results of the slow call.
4802       result_reg->init_req(_instance_path, control());
4803       result_val->init_req(_instance_path, alloc_obj);
4804       result_i_o ->set_req(_instance_path, i_o());
4805       result_mem ->set_req(_instance_path, reset_memory());
4806     }
4807 
4808     // Generate code for the slow case.  We make a call to clone().
4809     set_control(_gvn.transform(slow_region));
4810     if (!stopped()) {
4811       PreserveJVMState pjvms(this);
4812       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4813       Node* slow_result = set_results_for_java_call(slow_call);
4814       // this->control() comes from set_results_for_java_call
4815       result_reg->init_req(_slow_path, control());
4816       result_val->init_req(_slow_path, slow_result);
4817       result_i_o ->set_req(_slow_path, i_o());
4818       result_mem ->set_req(_slow_path, reset_memory());
4819     }


5978 
5979   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5980                                  stubAddr, stubName, TypePtr::BOTTOM,
5981                                  crc, src_start, length);
5982 
5983   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5984   set_result(result);
5985   return true;
5986 }
5987 
5988 //----------------------------inline_reference_get----------------------------
5989 // public T java.lang.ref.Reference.get();
5990 bool LibraryCallKit::inline_reference_get() {
5991   const int referent_offset = java_lang_ref_Reference::referent_offset;
5992   guarantee(referent_offset > 0, "should have already been set");
5993 
5994   // Get the argument:
5995   Node* reference_obj = null_check_receiver();
5996   if (stopped()) return true;
5997 









5998   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);

5999 
6000   ciInstanceKlass* klass = env()->Object_klass();
6001   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6002 
6003   Node* no_ctrl = NULL;
6004   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6005 
6006   // Use the pre-barrier to record the value in the referent field
6007   pre_barrier(false /* do_load */,
6008               control(),
6009               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6010               result /* pre_val */,
6011               T_OBJECT);
6012 
6013   // Add a memory barrier to prevent commoning reads of this field
6014   // across safepoints, since the GC can change its value.
6015   insert_mem_bar(Op_MemBarCPUOrder);
6016 
6017   set_result(result);
6018   return true;
6019 }
6020 
6021 
6022 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6023                                               bool is_exact=true, bool is_static=false,
6024                                               ciInstanceKlass * fromKls=NULL) {
6025   if (fromKls == NULL) {
6026     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6027     assert(tinst != NULL, "obj is null");
6028     assert(tinst->klass()->is_loaded(), "obj is not loaded");
6029     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6030     fromKls = tinst->klass()->as_instance_klass();
6031   } else {
6032     assert(is_static, "only for static field access");


6045 
6046   // The next code is copied from Parse::do_get_xxx():
6047 
6048   // Compute address and memory type.
6049   int offset  = field->offset_in_bytes();
6050   bool is_vol = field->is_volatile();
6051   ciType* field_klass = field->type();
6052   assert(field_klass->is_loaded(), "should be loaded");
6053   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6054   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6055   BasicType bt = field->layout_type();
6056 
6057   // Build the resultant type of the load
6058   const Type *type;
6059   if (bt == T_OBJECT) {
6060     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6061   } else {
6062     type = Type::get_const_basic_type(bt);
6063   }
6064 
6065   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6066     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6067   }
6068   // Build the load.
6069   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6070   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6071   // If reference is volatile, prevent following memory ops from
6072   // floating up past the volatile read.  Also prevents commoning
6073   // another volatile read.
6074   if (is_vol) {
6075     // Memory barrier includes bogus read of value to force load BEFORE membar
6076     insert_mem_bar(Op_MemBarAcquire, loadedField);
6077   }
6078   return loadedField;

6079 }
6080 
6081 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6082                                                  bool is_exact = true, bool is_static = false,
6083                                                  ciInstanceKlass * fromKls = NULL) {
6084   if (fromKls == NULL) {
6085     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6086     assert(tinst != NULL, "obj is null");
6087     assert(tinst->klass()->is_loaded(), "obj is not loaded");
6088     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6089     fromKls = tinst->klass()->as_instance_klass();
6090   }
6091   else {
6092     assert(is_static, "only for static field access");
6093   }
6094   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6095     ciSymbol::make(fieldTypeString),
6096     is_static);
6097 
6098   assert(field != NULL, "undefined field");




 223   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
 224   bool inline_math_native(vmIntrinsics::ID id);
 225   bool inline_math(vmIntrinsics::ID id);
 226   template <typename OverflowOp>
 227   bool inline_math_overflow(Node* arg1, Node* arg2);
 228   void inline_math_mathExact(Node* math, Node* test);
 229   bool inline_math_addExactI(bool is_increment);
 230   bool inline_math_addExactL(bool is_increment);
 231   bool inline_math_multiplyExactI();
 232   bool inline_math_multiplyExactL();
 233   bool inline_math_negateExactI();
 234   bool inline_math_negateExactL();
 235   bool inline_math_subtractExactI(bool is_decrement);
 236   bool inline_math_subtractExactL(bool is_decrement);
 237   bool inline_min_max(vmIntrinsics::ID id);
 238   bool inline_notify(vmIntrinsics::ID id);
 239   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 240   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 241   int classify_unsafe_addr(Node* &base, Node* &offset);
 242   Node* make_unsafe_address(Node* base, Node* offset);




 243 
 244   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
 245   bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
 246   static bool klass_needs_init_guard(Node* kls);
 247   bool inline_unsafe_allocate();
 248   bool inline_unsafe_newArray(bool uninitialized);
 249   bool inline_unsafe_copyMemory();
 250   bool inline_native_currentThread();
 251 
 252   bool inline_native_time_funcs(address method, const char* funcName);
 253 #ifdef TRACE_HAVE_INTRINSICS
 254   bool inline_native_classID();
 255   bool inline_native_getBufferWriter();
 256 #endif
 257   bool inline_native_isInterrupted();
 258   bool inline_native_Class_query(vmIntrinsics::ID id);
 259   bool inline_native_subtype_check();
 260   bool inline_native_getLength();
 261   bool inline_array_copyOf(bool is_copyOfRange);
 262   bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
 263   bool inline_preconditions_checkIndex();
 264   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
 265   bool inline_native_clone(bool is_virtual);
 266   bool inline_native_Reflection_getCallerClass();
 267   // Helper function for inlining native object hash method
 268   bool inline_native_hashcode(bool is_virtual, bool is_static);
 269   bool inline_native_getClass();
 270 
 271   // Helper functions for inlining arraycopy
 272   bool inline_arraycopy();
 273   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
 274                                                 RegionNode* slow_region);
 275   JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
 276   void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp,
 277                                       uint new_idx);
 278 
 279   typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
 280   MemNode::MemOrd access_kind_to_memord_LS(AccessKind access_kind, bool is_store);
 281   MemNode::MemOrd access_kind_to_memord(AccessKind access_kind);
 282   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind, AccessKind access_kind);
 283   bool inline_unsafe_fence(vmIntrinsics::ID id);
 284   bool inline_onspinwait();


2130   Node* n = NULL;
2131   switch (id) {
2132   case vmIntrinsics::_numberOfLeadingZeros_i:   n = new CountLeadingZerosINode( arg);  break;
2133   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2134   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2135   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2136   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2137   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2138   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2139   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2140   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2141   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2142   default:  fatal_unexpected_iid(id);  break;
2143   }
2144   set_result(_gvn.transform(n));
2145   return true;
2146 }
2147 
2148 //----------------------------inline_unsafe_access----------------------------
2149 
2150 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2151   // Attempt to infer a sharper value type from the offset and base type.
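       // For example, an Unsafe getObject from a field declared as String, or
       // from an element of a String[] array, can typically be sharpened from
       // j.l.Object to String (provided the sharpened klass is loaded).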
2152   ciKlass* sharpened_klass = NULL;
2153 
2154   // See if it is an instance field, with an object type.
2155   if (alias_type->field() != NULL) {
2156     if (alias_type->field()->type()->is_klass()) {
2157       sharpened_klass = alias_type->field()->type()->as_klass();
2158     }
2159   }
2160 
2161   // See if it is a narrow oop array.
2162   if (adr_type->isa_aryptr()) {
2163     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2164       const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2165       if (elem_type != NULL) {
2166         sharpened_klass = elem_type->klass();
2167       }
2168     }
2169   }
2170 
2171   // The sharpened class might be unloaded if there is no class loader
2172   // constraint in place.
2173   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2174     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2175 
2176 #ifndef PRODUCT
2177     if (C->print_intrinsics() || C->print_inlining()) {
2178       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2179       tty->print("  sharpened value: ");  tjp->dump();      tty->cr();
2180     }
2181 #endif
2182     // Sharpen the value type.
2183     return tjp;
2184   }
2185   return NULL;
2186 }
2187 
2188 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2189   if (callee()->is_static())  return false;  // caller must have the capability!
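       // The decorator set collects the properties of this access (on-heap vs.
       // anonymous, memory ordering, alignment, mismatch) that the GC-interface
       // access routines used below (access_load_at / access_store_at) act on.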
2190   C2DecoratorSet decorators = C2_ACCESS_ON_ANONYMOUS;
2191   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2192   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2193   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2194 
2195   if (unaligned) {
2196     decorators |= C2_ACCESS_UNALIGNED;
2197   }
2198 
2199 #ifndef PRODUCT
2200   {
2201     ResourceMark rm;
2202     // Check the signatures.
2203     ciSignature* sig = callee()->signature();
2204 #ifdef ASSERT
2205     if (!is_store) {
2206       // Object getObject(Object base, int/long offset), etc.
2207       BasicType rtype = sig->return_type()->basic_type();
2208       assert(rtype == type, "getter must return the expected value");
2209       assert(sig->count() == 2, "oop getter has 2 arguments");
2210       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2211       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2212     } else {
2213       // void putObject(Object base, int/long offset, Object x), etc.
2214       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2215       assert(sig->count() == 3, "oop putter has 3 arguments");
2216       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2217       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2218       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();


2236   Node* base = argument(1);  // type: oop
2237   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2238   offset = argument(2);  // type: long
2239   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2240   // to be plain byte offsets, which are also the same as those accepted
2241   // by oopDesc::field_base.
2242   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2243          "fieldOffset must be byte-scaled");
2244   // 32-bit machines ignore the high half!
2245   offset = ConvL2X(offset);
2246   adr = make_unsafe_address(base, offset);
2247   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2248     heap_base_oop = base;
2249   } else if (type == T_OBJECT) {
2250     return false; // off-heap oop accesses are not supported
2251   }
2252 
2253   // Can the base be NULL? If not, this is always an on-heap access.
2254   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2255 
2256   if (!can_access_non_heap) {
2257     decorators |= C2_ACCESS_ON_HEAP;
2258   }
2259 
2260   val = is_store ? argument(4) : NULL;
2261 
2262   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2263 
2264   // Try to categorize the address.
2265   Compile::AliasType* alias_type = C->alias_type(adr_type);
2266   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2267 
2268   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2269       alias_type->adr_type() == TypeAryPtr::RANGE) {
2270     return false; // not supported
2271   }
2272 
2273   bool mismatched = false;
2274   BasicType bt = alias_type->basic_type();
2275   if (bt != T_ILLEGAL) {
2276     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2277     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2278       // Alias type doesn't differentiate between byte[] and boolean[].
2279       // Use address type to get the element type.
2280       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2281     }
2282     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2283       // accessing an array field with getObject is not a mismatch
2284       bt = T_OBJECT;
2285     }
2286     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2287       // Don't intrinsify mismatched object accesses
2288       return false;
2289     }
2290     mismatched = (bt != type);
2291   } else if (alias_type->adr_type()->isa_oopptr()) {
2292     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2293   }
2294 
2295   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2296 
2297   if (mismatched) {
2298     decorators |= C2_MISMATCHED;
2299   }
2300 
2301   // First guess at the value type.
2302   const Type *value_type = Type::get_const_basic_type(type);
2303 






2304   switch (kind) {
2305       case Relaxed:
2306         decorators |= C2_MO_RELAXED;
2307         break;
2308       case Opaque:
2309         decorators |= C2_ACCESS_ATOMIC;















2310         break;


2311       case Acquire:
2312         decorators |= C2_MO_ACQUIRE;
2313         break;
2314       case Release:
2315         decorators |= C2_MO_RELEASE;
2316         break;
2317       case Volatile:
2318         decorators |= C2_MO_VOLATILE;
2319         break;
2320       default:
2321         ShouldNotReachHere();
2322   }
2323 
2324   if (type == T_OBJECT) {
2325     if (!is_store) {












2326       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2327       if (tjp != NULL) {
2328         value_type = tjp;
2329       }
2330     } else {
2331       Compile::AliasType* at = C->alias_type(adr_type);
2332       if (adr_type->isa_instptr()) {
2333         if (at->field() != NULL) {
2334           // known field.  This code is a copy of the do_put_xxx logic.
2335           ciField* field = at->field();
2336           if (!field->type()->is_loaded()) {
2337             value_type = TypeInstPtr::BOTTOM;
2338           } else {
2339             value_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
2340           }
2341         }
2342       } else if (adr_type->isa_aryptr()) {
2343         value_type = adr_type->is_aryptr()->elem()->make_oopptr();
2344       }
2345       if (value_type == NULL) {
2346         value_type = TypeInstPtr::BOTTOM;
2347       }
2348     }
2349   }
2350 
2351   receiver = null_check(receiver);
2352   if (stopped()) {
2353     return true;
2354   }
2355   // Heap pointers get a null-check from the interpreter,
2356   // as a courtesy.  However, this is not guaranteed by Unsafe,
2357   // and it is not possible to fully distinguish unintended nulls
2358   // from intended ones in this API.
2359 
2360   if (!is_store) {
2361     Node* p = NULL;
2362     // Try to constant fold a load from a constant field
2363     ciField* field = alias_type->field();
2364     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2365       // final or stable field
2366       p = make_constant_from_field(field, heap_base_oop);
2367     }
2368 
2369     if (p == NULL) { // Could not constant fold the load
2370       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);





2371       // Normalize the value returned by getBoolean in the following cases
2372       if (type == T_BOOLEAN &&
2373           (mismatched ||
2374            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2375            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2376                                                       //   and the unsafe access is made at a large offset
2377                                                       //   (i.e., larger than the maximum offset necessary for any
2378                                                       //   field access)
2379             ) {
2380         IdealKit ideal = IdealKit(this);
2381 #define __ ideal.
2382         IdealVariable normalized_result(ideal);
2383         __ declarations_done();
2384         __ set(normalized_result, p);
2385         __ if_then(p, BoolTest::ne, ideal.ConI(0));
2386         __ set(normalized_result, ideal.ConI(1));
2387         ideal.end_if();
2388         final_sync(ideal);
2389         p = __ value(normalized_result);
2390 #undef __
2391       }
2392     }
2393     if (type == T_ADDRESS) {
2394       p = gvn().transform(new CastP2XNode(NULL, p));
















2395       p = ConvX2UL(p);





2396     }
2397     // The load node has the control of the preceding MemBarCPUOrder.  All
2398     // following nodes will have the control of the MemBarCPUOrder inserted at
2399     // the end of this method.  So, pushing the load onto the stack at a later
2400     // point is fine.
2401     set_result(p);
2402   } else {
2403     if (bt == T_ADDRESS) {





2404       // Repackage the long as a pointer.
2405       val = ConvL2X(val);
2406       val = gvn().transform(new CastX2PNode(val));























2407     }
2408     access_store_at(control(), heap_base_oop, adr, adr_type, val, value_type, type, decorators);


2409   }
2410 


2411   return true;
2412 }
2413 
2414 //----------------------------inline_unsafe_load_store----------------------------
2415 // This method serves a couple of different customers (depending on LoadStoreKind):
2416 //
2417 // LS_cmp_swap:
2418 //
2419 //   boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
2420 //   boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
2421 //   boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
2422 //
2423 // LS_cmp_swap_weak:
2424 //
2425 //   boolean weakCompareAndSwapObject(       Object o, long offset, Object expected, Object x);
2426 //   boolean weakCompareAndSwapObjectAcquire(Object o, long offset, Object expected, Object x);
2427 //   boolean weakCompareAndSwapObjectRelease(Object o, long offset, Object expected, Object x);
2428 //
2429 //   boolean weakCompareAndSwapInt(          Object o, long offset, int    expected, int    x);
2430 //   boolean weakCompareAndSwapIntAcquire(   Object o, long offset, int    expected, int    x);


2452 //
2453 //   int  getAndAddInt( Object o, long offset, int  delta)
2454 //   long getAndAddLong(Object o, long offset, long delta)
2455 //
2456 // LS_get_set:
2457 //
2458 //   int    getAndSet(Object o, long offset, int    newValue)
2459 //   long   getAndSet(Object o, long offset, long   newValue)
2460 //   Object getAndSet(Object o, long offset, Object newValue)
2461 //
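     //
     // A rough sketch of how the entry points above are expected to map onto
     // (LoadStoreKind, AccessKind) -- the exact wiring lives in the intrinsic
     // dispatch, not in this method:
     //
     //   compareAndSwap*              -> LS_cmp_swap,      Volatile
     //   weakCompareAndSwap*          -> LS_cmp_swap_weak, Relaxed (or the
     //                                   Acquire/Release variants)
     //   compareAndExchange*Volatile  -> LS_cmp_exchange,  Volatile
     //   getAndAdd* / getAndSet*      -> LS_get_add / LS_get_set, Volatile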
2462 bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
2463   // This basic scheme here is the same as inline_unsafe_access, but
2464   // differs in enough details that combining them would make the code
2465   // overly confusing.  (This is a true fact! I originally combined
2466   // them, but even I was confused by it!) As much code/comments as
2467   // possible are retained from inline_unsafe_access though to make
2468   // the correspondences clearer. - dl
2469 
2470   if (callee()->is_static())  return false;  // caller must have the capability!
2471 
2472   C2DecoratorSet decorators = C2_ACCESS_ON_HEAP;
2473 
2474   switch(access_kind) {
2475     case Opaque:
2476     case Relaxed:  decorators |= C2_MO_RELAXED;  break;
2477     case Acquire:  decorators |= C2_MO_ACQUIRE;  break;
2478     case Release:  decorators |= C2_MO_RELEASE;  break;
2479     case Volatile: decorators |= C2_MO_VOLATILE; break;
2480     default:
2481       ShouldNotReachHere();
2482   }
2483 
2484 #ifndef PRODUCT
2485   BasicType rtype;
2486   {
2487     ResourceMark rm;
2488     // Check the signatures.
2489     ciSignature* sig = callee()->signature();
2490     rtype = sig->return_type()->basic_type();
2491     switch(kind) {
2492       case LS_get_add:
2493       case LS_get_set: {
2494       // Check the signatures.
2495 #ifdef ASSERT
2496       assert(rtype == type, "get and set must return the expected type");
2497       assert(sig->count() == 3, "get and set has 3 arguments");
2498       assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2499       assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2500       assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2501       assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
2502 #endif // ASSERT
2503         break;


2595         }
2596       }
2597       break;
2598     }
2599     case LS_cmp_swap:
2600     case LS_cmp_swap_weak:
2601     case LS_get_add:
2602       break;
2603     default:
2604       ShouldNotReachHere();
2605   }
2606 
2607   // Null check receiver.
2608   receiver = null_check(receiver);
2609   if (stopped()) {
2610     return true;
2611   }
2612 
2613   int alias_idx = C->get_alias_index(adr_type);
2614 
2615   if (type == T_OBJECT) {
2616     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2617     // could be delayed during Parse (for example, in adjust_map_after_if()).
2618     // Execute transformation here to avoid barrier generation in such case.
2619     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2620       newval = _gvn.makecon(TypePtr::NULL_PTR);
2621 
2622     // Likewise canonicalize a possibly delayed NULL expected value for compare-and-exchange.
2623     if (kind == LS_cmp_exchange && _gvn.type(oldval) == TypePtr::NULL_PTR) {















2624       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2625     }
2626   }
2627 
2628   Node* result = NULL;
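       // Each case below hands the operation to the GC-interface access API
       // (access_cas_*_at, access_swap_at, access_fetch_and_add_at), which
       // builds the load-store node together with whatever barrier work the
       // active barrier set requires.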









2629   switch (kind) {

2630     case LS_cmp_exchange: {
2631       result = access_cas_val_at(control(), base, adr, adr_type, alias_idx, oldval, newval, value_type, type, decorators);
2632       break;
2633     }
2634     case LS_cmp_swap_weak:
2635       decorators |= C2_WEAK_CAS;
2636     case LS_cmp_swap: {
2637       result = access_cas_bool_at(control(), base, adr, adr_type, alias_idx, oldval, newval, value_type, type, decorators);






2638       break;
2639     }
2640     case LS_get_set: {
2641       result = access_swap_at(control(), base, adr, adr_type, alias_idx, newval, value_type, type, decorators);




2642       break;
2643     }
2644     case LS_get_add: {
2645       result = access_fetch_and_add_at(control(), base, adr, adr_type, alias_idx, newval, value_type, type, decorators);
2646       break;
2647     }
2648     default:
2649       ShouldNotReachHere();
2650   }
2651 
2652   assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2653   set_result(result);
2654   return true;
2655 }
2656 
2657 MemNode::MemOrd LibraryCallKit::access_kind_to_memord_LS(AccessKind kind, bool is_store) {
2658   MemNode::MemOrd mo = MemNode::unset;
2659   switch(kind) {
2660     case Opaque:
2661     case Relaxed:  mo = MemNode::unordered; break;
2662     case Acquire:  mo = MemNode::acquire;   break;
2663     case Release:  mo = MemNode::release;   break;
2664     case Volatile: mo = is_store ? MemNode::release : MemNode::acquire; break;
2665     default:
2666       ShouldNotReachHere();
2667   }
2668   guarantee(mo != MemNode::unset, "Should select memory ordering");
2669   return mo;
2670 }
2671 
2672 MemNode::MemOrd LibraryCallKit::access_kind_to_memord(AccessKind kind) {
2673   MemNode::MemOrd mo = MemNode::unset;


2954     PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2955     PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
2956 
2957     result_rgn->init_req(slow_result_path, control());
2958     result_io ->init_req(slow_result_path, i_o());
2959     result_mem->init_req(slow_result_path, reset_memory());
2960     result_val->init_req(slow_result_path, slow_val);
2961 
2962     set_all_memory(_gvn.transform(result_mem));
2963     set_i_o(       _gvn.transform(result_io));
2964   }
2965 
2966   C->set_has_split_ifs(true); // Has chance for split-if optimization
2967   set_result(result_rgn, result_val);
2968   return true;
2969 }
2970 
2971 //---------------------------load_mirror_from_klass----------------------------
2972 // Given a klass oop, load its java mirror (a java.lang.Class oop).
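     // Typical use: load_mirror_from_klass(load_object_klass(obj)) yields the
     // java.lang.Class mirror for obj's class (see e.g. inline_native_getClass).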
2973 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2974   C2DecoratorSet decorators = C2_ACCESS_ON_HEAP | C2_ACCESS_FREE_CONTROL;
2975   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
2976   return access_load_at(klass, p, p->bottom_type()->is_ptr(),
2977                         TypeInstPtr::MIRROR, T_OBJECT, decorators);
2978 }
2979 
2980 //-----------------------load_klass_from_mirror_common-------------------------
2981 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2982 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2983 // and branch to the given path on the region.
2984 // If never_see_null, take an uncommon trap on null, so we can optimistically
2985 // compile for the non-null case.
2986 // If the region is NULL, force never_see_null = true.
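     // For example, the mirror of a primitive type such as Integer.TYPE has no
     // corresponding klass, so the loaded klass oop is NULL and control is
     // steered to null_path (or to an uncommon trap when never_see_null).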
2987 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
2988                                                     bool never_see_null,
2989                                                     RegionNode* region,
2990                                                     int null_path,
2991                                                     int offset) {
2992   if (region == NULL)  never_see_null = true;
2993   Node* p = basic_plus_adr(mirror, offset);
2994   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
2995   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
2996   Node* null_ctl = top();
2997   kls = null_check_oop(kls, &null_ctl, never_see_null);


4103   // Conservatively insert a memory barrier on all memory slices.
4104   // Do not let writes of the copy source or destination float below the copy.
4105   insert_mem_bar(Op_MemBarCPUOrder);
4106 
4107   // Call it.  Note that the length argument is not scaled.
4108   make_runtime_call(RC_LEAF|RC_NO_FP,
4109                     OptoRuntime::fast_arraycopy_Type(),
4110                     StubRoutines::unsafe_arraycopy(),
4111                     "unsafe_arraycopy",
4112                     TypeRawPtr::BOTTOM,
4113                     src, dst, size XTOP);
4114 
4115   // Do not let reads of the copy destination float above the copy.
4116   insert_mem_bar(Op_MemBarCPUOrder);
4117 
4118   return true;
4119 }
4120 
4121 //------------------------copy_to_clone-----------------------------------
4122 // Helper function for inline_native_clone.
4123 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
4124   assert(obj_size != NULL, "");
4125   Node* raw_obj = alloc_obj->in(1);
4126   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4127 
4128   AllocateNode* alloc = NULL;
4129   if (ReduceBulkZeroing) {
4130     // We will be completely responsible for initializing this object -
4131     // mark Initialize node as complete.
4132     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4133     // The object was just allocated - there should be no stores to it yet!
4134     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4135     // Mark as complete_with_arraycopy so that on AllocateNode
4136     // expansion, we know this AllocateNode is initialized by an array
4137     // copy and a StoreStore barrier exists after the array copy.
4138     alloc->initialization()->set_complete_with_arraycopy();
4139   }
4140 
4141   // Copy the fastest available way.
4142   // TODO: generate field copies for small objects instead.


4143   Node* size = _gvn.transform(obj_size);
4144 
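       // The bulk copy is handed off to the GC interface (access_clone), so the
       // active barrier set can supply any barrier work it needs for the copy.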
4145   access_clone(control(), obj, alloc_obj, size, is_array);
4146 
4147   // Do not let reads from the cloned object float above the arraycopy.
4148   if (alloc != NULL) {
4149     // Do not let stores that initialize this object be reordered with
4150     // a subsequent store that would make this object accessible by
4151     // other threads.
4152     // Record what AllocateNode this StoreStore protects so that
4153     // escape analysis can go from the MemBarStoreStoreNode to the
4154     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4155     // based on the escape status of the AllocateNode.
4156     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
4157   } else {
4158     insert_mem_bar(Op_MemBarCPUOrder);
4159   }
4160 }
4161 
4162 //------------------------inline_native_clone----------------------------
4163 // protected native Object java.lang.Object.clone();
4164 //
4165 // Here are the simple edge cases:


4215                                 : TypeInstPtr::NOTNULL);
4216 
4217     // Conservatively insert a memory barrier on all memory slices.
4218     // Do not let writes into the original float below the clone.
4219     insert_mem_bar(Op_MemBarCPUOrder);
4220 
4221     // paths into result_reg:
4222     enum {
4223       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4224       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4225       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4226       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4227       PATH_LIMIT
4228     };
4229     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4230     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4231     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4232     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4233     record_for_igvn(result_reg);
4234 



4235     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4236     if (array_ctl != NULL) {
4237       // It's an array.
4238       PreserveJVMState pjvms(this);
4239       set_control(array_ctl);
4240       Node* obj_length = load_array_length(obj);
4241       Node* obj_size  = NULL;
4242       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4243 
4244       C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
4245       if (code_gen->array_copy_requires_gc_barriers(T_OBJECT)) {
4246         // If it is an oop array, it requires very special treatment,
4247         // because gc barriers are required when accessing the array.
4248         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4249         if (is_obja != NULL) {
4250           PreserveJVMState pjvms2(this);
4251           set_control(is_obja);
4252           // Generate a direct call to the right arraycopy function(s).
4253           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4254           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
4255           ac->set_cloneoop();
4256           Node* n = _gvn.transform(ac);
4257           assert(n == ac, "cannot disappear");
4258           ac->connect_outputs(this);
4259 
4260           result_reg->init_req(_objArray_path, control());
4261           result_val->init_req(_objArray_path, alloc_obj);
4262           result_i_o ->set_req(_objArray_path, i_o());
4263           result_mem ->set_req(_objArray_path, reset_memory());
4264         }
4265       }
4266       // Otherwise, there are no barriers to worry about.
4267       // (We can dispense with card marks if we know the allocation
4268       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4269       //  causes the non-eden paths to take compensating steps to
4270       //  simulate a fresh allocation, so that no further
4271       //  card marks are required in compiled code to initialize
4272       //  the object.)
4273 
4274       if (!stopped()) {
4275         copy_to_clone(obj, alloc_obj, obj_size, true);
4276 
4277         // Present the results of the copy.
4278         result_reg->init_req(_array_path, control());
4279         result_val->init_req(_array_path, alloc_obj);
4280         result_i_o ->set_req(_array_path, i_o());
4281         result_mem ->set_req(_array_path, reset_memory());
4282       }
4283     }
4284 
4285     // We only go to the instance fast case code if we pass a number of guards.
4286     // The paths which do not pass are accumulated in the slow_region.
4287     RegionNode* slow_region = new RegionNode(1);
4288     record_for_igvn(slow_region);
4289     if (!stopped()) {
4290       // It's an instance (we did array above).  Make the slow-path tests.
4291       // If this is a virtual call, we generate a funny guard.  We grab
4292       // the vtable entry corresponding to clone() from the target object.
4293       // If the target method which we are calling happens to be the
4294       // Object clone() method, we pass the guard.  We do not need this
4295       // guard for non-virtual calls; the caller is known to be the native


4301       // The object must be easily cloneable and must not have a finalizer.
4302       // Both of these conditions may be checked in a single test.
4303       // We could optimize the test further, but we don't care.
4304       generate_access_flags_guard(obj_klass,
4305                                   // Test both conditions:
4306                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4307                                   // Must be cloneable but not finalizer:
4308                                   JVM_ACC_IS_CLONEABLE_FAST,
4309                                   slow_region);
4310     }
4311 
4312     if (!stopped()) {
4313       // It's an instance, and it passed the slow-path tests.
4314       PreserveJVMState pjvms(this);
4315       Node* obj_size  = NULL;
4316       // Need to deoptimize on exception from allocation since Object.clone intrinsic
4317       // is reexecuted if deoptimization occurs and there could be problems when merging
4318       // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
4319       Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
4320 
4321       copy_to_clone(obj, alloc_obj, obj_size, false);
4322 
4323       // Present the results of the copy.
4324       result_reg->init_req(_instance_path, control());
4325       result_val->init_req(_instance_path, alloc_obj);
4326       result_i_o ->set_req(_instance_path, i_o());
4327       result_mem ->set_req(_instance_path, reset_memory());
4328     }
4329 
4330     // Generate code for the slow case.  We make a call to clone().
4331     set_control(_gvn.transform(slow_region));
4332     if (!stopped()) {
4333       PreserveJVMState pjvms(this);
4334       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4335       Node* slow_result = set_results_for_java_call(slow_call);
4336       // this->control() comes from set_results_for_java_call
4337       result_reg->init_req(_slow_path, control());
4338       result_val->init_req(_slow_path, slow_result);
4339       result_i_o ->set_req(_slow_path, i_o());
4340       result_mem ->set_req(_slow_path, reset_memory());
4341     }


5500 
5501   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5502                                  stubAddr, stubName, TypePtr::BOTTOM,
5503                                  crc, src_start, length);
5504 
5505   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5506   set_result(result);
5507   return true;
5508 }
5509 
5510 //----------------------------inline_reference_get----------------------------
5511 // public T java.lang.ref.Reference.get();
5512 bool LibraryCallKit::inline_reference_get() {
5513   const int referent_offset = java_lang_ref_Reference::referent_offset;
5514   guarantee(referent_offset > 0, "should have already been set");
5515 
5516   // Get the argument:
5517   Node* reference_obj = null_check_receiver();
5518   if (stopped()) return true;
5519 
5520   const TypeInstPtr* tinst = _gvn.type(reference_obj)->isa_instptr();
5521   assert(tinst != NULL, "obj is null");
5522   assert(tinst->klass()->is_loaded(), "obj is not loaded");
5523   ciInstanceKlass* referenceKlass = tinst->klass()->as_instance_klass();
5524   ciField* field = referenceKlass->get_field_by_name(ciSymbol::make("referent"),
5525                                                      ciSymbol::make("Ljava/lang/Object;"),
5526                                                      false);
5527   assert (field != NULL, "undefined field");
5528 
5529   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5530   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5531 
5532   ciInstanceKlass* klass = env()->Object_klass();
5533   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5534 
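       // The referent load is a weak access: C2_ACCESS_ON_WEAK asks the active
       // barrier set to record the loaded value (e.g. in an SATB log buffer) so
       // that the referent is kept alive for the garbage collector.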
5535   C2DecoratorSet decorators = C2_ACCESS_ON_WEAK | C2_ACCESS_ON_HEAP | C2_ACCESS_FREE_CONTROL;
5536   Node* result = access_load_at(reference_obj, adr, adr_type, object_type, T_OBJECT, decorators);








5537   // Add memory barrier to prevent commoning reads from this field
5538   // across a safepoint since GC can change its value.
5539   insert_mem_bar(Op_MemBarCPUOrder);
5540 
5541   set_result(result);
5542   return true;
5543 }
5544 
5545 
5546 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5547                                               bool is_exact=true, bool is_static=false,
5548                                               ciInstanceKlass * fromKls=NULL) {
5549   if (fromKls == NULL) {
5550     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5551     assert(tinst != NULL, "obj is null");
5552     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5553     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5554     fromKls = tinst->klass()->as_instance_klass();
5555   } else {
5556     assert(is_static, "only for static field access");


5569 
5570   // The following code is copied from Parse::do_get_xxx():
5571 
5572   // Compute address and memory type.
5573   int offset  = field->offset_in_bytes();
5574   bool is_vol = field->is_volatile();
5575   ciType* field_klass = field->type();
5576   assert(field_klass->is_loaded(), "should be loaded");
5577   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5578   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5579   BasicType bt = field->layout_type();
5580 
5581   // Build the resultant type of the load
5582   const Type *type;
5583   if (bt == T_OBJECT) {
5584     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5585   } else {
5586     type = Type::get_const_basic_type(bt);
5587   }
5588 
5589   C2DecoratorSet decorators = C2_ACCESS_ON_HEAP | C2_ACCESS_FREE_CONTROL;
5590 







5591   if (is_vol) {
5592     decorators |= C2_MO_VOLATILE;

5593   }
5594 
5595   return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
5596 }
5597 
5598 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5599                                                  bool is_exact = true, bool is_static = false,
5600                                                  ciInstanceKlass * fromKls = NULL) {
5601   if (fromKls == NULL) {
5602     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5603     assert(tinst != NULL, "obj is null");
5604     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5605     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5606     fromKls = tinst->klass()->as_instance_klass();
5607   }
5608   else {
5609     assert(is_static, "only for static field access");
5610   }
5611   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5612     ciSymbol::make(fieldTypeString),
5613     is_static);
5614 
5615   assert(field != NULL, "undefined field");

