--- old/src/hotspot/share/opto/library_call.cpp

 252   bool inline_math_multiplyExactL();
 253   bool inline_math_multiplyHigh();
 254   bool inline_math_negateExactI();
 255   bool inline_math_negateExactL();
 256   bool inline_math_subtractExactI(bool is_decrement);
 257   bool inline_math_subtractExactL(bool is_decrement);
 258   bool inline_min_max(vmIntrinsics::ID id);
 259   bool inline_notify(vmIntrinsics::ID id);
 260   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 261   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 262   int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
 263   Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);
 264 
 265   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
 266   DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
 267   bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
 268   static bool klass_needs_init_guard(Node* kls);
 269   bool inline_unsafe_allocate();
 270   bool inline_unsafe_newArray(bool uninitialized);
 271   bool inline_unsafe_copyMemory();
 272   bool inline_native_currentThread();
 273 
 274   bool inline_native_time_funcs(address method, const char* funcName);
 275 #ifdef JFR_HAVE_INTRINSICS
 276   bool inline_native_classID();
 277   bool inline_native_getEventWriter();
 278 #endif
 279   bool inline_native_isInterrupted();
 280   bool inline_native_Class_query(vmIntrinsics::ID id);
 281   bool inline_native_subtype_check();
 282   bool inline_native_getLength();
 283   bool inline_array_copyOf(bool is_copyOfRange);
 284   bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
 285   bool inline_preconditions_checkIndex();
 286   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
 287   bool inline_native_clone(bool is_virtual);
 288   bool inline_native_Reflection_getCallerClass();
 289   // Helper function for inlining native object hash method
 290   bool inline_native_hashcode(bool is_virtual, bool is_static);
 291   bool inline_native_getClass();
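
The AccessKind enum declared above (Relaxed, Opaque, Volatile, Acquire, Release) mirrors the memory-ordering flavors of the jdk.internal.misc.Unsafe accessors that inline_unsafe_access compiles. A minimal Java sketch of the five flavors for an int field; the Counter class and its "value" field are hypothetical, and Unsafe.getUnsafe() only works from JDK-internal (privileged) code:

    import jdk.internal.misc.Unsafe;

    Unsafe U = Unsafe.getUnsafe();
    long off = U.objectFieldOffset(Counter.class.getDeclaredField("value"));

    int a = U.getInt(c, off);           // Relaxed (plain)
    int b = U.getIntOpaque(c, off);     // Opaque
    int d = U.getIntAcquire(c, off);    // Acquire
    U.putIntRelease(c, off, 1);         // Release
    int e = U.getIntVolatile(c, off);   // Volatile (sequentially consistent)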


 588   case vmIntrinsics::_indexOfU:                 return inline_string_indexOf(StrIntrinsicNode::UU);
 589   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 590   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 591   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 592   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 593   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar();
 594 
 595   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 596   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 597 
 598   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 599   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 600   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 601   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 602 
 603   case vmIntrinsics::_compressStringC:
 604   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 605   case vmIntrinsics::_inflateStringC:
 606   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 607 
 608   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 609   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 610   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 611   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 612   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 613   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 614   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 615   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 616   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 617 
 618   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 619   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 620   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 621   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 622   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 623   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 624   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 625   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 626   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 627 
 628   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 629   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 630   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 631   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 632   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 633   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 634   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 635   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 636   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 637 
 638   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 639   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 640   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 641   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 642   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 643   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 644   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 645   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 646   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);


2359   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2360   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2361 
2362   if (type == T_OBJECT || type == T_ARRAY) {
2363     decorators |= ON_UNKNOWN_OOP_REF;
2364   }
2365 
2366   if (unaligned) {
2367     decorators |= C2_UNALIGNED;
2368   }
2369 
2370 #ifndef PRODUCT
2371   {
2372     ResourceMark rm;
2373     // Check the signatures.
2374     ciSignature* sig = callee()->signature();
2375 #ifdef ASSERT
2376     if (!is_store) {
2377       // Object getReference(Object base, int/long offset), etc.
2378       BasicType rtype = sig->return_type()->basic_type();
2379       assert(rtype == type, "getter must return the expected value");
2380       assert(sig->count() == 2, "oop getter has 2 arguments");
2381       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2382       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2383     } else {
2384       // void putReference(Object base, int/long offset, Object x), etc.
2385       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2386       assert(sig->count() == 3, "oop putter has 3 arguments");
2387       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2388       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2389       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2390       assert(vtype == type, "putter must accept the expected value");
2391     }
2392 #endif // ASSERT
2393  }
2394 #endif //PRODUCT
2395 
2396   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2397 
2398   Node* receiver = argument(0);  // type: oop
2399 
2400   // Build address expression.
2401   Node* adr;
2402   Node* heap_base_oop = top();
2403   Node* offset = top();
2404   Node* val;
2405 
2406   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2407   Node* base = argument(1);  // type: oop
2408   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2409   offset = argument(2);  // type: long
2410   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2411   // to be plain byte offsets, which are also the same as those accepted
2412   // by oopDesc::field_addr.
2413   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2414          "fieldOffset must be byte-scaled");
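
The assert above pins down the calling convention the rest of this function relies on: the cookies returned by Unsafe.objectFieldOffset and Unsafe.staticFieldOffset are plain byte offsets, directly usable in the base-plus-offset address built below. A small illustration (Point and its "x" field are hypothetical; U as in the earlier sketch):

    long off = U.objectFieldOffset(Point.class.getDeclaredField("x"));
    int x = U.getInt(p, off);   // reads the int located "off" bytes from the start of p
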
2415 
2416   if (base->is_ValueType()) {
2417     if (is_store) {
2418       return false;
2419     }
2420 
2421     ValueTypeNode* vt = base->as_ValueType();
2422     if (offset->is_Con()) {
2423       long off = find_long_con(offset, 0);
2424       ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass();
2425       if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2426         return false;
2427       }
2428 
2429       receiver = null_check(receiver);
2430       if (stopped()) {
2431         return true;
2432       }
2433 
2434       set_result(vt->field_value_by_offset((int)off, true));
2435       return true;
2436     } else {
2437       receiver = null_check(receiver);
2438       if (stopped()) {
2439         return true;
2440       }
2441       vt = vt->allocate(this)->as_ValueType();
2442       base = vt->get_oop();
2443     }
2444   }
2445 
2446   // 32-bit machines ignore the high half!
2447   offset = ConvL2X(offset);
2448   adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2449 
2450   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2451     heap_base_oop = base;
2452   } else if (type == T_OBJECT) {
2453     return false; // off-heap oop accesses are not supported
2454   }
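
So only primitive accesses are intrinsified when the base may be NULL: a reference access through a raw address makes the intrinsic bail out here, and the call falls back to the regular Unsafe implementation. Off-heap primitive access, which does stay intrinsified, looks like this sketch (size and value illustrative):

    long addr = U.allocateMemory(8);   // raw off-heap memory, base == null
    U.putLong(addr, 42L);              // single-address form: no base object
    long v = U.getLong(addr);
    U.freeMemory(addr);
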
2455 
2456   // Can base be NULL? Otherwise, always on-heap access.
2457   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2458 
2459   if (!can_access_non_heap) {
2460     decorators |= IN_HEAP;
2461   }
2462 
2463   val = is_store ? argument(4) : NULL;
2464 
2465   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2466 
2467   // Try to categorize the address.
2468   Compile::AliasType* alias_type = C->alias_type(adr_type);
2469   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2470 
2471   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2472       alias_type->adr_type() == TypeAryPtr::RANGE) {
2473     return false; // not supported
2474   }
2475 
2476   bool mismatched = false;
2477   BasicType bt = alias_type->basic_type();
2478   if (bt != T_ILLEGAL) {
2479     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2480     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2481       // Alias type doesn't differentiate between byte[] and boolean[].
2482       // Use address type to get the element type.
2483       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2484     }
2485     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2486       // accessing an array field with getReference is not a mismatch
2487       bt = T_OBJECT;
2488     }
2489     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2490       // Don't intrinsify mismatched object accesses
2491       return false;
2492     }
2493     mismatched = (bt != type);
2494   } else if (alias_type->adr_type()->isa_oopptr()) {
2495     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2496   }
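
A "mismatched" access is one where the static type of the accessed field (or array element) disagrees with the width the Unsafe accessor requests. It is still compiled, but the C2_MISMATCHED decorator set below suppresses optimizations such as the constant-folding path further down. An illustrative case (Holder and its field are hypothetical):

    // Holder declares: int value;
    long off = U.objectFieldOffset(Holder.class.getDeclaredField("value"));
    byte lo = U.getByte(h, off);   // bt == T_INT but type == T_BYTE: mismatched
                                   // (which byte you read depends on endianness)
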
2497 
2498   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2499 
2500   if (mismatched) {
2501     decorators |= C2_MISMATCHED;
2502   }
2503 
2504   // First guess at the value type.
2505   const Type *value_type = Type::get_const_basic_type(type);
2506 
2507   // Figure out the memory ordering.
2508   decorators |= mo_decorator_for_access_kind(kind);
2509 
2510   if (!is_store && type == T_OBJECT) {
2511     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2512     if (tjp != NULL) {
2513       value_type = tjp;
2514     }
2515   }
2516 
2517   receiver = null_check(receiver);
2518   if (stopped()) {
2519     return true;
2520   }
2521   // Heap pointers get a null-check from the interpreter,
2522   // as a courtesy.  However, this is not guaranteed by Unsafe,
2523   // and it is not possible to fully distinguish unintended nulls
2524   // from intended ones in this API.
2525 
2526   if (!is_store) {
2527     Node* p = NULL;
2528     // Try to constant fold a load from a constant field
2529     ciField* field = alias_type->field();
2530     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2531       // final or stable field
2532       p = make_constant_from_field(field, heap_base_oop);
2533     }
2534 
2535     if (p == NULL) { // Could not constant fold the load
2536       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2537       // Normalize the value returned by getBoolean in the following cases
2538       if (type == T_BOOLEAN &&
2539           (mismatched ||
2540            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2541            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2542                                                       //   and the unsafe access is made at a large offset
2543                                                       //   (i.e., larger than the maximum offset necessary for any
2544                                                       //   field access)
2545             ) {
2546           IdealKit ideal = IdealKit(this);
2547 #define __ ideal.
2548           IdealVariable normalized_result(ideal);
2549           __ declarations_done();
2550           __ set(normalized_result, p);
2551           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2552           __ set(normalized_result, ideal.ConI(1));
2553           ideal.end_if();
2554           final_sync(ideal);
2555           p = __ value(normalized_result);
2556 #undef __
2557       }
2558     }
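
The normalization above keeps the intrinsic consistent with the interpreter: a boolean read must yield 0 or 1 even if the underlying byte holds another bit pattern, e.g. after a mismatched byte store. Illustration (object and offset hypothetical):

    U.putByte(h, off, (byte) 2);        // raw byte store into a boolean-sized slot
    boolean b = U.getBoolean(h, off);   // normalized: true (1), never 2
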
2559     if (type == T_ADDRESS) {
2560       p = gvn().transform(new CastP2XNode(NULL, p));
2561       p = ConvX2UL(p);
2562     }
2563     if (field != NULL && field->is_flattenable()) {
2564       // Load a non-flattened but flattenable value type from memory
2565       assert(!field->is_flattened(), "unsafe value type load from flattened field");
2566       if (value_type->value_klass()->is_scalarizable()) {
2567         p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass());
2568       } else {
2569         p = null2default(p, value_type->value_klass());
2570       }
2571     }
2572     // The load node has the control of the preceding MemBarCPUOrder.  All
2573     // following nodes will have the control of the MemBarCPUOrder inserted at
2574     // the end of this method.  So, pushing the load onto the stack at a later
2575     // point is fine.
2576     set_result(p);
2577   } else {
2578     if (bt == T_ADDRESS) {
2579       // Repackage the long as a pointer.
2580       val = ConvL2X(val);
2581       val = gvn().transform(new CastX2PNode(val));
2582     }
2583     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2584   }
2585 
2586   return true;
2587 }
2588 
2589 //----------------------------inline_unsafe_load_store----------------------------
2590 // This method serves a couple of different customers (depending on LoadStoreKind):
2591 //
2592 // LS_cmp_swap:
2593 //
2594 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2595 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2596 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2597 //
2598 // LS_cmp_swap_weak:
2599 //
2600 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2601 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2602 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2603 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2604 //
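
Unlike the strong form, the weak variants listed above may fail spuriously even when the expected value matches, which is what lets them compile to cheaper hardware primitives; callers therefore retry in a loop. A typical retry idiom:

    int old;
    do {
        old = U.getIntVolatile(o, off);
    } while (!U.weakCompareAndSetInt(o, off, old, old + 1));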

+++ new/src/hotspot/share/opto/library_call.cpp

 252   bool inline_math_multiplyExactL();
 253   bool inline_math_multiplyHigh();
 254   bool inline_math_negateExactI();
 255   bool inline_math_negateExactL();
 256   bool inline_math_subtractExactI(bool is_decrement);
 257   bool inline_math_subtractExactL(bool is_decrement);
 258   bool inline_min_max(vmIntrinsics::ID id);
 259   bool inline_notify(vmIntrinsics::ID id);
 260   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 261   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 262   int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
 263   Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);
 264 
 265   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
 266   DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
 267   bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
 268   static bool klass_needs_init_guard(Node* kls);
 269   bool inline_unsafe_allocate();
 270   bool inline_unsafe_newArray(bool uninitialized);
 271   bool inline_unsafe_copyMemory();
 272   bool inline_unsafe_make_private_buffer();
 273   bool inline_unsafe_finish_private_buffer();
 274   bool inline_native_currentThread();
 275 
 276   bool inline_native_time_funcs(address method, const char* funcName);
 277 #ifdef JFR_HAVE_INTRINSICS
 278   bool inline_native_classID();
 279   bool inline_native_getEventWriter();
 280 #endif
 281   bool inline_native_isInterrupted();
 282   bool inline_native_Class_query(vmIntrinsics::ID id);
 283   bool inline_native_subtype_check();
 284   bool inline_native_getLength();
 285   bool inline_array_copyOf(bool is_copyOfRange);
 286   bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
 287   bool inline_preconditions_checkIndex();
 288   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
 289   bool inline_native_clone(bool is_virtual);
 290   bool inline_native_Reflection_getCallerClass();
 291   // Helper function for inlining native object hash method
 292   bool inline_native_hashcode(bool is_virtual, bool is_static);
 293   bool inline_native_getClass();


 590   case vmIntrinsics::_indexOfU:                 return inline_string_indexOf(StrIntrinsicNode::UU);
 591   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 592   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 593   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 594   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 595   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar();
 596 
 597   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 598   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 599 
 600   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 601   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 602   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 603   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 604 
 605   case vmIntrinsics::_compressStringC:
 606   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 607   case vmIntrinsics::_inflateStringC:
 608   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 609 
 610   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 611   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 612   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 613   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 614   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 615   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 616   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 617   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 618   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 619   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 620   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 621   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_VALUETYPE,Relaxed, false);
 622 
 623   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 624   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 625   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 626   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 627   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 628   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 629   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 630   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 631   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 632   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_VALUETYPE,Relaxed, false);
 633 
 634   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 635   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 636   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 637   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 638   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 639   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 640   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 641   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 642   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 643 
 644   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 645   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 646   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 647   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 648   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 649   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 650   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 651   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 652   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);


2365   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2366   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2367 
2368   if (type == T_OBJECT || type == T_ARRAY) {
2369     decorators |= ON_UNKNOWN_OOP_REF;
2370   }
2371 
2372   if (unaligned) {
2373     decorators |= C2_UNALIGNED;
2374   }
2375 
2376 #ifndef PRODUCT
2377   {
2378     ResourceMark rm;
2379     // Check the signatures.
2380     ciSignature* sig = callee()->signature();
2381 #ifdef ASSERT
2382     if (!is_store) {
2383       // Object getReference(Object base, int/long offset), etc.
2384       BasicType rtype = sig->return_type()->basic_type();
2385       assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value");
2386       assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
2387       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2388       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2389     } else {
2390       // void putReference(Object base, int/long offset, Object x), etc.
2391       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2392       assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 or 4 arguments");
2393       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2394       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2395       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2396       assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value");
2397     }
2398 #endif // ASSERT
2399  }
2400 #endif //PRODUCT
2401 
2402   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2403 
2404   Node* receiver = argument(0);  // type: oop
2405 
2406   // Build address expression.
2407   Node* adr;
2408   Node* heap_base_oop = top();
2409   Node* offset = top();
2410   Node* val;
2411 
2412   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2413   Node* base = argument(1);  // type: oop
2414   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2415   offset = argument(2);  // type: long
2416   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2417   // to be plain byte offsets, which are also the same as those accepted
2418   // by oopDesc::field_addr.
2419   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2420          "fieldOffset must be byte-scaled");
2421 
2422   ciValueKlass* value_klass = NULL;
2423   if (type == T_VALUETYPE) {
2424     Node* cls = null_check(argument(4));
2425     if (stopped()) {
2426       return true;
2427     }
2428     Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2429     const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr();
2430     if (!kls_t->klass_is_exact()) {
2431       return false;
2432     }
2433     ciKlass* klass = kls_t->klass();
2434     if (!klass->is_valuetype()) {
2435       return false;
2436     }
2437     value_klass = klass->as_value_klass();
2438   }
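
The block above handles the extra Class argument that the Valhalla-prototype getValue/putValue intrinsics carry to identify the value type being transferred; intrinsification proceeds only when it is a constant, exact value klass. A hedged sketch of the call shape, assuming the prototype's Unsafe API and a hypothetical value class MyValue:

    // Prototype shape: <V> V getValue(Object o, long offset, Class<?> vc)
    MyValue v = U.getValue(holder, off, MyValue.class);
    U.putValue(holder, off, MyValue.class, v);
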
2439
2440   receiver = null_check(receiver);
2441   if (stopped()) {
2442     return true;
2443   }
2444 
2445   if (base->is_ValueType()) {
2446     ValueTypeNode* vt = base->as_ValueType();
2447 
2448     if (is_store) {
2449       if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
2450         return false;
2451       }
2452       base = vt->get_oop();
2453     } else {
2454       if (offset->is_Con()) {
2455         long off = find_long_con(offset, 0);
2456         ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass();
2457         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2458           return false;
2459         }
2460 
2461         ciField* f = vk->get_non_flattened_field_by_offset((int)off);
2462 
2463         if (f != NULL) {
2464           BasicType bt = f->layout_type();
2465           if (bt == T_ARRAY || bt == T_NARROWOOP) {
2466             bt = T_OBJECT;
2467           }
2468           if (bt == type) {
2469             if (bt != T_VALUETYPE || f->type() == value_klass) {
2470               set_result(vt->field_value_by_offset((int)off, false));
2471               return true;
2472             }
2473           }
2474         }
2475       }
2476       vt = vt->allocate(this)->as_ValueType();
2477       base = vt->get_oop();
2478     }
2479   }
2480 
2481   // 32-bit machines ignore the high half!
2482   offset = ConvL2X(offset);
2483   adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2484 
2485   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2486     heap_base_oop = base;
2487   } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) {
2488     return false; // off-heap oop accesses are not supported
2489   }
2490 
2491   // Can base be NULL? Otherwise, always on-heap access.
2492   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2493 
2494   if (!can_access_non_heap) {
2495     decorators |= IN_HEAP;
2496   }
2497 
2498   val = is_store ? argument(4 + (type == T_VALUETYPE ? 1 : 0)) : NULL;
2499 
2500   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2501 
2502   // Try to categorize the address.
2503   Compile::AliasType* alias_type = C->alias_type(adr_type);
2504   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2505 
2506   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2507       alias_type->adr_type() == TypeAryPtr::RANGE) {
2508     return false; // not supported
2509   }
2510 
2511   bool mismatched = false;
2512   BasicType bt = T_ILLEGAL;
2513   ciField* field = NULL;
2514   if (adr_type->isa_instptr()) {
2515     const TypeInstPtr* instptr = adr_type->is_instptr();
2516     ciInstanceKlass* k = instptr->klass()->as_instance_klass();
2517     int off = instptr->offset();
2518     if (instptr->const_oop() != NULL &&
2519         instptr->klass() == ciEnv::current()->Class_klass() &&
2520         instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
2521       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2522       field = k->get_field_by_offset(off, true);
2523     } else {
2524       field = k->get_non_flattened_field_by_offset(off);
2525     }
2526     if (field != NULL) {
2527       bt = field->layout_type();
2528     }
2529     assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match");
2530     if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) {
2531       bt = T_OBJECT;
2532     }
2533   } else {
2534     bt = alias_type->basic_type();
2535   }
2536 
2537   if (bt != T_ILLEGAL) {
2538     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2539     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2540       // Alias type doesn't differentiate between byte[] and boolean[].
2541       // Use address type to get the element type.
2542       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2543     }
2544     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2545       // accessing an array field with getReference is not a mismatch
2546       bt = T_OBJECT;
2547     }
2548     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2549       // Don't intrinsify mismatched object accesses
2550       return false;
2551     }
2552     mismatched = (bt != type);
2553   } else if (alias_type->adr_type()->isa_oopptr()) {
2554     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2555   }
2556 
2557   if (type == T_VALUETYPE) {
2558     if (adr_type->isa_instptr()) {
2559       if (field == NULL || field->type() != value_klass) {
2560         mismatched = true;
2561       }
2562     } else if (adr_type->isa_aryptr()) {
2563       const Type* elem = adr_type->is_aryptr()->elem();
2564       if (!elem->isa_valuetype()) {
2565         mismatched = true;
2566       } else if (elem->is_valuetype()->value_klass() != value_klass) {
2567         mismatched = true;
2568       }
2569     }
2570     if (is_store) {
2571       const Type* val_t = _gvn.type(val);
2572       if (!val_t->isa_valuetype() ||
2573           val_t->is_valuetype()->value_klass() != value_klass) {
2574         return false;
2575       }
2576     }
2577   }
2578
2579   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2580 
2581   if (mismatched) {
2582     decorators |= C2_MISMATCHED;
2583   }
2584 
2585   // First guess at the value type.
2586   const Type *value_type = Type::get_const_basic_type(type);
2587 
2588   // Figure out the memory ordering.
2589   decorators |= mo_decorator_for_access_kind(kind);
2590 
2591   if (!is_store) {
2592     if (type == T_OBJECT) {
2593       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2594       if (tjp != NULL) {
2595         value_type = tjp;
2596       }
2597     } else if (type == T_VALUETYPE) {
2598       value_type = NULL;
2599     }
2600   }
2601 
2602   // Heap pointers get a null-check from the interpreter,
2603   // as a courtesy.  However, this is not guaranteed by Unsafe,
2604   // and it is not possible to fully distinguish unintended nulls
2605   // from intended ones in this API.
2606 
2607   if (!is_store) {
2608     Node* p = NULL;
2609     // Try to constant fold a load from a constant field
2610 
2611     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2612       // final or stable field
2613       p = make_constant_from_field(field, heap_base_oop);
2614     }
2615 
2616     if (p == NULL) { // Could not constant fold the load
2617       if (type == T_VALUETYPE) {
2618         if (adr_type->isa_instptr() && !mismatched) {
2619           ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2620           int offset = adr_type->is_instptr()->offset();
2621           p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators);
2622         } else {
2623           p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators);
2624         }
2625       } else {
2626         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2627       }
2628       // Normalize the value returned by getBoolean in the following cases
2629       if (type == T_BOOLEAN &&
2630           (mismatched ||
2631            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2632            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2633                                                       //   and the unsafe access is made at a large offset
2634                                                       //   (i.e., larger than the maximum offset necessary for any
2635                                                       //   field access)
2636             ) {
2637           IdealKit ideal = IdealKit(this);
2638 #define __ ideal.
2639           IdealVariable normalized_result(ideal);
2640           __ declarations_done();
2641           __ set(normalized_result, p);
2642           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2643           __ set(normalized_result, ideal.ConI(1));
2644           ideal.end_if();
2645           final_sync(ideal);
2646           p = __ value(normalized_result);
2647 #undef __
2648       }
2649     }
2650     if (type == T_ADDRESS) {
2651       p = gvn().transform(new CastP2XNode(NULL, p));
2652       p = ConvX2UL(p);
2653     }
2654     if (field != NULL && field->is_flattenable() && !field->is_flattened()) {
2655       // Load a non-flattened but flattenable value type from memory
2656       if (value_type->value_klass()->is_scalarizable()) {
2657         p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass());
2658       } else {
2659         p = null2default(p, value_type->value_klass());
2660       }
2661     }
2662     // The load node has the control of the preceding MemBarCPUOrder.  All
2663     // following nodes will have the control of the MemBarCPUOrder inserted at
2664     // the end of this method.  So, pushing the load onto the stack at a later
2665     // point is fine.
2666     set_result(p);
2667   } else {
2668     if (bt == T_ADDRESS) {
2669       // Repackage the long as a pointer.
2670       val = ConvL2X(val);
2671       val = gvn().transform(new CastX2PNode(val));
2672     }
2673     if (type == T_VALUETYPE) {
2674       if (adr_type->isa_instptr() && !mismatched) {
2675         ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2676         int offset = adr_type->is_instptr()->offset();
2677         val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators);
2678       } else {
2679         val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators);
2680       }
2681     } else {
2682       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2683     }
2684   }
2685 
2686   if (argument(1)->is_ValueType() && is_store) {
2687     Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass());
2688     value = value->as_ValueType()->make_larval(this, false);
2689     replace_in_map(argument(1), value);
2690   }
2691
2692   return true;
2693 }
2694 
2695 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2696   Node* receiver = argument(0);
2697   Node* value = argument(1);
2698 
2699   receiver = null_check(receiver);
2700   if (stopped()) {
2701     return true;
2702   }
2703 
2704   if (!value->is_ValueType()) {
2705     return false;
2706   }
2707 
2708   set_result(value->as_ValueType()->make_larval(this, true));
2709 
2710   return true;
2711 }
2712 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2713   Node* receiver = argument(0);
2714   Node* buffer = argument(1);
2715
2716   receiver = null_check(receiver);
2717   if (stopped()) {
2718     return true;
2719   }
2720 
2721   if (!buffer->is_ValueType()) {
2722     return false;
2723   }
2724 
2725   ValueTypeNode* vt = buffer->as_ValueType();
2726   if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
2727     return false;
2728   }
2729 
2730   set_result(vt->finish_larval(this));
2731
2732   return true;
2733 }
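
Together these two intrinsics implement the larval-buffer protocol the Valhalla prototype uses for piecewise updates of immutable values: makePrivateBuffer returns a private mutable ("larval") copy, plain Unsafe puts mutate it in place, and finishPrivateBuffer seals it back into an ordinary value. A hedged sketch (MyValue and the field offset xOff are hypothetical):

    static MyValue withX(MyValue v, long xOff) {
        v = U.makePrivateBuffer(v);        // enter larval state: private mutable copy
        U.putInt(v, xOff, 42);             // mutate a field of the buffer in place
        return U.finishPrivateBuffer(v);   // seal: result behaves as an immutable value
    }
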
2734 
2735 //----------------------------inline_unsafe_load_store----------------------------
2736 // This method serves a couple of different customers (depending on LoadStoreKind):
2737 //
2738 // LS_cmp_swap:
2739 //
2740 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2741 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2742 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2743 //
2744 // LS_cmp_swap_weak:
2745 //
2746 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2747 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2748 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2749 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2750 //