223 Node* round_double_node(Node* n);
224 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
225 bool inline_math_native(vmIntrinsics::ID id);
226 bool inline_math(vmIntrinsics::ID id);
227 template <typename OverflowOp>
228 bool inline_math_overflow(Node* arg1, Node* arg2);
229 void inline_math_mathExact(Node* math, Node* test);
230 bool inline_math_addExactI(bool is_increment);
231 bool inline_math_addExactL(bool is_increment);
232 bool inline_math_multiplyExactI();
233 bool inline_math_multiplyExactL();
234 bool inline_math_negateExactI();
235 bool inline_math_negateExactL();
236 bool inline_math_subtractExactI(bool is_decrement);
237 bool inline_math_subtractExactL(bool is_decrement);
238 bool inline_min_max(vmIntrinsics::ID id);
239 bool inline_notify(vmIntrinsics::ID id);
240 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
241 // This returns Type::AnyPtr, RawPtr, or OopPtr.
242 int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
243 Node* make_unsafe_address(Node*& base, Node* offset, BasicType type = T_ILLEGAL);
244 // Helper for inline_unsafe_access.
245 // Generates the guards that check whether the result of
246 // Unsafe.getObject should be recorded in an SATB log buffer.
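    // (SATB: the snapshot-at-the-beginning pre-write barrier used by G1's
    // concurrent marking.)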
247 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
248
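    // Access kinds, weakest to strongest: Relaxed (plain access), Opaque
    // (protected against code movement via CPUOrder membars; see below),
    // Acquire/Release (one-way ordering), Volatile (full two-way ordering).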
249 typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
250 bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
251 static bool klass_needs_init_guard(Node* kls);
252 bool inline_unsafe_allocate();
253 bool inline_unsafe_newArray(bool uninitialized);
254 bool inline_unsafe_copyMemory();
255 bool inline_native_currentThread();
256
257 bool inline_native_time_funcs(address method, const char* funcName);
258 #ifdef TRACE_HAVE_INTRINSICS
259 bool inline_native_classID();
260 bool inline_native_getBufferWriter();
261 #endif
262 bool inline_native_isInterrupted();
263 bool inline_native_Class_query(vmIntrinsics::ID id);
2094 // Offset is small => always a heap address.
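     // (If the maximum offset is below the implicit null-check limit, a
     // NULL base plus that offset would fault in the protected page, so it
     // cannot be a valid raw address; the base must be a real oop.)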
2095 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2096 if (offset_type != NULL &&
2097 base_type->offset() == 0 && // (should always be?)
2098 offset_type->_lo >= 0 &&
2099 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2100 return Type::OopPtr;
2101 } else if (type == T_OBJECT) {
2102     // An off-heap access to an oop doesn't make sense; it has to be
2103     // on-heap.
2104 return Type::OopPtr;
2105 }
2106 // Otherwise, it might either be oop+off or NULL+addr.
2107 return Type::AnyPtr;
2108 } else {
2109 // No information:
2110 return Type::AnyPtr;
2111 }
2112 }
2113
2114 inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type) {
2115 Node* uncasted_base = base;
2116 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2117 if (kind == Type::RawPtr) {
2118 return basic_plus_adr(top(), uncasted_base, offset);
2119 } else if (kind == Type::AnyPtr) {
2120 assert(base == uncasted_base, "unexpected base change");
2121     // We don't know whether this is an on-heap or off-heap access.
2122     // Fall back to a raw memory access.
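     // The CheckCastPP pins the base below the current control and retypes
     // it as a raw pointer, so the access is treated conservatively as raw
     // memory.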
2123 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2124 return basic_plus_adr(top(), raw, offset);
2125 } else {
2126 assert(base == uncasted_base, "unexpected base change");
2127     // We know this is an on-heap access, so the base can't be null.
2128 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2129 base = must_be_not_null(base, true);
2130 }
2131 return basic_plus_adr(base, offset);
2132 }
2133 }
2134
2135 //--------------------------inline_number_methods-----------------------------
2136 // inline int Integer.numberOfLeadingZeros(int)
2137 // inline int Long.numberOfLeadingZeros(long)
2138 //
2139 // inline int Integer.numberOfTrailingZeros(int)
2140 // inline int Long.numberOfTrailingZeros(long)
2342
2343 Node* receiver = argument(0); // type: oop
2344
2345 // Build address expression.
2346 Node* adr;
2347 Node* heap_base_oop = top();
2348 Node* offset = top();
2349 Node* val;
2350
2351 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2352 Node* base = argument(1); // type: oop
2353 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2354 offset = argument(2); // type: long
2355 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2356 // to be plain byte offsets, which are also the same as those accepted
2357 // by oopDesc::field_base.
2358 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2359 "fieldOffset must be byte-scaled");
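   // (Unsafe_field_offset_to_byte_offset is an identity conversion; the
   // probe value 11 is arbitrary and only verifies that assumption.)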
2360 // 32-bit machines ignore the high half!
2361 offset = ConvL2X(offset);
2362 adr = make_unsafe_address(base, offset, type);
2363 if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2364 heap_base_oop = base;
2365 } else if (type == T_OBJECT) {
2366 return false; // off-heap oop accesses are not supported
2367 }
2368
2369   // Can the base be NULL? If not, this is always an on-heap access.
2370 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2371
2372 val = is_store ? argument(4) : NULL;
2373
2374 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2375
2376 // Try to categorize the address.
2377 Compile::AliasType* alias_type = C->alias_type(adr_type);
2378 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2379
2380 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2381 alias_type->adr_type() == TypeAryPtr::RANGE) {
2382 return false; // not supported
2400 return false;
2401 }
2402 mismatched = (bt != type);
2403 } else if (alias_type->adr_type()->isa_oopptr()) {
2404 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2405 }
2406
2407 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2408
2409 // First guess at the value type.
2410 const Type *value_type = Type::get_const_basic_type(type);
2411
2412 // We will need memory barriers unless we can determine a unique
2413 // alias category for this reference. (Note: If for some reason
2414 // the barriers get omitted and the unsafe reference begins to "pollute"
2415 // the alias analysis of the rest of the graph, either Compile::can_alias
2416 // or Compile::must_alias will throw a diagnostic assert.)
2417 bool need_mem_bar = false;
2418 switch (kind) {
2419 case Relaxed:
2420 need_mem_bar = mismatched && !adr_type->isa_aryptr();
2421 break;
2422 case Opaque:
2423 // Opaque uses CPUOrder membars for protection against code movement.
2424 case Acquire:
2425 case Release:
2426 case Volatile:
2427 need_mem_bar = true;
2428 break;
2429 default:
2430 ShouldNotReachHere();
2431 }
2432
2433 // Some accesses require access atomicity for all types, notably longs and doubles.
2434 // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
2435 bool requires_atomic_access = false;
2436 switch (kind) {
2437 case Relaxed:
2438 requires_atomic_access = AlwaysAtomicAccesses;
2439 break;
2440 case Opaque:
2504 }
2505
2506   // Memory barrier to prevent normal and 'unsafe' accesses from
2507   // bypassing each other. It is emitted after the null checks, so the
2508   // exception paths do not take memory state from the barrier; this
2509   // lets us make a strong assert about mixing users of safe and unsafe
2510   // memory.
2511 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2512
2513 if (!is_store) {
2514 Node* p = NULL;
2515 // Try to constant fold a load from a constant field
2516 ciField* field = alias_type->field();
2517 if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2518 // final or stable field
2519 p = make_constant_from_field(field, heap_base_oop);
2520 }
2521 if (p == NULL) {
2522       // To be valid, unsafe loads may depend on conditions other than
2523       // the one that guards them: pin the Load node
2524 p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
2525 // load value
2526 switch (type) {
2527 case T_BOOLEAN:
2528 {
2529 // Normalize the value returned by getBoolean in the following cases
2530 if (mismatched ||
2531 heap_base_oop == top() || // - heap_base_oop is NULL or
2532 (can_access_non_heap && alias_type->field() == NULL) // - heap_base_oop is potentially NULL
2533            // and the unsafe access is made at a large offset
2534 // (i.e., larger than the maximum offset necessary for any
2535 // field access)
2536 ) {
2537 IdealKit ideal = IdealKit(this);
2538 #define __ ideal.
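   // (`__` is the usual IdealKit shorthand: `__ set(...)` expands to
   // `ideal.set(...)`.)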
2539 IdealVariable normalized_result(ideal);
2540 __ declarations_done();
2541 __ set(normalized_result, p);
2542 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2543 __ set(normalized_result, ideal.ConI(1));
2544 ideal.end_if();
2753 case LS_get_add:
2754 case LS_get_set: {
2755 receiver = argument(0); // type: oop
2756 base = argument(1); // type: oop
2757 offset = argument(2); // type: long
2758 oldval = NULL;
2759 newval = argument(4); // type: oop, int, or long
2760 break;
2761 }
2762 default:
2763 ShouldNotReachHere();
2764 }
2765
2766 // Build field offset expression.
2767 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2768 // to be plain byte offsets, which are also the same as those accepted
2769 // by oopDesc::field_base.
2770 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2771 // 32-bit machines ignore the high half of long offsets
2772 offset = ConvL2X(offset);
2773 Node* adr = make_unsafe_address(base, offset, type);
2774 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2775
2776 Compile::AliasType* alias_type = C->alias_type(adr_type);
2777 BasicType bt = alias_type->basic_type();
2778 if (bt != T_ILLEGAL &&
2779 ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
2780 // Don't intrinsify mismatched object accesses.
2781 return false;
2782 }
2783
2784   // For CAS, unlike inline_unsafe_access, there seems to be no point
2785   // in trying to refine types. Just use the coarse types here.
2786 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2787 const Type *value_type = Type::get_const_basic_type(type);
2788
2789 switch (kind) {
2790 case LS_get_set:
2791 case LS_cmp_exchange: {
2792 if (type == T_OBJECT) {
2793 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
|
223 Node* round_double_node(Node* n);
224 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
225 bool inline_math_native(vmIntrinsics::ID id);
226 bool inline_math(vmIntrinsics::ID id);
227 template <typename OverflowOp>
228 bool inline_math_overflow(Node* arg1, Node* arg2);
229 void inline_math_mathExact(Node* math, Node* test);
230 bool inline_math_addExactI(bool is_increment);
231 bool inline_math_addExactL(bool is_increment);
232 bool inline_math_multiplyExactI();
233 bool inline_math_multiplyExactL();
234 bool inline_math_negateExactI();
235 bool inline_math_negateExactL();
236 bool inline_math_subtractExactI(bool is_decrement);
237 bool inline_math_subtractExactL(bool is_decrement);
238 bool inline_min_max(vmIntrinsics::ID id);
239 bool inline_notify(vmIntrinsics::ID id);
240 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
241 // This returns Type::AnyPtr, RawPtr, or OopPtr.
242 int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
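    // When can_cast is true, profile data may be used to speculate that the
    // base is always (or never) null, trading a possible deoptimization for
    // a simpler address shape.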
243 Node* make_unsafe_address(Node*& base, Node* offset, BasicType type = T_ILLEGAL, bool can_cast = false);
244 // Helper for inline_unsafe_access.
245 // Generates the guards that check whether the result of
246 // Unsafe.getObject should be recorded in an SATB log buffer.
247 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
248
249 typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
250 bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
251 static bool klass_needs_init_guard(Node* kls);
252 bool inline_unsafe_allocate();
253 bool inline_unsafe_newArray(bool uninitialized);
254 bool inline_unsafe_copyMemory();
255 bool inline_native_currentThread();
256
257 bool inline_native_time_funcs(address method, const char* funcName);
258 #ifdef TRACE_HAVE_INTRINSICS
259 bool inline_native_classID();
260 bool inline_native_getBufferWriter();
261 #endif
262 bool inline_native_isInterrupted();
263 bool inline_native_Class_query(vmIntrinsics::ID id);
2094 // Offset is small => always a heap address.
2095 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2096 if (offset_type != NULL &&
2097 base_type->offset() == 0 && // (should always be?)
2098 offset_type->_lo >= 0 &&
2099 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2100 return Type::OopPtr;
2101 } else if (type == T_OBJECT) {
2102     // An off-heap access to an oop doesn't make sense; it has to be
2103     // on-heap.
2104 return Type::OopPtr;
2105 }
2106 // Otherwise, it might either be oop+off or NULL+addr.
2107 return Type::AnyPtr;
2108 } else {
2109 // No information:
2110 return Type::AnyPtr;
2111 }
2112 }
2113
2114 inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2115 Node* uncasted_base = base;
2116 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2117 if (kind == Type::RawPtr) {
2118 return basic_plus_adr(top(), uncasted_base, offset);
2119 } else if (kind == Type::AnyPtr) {
2120 assert(base == uncasted_base, "unexpected base change");
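   // Use speculative (profile-derived) type information to pick an on-heap
   // or off-heap shape; each speculation is guarded by its own
   // deoptimization reason below.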
2121 if (can_cast) {
2122 if (!_gvn.type(base)->speculative_maybe_null() &&
2123 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2124       // According to profiling, this access is always on-heap.
2125       // Casting the base to not-null, and thus avoiding membars
2126       // around the access, should enable better optimizations.
2127 Node* null_ctl = top();
2128 base = null_check_oop(base, &null_ctl, true, true, true);
2129 assert(null_ctl->is_top(), "no null control here");
2130 return basic_plus_adr(base, offset);
2131 } else if (_gvn.type(base)->speculative_always_null() &&
2132 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2133         // According to profiling, this access is always
2134         // off-heap.
2135 base = null_assert(base);
2136 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2137 offset = MakeConX(0);
2138 return basic_plus_adr(top(), raw_base, offset);
2139 }
2140 }
2141     // We don't know whether this is an on-heap or off-heap access.
2142     // Fall back to a raw memory access.
2143 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2144 return basic_plus_adr(top(), raw, offset);
2145 } else {
2146 assert(base == uncasted_base, "unexpected base change");
2147     // We know this is an on-heap access, so the base can't be null.
2148 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2149 base = must_be_not_null(base, true);
2150 }
2151 return basic_plus_adr(base, offset);
2152 }
2153 }
2154
2155 //--------------------------inline_number_methods-----------------------------
2156 // inline int Integer.numberOfLeadingZeros(int)
2157 // inline int Long.numberOfLeadingZeros(long)
2158 //
2159 // inline int Integer.numberOfTrailingZeros(int)
2160 // inline int Long.numberOfTrailingZeros(long)
2362
2363 Node* receiver = argument(0); // type: oop
2364
2365 // Build address expression.
2366 Node* adr;
2367 Node* heap_base_oop = top();
2368 Node* offset = top();
2369 Node* val;
2370
2371 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2372 Node* base = argument(1); // type: oop
2373 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2374 offset = argument(2); // type: long
2375 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2376 // to be plain byte offsets, which are also the same as those accepted
2377 // by oopDesc::field_base.
2378 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2379 "fieldOffset must be byte-scaled");
2380 // 32-bit machines ignore the high half!
2381 offset = ConvL2X(offset);
2382 adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2383
2384 if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2385 heap_base_oop = base;
2386 } else if (type == T_OBJECT) {
2387 return false; // off-heap oop accesses are not supported
2388 }
2389
2390   // Can the base be NULL? If not, this is always an on-heap access.
2391 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2392
2393 val = is_store ? argument(4) : NULL;
2394
2395 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2396
2397 // Try to categorize the address.
2398 Compile::AliasType* alias_type = C->alias_type(adr_type);
2399 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2400
2401 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2402 alias_type->adr_type() == TypeAryPtr::RANGE) {
2403 return false; // not supported
2421 return false;
2422 }
2423 mismatched = (bt != type);
2424 } else if (alias_type->adr_type()->isa_oopptr()) {
2425 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2426 }
2427
2428 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2429
2430 // First guess at the value type.
2431 const Type *value_type = Type::get_const_basic_type(type);
2432
2433 // We will need memory barriers unless we can determine a unique
2434 // alias category for this reference. (Note: If for some reason
2435 // the barriers get omitted and the unsafe reference begins to "pollute"
2436 // the alias analysis of the rest of the graph, either Compile::can_alias
2437 // or Compile::must_alias will throw a diagnostic assert.)
2438 bool need_mem_bar = false;
2439 switch (kind) {
2440 case Relaxed:
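       // A mismatched on-heap access (outside an array) has no unique alias
       // category, and an access that may touch raw memory must also be
       // fenced conservatively.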
2441 need_mem_bar = (mismatched && !adr_type->isa_aryptr()) || can_access_non_heap;
2442 break;
2443 case Opaque:
2444 // Opaque uses CPUOrder membars for protection against code movement.
2445 case Acquire:
2446 case Release:
2447 case Volatile:
2448 need_mem_bar = true;
2449 break;
2450 default:
2451 ShouldNotReachHere();
2452 }
2453
2454 // Some accesses require access atomicity for all types, notably longs and doubles.
2455 // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
2456 bool requires_atomic_access = false;
2457 switch (kind) {
2458 case Relaxed:
2459 requires_atomic_access = AlwaysAtomicAccesses;
2460 break;
2461 case Opaque:
2525 }
2526
2527   // Memory barrier to prevent normal and 'unsafe' accesses from
2528   // bypassing each other. It is emitted after the null checks, so the
2529   // exception paths do not take memory state from the barrier; this
2530   // lets us make a strong assert about mixing users of safe and unsafe
2531   // memory.
2532 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2533
2534 if (!is_store) {
2535 Node* p = NULL;
2536 // Try to constant fold a load from a constant field
2537 ciField* field = alias_type->field();
2538 if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2539 // final or stable field
2540 p = make_constant_from_field(field, heap_base_oop);
2541 }
2542 if (p == NULL) {
2543       // To be valid, unsafe loads may depend on conditions other than
2544       // the one that guards them: pin the Load node
2545 LoadNode::ControlDependency dep = LoadNode::Pinned;
2546 Node* ctrl = control();
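       // Exception: if the base is a non-null instance and the constant
       // offset falls within the object's layout, the access cannot trap,
       // so the load need only depend on its test, not on this control.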
2547 if (adr_type->isa_instptr()) {
2548 assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
2549 intptr_t offset = Type::OffsetBot;
2550 AddPNode::Ideal_base_and_offset(adr, &_gvn, offset);
2551 if (offset >= 0) {
2552 int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
2553 if (offset < s) {
2554 // Guaranteed to be a valid access, no need to pin it
2555 dep = LoadNode::DependsOnlyOnTest;
2556 ctrl = NULL;
2557 }
2558 }
2559 }
2560 p = make_load(ctrl, adr, value_type, type, adr_type, mo, dep, requires_atomic_access, unaligned, mismatched);
2561 // load value
2562 switch (type) {
2563 case T_BOOLEAN:
2564 {
2565 // Normalize the value returned by getBoolean in the following cases
2566 if (mismatched ||
2567 heap_base_oop == top() || // - heap_base_oop is NULL or
2568 (can_access_non_heap && alias_type->field() == NULL) // - heap_base_oop is potentially NULL
2569            // and the unsafe access is made at a large offset
2570 // (i.e., larger than the maximum offset necessary for any
2571 // field access)
2572 ) {
2573 IdealKit ideal = IdealKit(this);
2574 #define __ ideal.
2575 IdealVariable normalized_result(ideal);
2576 __ declarations_done();
2577 __ set(normalized_result, p);
2578 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2579 __ set(normalized_result, ideal.ConI(1));
2580 ideal.end_if();
2789 case LS_get_add:
2790 case LS_get_set: {
2791 receiver = argument(0); // type: oop
2792 base = argument(1); // type: oop
2793 offset = argument(2); // type: long
2794 oldval = NULL;
2795 newval = argument(4); // type: oop, int, or long
2796 break;
2797 }
2798 default:
2799 ShouldNotReachHere();
2800 }
2801
2802 // Build field offset expression.
2803 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2804 // to be plain byte offsets, which are also the same as those accepted
2805 // by oopDesc::field_base.
2806 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2807 // 32-bit machines ignore the high half of long offsets
2808 offset = ConvL2X(offset);
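   // can_cast is false on this path: atomic ops are fully fenced anyway,
   // so a speculative not-null cast would presumably buy nothing here.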
2809 Node* adr = make_unsafe_address(base, offset, type, false);
2810 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2811
2812 Compile::AliasType* alias_type = C->alias_type(adr_type);
2813 BasicType bt = alias_type->basic_type();
2814 if (bt != T_ILLEGAL &&
2815 ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
2816 // Don't intrinsify mismatched object accesses.
2817 return false;
2818 }
2819
2820   // For CAS, unlike inline_unsafe_access, there seems to be no point
2821   // in trying to refine types. Just use the coarse types here.
2822 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2823 const Type *value_type = Type::get_const_basic_type(type);
2824
2825 switch (kind) {
2826 case LS_get_set:
2827 case LS_cmp_exchange: {
2828 if (type == T_OBJECT) {
2829 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
|