48
49 public:
50 LibraryIntrinsic(ciMethod* m, bool is_virtual, vmIntrinsics::ID id)
51 : InlineCallGenerator(m),
52 _is_virtual(is_virtual),
53 _intrinsic_id(id)
54 {
55 }
56 virtual bool is_intrinsic() const { return true; }
57 virtual bool is_virtual() const { return _is_virtual; }
58 virtual JVMState* generate(JVMState* jvms);
59 vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
60 };
61
62
63 // Local helper class for LibraryIntrinsic:
64 class LibraryCallKit : public GraphKit {
65 private:
66 LibraryIntrinsic* _intrinsic; // the library intrinsic being called
67
68 public:
69 LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
70 : GraphKit(caller),
71 _intrinsic(intrinsic)
72 {
73 }
74
75 ciMethod* caller() const { return jvms()->method(); }
76 int bci() const { return jvms()->bci(); }
77 LibraryIntrinsic* intrinsic() const { return _intrinsic; }
78 vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
79 ciMethod* callee() const { return _intrinsic->method(); }
80 ciSignature* signature() const { return callee()->signature(); }
81 int arg_size() const { return callee()->arg_size(); }
82
83 bool try_to_inline();
84
85 // Helper functions to inline natives
86 void push_result(RegionNode* region, PhiNode* value);
87 Node* generate_guard(Node* test, RegionNode* region, float true_prob);
223 Node* dest_size, bool dest_uninitialized);
224 void generate_slow_arraycopy(const TypePtr* adr_type,
225 Node* src, Node* src_offset,
226 Node* dest, Node* dest_offset,
227 Node* copy_length, bool dest_uninitialized);
228 Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
229 Node* dest_elem_klass,
230 Node* src, Node* src_offset,
231 Node* dest, Node* dest_offset,
232 Node* copy_length, bool dest_uninitialized);
233 Node* generate_generic_arraycopy(const TypePtr* adr_type,
234 Node* src, Node* src_offset,
235 Node* dest, Node* dest_offset,
236 Node* copy_length, bool dest_uninitialized);
237 void generate_unchecked_arraycopy(const TypePtr* adr_type,
238 BasicType basic_elem_type,
239 bool disjoint_bases,
240 Node* src, Node* src_offset,
241 Node* dest, Node* dest_offset,
242 Node* copy_length, bool dest_uninitialized);
243 bool inline_unsafe_CAS(BasicType type);
244 bool inline_unsafe_ordered_store(BasicType type);
245 bool inline_fp_conversions(vmIntrinsics::ID id);
246 bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
247 bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
248 bool inline_bitCount(vmIntrinsics::ID id);
249 bool inline_reverseBytes(vmIntrinsics::ID id);
250
251 bool inline_reference_get();
252 };
253
254
255 //---------------------------make_vm_intrinsic----------------------------
256 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
257 vmIntrinsics::ID id = m->intrinsic_id();
258 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
259
260 if (DisableIntrinsic[0] != '\0'
261 && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
262 // disabled by a user request on the command line:
263 // example: -XX:DisableIntrinsic=_hashCode,_getClass
272 // Only a few intrinsics implement a virtual dispatch.
273 // They are expensive calls which are also frequently overridden.
274 if (is_virtual) {
275 switch (id) {
276 case vmIntrinsics::_hashCode:
277 case vmIntrinsics::_clone:
278 // OK, Object.hashCode and Object.clone intrinsics come in both flavors
279 break;
280 default:
281 return NULL;
282 }
283 }
284
285 // -XX:-InlineNatives disables nearly all intrinsics:
286 if (!InlineNatives) {
287 switch (id) {
288 case vmIntrinsics::_indexOf:
289 case vmIntrinsics::_compareTo:
290 case vmIntrinsics::_equals:
291 case vmIntrinsics::_equalsC:
292 break; // InlineNatives does not control these String/array intrinsics
293 default:
294 return NULL;
295 }
296 }
297
298 switch (id) {
299 case vmIntrinsics::_compareTo:
300 if (!SpecialStringCompareTo) return NULL;
301 break;
302 case vmIntrinsics::_indexOf:
303 if (!SpecialStringIndexOf) return NULL;
304 break;
305 case vmIntrinsics::_equals:
306 if (!SpecialStringEquals) return NULL;
307 break;
308 case vmIntrinsics::_equalsC:
309 if (!SpecialArraysEquals) return NULL;
310 break;
311 case vmIntrinsics::_arraycopy:
350 case vmIntrinsics::_numberOfLeadingZeros_l:
351 if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
352 break;
353
354 case vmIntrinsics::_numberOfTrailingZeros_i:
355 if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
356 break;
357
358 case vmIntrinsics::_numberOfTrailingZeros_l:
359 if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
360 break;
361
362 case vmIntrinsics::_Reference_get:
363 // It is only when G1 is enabled that we absolutely
364 // need to use the intrinsic version of Reference.get()
365 // so that the value in the referent field, if necessary,
366 // can be registered by the pre-barrier code.
367 if (!UseG1GC) return NULL;
368 break;
369
370 default:
371 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
372 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
373 break;
374 }
375
376 // -XX:-InlineClassNatives disables natives from the Class class.
377 // The flag applies to all reflective calls, notably Array.newArray
378 // (visible to Java programmers as Array.newInstance).
379 if (m->holder()->name() == ciSymbol::java_lang_Class() ||
380 m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
381 if (!InlineClassNatives) return NULL;
382 }
383
384 // -XX:-InlineThreadNatives disables natives from the Thread class.
385 if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
386 if (!InlineThreadNatives) return NULL;
387 }
388
389 // -XX:-InlineMathNatives disables natives from the Math, Float and Double classes.
601 return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
602 case vmIntrinsics::_putIntVolatile:
603 return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
604 case vmIntrinsics::_putLongVolatile:
605 return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
606 case vmIntrinsics::_putFloatVolatile:
607 return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
608 case vmIntrinsics::_putDoubleVolatile:
609 return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);
610
611 case vmIntrinsics::_prefetchRead:
612 return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
613 case vmIntrinsics::_prefetchWrite:
614 return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
615 case vmIntrinsics::_prefetchReadStatic:
616 return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
617 case vmIntrinsics::_prefetchWriteStatic:
618 return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
619
620 case vmIntrinsics::_compareAndSwapObject:
621 return inline_unsafe_CAS(T_OBJECT);
622 case vmIntrinsics::_compareAndSwapInt:
623 return inline_unsafe_CAS(T_INT);
624 case vmIntrinsics::_compareAndSwapLong:
625 return inline_unsafe_CAS(T_LONG);
626
627 case vmIntrinsics::_putOrderedObject:
628 return inline_unsafe_ordered_store(T_OBJECT);
629 case vmIntrinsics::_putOrderedInt:
630 return inline_unsafe_ordered_store(T_INT);
631 case vmIntrinsics::_putOrderedLong:
632 return inline_unsafe_ordered_store(T_LONG);
633
634 case vmIntrinsics::_currentThread:
635 return inline_native_currentThread();
636 case vmIntrinsics::_isInterrupted:
637 return inline_native_isInterrupted();
638
639 #ifdef TRACE_HAVE_INTRINSICS
640 case vmIntrinsics::_classID:
641 return inline_native_classID();
642 case vmIntrinsics::_threadID:
643 return inline_native_threadID();
644 case vmIntrinsics::_counterTime:
645 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
646 #endif
647 case vmIntrinsics::_currentTimeMillis:
648 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
649 case vmIntrinsics::_nanoTime:
650 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
651 case vmIntrinsics::_allocateInstance:
652 return inline_unsafe_allocate();
653 case vmIntrinsics::_copyMemory:
2221 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2222 pre_val /* pre_val */,
2223 T_OBJECT);
2224
2225 // Update IdealKit from GraphKit.
2226 __ sync_kit(this);
2227
2228 } __ end_if(); // _ref_type != ref_none
2229 } __ end_if(); // base != NULL
2230 } __ end_if(); // offset == referent_offset
2231
2232 // Final sync IdealKit and GraphKit.
2233 final_sync(ideal);
2234 #undef __
2235 }
2236
2237
2238 // Interpret Unsafe.fieldOffset cookies correctly:
2239 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2240
2241 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2242 if (callee()->is_static()) return false; // caller must have the capability!
2243
2244 #ifndef PRODUCT
2245 {
2246 ResourceMark rm;
2247 // Check the signatures.
2248 ciSignature* sig = signature();
2249 #ifdef ASSERT
2250 if (!is_store) {
2251 // Object getObject(Object base, int/long offset), etc.
2252 BasicType rtype = sig->return_type()->basic_type();
2253 if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2254 rtype = T_ADDRESS; // it is really a C void*
2255 assert(rtype == type, "getter must return the expected value");
2256 if (!is_native_ptr) {
2257 assert(sig->count() == 2, "oop getter has 2 arguments");
2258 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2259 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2260 } else {
2347 // Try to categorize the address. If it comes up as TypeOopPtr::BOTTOM,
2348 // there was not enough information to nail it down.
2349 Compile::AliasType* alias_type = C->alias_type(adr_type);
2350 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2351
2352 // We will need memory barriers unless we can determine a unique
2353 // alias category for this reference. (Note: If for some reason
2354 // the barriers get omitted and the unsafe reference begins to "pollute"
2355 // the alias analysis of the rest of the graph, either Compile::can_alias
2356 // or Compile::must_alias will throw a diagnostic assert.)
2357 bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2358
2359 // If we are reading the value of the referent field of a Reference
2360 // object (either by using Unsafe directly or through reflection)
2361 // then, if G1 is enabled, we need to record the referent in an
2362 // SATB log buffer using the pre-barrier mechanism.
2363 bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store &&
2364 offset != top() && heap_base_oop != top();
2365
2366 if (!is_store && type == T_OBJECT) {
2367 // Attempt to infer a sharper value type from the offset and base type.
2368 ciKlass* sharpened_klass = NULL;
2369
2370 // See if it is an instance field, with an object type.
2371 if (alias_type->field() != NULL) {
2372 assert(!is_native_ptr, "native pointer op cannot use a java address");
2373 if (alias_type->field()->type()->is_klass()) {
2374 sharpened_klass = alias_type->field()->type()->as_klass();
2375 }
2376 }
2377
2378 // See if it is a narrow oop array.
2379 if (adr_type->isa_aryptr()) {
2380 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2381 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2382 if (elem_type != NULL) {
2383 sharpened_klass = elem_type->klass();
2384 }
2385 }
2386 }
2387
2388 if (sharpened_klass != NULL) {
2389 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2390
2391 // Sharpen the value type.
2392 value_type = tjp;
2393
2394 #ifndef PRODUCT
2395 if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
2396 tty->print(" from base type: "); adr_type->dump();
2397 tty->print(" sharpened value: "); value_type->dump();
2398 }
2399 #endif
2400 }
2401 }
2402
2403 // Null check on self without removing any arguments. The argument
2404 // null check technically happens in the wrong place, which can lead to
2405 // invalid stack traces when the primitive is inlined into a method
2406 // which handles NullPointerExceptions.
2407 _sp += nargs;
2408 do_null_check(receiver, T_OBJECT);
2409 _sp -= nargs;
2410 if (stopped()) {
2411 return true;
2412 }
2413 // Heap pointers get a null-check from the interpreter,
2414 // as a courtesy. However, this is not guaranteed by Unsafe,
2415 // and it is not possible to fully distinguish unintended nulls
2416 // from intended ones in this API.
2417
2418 if (is_volatile) {
2419 // We need to emit leading and trailing CPU membars (see below) in
2590 do_null_check(receiver, T_OBJECT);
2591 _sp -= nargs;
2592 if (stopped()) {
2593 return true;
2594 }
2595 }
2596
2597 // Generate the read or write prefetch
2598 Node *prefetch;
2599 if (is_store) {
2600 prefetch = new (C, 3) PrefetchWriteNode(i_o(), adr);
2601 } else {
2602 prefetch = new (C, 3) PrefetchReadNode(i_o(), adr);
2603 }
2604 prefetch->init_req(0, control());
2605 set_i_o(_gvn.transform(prefetch));
2606
2607 return true;
2608 }
2609
2610 //----------------------------inline_unsafe_CAS----------------------------
2611
2612 bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
2613 // This basic scheme here is the same as inline_unsafe_access, but
2614 // differs in enough details that combining them would make the code
2615 // overly confusing. (This is a true fact! I originally combined
2616 // them, but even I was confused by it!) As much code/comments as
2617 // possible are retained from inline_unsafe_access though to make
2618 // the correspondences clearer. - dl
2619
2620 if (callee()->is_static()) return false; // caller must have the capability!
2621
2622 #ifndef PRODUCT
2623 {
2624 ResourceMark rm;
2625 // Check the signatures.
2626 ciSignature* sig = signature();
2627 #ifdef ASSERT
2628 BasicType rtype = sig->return_type()->basic_type();
2629 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2630 assert(sig->count() == 4, "CAS has 4 arguments");
2631 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2632 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2633 #endif // ASSERT
2634 }
2635 #endif //PRODUCT
2636
2637 // number of stack slots per value argument (1 or 2)
2638 int type_words = type2size[type];
2639
2640 // Cannot inline wide CAS on machines that don't support it natively
2641 if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8())
2642 return false;
2643
2644 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2645
2646 // Argument words: "this" plus oop plus offset plus oldvalue plus newvalue;
2647 int nargs = 1 + 1 + 2 + type_words + type_words;
2648
2649 // pop arguments: newval, oldval, offset, base, and receiver
2650 debug_only(int saved_sp = _sp);
2651 _sp += nargs;
2652 Node* newval = (type_words == 1) ? pop() : pop_pair();
2653 Node* oldval = (type_words == 1) ? pop() : pop_pair();
2654 Node *offset = pop_pair();
2655 Node *base = pop();
2656 Node *receiver = pop();
2657 assert(saved_sp == _sp, "must have correct argument count");
2658
2659 // Null check receiver.
2660 _sp += nargs;
2661 do_null_check(receiver, T_OBJECT);
2662 _sp -= nargs;
2663 if (stopped()) {
2664 return true;
2665 }
2666
2667 // Build field offset expression.
2668 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2669 // to be plain byte offsets, which are also the same as those accepted
2670 // by oopDesc::field_base.
2671 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2672 // 32-bit machines ignore the high half of long offsets
2673 offset = ConvL2X(offset);
2674 Node* adr = make_unsafe_address(base, offset);
2675 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2676
2677 // (Unlike inline_unsafe_access, there seems no point in trying
2678 // to refine types. Just use the coarse types here.)
2679 const Type *value_type = Type::get_const_basic_type(type);
2680 Compile::AliasType* alias_type = C->alias_type(adr_type);
2681 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2682 int alias_idx = C->get_alias_index(adr_type);
2683
2684 // Memory-model-wise, a CAS acts like a little synchronized block,
2685 // so needs barriers on each side. These don't translate into
2686 // actual barriers on most machines, but we still need the rest of
2687 // the compiler to respect ordering.
2688
2689 insert_mem_bar(Op_MemBarRelease);
2690 insert_mem_bar(Op_MemBarCPUOrder);
2691
2692 // 4984716: MemBars must be inserted before this
2693 // memory node in order to avoid a false
2694 // dependency which will confuse the scheduler.
2695 Node *mem = memory(alias_idx);
2696
2697 // For now, we handle only those cases that actually exist: ints,
2698 // longs, and Object. Adding others should be straightforward.
2699 Node* cas;
2700 switch(type) {
2701 case T_INT:
2702 cas = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2703 break;
2704 case T_LONG:
2705 cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2706 break;
2707 case T_OBJECT:
2708 // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2709 // could be delayed during Parse (for example, in adjust_map_after_if()).
2710 // Execute transformation here to avoid barrier generation in such case.
2711 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2712 newval = _gvn.makecon(TypePtr::NULL_PTR);
2713
2714 // Reference stores need a store barrier.
2715 // (They don't if CAS fails, but it isn't worth checking.)
2716 pre_barrier(true /* do_load*/,
2717 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2718 NULL /* pre_val*/,
2719 T_OBJECT);
2720 #ifdef _LP64
2721 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2722 Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2723 Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2724 cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
2725 newval_enc, oldval_enc));
2726 } else
2727 #endif
2728 {
2729 cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2730 }
2731 post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
2732 break;
2733 default:
2734 ShouldNotReachHere();
2735 break;
2736 }
2737
2738 // SCMemProjNodes represent the memory state of CAS. Their main
2739 // role is to prevent CAS nodes from being optimized away when their
2740 // results aren't used.
2741 Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
2742 set_memory(proj, alias_idx);
2743
2744 // Add the trailing membars surrounding the access
2745 insert_mem_bar(Op_MemBarCPUOrder);
2746 insert_mem_bar(Op_MemBarAcquire);
2747
2748 push(cas);
2749 return true;
2750 }
2751
2752 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2753 // This is another variant of inline_unsafe_access, differing in
2754 // that it always issues a store-store ("release") barrier and ensures
2755 // store-atomicity (which only matters for "long").
2756
2757 if (callee()->is_static()) return false; // caller must have the capability!
2758
2759 #ifndef PRODUCT
2760 {
2761 ResourceMark rm;
2762 // Check the signatures.
2763 ciSignature* sig = signature();
2764 #ifdef ASSERT
2765 BasicType rtype = sig->return_type()->basic_type();
2766 assert(rtype == T_VOID, "must return void");
2767 assert(sig->count() == 3, "has 3 arguments");
2768 assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
48
49 public:
50 LibraryIntrinsic(ciMethod* m, bool is_virtual, vmIntrinsics::ID id)
51 : InlineCallGenerator(m),
52 _is_virtual(is_virtual),
53 _intrinsic_id(id)
54 {
55 }
56 virtual bool is_intrinsic() const { return true; }
57 virtual bool is_virtual() const { return _is_virtual; }
58 virtual JVMState* generate(JVMState* jvms);
59 vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
60 };
61
62
63 // Local helper class for LibraryIntrinsic:
64 class LibraryCallKit : public GraphKit {
65 private:
66 LibraryIntrinsic* _intrinsic; // the library intrinsic being called
67
68 const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
69
70 public:
71 LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
72 : GraphKit(caller),
73 _intrinsic(intrinsic)
74 {
75 }
76
77 ciMethod* caller() const { return jvms()->method(); }
78 int bci() const { return jvms()->bci(); }
79 LibraryIntrinsic* intrinsic() const { return _intrinsic; }
80 vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
81 ciMethod* callee() const { return _intrinsic->method(); }
82 ciSignature* signature() const { return callee()->signature(); }
83 int arg_size() const { return callee()->arg_size(); }
84
85 bool try_to_inline();
86
87 // Helper functions to inline natives
88 void push_result(RegionNode* region, PhiNode* value);
89 Node* generate_guard(Node* test, RegionNode* region, float true_prob);
225 Node* dest_size, bool dest_uninitialized);
226 void generate_slow_arraycopy(const TypePtr* adr_type,
227 Node* src, Node* src_offset,
228 Node* dest, Node* dest_offset,
229 Node* copy_length, bool dest_uninitialized);
230 Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
231 Node* dest_elem_klass,
232 Node* src, Node* src_offset,
233 Node* dest, Node* dest_offset,
234 Node* copy_length, bool dest_uninitialized);
235 Node* generate_generic_arraycopy(const TypePtr* adr_type,
236 Node* src, Node* src_offset,
237 Node* dest, Node* dest_offset,
238 Node* copy_length, bool dest_uninitialized);
239 void generate_unchecked_arraycopy(const TypePtr* adr_type,
240 BasicType basic_elem_type,
241 bool disjoint_bases,
242 Node* src, Node* src_offset,
243 Node* dest, Node* dest_offset,
244 Node* copy_length, bool dest_uninitialized);
245 typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
246 bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
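  // Mapping (see the intrinsic dispatch below):
  //   LS_xadd    <-> _getAndAddInt/_getAndAddLong
  //   LS_xchg    <-> _getAndSetInt/_getAndSetLong/_getAndSetObject
  //   LS_cmpxchg <-> _compareAndSwapInt/_compareAndSwapLong/_compareAndSwapObject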
247 bool inline_unsafe_ordered_store(BasicType type);
248 bool inline_fp_conversions(vmIntrinsics::ID id);
249 bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
250 bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
251 bool inline_bitCount(vmIntrinsics::ID id);
252 bool inline_reverseBytes(vmIntrinsics::ID id);
253
254 bool inline_reference_get();
255 };
256
257
258 //---------------------------make_vm_intrinsic----------------------------
259 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
260 vmIntrinsics::ID id = m->intrinsic_id();
261 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
262
263 if (DisableIntrinsic[0] != '\0'
264 && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
265 // disabled by a user request on the command line:
266 // example: -XX:DisableIntrinsic=_hashCode,_getClass
275 // Only a few intrinsics implement a virtual dispatch.
276 // They are expensive calls which are also frequently overridden.
277 if (is_virtual) {
278 switch (id) {
279 case vmIntrinsics::_hashCode:
280 case vmIntrinsics::_clone:
281 // OK, Object.hashCode and Object.clone intrinsics come in both flavors
282 break;
283 default:
284 return NULL;
285 }
286 }
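  // Sketch (assumption; the guard code is not shown in this excerpt):
  // for a virtual call site such as "o.hashCode()" the intrinsic is
  // still usable because the generated code checks the receiver's
  // vtable entry and falls back to a true virtual call when the
  // method is overridden.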
287
288 // -XX:-InlineNatives disables nearly all intrinsics:
289 if (!InlineNatives) {
290 switch (id) {
291 case vmIntrinsics::_indexOf:
292 case vmIntrinsics::_compareTo:
293 case vmIntrinsics::_equals:
294 case vmIntrinsics::_equalsC:
295 case vmIntrinsics::_getAndAddInt:
296 case vmIntrinsics::_getAndAddLong:
297 case vmIntrinsics::_getAndSetInt:
298 case vmIntrinsics::_getAndSetLong:
299 case vmIntrinsics::_getAndSetObject:
300 break; // InlineNatives does not control these String, array, and Unsafe intrinsics
301 default:
302 return NULL;
303 }
304 }
305
306 switch (id) {
307 case vmIntrinsics::_compareTo:
308 if (!SpecialStringCompareTo) return NULL;
309 break;
310 case vmIntrinsics::_indexOf:
311 if (!SpecialStringIndexOf) return NULL;
312 break;
313 case vmIntrinsics::_equals:
314 if (!SpecialStringEquals) return NULL;
315 break;
316 case vmIntrinsics::_equalsC:
317 if (!SpecialArraysEquals) return NULL;
318 break;
319 case vmIntrinsics::_arraycopy:
358 case vmIntrinsics::_numberOfLeadingZeros_l:
359 if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
360 break;
361
362 case vmIntrinsics::_numberOfTrailingZeros_i:
363 if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
364 break;
365
366 case vmIntrinsics::_numberOfTrailingZeros_l:
367 if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
368 break;
369
370 case vmIntrinsics::_Reference_get:
371 // It is only when G1 is enabled that we absolutely
372 // need to use the intrinsic version of Reference.get()
373 // so that the value in the referent field, if necessary,
374 // can be registered by the pre-barrier code.
375 if (!UseG1GC) return NULL;
376 break;
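      // Sketch of the Java side this intrinsic targets (assumption):
      //   class Reference<T> { T referent; T get() { return referent; } }
      // The intrinsic form lets the loaded referent be handed to the
      // G1 SATB pre-barrier; an ordinary compiled load would skip it.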
377
378 case vmIntrinsics::_compareAndSwapObject:
379 #ifdef _LP64
380 if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
381 #endif
382 break;
383
384 case vmIntrinsics::_compareAndSwapLong:
385 if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
386 break;
387
388 case vmIntrinsics::_getAndAddInt:
389 if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
390 break;
391
392 case vmIntrinsics::_getAndAddLong:
393 if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
394 break;
395
396 case vmIntrinsics::_getAndSetInt:
397 if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
398 break;
399
400 case vmIntrinsics::_getAndSetLong:
401 if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
402 break;
403
404 case vmIntrinsics::_getAndSetObject:
405 #ifdef _LP64
406 if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
407 if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
408 break;
409 #else
410 if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
411 break;
412 #endif
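    // On 64-bit VMs the node shape follows the oop encoding: with
    // UseCompressedOops a GetAndSetN swaps a narrow oop, otherwise
    // GetAndSetP swaps a full-width one (see the EncodeP/DecodeN
    // handling in inline_unsafe_load_store below).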
413
414 default:
415 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
416 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
417 break;
418 }
419
420 // -XX:-InlineClassNatives disables natives from the Class class.
421 // The flag applies to all reflective calls, notably Array.newArray
422 // (visible to Java programmers as Array.newInstance).
423 if (m->holder()->name() == ciSymbol::java_lang_Class() ||
424 m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
425 if (!InlineClassNatives) return NULL;
426 }
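  // Illustrative effect (assumption): running with
  // -XX:-InlineClassNatives leaves Array.newInstance(...) as a real
  // native call instead of compiling it through this intrinsic path.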
427
428 // -XX:-InlineThreadNatives disables natives from the Thread class.
429 if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
430 if (!InlineThreadNatives) return NULL;
431 }
432
433 // -XX:-InlineMathNatives disables natives from the Math, Float and Double classes.
645 return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
646 case vmIntrinsics::_putIntVolatile:
647 return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
648 case vmIntrinsics::_putLongVolatile:
649 return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
650 case vmIntrinsics::_putFloatVolatile:
651 return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
652 case vmIntrinsics::_putDoubleVolatile:
653 return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);
654
655 case vmIntrinsics::_prefetchRead:
656 return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
657 case vmIntrinsics::_prefetchWrite:
658 return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
659 case vmIntrinsics::_prefetchReadStatic:
660 return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
661 case vmIntrinsics::_prefetchWriteStatic:
662 return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
663
664 case vmIntrinsics::_compareAndSwapObject:
665 return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
666 case vmIntrinsics::_compareAndSwapInt:
667 return inline_unsafe_load_store(T_INT, LS_cmpxchg);
668 case vmIntrinsics::_compareAndSwapLong:
669 return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
670
671 case vmIntrinsics::_putOrderedObject:
672 return inline_unsafe_ordered_store(T_OBJECT);
673 case vmIntrinsics::_putOrderedInt:
674 return inline_unsafe_ordered_store(T_INT);
675 case vmIntrinsics::_putOrderedLong:
676 return inline_unsafe_ordered_store(T_LONG);
677
678 case vmIntrinsics::_getAndAddInt:
679 return inline_unsafe_load_store(T_INT, LS_xadd);
680 case vmIntrinsics::_getAndAddLong:
681 return inline_unsafe_load_store(T_LONG, LS_xadd);
682 case vmIntrinsics::_getAndSetInt:
683 return inline_unsafe_load_store(T_INT, LS_xchg);
684 case vmIntrinsics::_getAndSetLong:
685 return inline_unsafe_load_store(T_LONG, LS_xchg);
686 case vmIntrinsics::_getAndSetObject:
687 return inline_unsafe_load_store(T_OBJECT, LS_xchg);
688
689 case vmIntrinsics::_currentThread:
690 return inline_native_currentThread();
691 case vmIntrinsics::_isInterrupted:
692 return inline_native_isInterrupted();
693
694 #ifdef TRACE_HAVE_INTRINSICS
695 case vmIntrinsics::_classID:
696 return inline_native_classID();
697 case vmIntrinsics::_threadID:
698 return inline_native_threadID();
699 case vmIntrinsics::_counterTime:
700 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
701 #endif
702 case vmIntrinsics::_currentTimeMillis:
703 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
704 case vmIntrinsics::_nanoTime:
705 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
706 case vmIntrinsics::_allocateInstance:
707 return inline_unsafe_allocate();
708 case vmIntrinsics::_copyMemory:
2276 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2277 pre_val /* pre_val */,
2278 T_OBJECT);
2279
2281 // Update IdealKit from GraphKit.
2281 __ sync_kit(this);
2282
2283 } __ end_if(); // _ref_type != ref_none
2284 } __ end_if(); // base != NULL
2285 } __ end_if(); // offset == referent_offset
2286
2287 // Final sync IdealKit and GraphKit.
2288 final_sync(ideal);
2289 #undef __
2290 }
2291
2292
2293 // Interpret Unsafe.fieldOffset cookies correctly:
2294 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2295
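// Shared type-sharpening helper, used below by inline_unsafe_access
// and inline_unsafe_load_store: it tries to replace the coarse
// T_OBJECT value type with the declared field or array-element type.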
2296 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
2297 // Attempt to infer a sharper value type from the offset and base type.
2298 ciKlass* sharpened_klass = NULL;
2299
2300 // See if it is an instance field, with an object type.
2301 if (alias_type->field() != NULL) {
2302 assert(!is_native_ptr, "native pointer op cannot use a java address");
2303 if (alias_type->field()->type()->is_klass()) {
2304 sharpened_klass = alias_type->field()->type()->as_klass();
2305 }
2306 }
2307
2308 // See if it is a narrow oop array.
2309 if (adr_type->isa_aryptr()) {
2310 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2311 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2312 if (elem_type != NULL) {
2313 sharpened_klass = elem_type->klass();
2314 }
2315 }
2316 }
2317
2318 if (sharpened_klass != NULL) {
2319 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2320
2321 #ifndef PRODUCT
2322 if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
2323 tty->print(" from base type: "); adr_type->dump();
2324 tty->print(" sharpened value: "); tjp->dump();
2325 }
2326 #endif
2327 // Sharpen the value type.
2328 return tjp;
2329 }
2330 return NULL;
2331 }
2332
2333 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2334 if (callee()->is_static()) return false; // caller must have the capability!
2335
2336 #ifndef PRODUCT
2337 {
2338 ResourceMark rm;
2339 // Check the signatures.
2340 ciSignature* sig = signature();
2341 #ifdef ASSERT
2342 if (!is_store) {
2343 // Object getObject(Object base, int/long offset), etc.
2344 BasicType rtype = sig->return_type()->basic_type();
2345 if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2346 rtype = T_ADDRESS; // it is really a C void*
2347 assert(rtype == type, "getter must return the expected value");
2348 if (!is_native_ptr) {
2349 assert(sig->count() == 2, "oop getter has 2 arguments");
2350 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2351 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2352 } else {
2439 // Try to categorize the address. If it comes up as TypeOopPtr::BOTTOM,
2440 // there was not enough information to nail it down.
2441 Compile::AliasType* alias_type = C->alias_type(adr_type);
2442 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2443
2444 // We will need memory barriers unless we can determine a unique
2445 // alias category for this reference. (Note: If for some reason
2446 // the barriers get omitted and the unsafe reference begins to "pollute"
2447 // the alias analysis of the rest of the graph, either Compile::can_alias
2448 // or Compile::must_alias will throw a diagnostic assert.)
2449 bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2450
2451 // If we are reading the value of the referent field of a Reference
2452 // object (either by using Unsafe directly or through reflection)
2453 // then, if G1 is enabled, we need to record the referent in an
2454 // SATB log buffer using the pre-barrier mechanism.
2455 bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store &&
2456 offset != top() && heap_base_oop != top();
2457
2458 if (!is_store && type == T_OBJECT) {
2459 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2460 if (tjp != NULL) {
2461 value_type = tjp;
2462 }
2463 }
2464
2465 // Null check on self without removing any arguments. The argument
2466 // null check technically happens in the wrong place, which can lead to
2467 // invalid stack traces when the primitive is inlined into a method
2468 // which handles NullPointerExceptions.
2469 _sp += nargs;
2470 do_null_check(receiver, T_OBJECT);
2471 _sp -= nargs;
2472 if (stopped()) {
2473 return true;
2474 }
2475 // Heap pointers get a null-check from the interpreter,
2476 // as a courtesy. However, this is not guaranteed by Unsafe,
2477 // and it is not possible to fully distinguish unintended nulls
2478 // from intended ones in this API.
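  // Illustrative cases (assumption): unsafe.getInt(null, addr) treats
  // addr as an absolute native address, while unsafe.getInt(obj, off)
  // is a heap access at obj+off; statically this code cannot always
  // tell an intended null base from an accidental one.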
2479
2480 if (is_volatile) {
2481 // We need to emit leading and trailing CPU membars (see below) in
2652 do_null_check(receiver, T_OBJECT);
2653 _sp -= nargs;
2654 if (stopped()) {
2655 return true;
2656 }
2657 }
2658
2659 // Generate the read or write prefetch
2660 Node *prefetch;
2661 if (is_store) {
2662 prefetch = new (C, 3) PrefetchWriteNode(i_o(), adr);
2663 } else {
2664 prefetch = new (C, 3) PrefetchReadNode(i_o(), adr);
2665 }
2666 prefetch->init_req(0, control());
2667 set_i_o(_gvn.transform(prefetch));
2668
2669 return true;
2670 }
2671
2672 //----------------------------inline_unsafe_load_store----------------------------
2673
2674 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
2675 // This basic scheme here is the same as inline_unsafe_access, but
2676 // differs in enough details that combining them would make the code
2677 // overly confusing. (This is a true fact! I originally combined
2678 // them, but even I was confused by it!) As much code/comments as
2679 // possible are retained from inline_unsafe_access though to make
2680 // the correspondences clearer. - dl
2681
2682 if (callee()->is_static()) return false; // caller must have the capability!
2683
2684 #ifndef PRODUCT
2685 BasicType rtype;
2686 {
2687 ResourceMark rm;
2688 ciSignature* sig = signature();
2689 rtype = sig->return_type()->basic_type();
2690 if (kind == LS_xadd || kind == LS_xchg) {
2691 // Check the signatures.
2692 #ifdef ASSERT
2693 assert(rtype == type, "get and set must return the expected type");
2694 assert(sig->count() == 3, "get and set has 3 arguments");
2695 assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2696 assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2697 assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2698 #endif // ASSERT
2699 } else if (kind == LS_cmpxchg) {
2700 // Check the signatures.
2701 #ifdef ASSERT
2702 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2703 assert(sig->count() == 4, "CAS has 4 arguments");
2704 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2705 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2706 #endif // ASSERT
2707 } else {
2708 ShouldNotReachHere();
2709 }
2710 }
2711 #endif //PRODUCT
2712
2713 // number of stack slots per value argument (1 or 2)
2714 int type_words = type2size[type];
2715
2716 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2717
2718 // Argument words: "this" plus oop plus offset (plus oldvalue) plus newvalue/delta;
2719 int nargs = 1 + 1 + 2 + ((kind == LS_cmpxchg) ? type_words : 0) + type_words;
2720
2721 // pop arguments: newval, oldval (cmpxchg only), offset, base, and receiver
2722 debug_only(int saved_sp = _sp);
2723 _sp += nargs;
2724 Node* newval = (type_words == 1) ? pop() : pop_pair();
2725 Node* oldval = (kind == LS_cmpxchg) ? ((type_words == 1) ? pop() : pop_pair()) : NULL;
2726 Node *offset = pop_pair();
2727 Node *base = pop();
2728 Node *receiver = pop();
2729 assert(saved_sp == _sp, "must have correct argument count");
2730
2731 // Null check receiver.
2732 _sp += nargs;
2733 do_null_check(receiver, T_OBJECT);
2734 _sp -= nargs;
2735 if (stopped()) {
2736 return true;
2737 }
2738
2739 // Build field offset expression.
2740 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2741 // to be plain byte offsets, which are also the same as those accepted
2742 // by oopDesc::field_base.
2743 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
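  // Illustrative reading of the assert above: a cookie of, say, 12
  // means the field lives at (address of base) + 12 bytes, i.e. the
  // same offsets that oopDesc::field_base accepts.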
2744 // 32-bit machines ignore the high half of long offsets
2745 offset = ConvL2X(offset);
2746 Node* adr = make_unsafe_address(base, offset);
2747 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2748
2749 // For CAS, unlike inline_unsafe_access, there seems no point in
2750 // trying to refine types. Just use the coarse types here.
2751 const Type *value_type = Type::get_const_basic_type(type);
2752 Compile::AliasType* alias_type = C->alias_type(adr_type);
2753 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2754
2755 if (kind == LS_xchg && type == T_OBJECT) {
2756 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2757 if (tjp != NULL) {
2758 value_type = tjp;
2759 }
2760 }
2761
2762 int alias_idx = C->get_alias_index(adr_type);
2763
2764 // Memory-model-wise, a LoadStore acts like a little synchronized
2765 // block, so needs barriers on each side. These don't translate
2766 // into actual barriers on most machines, but we still need the rest
2767 // of the compiler to respect ordering.
2768
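  // Shape of what is emitted below (sketch):
  //   MemBarRelease; MemBarCPUOrder;
  //   LoadStore node (GetAndAdd/GetAndSet/CompareAndSwap) + SCMemProj;
  //   MemBarCPUOrder; MemBarAcquire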
2769 insert_mem_bar(Op_MemBarRelease);
2770 insert_mem_bar(Op_MemBarCPUOrder);
2771
2772 // 4984716: MemBars must be inserted before this
2773 // memory node in order to avoid a false
2774 // dependency which will confuse the scheduler.
2775 Node *mem = memory(alias_idx);
2776
2777 // For now, we handle only those cases that actually exist: ints,
2778 // longs, and Object. Adding others should be straightforward.
2779 Node* load_store;
2780 switch(type) {
2781 case T_INT:
2782 if (kind == LS_xadd) {
2783 load_store = _gvn.transform(new (C, 4) GetAndAddINode(control(), mem, adr, newval, adr_type));
2784 } else if (kind == LS_xchg) {
2785 load_store = _gvn.transform(new (C, 4) GetAndSetINode(control(), mem, adr, newval, adr_type));
2786 } else if (kind == LS_cmpxchg) {
2787 load_store = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2788 } else {
2789 ShouldNotReachHere();
2790 }
2791 break;
2792 case T_LONG:
2793 if (kind == LS_xadd) {
2794 load_store = _gvn.transform(new (C, 4) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2795 } else if (kind == LS_xchg) {
2796 load_store = _gvn.transform(new (C, 4) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2797 } else if (kind == LS_cmpxchg) {
2798 load_store = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2799 } else {
2800 ShouldNotReachHere();
2801 }
2802 break;
2803 case T_OBJECT:
2804 // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2805 // could be delayed during Parse (for example, in adjust_map_after_if()).
2806 // Execute transformation here to avoid barrier generation in such case.
2807 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2808 newval = _gvn.makecon(TypePtr::NULL_PTR);
2809
2810 // Reference stores need a store barrier.
2811 pre_barrier(true /* do_load*/,
2812 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2813 NULL /* pre_val*/,
2814 T_OBJECT);
2815 #ifdef _LP64
2816 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2817 Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2818 if (kind == LS_xchg) {
2819 load_store = _gvn.transform(new (C, 4) GetAndSetNNode(control(), mem, adr,
2820 newval_enc, adr_type, value_type->make_narrowoop()));
2821 } else {
2822 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2823 Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2824 load_store = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
2825 newval_enc, oldval_enc));
2826 }
2827 } else
2828 #endif
2829 {
2830 if (kind == LS_xchg) {
2831 load_store = _gvn.transform(new (C, 4) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2832 } else {
2833 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2834 load_store = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2835 }
2836 }
2837 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2838 break;
2839 default:
2840 ShouldNotReachHere();
2841 break;
2842 }
2843
2844 // SCMemProjNodes represent the memory state of a LoadStore. Their
2845 // main role is to prevent LoadStore nodes from being optimized away
2846 // when their results aren't used.
2847 Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(load_store));
2848 set_memory(proj, alias_idx);
2849
2850 // Add the trailing membars surrounding the access
2851 insert_mem_bar(Op_MemBarCPUOrder);
2852 insert_mem_bar(Op_MemBarAcquire);
2853
2854 #ifdef _LP64
2855 if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
2856 load_store = _gvn.transform(new (C, 2) DecodeNNode(load_store, load_store->bottom_type()->make_ptr()));
2857 }
2858 #endif
2859
2860 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2861 push_node(load_store->bottom_type()->basic_type(), load_store);
2862 return true;
2863 }
2864
2865 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2866 // This is another variant of inline_unsafe_access, differing in
2867 // that it always issues a store-store ("release") barrier and ensures
2868 // store-atomicity (which only matters for "long").
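  // Illustrative Java-level counterpart (assumption):
  //   unsafe.putOrderedLong(obj, off, v);  // roughly AtomicLong.lazySet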
2869
2870 if (callee()->is_static()) return false; // caller must have the capability!
2871
2872 #ifndef PRODUCT
2873 {
2874 ResourceMark rm;
2875 // Check the signatures.
2876 ciSignature* sig = signature();
2877 #ifdef ASSERT
2878 BasicType rtype = sig->return_type()->basic_type();
2879 assert(rtype == T_VOID, "must return void");
2880 assert(sig->count() == 3, "has 3 arguments");
2881 assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
|