35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/callGenerator.hpp"
40 #include "opto/castnode.hpp"
41 #include "opto/cfgnode.hpp"
42 #include "opto/convertnode.hpp"
43 #include "opto/countbitsnode.hpp"
44 #include "opto/intrinsicnode.hpp"
45 #include "opto/idealKit.hpp"
46 #include "opto/mathexactnode.hpp"
47 #include "opto/movenode.hpp"
48 #include "opto/mulnode.hpp"
49 #include "opto/narrowptrnode.hpp"
50 #include "opto/opaquenode.hpp"
51 #include "opto/parse.hpp"
52 #include "opto/runtime.hpp"
53 #include "opto/rootnode.hpp"
54 #include "opto/subnode.hpp"
55 #include "prims/nativeLookup.hpp"
56 #include "prims/unsafe.hpp"
57 #include "runtime/objectMonitor.hpp"
58 #include "runtime/sharedRuntime.hpp"
59 #include "utilities/macros.hpp"
60
61
62 class LibraryIntrinsic : public InlineCallGenerator {
63 // Extend the set of intrinsics known to the runtime:
64 public:
65 private:
66 bool _is_virtual;
67 bool _does_virtual_dispatch;
68 int8_t _predicates_count; // Intrinsic is predicated by several conditions
69 int8_t _last_predicate; // Last generated predicate
70 vmIntrinsics::ID _intrinsic_id;
71
72 public:
73 LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
74 : InlineCallGenerator(m),
146
147 void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
148 void set_result(RegionNode* region, PhiNode* value);
149 Node* result() { return _result; }
150
151 virtual int reexecute_sp() { return _reexecute_sp; }
152
153 // Helper functions to inline natives
154 Node* generate_guard(Node* test, RegionNode* region, float true_prob);
155 Node* generate_slow_guard(Node* test, RegionNode* region);
156 Node* generate_fair_guard(Node* test, RegionNode* region);
157 Node* generate_negative_guard(Node* index, RegionNode* region,
158 // resulting CastII of index:
159 Node* *pos_index = NULL);
160 Node* generate_limit_guard(Node* offset, Node* subseq_length,
161 Node* array_length,
162 RegionNode* region);
163 void generate_string_range_check(Node* array, Node* offset,
164 Node* length, bool char_count);
165 Node* generate_current_thread(Node* &tls_output);
166 Node* load_mirror_from_klass(Node* klass);
167 Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
168 RegionNode* region, int null_path,
169 int offset);
170 Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
171 RegionNode* region, int null_path) {
172 int offset = java_lang_Class::klass_offset_in_bytes();
173 return load_klass_from_mirror_common(mirror, never_see_null,
174 region, null_path,
175 offset);
176 }
177 Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
178 RegionNode* region, int null_path) {
179 int offset = java_lang_Class::array_klass_offset_in_bytes();
180 return load_klass_from_mirror_common(mirror, never_see_null,
181 region, null_path,
182 offset);
183 }
184 Node* generate_access_flags_guard(Node* kls,
185 int modifier_mask, int modifier_bits,
186 RegionNode* region);
187 Node* generate_interface_guard(Node* kls, RegionNode* region);
188 Node* generate_array_guard(Node* kls, RegionNode* region) {
189 return generate_array_guard_common(kls, region, false, false);
190 }
191 Node* generate_non_array_guard(Node* kls, RegionNode* region) {
192 return generate_array_guard_common(kls, region, false, true);
193 }
194 Node* generate_objArray_guard(Node* kls, RegionNode* region) {
195 return generate_array_guard_common(kls, region, true, false);
196 }
197 Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
198 return generate_array_guard_common(kls, region, true, true);
199 }
200 Node* generate_array_guard_common(Node* kls, RegionNode* region,
201 bool obj_array, bool not_array);
202 Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
203 CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
204 bool is_virtual = false, bool is_static = false);
205 CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
206 return generate_method_call(method_id, false, true);
207 }
208 CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
209 return generate_method_call(method_id, true, false);
210 }
211 Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
212 Node * field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
213
214 Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
215 bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
216 bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
217 bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
218 Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
219 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
220 bool inline_string_indexOfChar();
221 bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
236 bool inline_math_multiplyExactL();
237 bool inline_math_multiplyHigh();
238 bool inline_math_negateExactI();
239 bool inline_math_negateExactL();
240 bool inline_math_subtractExactI(bool is_decrement);
241 bool inline_math_subtractExactL(bool is_decrement);
242 bool inline_min_max(vmIntrinsics::ID id);
243 bool inline_notify(vmIntrinsics::ID id);
244 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
245 // This returns Type::AnyPtr, RawPtr, or OopPtr.
246 int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
247 Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);
248
249 typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
250 DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
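// Example (see the intrinsic dispatch switch further below): the plain _getInt/_putInt
// intrinsics are expanded with AccessKind Relaxed, while _getIntVolatile/_putIntVolatile
// use Volatile; mo_decorator_for_access_kind() translates the kind into the
// memory-ordering decorators applied to the access.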
251 bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
252 static bool klass_needs_init_guard(Node* kls);
253 bool inline_unsafe_allocate();
254 bool inline_unsafe_newArray(bool uninitialized);
255 bool inline_unsafe_copyMemory();
256 bool inline_native_currentThread();
257
258 bool inline_native_time_funcs(address method, const char* funcName);
259 #ifdef JFR_HAVE_INTRINSICS
260 bool inline_native_classID();
261 bool inline_native_getEventWriter();
262 #endif
263 bool inline_native_isInterrupted();
264 bool inline_native_Class_query(vmIntrinsics::ID id);
265 bool inline_native_subtype_check();
266 bool inline_native_getLength();
267 bool inline_array_copyOf(bool is_copyOfRange);
268 bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
269 bool inline_preconditions_checkIndex();
270 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
271 bool inline_native_clone(bool is_virtual);
272 bool inline_native_Reflection_getCallerClass();
273 // Helper function for inlining native object hash method
274 bool inline_native_hashcode(bool is_virtual, bool is_static);
275 bool inline_native_getClass();
572 case vmIntrinsics::_indexOfU: return inline_string_indexOf(StrIntrinsicNode::UU);
573 case vmIntrinsics::_indexOfUL: return inline_string_indexOf(StrIntrinsicNode::UL);
574 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
575 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
576 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
577 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar();
578
579 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
580 case vmIntrinsics::_equalsU: return inline_string_equals(StrIntrinsicNode::UU);
581
582 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
583 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
584 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
585 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
586
587 case vmIntrinsics::_compressStringC:
588 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
589 case vmIntrinsics::_inflateStringC:
590 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
591
592 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
593 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
594 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
595 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
596 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
597 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
598 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
599 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
600 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
601
602 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
603 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
604 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
605 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
606 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
607 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
608 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
609 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
610 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
611
612 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
613 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
614 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
615 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
616 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
617 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
618 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
619 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
620 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
621
622 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
623 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
624 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
625 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
626 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
627 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
628 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
629 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
630 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
2343 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2344 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2345
2346 if (type == T_OBJECT || type == T_ARRAY) {
2347 decorators |= ON_UNKNOWN_OOP_REF;
2348 }
2349
2350 if (unaligned) {
2351 decorators |= C2_UNALIGNED;
2352 }
2353
2354 #ifndef PRODUCT
2355 {
2356 ResourceMark rm;
2357 // Check the signatures.
2358 ciSignature* sig = callee()->signature();
2359 #ifdef ASSERT
2360 if (!is_store) {
2361 // Object getReference(Object base, int/long offset), etc.
2362 BasicType rtype = sig->return_type()->basic_type();
2363 assert(rtype == type, "getter must return the expected value");
2364 assert(sig->count() == 2, "oop getter has 2 arguments");
2365 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2366 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2367 } else {
2368 // void putReference(Object base, int/long offset, Object x), etc.
2369 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2370 assert(sig->count() == 3, "oop putter has 3 arguments");
2371 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2372 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2373 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2374 assert(vtype == type, "putter must accept the expected value");
2375 }
2376 #endif // ASSERT
2377 }
2378 #endif //PRODUCT
2379
2380 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2381
2382 Node* receiver = argument(0); // type: oop
2383
2384 // Build address expression.
2385 Node* adr;
2386 Node* heap_base_oop = top();
2387 Node* offset = top();
2388 Node* val;
2389
2390 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2391 Node* base = argument(1); // type: oop
2392 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2393 offset = argument(2); // type: long
2394 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2395 // to be plain byte offsets, which are also the same as those accepted
2396 // by oopDesc::field_addr.
2397 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2398 "fieldOffset must be byte-scaled");
2399 // 32-bit machines ignore the high half!
2400 offset = ConvL2X(offset);
2401 adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2402
2403 if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2404 heap_base_oop = base;
2405 } else if (type == T_OBJECT) {
2406 return false; // off-heap oop accesses are not supported
2407 }
2408
2409 // Can base be NULL? Otherwise, always on-heap access.
2410 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2411
2412 if (!can_access_non_heap) {
2413 decorators |= IN_HEAP;
2414 }
2415
2416 val = is_store ? argument(4) : NULL;
2417
2418 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2419
2420 // Try to categorize the address.
2421 Compile::AliasType* alias_type = C->alias_type(adr_type);
2422 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2423
2424 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2425 alias_type->adr_type() == TypeAryPtr::RANGE) {
2426 return false; // not supported
2427 }
2428
2429 bool mismatched = false;
2430 BasicType bt = alias_type->basic_type();
2431 if (bt != T_ILLEGAL) {
2432 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2433 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2434 // Alias type doesn't differentiate between byte[] and boolean[].
2435 // Use address type to get the element type.
2436 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2437 }
2438 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2439 // accessing an array field with getReference is not a mismatch
2440 bt = T_OBJECT;
2441 }
2442 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2443 // Don't intrinsify mismatched object accesses
2444 return false;
2445 }
2446 mismatched = (bt != type);
2447 } else if (alias_type->adr_type()->isa_oopptr()) {
2448 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2449 }
2450
2451 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2452
2453 if (mismatched) {
2454 decorators |= C2_MISMATCHED;
2455 }
2456
2457 // First guess at the value type.
2458 const Type *value_type = Type::get_const_basic_type(type);
2459
2460 // Figure out the memory ordering.
2461 decorators |= mo_decorator_for_access_kind(kind);
2462
2463 if (!is_store && type == T_OBJECT) {
2464 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2465 if (tjp != NULL) {
2466 value_type = tjp;
2467 }
2468 }
2469
2470 receiver = null_check(receiver);
2471 if (stopped()) {
2472 return true;
2473 }
2474 // Heap pointers get a null-check from the interpreter,
2475 // as a courtesy. However, this is not guaranteed by Unsafe,
2476 // and it is not possible to fully distinguish unintended nulls
2477 // from intended ones in this API.
2478
2479 if (!is_store) {
2480 Node* p = NULL;
2481 // Try to constant fold a load from a constant field
2482 ciField* field = alias_type->field();
2483 if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2484 // final or stable field
2485 p = make_constant_from_field(field, heap_base_oop);
2486 }
2487
2488 if (p == NULL) { // Could not constant fold the load
2489 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2490 // Normalize the value returned by getBoolean in the following cases
2491 if (type == T_BOOLEAN &&
2492 (mismatched ||
2493 heap_base_oop == top() || // - heap_base_oop is NULL or
2494 (can_access_non_heap && field == NULL)) // - heap_base_oop is potentially NULL
2495 // and the unsafe access is made at a large offset
2496 // (i.e., larger than the maximum offset necessary for any
2497 // field access)
2498 ) {
2499 IdealKit ideal = IdealKit(this);
2500 #define __ ideal.
2501 IdealVariable normalized_result(ideal);
2502 __ declarations_done();
2503 __ set(normalized_result, p);
2504 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2505 __ set(normalized_result, ideal.ConI(1));
2506 ideal.end_if();
2507 final_sync(ideal);
2508 p = __ value(normalized_result);
2509 #undef __
2510 }
2511 }
2512 if (type == T_ADDRESS) {
2513 p = gvn().transform(new CastP2XNode(NULL, p));
2514 p = ConvX2UL(p);
2515 }
2516 // The load node has the control of the preceding MemBarCPUOrder. All
2517 // following nodes will have the control of the MemBarCPUOrder inserted at
2518 // the end of this method. So, pushing the load onto the stack at a later
2519 // point is fine.
2520 set_result(p);
2521 } else {
2522 if (bt == T_ADDRESS) {
2523 // Repackage the long as a pointer.
2524 val = ConvL2X(val);
2525 val = gvn().transform(new CastX2PNode(val));
2526 }
2527 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2528 }
2529
2530 return true;
2531 }
2532
2533 //----------------------------inline_unsafe_load_store----------------------------
2534 // This method serves a couple of different customers (depending on LoadStoreKind):
2535 //
2536 // LS_cmp_swap:
2537 //
2538 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2539 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2540 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2541 //
2542 // LS_cmp_swap_weak:
2543 //
2544 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2545 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2546 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2547 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2548 //
3045 Node* fast_mem = slow_call->in(TypeFunc::Memory);
3046
3047 // These two phis are pre-filled with copies of the fast IO and Memory
3048 PhiNode* result_mem = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3049 PhiNode* result_io = PhiNode::make(result_rgn, fast_io, Type::ABIO);
3050
3051 result_rgn->init_req(slow_result_path, control());
3052 result_io ->init_req(slow_result_path, i_o());
3053 result_mem->init_req(slow_result_path, reset_memory());
3054 result_val->init_req(slow_result_path, slow_val);
3055
3056 set_all_memory(_gvn.transform(result_mem));
3057 set_i_o( _gvn.transform(result_io));
3058 }
3059
3060 C->set_has_split_ifs(true); // Has chance for split-if optimization
3061 set_result(result_rgn, result_val);
3062 return true;
3063 }
3064
3065 //---------------------------load_mirror_from_klass----------------------------
3066 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3067 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3068 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3069 Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3070 // mirror = ((OopHandle)mirror)->resolve();
3071 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3072 }
3073
3074 //-----------------------load_klass_from_mirror_common-------------------------
3075 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3076 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3077 // and branch to the given path on the region.
3078 // If never_see_null, take an uncommon trap on null, so we can optimistically
3079 // compile for the non-null case.
3080 // If the region is NULL, force never_see_null = true.
3081 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3082 bool never_see_null,
3083 RegionNode* region,
3084 int null_path,
3085 int offset) {
3086 if (region == NULL) never_see_null = true;
3087 Node* p = basic_plus_adr(mirror, offset);
3088 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3089 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3090 Node* null_ctl = top();
3091 kls = null_check_oop(kls, &null_ctl, never_see_null);
3092 if (region != NULL) {
3093 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3100
3101 //--------------------(inline_native_Class_query helpers)---------------------
3102 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3103 // Fall through if (mods & mask) == bits, take the guard otherwise.
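// For example, generate_interface_guard() below passes mask = JVM_ACC_INTERFACE and
// bits = 0: control falls through for non-interface klasses, and the guard path is
// taken when the klass is an interface.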
3104 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3105 // Branch around if the given klass has the given modifier bit set.
3106 // Like generate_guard, adds a new path onto the region.
3107 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3108 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3109 Node* mask = intcon(modifier_mask);
3110 Node* bits = intcon(modifier_bits);
3111 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3112 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3113 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3114 return generate_fair_guard(bol, region);
3115 }
3116 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3117 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3118 }
3119
3120 //-------------------------inline_native_Class_query-------------------
3121 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3122 const Type* return_type = TypeInt::BOOL;
3123 Node* prim_return_value = top(); // what happens if it's a primitive class?
3124 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3125 bool expect_prim = false; // most of these guys expect to work on refs
3126
3127 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3128
3129 Node* mirror = argument(0);
3130 Node* obj = top();
3131
3132 switch (id) {
3133 case vmIntrinsics::_isInstance:
3134 // nothing is an instance of a primitive type
3135 prim_return_value = intcon(0);
3136 obj = argument(1);
3137 break;
3138 case vmIntrinsics::_getModifiers:
3139 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3284 // Fall-through is the normal case of a query to a real class.
3285 phi->init_req(1, query_value);
3286 region->init_req(1, control());
3287
3288 C->set_has_split_ifs(true); // Has chance for split-if optimization
3289 set_result(region, phi);
3290 return true;
3291 }
3292
3293 //-------------------------inline_Class_cast-------------------
3294 bool LibraryCallKit::inline_Class_cast() {
3295 Node* mirror = argument(0); // Class
3296 Node* obj = argument(1);
3297 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3298 if (mirror_con == NULL) {
3299 return false; // dead path (mirror->is_top()).
3300 }
3301 if (obj == NULL || obj->is_top()) {
3302 return false; // dead path
3303 }
3304 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3305
3306 // First, see if Class.cast() can be folded statically.
3307 // java_mirror_type() returns non-null for compile-time Class constants.
3308 ciType* tm = mirror_con->java_mirror_type();
3309 if (tm != NULL && tm->is_klass() &&
3310 tp != NULL && tp->klass() != NULL) {
3311 if (!tp->klass()->is_loaded()) {
3312 // Don't use intrinsic when class is not loaded.
3313 return false;
3314 } else {
3315 int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
3316 if (static_res == Compile::SSC_always_true) {
3317 // isInstance() is true - fold the code.
3318 set_result(obj);
3319 return true;
3320 } else if (static_res == Compile::SSC_always_false) {
3321 // Don't use intrinsic, have to throw ClassCastException.
3322 // If the reference is null, the non-intrinsic bytecode will
3323 // be optimized appropriately.
3324 return false;
3325 }
3326 }
3327 }
3328
3329 // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
3330 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3331 return false;
3332 }
3333
3334 // Generate dynamic checks.
3335 // Class.cast() is the Java implementation of the _checkcast bytecode.
3463 phi->set_req(_ref_subtype_path, intcon(1));
3464
3465 // pull together the cases:
3466 assert(region->req() == PATH_LIMIT, "sane region");
3467 for (uint i = 1; i < region->req(); i++) {
3468 Node* ctl = region->in(i);
3469 if (ctl == NULL || ctl == top()) {
3470 region->set_req(i, top());
3471 phi ->set_req(i, top());
3472 } else if (phi->in(i) == NULL) {
3473 phi->set_req(i, intcon(0)); // all other paths produce 'false'
3474 }
3475 }
3476
3477 set_control(_gvn.transform(region));
3478 set_result(_gvn.transform(phi));
3479 return true;
3480 }
3481
3482 //---------------------generate_array_guard_common------------------------
3483 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3484 bool obj_array, bool not_array) {
3485
3486 if (stopped()) {
3487 return NULL;
3488 }
3489
3490 // If obj_array/non_array==false/false:
3491 // Branch around if the given klass is in fact an array (either obj or prim).
3492 // If obj_array/non_array==false/true:
3493 // Branch around if the given klass is not an array klass of any kind.
3494 // If obj_array/non_array==true/true:
3495 // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3496 // If obj_array/non_array==true/false:
3497 // Branch around if the kls is an oop array (Object[] or subtype)
3498 //
3499 // Like generate_guard, adds a new path onto the region.
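// For example, inline_native_clone() below calls generate_objArray_guard(obj_klass, NULL)
// (obj_array/not_array == true/false); when the returned control is non-NULL, it is the
// path on which the klass is known to be an oop array.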
3500 jint layout_con = 0;
3501 Node* layout_val = get_layout_helper(kls, layout_con);
3502 if (layout_val == NULL) {
3503 bool query = (obj_array
3504 ? Klass::layout_helper_is_objArray(layout_con)
3505 : Klass::layout_helper_is_array(layout_con));
3506 if (query == not_array) {
3507 return NULL; // never a branch
3508 } else { // always a branch
3509 Node* always_branch = control();
3510 if (region != NULL)
3511 region->add_req(always_branch);
3512 set_control(top());
3513 return always_branch;
3514 }
3515 }
3516 // Now test the correct condition.
3517 jint nval = (obj_array
3518 ? (jint)(Klass::_lh_array_tag_type_value
3519 << Klass::_lh_array_tag_shift)
3520 : Klass::_lh_neutral_value);
3521 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3522 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
3523 // invert the test if we are looking for a non-array
3524 if (not_array) btest = BoolTest(btest).negate();
3525 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3526 return generate_fair_guard(bol, region);
3527 }
3528
3529
3530 //-----------------------inline_native_newArray--------------------------
3531 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3532 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3533 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3534 Node* mirror;
3535 Node* count_val;
3536 if (uninitialized) {
3537 mirror = argument(1);
3538 count_val = argument(2);
3539 } else {
3540 mirror = argument(0);
3541 count_val = argument(1);
3542 }
3543
3544 mirror = null_check(mirror);
3545 // If the mirror is dead, only the null path is taken.
3546 if (stopped()) return true;
3547
3548 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3549 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3550 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3551 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3627 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3628 Node* result = load_array_length(array);
3629
3630 C->set_has_split_ifs(true); // Has chance for split-if optimization
3631 set_result(result);
3632 return true;
3633 }
3634
3635 //------------------------inline_array_copyOf----------------------------
3636 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
3637 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
3638 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3639 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3640
3641 // Get the arguments.
3642 Node* original = argument(0);
3643 Node* start = is_copyOfRange? argument(1): intcon(0);
3644 Node* end = is_copyOfRange? argument(2): argument(1);
3645 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3646
3647 Node* newcopy = NULL;
3648
3649 // Set the original stack and the reexecute bit for the interpreter to reexecute
3650 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3651 { PreserveReexecuteState preexecs(this);
3652 jvms()->set_should_reexecute(true);
3653
3654 array_type_mirror = null_check(array_type_mirror);
3655 original = null_check(original);
3656
3657 // Check if a null path was taken unconditionally.
3658 if (stopped()) return true;
3659
3660 Node* orig_length = load_array_length(original);
3661
3662 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3663 klass_node = null_check(klass_node);
3664
3665 RegionNode* bailout = new RegionNode(1);
3666 record_for_igvn(bailout);
3667
3668 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3669 // Bail out if that is so.
3670 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3671 if (not_objArray != NULL) {
3672 // Improve the klass node's type from the new optimistic assumption:
3673 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3674 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3675 Node* cast = new CastPPNode(klass_node, akls);
3676 cast->init_req(0, control());
3677 klass_node = _gvn.transform(cast);
3678 }
3679
3680 // Bail out if either start or end is negative.
3681 generate_negative_guard(start, bailout, &start);
3682 generate_negative_guard(end, bailout, &end);
3683
3684 Node* length = end;
3685 if (_gvn.type(start) != TypeInt::ZERO) {
3686 length = _gvn.transform(new SubINode(end, start));
3687 }
3688
3689 // Bail out if length is negative.
3690 // Without this, new_array would throw
3691 // NegativeArraySizeException, but IllegalArgumentException is what
3692 // should be thrown.
3693 generate_negative_guard(length, bailout, &length);
3694
3695 if (bailout->req() > 1) {
3696 PreserveJVMState pjvms(this);
3697 set_control(_gvn.transform(bailout));
3698 uncommon_trap(Deoptimization::Reason_intrinsic,
3699 Deoptimization::Action_maybe_recompile);
3700 }
3701
3702 if (!stopped()) {
3703 // How many elements will we copy from the original?
3704 // The answer is MinI(orig_length - start, length).
3705 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3706 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3707
3708 original = access_resolve(original, ACCESS_READ);
3709
3710 // Generate a direct call to the right arraycopy function(s).
3711 // We know the copy is disjoint but we might not know if the
3712 // oop stores need checking.
3713 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
3714 // This will fail a store-check if x contains any non-nulls.
3715
3716 // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3717 // loads/stores but it is legal only if we're sure the
3718 // Arrays.copyOf would succeed. So we need all input arguments
3719 // to the copyOf to be validated, including that the copy to the
3720 // new array won't trigger an ArrayStoreException. That subtype
3721 // check can be optimized if we know something about the type of
3722 // the input array from type speculation.
3723 if (_gvn.type(klass_node)->singleton()) {
3724 ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3725 ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3726
3727 int test = C->static_subtype_check(superk, subk);
3728 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3729 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3730 if (t_original->speculative_type() != NULL) {
3731 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3732 }
3733 }
3734 }
3735
3736 bool validated = false;
3737 // Reason_class_check rather than Reason_intrinsic because we
3738 // want to intrinsify even if this traps.
3739 if (!too_many_traps(Deoptimization::Reason_class_check)) {
3740 Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
3741 klass_node);
3742
3743 if (not_subtype_ctrl != top()) {
3744 PreserveJVMState pjvms(this);
3745 set_control(not_subtype_ctrl);
3746 uncommon_trap(Deoptimization::Reason_class_check,
3747 Deoptimization::Action_make_not_entrant);
3748 assert(stopped(), "Should be stopped");
3749 }
3750 validated = true;
3751 }
3752
3753 if (!stopped()) {
3754 newcopy = new_array(klass_node, length, 0); // no arguments to push
3755
3756 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
3757 load_object_klass(original), klass_node);
3758 if (!is_copyOfRange) {
3759 ac->set_copyof(validated);
3760 } else {
3761 ac->set_copyofrange(validated);
3762 }
3763 Node* n = _gvn.transform(ac);
3764 if (n == ac) {
3765 ac->connect_outputs(this);
3766 } else {
3767 assert(validated, "shouldn't transform if all arguments not validated");
3768 set_all_memory(n);
3769 }
3770 }
3771 }
3772 } // original reexecute is set back here
3773
3774 C->set_has_split_ifs(true); // Has chance for split-if optimization
3775 if (!stopped()) {
3776 set_result(newcopy);
3777 }
3861 set_edges_for_java_call(slow_call);
3862 return slow_call;
3863 }
3864
3865
3866 /**
3867 * Build special case code for calls to hashCode on an object. This call may
3868 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
3869 * slightly different code.
3870 */
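// For example, in the virtual case the code below loads the receiver's klass and calls
// generate_virtual_guard(obj_klass, slow_region): receivers whose vtable entry for
// hashCode() is not the native Object.hashCode() are routed to the slow path.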
3871 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3872 assert(is_static == callee()->is_static(), "correct intrinsic selection");
3873 assert(!(is_virtual && is_static), "either virtual, special, or static");
3874
3875 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3876
3877 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3878 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
3879 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3880 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3881 Node* obj = NULL;
3882 if (!is_static) {
3883 // Check for hashing null object
3884 obj = null_check_receiver();
3885 if (stopped()) return true; // unconditionally null
3886 result_reg->init_req(_null_path, top());
3887 result_val->init_req(_null_path, top());
3888 } else {
3889 // Do a null check, and return zero if null.
3890 // System.identityHashCode(null) == 0
3891 obj = argument(0);
3892 Node* null_ctl = top();
3893 obj = null_check_oop(obj, &null_ctl);
3894 result_reg->init_req(_null_path, null_ctl);
3895 result_val->init_req(_null_path, _gvn.intcon(0));
3896 }
3897
3898 // Unconditionally null? Then return right away.
3899 if (stopped()) {
3900 set_control( result_reg->in(_null_path));
3901 if (!stopped())
3902 set_result(result_val->in(_null_path));
3903 return true;
3904 }
3905
3906 // We only go to the fast case code if we pass a number of guards. The
3907 // paths which do not pass are accumulated in the slow_region.
3908 RegionNode* slow_region = new RegionNode(1);
3909 record_for_igvn(slow_region);
3910
3911 // If this is a virtual call, we generate a funny guard. We pull out
3912 // the vtable entry corresponding to hashCode() from the target object.
3913 // If the target method which we are calling happens to be the native
3914 // Object hashCode() method, we pass the guard. We do not need this
3915 // guard for non-virtual calls -- the caller is known to be the native
3916 // Object hashCode().
3917 if (is_virtual) {
3918 // After null check, get the object's klass.
3919 Node* obj_klass = load_object_klass(obj);
3920 generate_virtual_guard(obj_klass, slow_region);
3921 }
3922
3923 // Get the header out of the object, use LoadMarkNode when available
3924 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3925 // The control of the load must be NULL. Otherwise, the load can move before
3926 // the null check after castPP removal.
3927 Node* no_ctrl = NULL;
3928 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3929
3930 // Test the header to see if it is unlocked.
3977 // this->control() comes from set_results_for_java_call
3978 result_reg->init_req(_slow_path, control());
3979 result_val->init_req(_slow_path, slow_result);
3980 result_io ->set_req(_slow_path, i_o());
3981 result_mem ->set_req(_slow_path, reset_memory());
3982 }
3983
3984 // Return the combined state.
3985 set_i_o( _gvn.transform(result_io) );
3986 set_all_memory( _gvn.transform(result_mem));
3987
3988 set_result(result_reg, result_val);
3989 return true;
3990 }
3991
3992 //---------------------------inline_native_getClass----------------------------
3993 // public final native Class<?> java.lang.Object.getClass();
3994 //
3995 // Build special case code for calls to getClass on an object.
3996 bool LibraryCallKit::inline_native_getClass() {
3997 Node* obj = null_check_receiver();
3998 if (stopped()) return true;
3999 set_result(load_mirror_from_klass(load_object_klass(obj)));
4000 return true;
4001 }
4002
4003 //-----------------inline_native_Reflection_getCallerClass---------------------
4004 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4005 //
4006 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4007 //
4008 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4009 // in that it must skip particular security frames and checks for
4010 // caller sensitive methods.
4011 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4012 #ifndef PRODUCT
4013 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4014 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4015 }
4016 #endif
4017
4234 Node* raw_obj = alloc_obj->in(1);
4235 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4236
4237 AllocateNode* alloc = NULL;
4238 if (ReduceBulkZeroing) {
4239 // We will be completely responsible for initializing this object -
4240 // mark Initialize node as complete.
4241 alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4242 // The object was just allocated - there should not be any stores!
4243 guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4244 // Mark as complete_with_arraycopy so that on AllocateNode
4245 // expansion, we know this AllocateNode is initialized by an array
4246 // copy and a StoreStore barrier exists after the array copy.
4247 alloc->initialization()->set_complete_with_arraycopy();
4248 }
4249
4250 // Copy the fastest available way.
4251 // TODO: generate field copies for small objects instead.
4252 Node* size = _gvn.transform(obj_size);
4253
4254 access_clone(obj, alloc_obj, size, is_array);
4255
4256 // Do not let reads from the cloned object float above the arraycopy.
4257 if (alloc != NULL) {
4258 // Do not let stores that initialize this object be reordered with
4259 // a subsequent store that would make this object accessible by
4260 // other threads.
4261 // Record what AllocateNode this StoreStore protects so that
4262 // escape analysis can go from the MemBarStoreStoreNode to the
4263 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4264 // based on the escape status of the AllocateNode.
4265 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
4266 } else {
4267 insert_mem_bar(Op_MemBarCPUOrder);
4268 }
4269 }
4270
4271 //------------------------inline_native_clone----------------------------
4272 // protected native Object java.lang.Object.clone();
4273 //
4274 // Here are the simple edge cases:
4277 // not cloneable or finalizer => slow path to out-of-line Object.clone
4278 //
4279 // The general case has two steps, allocation and copying.
4280 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4281 //
4282 // Copying also has two cases, oop arrays and everything else.
4283 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4284 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4285 //
4286 // These steps fold up nicely if and when the cloned object's klass
4287 // can be sharply typed as an object array, a type array, or an instance.
4288 //
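// For example, when the GC requires barriers for the copy, oop arrays take the
// _objArray_path below (an ArrayCopyNode marked with set_cloneoop()); other arrays are
// copied via copy_to_clone() on the _array_path.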
4289 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4290 PhiNode* result_val;
4291
4292 // Set the reexecute bit for the interpreter to reexecute
4293 // the bytecode that invokes Object.clone if deoptimization happens.
4294 { PreserveReexecuteState preexecs(this);
4295 jvms()->set_should_reexecute(true);
4296
4297 Node* obj = null_check_receiver();
4298 if (stopped()) return true;
4299
4300 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4301
4302 // If we are going to clone an instance, we need its exact type to
4303 // know the number and types of fields to convert the clone to
4304 // loads/stores. Maybe a speculative type can help us.
4305 if (!obj_type->klass_is_exact() &&
4306 obj_type->speculative_type() != NULL &&
4307 obj_type->speculative_type()->is_instance_klass()) {
4308 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4309 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4310 !spec_ik->has_injected_fields()) {
4311 ciKlass* k = obj_type->klass();
4312 if (!k->is_instance_klass() ||
4313 k->as_instance_klass()->is_interface() ||
4314 k->as_instance_klass()->has_subklass()) {
4315 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4316 }
4317 }
4318 }
4319
4320 Node* obj_klass = load_object_klass(obj);
4321 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4322 const TypeOopPtr* toop = ((tklass != NULL)
4323 ? tklass->as_instance_type()
4324 : TypeInstPtr::NOTNULL);
4325
4326 // Conservatively insert a memory barrier on all memory slices.
4327 // Do not let writes into the original float below the clone.
4328 insert_mem_bar(Op_MemBarCPUOrder);
4329
4330 // paths into result_reg:
4331 enum {
4332 _slow_path = 1, // out-of-line call to clone method (virtual or not)
4333 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4334 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4335 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4336 PATH_LIMIT
4337 };
4338 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4339 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4340 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
4341 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4342 record_for_igvn(result_reg);
4343
4344 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4345 if (array_ctl != NULL) {
4346 // It's an array.
4347 PreserveJVMState pjvms(this);
4348 set_control(array_ctl);
4349 Node* obj_length = load_array_length(obj);
4350 Node* obj_size = NULL;
4351 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
4352
4353 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4354 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
4355 // If it is an oop array, it requires very special treatment,
4356 // because gc barriers are required when accessing the array.
4357 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4358 if (is_obja != NULL) {
4359 PreserveJVMState pjvms2(this);
4360 set_control(is_obja);
4361 obj = access_resolve(obj, ACCESS_READ);
4362 // Generate a direct call to the right arraycopy function(s).
4363 Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4364 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
4365 ac->set_cloneoop();
4366 Node* n = _gvn.transform(ac);
4367 assert(n == ac, "cannot disappear");
4368 ac->connect_outputs(this);
4369
4370 result_reg->init_req(_objArray_path, control());
4371 result_val->init_req(_objArray_path, alloc_obj);
4372 result_i_o ->set_req(_objArray_path, i_o());
4373 result_mem ->set_req(_objArray_path, reset_memory());
4374 }
4375 }
4376 // Otherwise, there are no barriers to worry about.
4377 // (We can dispense with card marks if we know the allocation
4378 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
4379 // causes the non-eden paths to take compensating steps to
4380 // simulate a fresh allocation, so that no further
4381 // card marks are required in compiled code to initialize
4382 // the object.)
4383
4384 if (!stopped()) {
4385 copy_to_clone(obj, alloc_obj, obj_size, true);
4386
4387 // Present the results of the copy.
4388 result_reg->init_req(_array_path, control());
4389 result_val->init_req(_array_path, alloc_obj);
4390 result_i_o ->set_req(_array_path, i_o());
4391 result_mem ->set_req(_array_path, reset_memory());
4392 }
4393 }
4394
4395 // We only go to the instance fast case code if we pass a number of guards.
4396 // The paths which do not pass are accumulated in the slow_region.
4397 RegionNode* slow_region = new RegionNode(1);
4398 record_for_igvn(slow_region);
4399 if (!stopped()) {
4400 // It's an instance (we did array above). Make the slow-path tests.
4401 // If this is a virtual call, we generate a funny guard. We grab
4402 // the vtable entry corresponding to clone() from the target object.
4403 // If the target method which we are calling happens to be the
4404 // Object clone() method, we pass the guard. We do not need this
4405 // guard for non-virtual calls; the caller is known to be the native
4406 // Object clone().
4407 if (is_virtual) {
4408 generate_virtual_guard(obj_klass, slow_region);
4409 }
4410
4411 // The object must be easily cloneable and must not have a finalizer.
4412 // Both of these conditions may be checked in a single test.
4413 // We could optimize the test further, but we don't care.
4414 generate_access_flags_guard(obj_klass,
4415 // Test both conditions:
4416 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4417 // Must be cloneable but not finalizer:
4418 JVM_ACC_IS_CLONEABLE_FAST,
4539 // array in the heap that GCs wouldn't expect. Move the allocation
4540 // after the traps so we don't allocate the array if we
4541 // deoptimize. This is possible because tightly_coupled_allocation()
4542 // guarantees there's no observer of the allocated array at this point
4543 // and the control flow is simple enough.
4544 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4545 int saved_reexecute_sp, uint new_idx) {
4546 if (saved_jvms != NULL && !stopped()) {
4547 assert(alloc != NULL, "only with a tightly coupled allocation");
4548 // restore JVM state to the state at the arraycopy
4549 saved_jvms->map()->set_control(map()->control());
4550 assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4551 assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4552 // If we've improved the types of some nodes (null check) while
4553 // emitting the guards, propagate them to the current state
4554 map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4555 set_jvms(saved_jvms);
4556 _reexecute_sp = saved_reexecute_sp;
4557
4558 // Remove the allocation from above the guards
4559 CallProjections callprojs;
4560 alloc->extract_projections(&callprojs, true);
4561 InitializeNode* init = alloc->initialization();
4562 Node* alloc_mem = alloc->in(TypeFunc::Memory);
4563 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4564 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4565 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4566
4567 // move the allocation here (after the guards)
4568 _gvn.hash_delete(alloc);
4569 alloc->set_req(TypeFunc::Control, control());
4570 alloc->set_req(TypeFunc::I_O, i_o());
4571 Node *mem = reset_memory();
4572 set_all_memory(mem);
4573 alloc->set_req(TypeFunc::Memory, mem);
4574 set_control(init->proj_out_or_null(TypeFunc::Control));
4575 set_i_o(callprojs.fallthrough_ioproj);
4576
4577 // Update memory as done in GraphKit::set_output_for_allocation()
4578 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4579 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4580 if (ary_type->isa_aryptr() && length_type != NULL) {
4581 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4582 }
4583 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4584 int elemidx = C->get_alias_index(telemref);
4585 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4586 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4587
4588 Node* allocx = _gvn.transform(alloc);
4589 assert(allocx == alloc, "where has the allocation gone?");
4590 assert(dest->is_CheckCastPP(), "not an allocation result?");
4591
4592 _gvn.hash_delete(dest);
4593 dest->set_req(0, control());
4594 Node* destx = _gvn.transform(dest);
4595 assert(destx == dest, "where has the allocation result gone?");
4799 // This is also checked in generate_arraycopy() during macro expansion, but
4800 // we also have to check it here for the case where the ArrayCopyNode will
4801 // be eliminated by Escape Analysis.
4802 if (EliminateAllocations) {
4803 generate_negative_guard(length, slow_region);
4804 negative_length_guard_generated = true;
4805 }
4806
4807 // (9) each element of an oop array must be assignable
4808 Node* src_klass = load_object_klass(src);
4809 Node* dest_klass = load_object_klass(dest);
4810 Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
4811
4812 if (not_subtype_ctrl != top()) {
4813 PreserveJVMState pjvms(this);
4814 set_control(not_subtype_ctrl);
4815 uncommon_trap(Deoptimization::Reason_intrinsic,
4816 Deoptimization::Action_make_not_entrant);
4817 assert(stopped(), "Should be stopped");
4818 }
4819 {
4820 PreserveJVMState pjvms(this);
4821 set_control(_gvn.transform(slow_region));
4822 uncommon_trap(Deoptimization::Reason_intrinsic,
4823 Deoptimization::Action_make_not_entrant);
4824 assert(stopped(), "Should be stopped");
4825 }
4826
4827 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
4828 const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
4829 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
4830 }
4831
4832 arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
4833
4834 if (stopped()) {
4835 return true;
4836 }
4837
4838 Node* new_src = access_resolve(src, ACCESS_READ);
4839 Node* new_dest = access_resolve(dest, ACCESS_WRITE);
4840
4841 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, new_src, src_offset, new_dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
4842 // Create LoadRange and LoadKlass nodes for use during macro expansion here
4843 // so the compiler has a chance to eliminate them: during macro expansion,
4844 // we have to set their control (CastPP nodes are eliminated).
4845 load_object_klass(src), load_object_klass(dest),
4846 load_array_length(src), load_array_length(dest));
4847
4848 ac->set_arraycopy(validated);
4849
35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/callGenerator.hpp"
40 #include "opto/castnode.hpp"
41 #include "opto/cfgnode.hpp"
42 #include "opto/convertnode.hpp"
43 #include "opto/countbitsnode.hpp"
44 #include "opto/intrinsicnode.hpp"
45 #include "opto/idealKit.hpp"
46 #include "opto/mathexactnode.hpp"
47 #include "opto/movenode.hpp"
48 #include "opto/mulnode.hpp"
49 #include "opto/narrowptrnode.hpp"
50 #include "opto/opaquenode.hpp"
51 #include "opto/parse.hpp"
52 #include "opto/runtime.hpp"
53 #include "opto/rootnode.hpp"
54 #include "opto/subnode.hpp"
55 #include "opto/valuetypenode.hpp"
56 #include "prims/nativeLookup.hpp"
57 #include "prims/unsafe.hpp"
58 #include "runtime/objectMonitor.hpp"
59 #include "runtime/sharedRuntime.hpp"
60 #include "utilities/macros.hpp"
61
62
63 class LibraryIntrinsic : public InlineCallGenerator {
64 // Extend the set of intrinsics known to the runtime:
65 public:
66 private:
67 bool _is_virtual;
68 bool _does_virtual_dispatch;
69 int8_t _predicates_count; // Intrinsic is predicated by several conditions
70 int8_t _last_predicate; // Last generated predicate
71 vmIntrinsics::ID _intrinsic_id;
72
73 public:
74 LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
75 : InlineCallGenerator(m),
147
148 void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
149 void set_result(RegionNode* region, PhiNode* value);
150 Node* result() { return _result; }
151
152 virtual int reexecute_sp() { return _reexecute_sp; }
153
154 // Helper functions to inline natives
155 Node* generate_guard(Node* test, RegionNode* region, float true_prob);
156 Node* generate_slow_guard(Node* test, RegionNode* region);
157 Node* generate_fair_guard(Node* test, RegionNode* region);
158 Node* generate_negative_guard(Node* index, RegionNode* region,
159 // resulting CastII of index:
160 Node* *pos_index = NULL);
161 Node* generate_limit_guard(Node* offset, Node* subseq_length,
162 Node* array_length,
163 RegionNode* region);
164 void generate_string_range_check(Node* array, Node* offset,
165 Node* length, bool char_count);
166 Node* generate_current_thread(Node* &tls_output);
167 Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
168 RegionNode* region, int null_path,
169 int offset);
170 Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
171 RegionNode* region, int null_path) {
172 int offset = java_lang_Class::klass_offset_in_bytes();
173 return load_klass_from_mirror_common(mirror, never_see_null,
174 region, null_path,
175 offset);
176 }
177 Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
178 RegionNode* region, int null_path) {
179 int offset = java_lang_Class::array_klass_offset_in_bytes();
180 return load_klass_from_mirror_common(mirror, never_see_null,
181 region, null_path,
182 offset);
183 }
184 Node* generate_access_flags_guard(Node* kls,
185 int modifier_mask, int modifier_bits,
186 RegionNode* region);
187 Node* generate_interface_guard(Node* kls, RegionNode* region);
188 Node* generate_value_guard(Node* kls, RegionNode* region);
189
190 enum ArrayKind {
191 AnyArray,
192 NonArray,
193 ObjectArray,
194 NonObjectArray,
195 TypeArray,
196 ValueArray
197 };
198
199 Node* generate_array_guard(Node* kls, RegionNode* region) {
200 return generate_array_guard_common(kls, region, AnyArray);
201 }
202 Node* generate_non_array_guard(Node* kls, RegionNode* region) {
203 return generate_array_guard_common(kls, region, NonArray);
204 }
205 Node* generate_objArray_guard(Node* kls, RegionNode* region) {
206 return generate_array_guard_common(kls, region, ObjectArray);
207 }
208 Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
209 return generate_array_guard_common(kls, region, NonObjectArray);
210 }
211 Node* generate_typeArray_guard(Node* kls, RegionNode* region) {
212 return generate_array_guard_common(kls, region, TypeArray);
213 }
214 Node* generate_valueArray_guard(Node* kls, RegionNode* region) {
215 return generate_array_guard_common(kls, region, ValueArray);
216 }
217 Node* generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind);
218 Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
219 CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
220 bool is_virtual = false, bool is_static = false);
221 CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
222 return generate_method_call(method_id, false, true);
223 }
224 CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
225 return generate_method_call(method_id, true, false);
226 }
227 Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
228 Node * field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
229
230 Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
231 bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
232 bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
233 bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
234 Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
235 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
236 bool inline_string_indexOfChar();
237 bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
252 bool inline_math_multiplyExactL();
253 bool inline_math_multiplyHigh();
254 bool inline_math_negateExactI();
255 bool inline_math_negateExactL();
256 bool inline_math_subtractExactI(bool is_decrement);
257 bool inline_math_subtractExactL(bool is_decrement);
258 bool inline_min_max(vmIntrinsics::ID id);
259 bool inline_notify(vmIntrinsics::ID id);
260 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
261 // This returns Type::AnyPtr, RawPtr, or OopPtr.
262 int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
263 Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);
264
265 typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
266 DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
267 bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
268 static bool klass_needs_init_guard(Node* kls);
269 bool inline_unsafe_allocate();
270 bool inline_unsafe_newArray(bool uninitialized);
271 bool inline_unsafe_copyMemory();
272 bool inline_unsafe_make_private_buffer();
273 bool inline_unsafe_finish_private_buffer();
274 bool inline_native_currentThread();
275
276 bool inline_native_time_funcs(address method, const char* funcName);
277 #ifdef JFR_HAVE_INTRINSICS
278 bool inline_native_classID();
279 bool inline_native_getEventWriter();
280 #endif
281 bool inline_native_isInterrupted();
282 bool inline_native_Class_query(vmIntrinsics::ID id);
283 bool inline_native_subtype_check();
284 bool inline_native_getLength();
285 bool inline_array_copyOf(bool is_copyOfRange);
286 bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
287 bool inline_preconditions_checkIndex();
288 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
289 bool inline_native_clone(bool is_virtual);
290 bool inline_native_Reflection_getCallerClass();
291 // Helper function for inlining native object hash method
292 bool inline_native_hashcode(bool is_virtual, bool is_static);
293 bool inline_native_getClass();
590 case vmIntrinsics::_indexOfU: return inline_string_indexOf(StrIntrinsicNode::UU);
591 case vmIntrinsics::_indexOfUL: return inline_string_indexOf(StrIntrinsicNode::UL);
592 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
593 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
594 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
595 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar();
596
597 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
598 case vmIntrinsics::_equalsU: return inline_string_equals(StrIntrinsicNode::UU);
599
600 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
601 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
602 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
603 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
604
605 case vmIntrinsics::_compressStringC:
606 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
607 case vmIntrinsics::_inflateStringC:
608 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
609
610 case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
611 case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
612 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
613 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
614 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
615 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
616 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
617 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
618 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
619 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
620 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
621 case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_VALUETYPE,Relaxed, false);
622
623 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
624 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
625 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
626 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
627 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
628 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
629 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
630 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
631 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
632 case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_VALUETYPE,Relaxed, false);
633
634 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
635 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
636 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
637 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
638 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
639 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
640 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
641 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
642 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
643
644 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
645 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
646 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
647 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
648 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
649 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
650 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
651 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
652 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
2365 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2366 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2367
2368 if (type == T_OBJECT || type == T_ARRAY) {
2369 decorators |= ON_UNKNOWN_OOP_REF;
2370 }
2371
2372 if (unaligned) {
2373 decorators |= C2_UNALIGNED;
2374 }
2375
2376 #ifndef PRODUCT
2377 {
2378 ResourceMark rm;
2379 // Check the signatures.
2380 ciSignature* sig = callee()->signature();
2381 #ifdef ASSERT
2382 if (!is_store) {
2383 // Object getReference(Object base, int/long offset), etc.
2384 BasicType rtype = sig->return_type()->basic_type();
2385 assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value");
2386 assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
2387 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2388 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2389 } else {
2390 // void putReference(Object base, int/long offset, Object x), etc.
2391 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2392       assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 or 4 arguments");
2393 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2394 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2395 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2396 assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value");
2397 }
2398 #endif // ASSERT
2399 }
2400 #endif //PRODUCT
2401
2402 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2403
2404 Node* receiver = argument(0); // type: oop
2405
2406 // Build address expression.
2407 Node* adr;
2408 Node* heap_base_oop = top();
2409 Node* offset = top();
2410 Node* val;
2411
2412 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2413 Node* base = argument(1); // type: oop
2414 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2415 offset = argument(2); // type: long
2416 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2417 // to be plain byte offsets, which are also the same as those accepted
2418 // by oopDesc::field_addr.
2419 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2420 "fieldOffset must be byte-scaled");
2421
2422 ciValueKlass* value_klass = NULL;
2423 if (type == T_VALUETYPE) {
2424 Node* cls = null_check(argument(4));
2425 if (stopped()) {
2426 return true;
2427 }
2428 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2429 const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr();
2430 if (!kls_t->klass_is_exact()) {
2431 return false;
2432 }
2433 ciKlass* klass = kls_t->klass();
2434 if (!klass->is_valuetype()) {
2435 return false;
2436 }
2437 value_klass = klass->as_value_klass();
2438 }
2439
2440 receiver = null_check(receiver);
2441 if (stopped()) {
2442 return true;
2443 }
2444
2445 if (base->is_ValueType()) {
2446 ValueTypeNode* vt = base->as_ValueType();
2447
2448 if (is_store) {
2449 if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
2450 return false;
2451 }
2452 base = vt->get_oop();
2453 } else {
2454 if (offset->is_Con()) {
2455 long off = find_long_con(offset, 0);
2456 ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass();
2457 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2458 return false;
2459 }
2460
2461 ciField* f = vk->get_non_flattened_field_by_offset((int)off);
2462
2463 if (f != NULL) {
2464 BasicType bt = f->layout_type();
2465 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2466 bt = T_OBJECT;
2467 }
2468 if (bt == type) {
2469 if (bt != T_VALUETYPE || f->type() == value_klass) {
2470 set_result(vt->field_value_by_offset((int)off, false));
2471 return true;
2472 }
2473 }
2474 }
2475 }
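          // Could not fold the read into a direct field access on the value
          // node, so buffer (allocate) the value object and fall through to a
          // normal memory access on its oop below.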
2476 vt = vt->allocate(this)->as_ValueType();
2477 base = vt->get_oop();
2478 }
2479 }
2480
2481 // 32-bit machines ignore the high half!
2482 offset = ConvL2X(offset);
2483 adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2484
2485 if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2486 heap_base_oop = base;
2487 } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) {
2488 return false; // off-heap oop accesses are not supported
2489 }
2490
2491 // Can base be NULL? Otherwise, always on-heap access.
2492 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2493
2494 if (!can_access_non_heap) {
2495 decorators |= IN_HEAP;
2496 }
2497
2498 val = is_store ? argument(4 + (type == T_VALUETYPE ? 1 : 0)) : NULL;
2499
2500 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2501
2502 // Try to categorize the address.
2503 Compile::AliasType* alias_type = C->alias_type(adr_type);
2504 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2505
2506 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2507 alias_type->adr_type() == TypeAryPtr::RANGE) {
2508 return false; // not supported
2509 }
2510
2511 bool mismatched = false;
2512 BasicType bt = T_ILLEGAL;
2513 ciField* field = NULL;
2514 if (adr_type->isa_instptr()) {
2515 const TypeInstPtr* instptr = adr_type->is_instptr();
2516 ciInstanceKlass* k = instptr->klass()->as_instance_klass();
2517 int off = instptr->offset();
2518 if (instptr->const_oop() != NULL &&
2519 instptr->klass() == ciEnv::current()->Class_klass() &&
2520 instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
2521 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2522 field = k->get_field_by_offset(off, true);
2523 } else {
2524 field = k->get_non_flattened_field_by_offset(off);
2525 }
2526 if (field != NULL) {
2527 bt = field->layout_type();
2528 }
2529 assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match");
2530 if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) {
2531 bt = T_OBJECT;
2532 }
2533 } else {
2534 bt = alias_type->basic_type();
2535 }
2536
2537 if (bt != T_ILLEGAL) {
2538 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2539 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2540       // Alias type doesn't differentiate between byte[] and boolean[].
2541 // Use address type to get the element type.
2542 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2543 }
2544 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2545 // accessing an array field with getReference is not a mismatch
2546 bt = T_OBJECT;
2547 }
2548 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2549 // Don't intrinsify mismatched object accesses
2550 return false;
2551 }
2552 mismatched = (bt != type);
2553 } else if (alias_type->adr_type()->isa_oopptr()) {
2554 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2555 }
2556
2557 if (type == T_VALUETYPE) {
2558 if (adr_type->isa_instptr()) {
2559 if (field == NULL || field->type() != value_klass) {
2560 mismatched = true;
2561 }
2562 } else if (adr_type->isa_aryptr()) {
2563 const Type* elem = adr_type->is_aryptr()->elem();
2564 if (!elem->isa_valuetype()) {
2565 mismatched = true;
2566 } else if (elem->is_valuetype()->value_klass() != value_klass) {
2567 mismatched = true;
2568 }
2569 }
2570 if (is_store) {
2571 const Type* val_t = _gvn.type(val);
2572 if (!val_t->isa_valuetype() ||
2573 val_t->is_valuetype()->value_klass() != value_klass) {
2574 return false;
2575 }
2576 }
2577 }
2578
2579 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2580
2581 if (mismatched) {
2582 decorators |= C2_MISMATCHED;
2583 }
2584
2585 // First guess at the value type.
2586 const Type *value_type = Type::get_const_basic_type(type);
2587
2588 // Figure out the memory ordering.
2589 decorators |= mo_decorator_for_access_kind(kind);
2590
2591 if (!is_store) {
2592 if (type == T_OBJECT) {
2593 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2594 if (tjp != NULL) {
2595 value_type = tjp;
2596 }
2597 } else if (type == T_VALUETYPE) {
2598 value_type = NULL;
2599 }
2600 }
2601
2602 // Heap pointers get a null-check from the interpreter,
2603 // as a courtesy. However, this is not guaranteed by Unsafe,
2604 // and it is not possible to fully distinguish unintended nulls
2605 // from intended ones in this API.
2606
2607 if (!is_store) {
2608 Node* p = NULL;
2609 // Try to constant fold a load from a constant field
2610
2611 if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2612 // final or stable field
2613 p = make_constant_from_field(field, heap_base_oop);
2614 }
2615
2616 if (p == NULL) { // Could not constant fold the load
2617 if (type == T_VALUETYPE) {
2618 if (adr_type->isa_instptr() && !mismatched) {
2619 ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2620 int offset = adr_type->is_instptr()->offset();
2621 p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators);
2622 } else {
2623 p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators);
2624 }
2625 } else {
2626 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2627 }
2628 // Normalize the value returned by getBoolean in the following cases
2629 if (type == T_BOOLEAN &&
2630 (mismatched ||
2631 heap_base_oop == top() || // - heap_base_oop is NULL or
2632 (can_access_non_heap && field == NULL)) // - heap_base_oop is potentially NULL
2633              // and the unsafe access is made at a large offset
2634 // (i.e., larger than the maximum offset necessary for any
2635 // field access)
2636 ) {
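          // In effect this computes p = (p != 0) ? 1 : 0: any non-canonical
          // boolean bit pattern read through Unsafe is normalized to 0 or 1
          // before being returned.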
2637 IdealKit ideal = IdealKit(this);
2638 #define __ ideal.
2639 IdealVariable normalized_result(ideal);
2640 __ declarations_done();
2641 __ set(normalized_result, p);
2642 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2643 __ set(normalized_result, ideal.ConI(1));
2644 ideal.end_if();
2645 final_sync(ideal);
2646 p = __ value(normalized_result);
2647 #undef __
2648 }
2649 }
2650 if (type == T_ADDRESS) {
2651 p = gvn().transform(new CastP2XNode(NULL, p));
2652 p = ConvX2UL(p);
2653 }
2654     if (field != NULL && field->is_flattenable() && !field->is_flattened()) {
2655 // Load a non-flattened but flattenable value type from memory
2656 if (value_type->value_klass()->is_scalarizable()) {
2657 p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass());
2658 } else {
2659 p = null2default(p, value_type->value_klass());
2660 }
2661 }
2662 // The load node has the control of the preceding MemBarCPUOrder. All
2663 // following nodes will have the control of the MemBarCPUOrder inserted at
2664 // the end of this method. So, pushing the load onto the stack at a later
2665 // point is fine.
2666 set_result(p);
2667 } else {
2668 if (bt == T_ADDRESS) {
2669 // Repackage the long as a pointer.
2670 val = ConvL2X(val);
2671 val = gvn().transform(new CastX2PNode(val));
2672 }
2673 if (type == T_VALUETYPE) {
2674 if (adr_type->isa_instptr() && !mismatched) {
2675 ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2676 int offset = adr_type->is_instptr()->offset();
2677 val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators);
2678 } else {
2679 val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators);
2680 }
2681 } else {
2682 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2683 }
2684 }
2685
2686 if (argument(1)->is_ValueType() && is_store) {
2687 Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass());
2688 value = value->as_ValueType()->make_larval(this, false);
2689 replace_in_map(argument(1), value);
2690 }
2691
2692 return true;
2693 }
2694
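// Informal sketch (not authoritative) of the Java-level pattern the two
// larval-buffer intrinsics below are meant to support, assuming the Valhalla
// Unsafe API with the method names from the intrinsic table above:
//
//   v = U.makePrivateBuffer(v);    // inline_unsafe_make_private_buffer: larval copy
//   U.putInt(v, offset, 42);       // field writes go through inline_unsafe_access
//   v = U.finishPrivateBuffer(v);  // inline_unsafe_finish_private_buffer: publish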
2695 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2696 Node* receiver = argument(0);
2697 Node* value = argument(1);
2698
2699 receiver = null_check(receiver);
2700 if (stopped()) {
2701 return true;
2702 }
2703
2704 if (!value->is_ValueType()) {
2705 return false;
2706 }
2707
2708 set_result(value->as_ValueType()->make_larval(this, true));
2709
2710 return true;
2711 }
2712
2713 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2714 Node* receiver = argument(0);
2715 Node* buffer = argument(1);
2716
2717 receiver = null_check(receiver);
2718 if (stopped()) {
2719 return true;
2720 }
2721
2722 if (!buffer->is_ValueType()) {
2723 return false;
2724 }
2725
2726 ValueTypeNode* vt = buffer->as_ValueType();
2727 if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
2728 return false;
2729 }
2730
2731 set_result(vt->finish_larval(this));
2732
2733 return true;
2734 }
2735
2736 //----------------------------inline_unsafe_load_store----------------------------
2737 // This method serves a couple of different customers (depending on LoadStoreKind):
2738 //
2739 // LS_cmp_swap:
2740 //
2741 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2742 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2743 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2744 //
2745 // LS_cmp_swap_weak:
2746 //
2747 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2748 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2749 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2750 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2751 //
3248 Node* fast_mem = slow_call->in(TypeFunc::Memory);
3249
3250   // These two phis are pre-filled with copies of the fast IO and Memory
3251 PhiNode* result_mem = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3252 PhiNode* result_io = PhiNode::make(result_rgn, fast_io, Type::ABIO);
3253
3254 result_rgn->init_req(slow_result_path, control());
3255 result_io ->init_req(slow_result_path, i_o());
3256 result_mem->init_req(slow_result_path, reset_memory());
3257 result_val->init_req(slow_result_path, slow_val);
3258
3259 set_all_memory(_gvn.transform(result_mem));
3260 set_i_o( _gvn.transform(result_io));
3261 }
3262
3263 C->set_has_split_ifs(true); // Has chance for split-if optimization
3264 set_result(result_rgn, result_val);
3265 return true;
3266 }
3267
3268 //-----------------------load_klass_from_mirror_common-------------------------
3269 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3270 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3271 // and branch to the given path on the region.
3272 // If never_see_null, take an uncommon trap on null, so we can optimistically
3273 // compile for the non-null case.
3274 // If the region is NULL, force never_see_null = true.
3275 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3276 bool never_see_null,
3277 RegionNode* region,
3278 int null_path,
3279 int offset) {
3280 if (region == NULL) never_see_null = true;
3281 Node* p = basic_plus_adr(mirror, offset);
3282 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3283 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3284 Node* null_ctl = top();
3285 kls = null_check_oop(kls, &null_ctl, never_see_null);
3286 if (region != NULL) {
3287     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3294
3295 //--------------------(inline_native_Class_query helpers)---------------------
3296 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3297 // Fall through if (mods & mask) == bits, take the guard otherwise.
3298 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3299 // Branch around if the given klass has the given modifier bit set.
3300 // Like generate_guard, adds a new path onto the region.
3301 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3302 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3303 Node* mask = intcon(modifier_mask);
3304 Node* bits = intcon(modifier_bits);
3305 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3306 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3307 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3308 return generate_fair_guard(bol, region);
3309 }
3310 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3311 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3312 }
3313
3314 Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) {
3315 return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region);
3316 }
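// Reading the two helpers above informally: generate_interface_guard passes
// mask == JVM_ACC_INTERFACE and bits == 0, so control falls through when the
// klass is *not* an interface and the returned guard path is taken when it is;
// generate_value_guard does the same with JVM_ACC_VALUE for value klasses.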
3317
3318 //-------------------------inline_native_Class_query-------------------
3319 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3320 const Type* return_type = TypeInt::BOOL;
3321 Node* prim_return_value = top(); // what happens if it's a primitive class?
3322 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3323 bool expect_prim = false; // most of these guys expect to work on refs
3324
3325 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3326
3327 Node* mirror = argument(0);
3328 Node* obj = top();
3329
3330 switch (id) {
3331 case vmIntrinsics::_isInstance:
3332 // nothing is an instance of a primitive type
3333 prim_return_value = intcon(0);
3334 obj = argument(1);
3335 break;
3336 case vmIntrinsics::_getModifiers:
3337 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3482 // Fall-through is the normal case of a query to a real class.
3483 phi->init_req(1, query_value);
3484 region->init_req(1, control());
3485
3486 C->set_has_split_ifs(true); // Has chance for split-if optimization
3487 set_result(region, phi);
3488 return true;
3489 }
3490
3491 //-------------------------inline_Class_cast-------------------
3492 bool LibraryCallKit::inline_Class_cast() {
3493 Node* mirror = argument(0); // Class
3494 Node* obj = argument(1);
3495 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3496 if (mirror_con == NULL) {
3497 return false; // dead path (mirror->is_top()).
3498 }
3499 if (obj == NULL || obj->is_top()) {
3500 return false; // dead path
3501 }
3502
3503 ciKlass* obj_klass = NULL;
3504 if (obj->is_ValueType()) {
3505 const TypeValueType* tvt = _gvn.type(obj)->is_valuetype();
3506 obj_klass = tvt->value_klass();
3507 } else {
3508 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3509 if (tp != NULL) {
3510 obj_klass = tp->klass();
3511 }
3512 }
3513
3514 // First, see if Class.cast() can be folded statically.
3515 // java_mirror_type() returns non-null for compile-time Class constants.
3516 ciType* tm = mirror_con->java_mirror_type();
3517 if (tm != NULL && tm->is_klass() &&
3518 obj_klass != NULL) {
3519 if (!obj_klass->is_loaded()) {
3520 // Don't use intrinsic when class is not loaded.
3521 return false;
3522 } else {
3523 int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
3524 if (static_res == Compile::SSC_always_true) {
3525 // isInstance() is true - fold the code.
3526 set_result(obj);
3527 return true;
3528 } else if (static_res == Compile::SSC_always_false) {
3529 // Don't use intrinsic, have to throw ClassCastException.
3530 // If the reference is null, the non-intrinsic bytecode will
3531 // be optimized appropriately.
3532 return false;
3533 }
3534 }
3535 }
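  // For example, if the mirror is the compile-time constant String.class and
  // obj is statically known to be a String, the check above folds to
  // SSC_always_true and the cast disappears; if obj were known to be an
  // Integer instead, it would fold to SSC_always_false and we would leave the
  // bytecode to throw ClassCastException.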
3536
3537 // Bailout intrinsic and do normal inlining if exception path is frequent.
3538 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3539 return false;
3540 }
3541
3542 // Generate dynamic checks.
3543 // Class.cast() is java implementation of _checkcast bytecode.
3671 phi->set_req(_ref_subtype_path, intcon(1));
3672
3673 // pull together the cases:
3674 assert(region->req() == PATH_LIMIT, "sane region");
3675 for (uint i = 1; i < region->req(); i++) {
3676 Node* ctl = region->in(i);
3677 if (ctl == NULL || ctl == top()) {
3678 region->set_req(i, top());
3679 phi ->set_req(i, top());
3680 } else if (phi->in(i) == NULL) {
3681 phi->set_req(i, intcon(0)); // all other paths produce 'false'
3682 }
3683 }
3684
3685 set_control(_gvn.transform(region));
3686 set_result(_gvn.transform(phi));
3687 return true;
3688 }
3689
3690 //---------------------generate_array_guard_common------------------------
3691 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
3692
3693 if (stopped()) {
3694 return NULL;
3695 }
3696
3697 // Like generate_guard, adds a new path onto the region.
3698 jint layout_con = 0;
3699 Node* layout_val = get_layout_helper(kls, layout_con);
3700 if (layout_val == NULL) {
3701     bool query = false;
3702 switch(kind) {
3703 case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
3704 case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
3705 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
3706 case ValueArray: query = Klass::layout_helper_is_valueArray(layout_con); break;
3707 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
3708 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
3709 default:
3710 ShouldNotReachHere();
3711 }
3712 if (!query) {
3713 return NULL; // never a branch
3714 } else { // always a branch
3715 Node* always_branch = control();
3716 if (region != NULL)
3717 region->add_req(always_branch);
3718 set_control(top());
3719 return always_branch;
3720 }
3721 }
3722 unsigned int value = 0;
3723 BoolTest::mask btest = BoolTest::illegal;
3724 switch(kind) {
3725 case ObjectArray:
3726 case NonObjectArray: {
3727 value = Klass::_lh_array_tag_obj_value;
3728 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3729 btest = kind == ObjectArray ? BoolTest::eq : BoolTest::ne;
3730 break;
3731 }
3732 case TypeArray: {
3733 value = Klass::_lh_array_tag_type_value;
3734 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3735 btest = BoolTest::eq;
3736 break;
3737 }
3738 case ValueArray: {
3739 value = Klass::_lh_array_tag_vt_value;
3740 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3741 btest = BoolTest::eq;
3742 break;
3743 }
3744 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
3745 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
3746 default:
3747 ShouldNotReachHere();
3748 }
3749 // Now test the correct condition.
3750 jint nval = (jint)value;
3751 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3752 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3753 return generate_fair_guard(bol, region);
3754 }
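// Note on the AnyArray/NonArray cases above: array klasses encode a negative
// layout helper (the array tag lives in the high bits), while non-array
// klasses use non-negative values, so a signed lt/gt comparison against
// Klass::_lh_neutral_value separates arrays from non-arrays without shifting
// out the tag.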
3755
3756
3757 //-----------------------inline_native_newArray--------------------------
3758 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3759 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3760 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3761 Node* mirror;
3762 Node* count_val;
3763 if (uninitialized) {
3764 mirror = argument(1);
3765 count_val = argument(2);
3766 } else {
3767 mirror = argument(0);
3768 count_val = argument(1);
3769 }
3770
3771 mirror = null_check(mirror);
3772 // If mirror or obj is dead, only null-path is taken.
3773 if (stopped()) return true;
3774
3775 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3776 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3777 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3778 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3854 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3855 Node* result = load_array_length(array);
3856
3857 C->set_has_split_ifs(true); // Has chance for split-if optimization
3858 set_result(result);
3859 return true;
3860 }
3861
3862 //------------------------inline_array_copyOf----------------------------
3863 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
3864 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
3865 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3866 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3867
3868 // Get the arguments.
3869 Node* original = argument(0);
3870 Node* start = is_copyOfRange? argument(1): intcon(0);
3871 Node* end = is_copyOfRange? argument(2): argument(1);
3872 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3873
3874 const TypeAryPtr* original_t = _gvn.type(original)->isa_aryptr();
3875 const TypeInstPtr* mirror_t = _gvn.type(array_type_mirror)->isa_instptr();
3876 if (EnableValhalla && ValueArrayFlatten &&
3877 (original_t == NULL || mirror_t == NULL ||
3878 (mirror_t->java_mirror_type() == NULL &&
3879 (original_t->elem()->isa_valuetype() ||
3880 (original_t->elem()->make_oopptr() != NULL &&
3881 original_t->elem()->make_oopptr()->can_be_value_type()))))) {
3882     // We need to know statically whether the copy targets a flattened
3883     // array, but we cannot tell that here.
3884 return false;
3885 }
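  // A concrete instance of the problem: Arrays.copyOf of an array whose
  // element type might be an inline/value type into a destination type that is
  // only known at run time; whether either side uses a flattened layout cannot
  // be determined here, so we give up on the intrinsic.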
3886
3887 Node* newcopy = NULL;
3888
3889 // Set the original stack and the reexecute bit for the interpreter to reexecute
3890 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3891 { PreserveReexecuteState preexecs(this);
3892 jvms()->set_should_reexecute(true);
3893
3894 array_type_mirror = null_check(array_type_mirror);
3895 original = null_check(original);
3896
3897 // Check if a null path was taken unconditionally.
3898 if (stopped()) return true;
3899
3900 Node* orig_length = load_array_length(original);
3901
3902 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3903 klass_node = null_check(klass_node);
3904
3905 RegionNode* bailout = new RegionNode(1);
3906 record_for_igvn(bailout);
3907
3908 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3909 // Bail out if that is so.
3910     // A value type array may have object fields that would require a
3911     // write barrier. Conservatively, go to the slow path.
3912 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3913 Node* not_objArray = !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing) ?
3914 generate_typeArray_guard(klass_node, bailout) : generate_non_objArray_guard(klass_node, bailout);
3915 if (not_objArray != NULL) {
3916 // Improve the klass node's type from the new optimistic assumption:
3917 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3918 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
3919 Node* cast = new CastPPNode(klass_node, akls);
3920 cast->init_req(0, control());
3921 klass_node = _gvn.transform(cast);
3922 }
3923
3924 Node* original_kls = load_object_klass(original);
3925     // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3926 // loads/stores but it is legal only if we're sure the
3927 // Arrays.copyOf would succeed. So we need all input arguments
3928 // to the copyOf to be validated, including that the copy to the
3929 // new array won't trigger an ArrayStoreException. That subtype
3930 // check can be optimized if we know something on the type of
3931 // the input array from type speculation.
3932 if (_gvn.type(klass_node)->singleton() && !stopped()) {
3933 ciKlass* subk = _gvn.type(original_kls)->is_klassptr()->klass();
3934 ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3935
3936 int test = C->static_subtype_check(superk, subk);
3937 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3938 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3939 if (t_original->speculative_type() != NULL) {
3940 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3941 original_kls = load_object_klass(original);
3942 }
3943 }
3944 }
3945
3946 if (EnableValhalla) {
3947       // Either both the new array klass and the original array klass
3948       // must be flattened, or neither of them.
3949 Node* flattened_klass = generate_valueArray_guard(klass_node, NULL);
3950 generate_valueArray_guard(original_kls, bailout);
3951 if (flattened_klass != NULL) {
3952 RegionNode* r = new RegionNode(2);
3953 record_for_igvn(r);
3954 r->init_req(1, control());
3955 set_control(flattened_klass);
3956 generate_valueArray_guard(original_kls, r);
3957 bailout->add_req(control());
3958 set_control(_gvn.transform(r));
3959 }
3960 }
3961
3962 // Bail out if either start or end is negative.
3963 generate_negative_guard(start, bailout, &start);
3964 generate_negative_guard(end, bailout, &end);
3965
3966 Node* length = end;
3967 if (_gvn.type(start) != TypeInt::ZERO) {
3968 length = _gvn.transform(new SubINode(end, start));
3969 }
3970
3971 // Bail out if length is negative.
3972     // Without this, the new_array call would throw a
3973     // NegativeArraySizeException, but an IllegalArgumentException is what
3974     // should be thrown.
3975 generate_negative_guard(length, bailout, &length);
3976
3977 if (bailout->req() > 1) {
3978 PreserveJVMState pjvms(this);
3979 set_control(_gvn.transform(bailout));
3980 uncommon_trap(Deoptimization::Reason_intrinsic,
3981 Deoptimization::Action_maybe_recompile);
3982 }
3983
3984 if (!stopped()) {
3985 // How many elements will we copy from the original?
3986 // The answer is MinI(orig_length - start, length).
3987 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3988 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3989
3990 original = access_resolve(original, ACCESS_READ);
3991
3992 // Generate a direct call to the right arraycopy function(s).
3993 // We know the copy is disjoint but we might not know if the
3994 // oop stores need checking.
3995 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
3996 // This will fail a store-check if x contains any non-nulls.
3997
3998 bool validated = false;
3999 // Reason_class_check rather than Reason_intrinsic because we
4000 // want to intrinsify even if this traps.
4001 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4002 Node* not_subtype_ctrl = gen_subtype_check(original_kls,
4003 klass_node);
4004
4005 if (not_subtype_ctrl != top()) {
4006 PreserveJVMState pjvms(this);
4007 set_control(not_subtype_ctrl);
4008 uncommon_trap(Deoptimization::Reason_class_check,
4009 Deoptimization::Action_make_not_entrant);
4010 assert(stopped(), "Should be stopped");
4011 }
4012 validated = true;
4013 }
4014
4015 if (!stopped()) {
4016 newcopy = new_array(klass_node, length, 0); // no arguments to push
4017
4018 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4019 original_kls, klass_node);
4020 if (!is_copyOfRange) {
4021 ac->set_copyof(validated);
4022 } else {
4023 ac->set_copyofrange(validated);
4024 }
4025 Node* n = _gvn.transform(ac);
4026 if (n == ac) {
4027 ac->connect_outputs(this);
4028 } else {
4029 assert(validated, "shouldn't transform if all arguments not validated");
4030 set_all_memory(n);
4031 }
4032 }
4033 }
4034 } // original reexecute is set back here
4035
4036 C->set_has_split_ifs(true); // Has chance for split-if optimization
4037 if (!stopped()) {
4038 set_result(newcopy);
4039 }
4123 set_edges_for_java_call(slow_call);
4124 return slow_call;
4125 }
4126
4127
4128 /**
4129 * Build special case code for calls to hashCode on an object. This call may
4130 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4131 * slightly different code.
4132 */
4133 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4134 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4135 assert(!(is_virtual && is_static), "either virtual, special, or static");
4136
4137 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4138
4139 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4140 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4141 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4142 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4143 Node* obj = argument(0);
4144
4145 if (obj->is_ValueType() || gvn().type(obj)->is_valuetypeptr()) {
4146 return false;
4147 }
4148
4149 if (!is_static) {
4150 // Check for hashing null object
4151 obj = null_check_receiver();
4152 if (stopped()) return true; // unconditionally null
4153 result_reg->init_req(_null_path, top());
4154 result_val->init_req(_null_path, top());
4155 } else {
4156 // Do a null check, and return zero if null.
4157 // System.identityHashCode(null) == 0
4158 Node* null_ctl = top();
4159 obj = null_check_oop(obj, &null_ctl);
4160 result_reg->init_req(_null_path, null_ctl);
4161 result_val->init_req(_null_path, _gvn.intcon(0));
4162 }
4163
4164 // Unconditionally null? Then return right away.
4165 if (stopped()) {
4166 set_control( result_reg->in(_null_path));
4167 if (!stopped())
4168 set_result(result_val->in(_null_path));
4169 return true;
4170 }
4171
4172 // We only go to the fast case code if we pass a number of guards. The
4173 // paths which do not pass are accumulated in the slow_region.
4174 RegionNode* slow_region = new RegionNode(1);
4175 record_for_igvn(slow_region);
4176
4177 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4178 assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here");
4179 if (is_static && obj_type->can_be_value_type()) {
4180 Node* obj_klass = load_object_klass(obj);
4181 generate_value_guard(obj_klass, slow_region);
4182 }
4183
4184 // If this is a virtual call, we generate a funny guard. We pull out
4185 // the vtable entry corresponding to hashCode() from the target object.
4186 // If the target method which we are calling happens to be the native
4187 // Object hashCode() method, we pass the guard. We do not need this
4188 // guard for non-virtual calls -- the caller is known to be the native
4189 // Object hashCode().
4190 if (is_virtual) {
4191 // After null check, get the object's klass.
4192 Node* obj_klass = load_object_klass(obj);
4193 generate_virtual_guard(obj_klass, slow_region);
4194 }
4195
4196 // Get the header out of the object, use LoadMarkNode when available
4197 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4198 // The control of the load must be NULL. Otherwise, the load can move before
4199 // the null check after castPP removal.
4200 Node* no_ctrl = NULL;
4201 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4202
4203 // Test the header to see if it is unlocked.
4250 // this->control() comes from set_results_for_java_call
4251 result_reg->init_req(_slow_path, control());
4252 result_val->init_req(_slow_path, slow_result);
4253 result_io ->set_req(_slow_path, i_o());
4254 result_mem ->set_req(_slow_path, reset_memory());
4255 }
4256
4257 // Return the combined state.
4258 set_i_o( _gvn.transform(result_io) );
4259 set_all_memory( _gvn.transform(result_mem));
4260
4261 set_result(result_reg, result_val);
4262 return true;
4263 }
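// Rough summary of the fast path assembled above (a sketch, not the exact
// algorithm): after the guards, the receiver's mark word is loaded and, if an
// identity hash value has already been installed in it, that value is returned
// directly; otherwise control joins the slow region, which calls the native
// hashCode()/identityHashCode() implementation.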
4264
4265 //---------------------------inline_native_getClass----------------------------
4266 // public final native Class<?> java.lang.Object.getClass();
4267 //
4268 // Build special case code for calls to getClass on an object.
4269 bool LibraryCallKit::inline_native_getClass() {
4270 Node* obj = argument(0);
4271 if (obj->is_ValueType()) {
4272 ciKlass* vk = _gvn.type(obj)->is_valuetype()->value_klass();
4273 set_result(makecon(TypeInstPtr::make(vk->java_mirror())));
4274 return true;
4275 }
4276 obj = null_check_receiver();
4277 if (stopped()) return true;
4278 set_result(load_mirror_from_klass(load_object_klass(obj)));
4279 return true;
4280 }
4281
4282 //-----------------inline_native_Reflection_getCallerClass---------------------
4283 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4284 //
4285 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4286 //
4287 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4288 // in that it must skip particular security frames and checks for
4289 // caller sensitive methods.
4290 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4291 #ifndef PRODUCT
4292 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4293 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4294 }
4295 #endif
4296
4513 Node* raw_obj = alloc_obj->in(1);
4514 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4515
4516 AllocateNode* alloc = NULL;
4517 if (ReduceBulkZeroing) {
4518 // We will be completely responsible for initializing this object -
4519 // mark Initialize node as complete.
4520 alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4521     // The object was just allocated - there should not be any stores yet!
4522 guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4523 // Mark as complete_with_arraycopy so that on AllocateNode
4524 // expansion, we know this AllocateNode is initialized by an array
4525 // copy and a StoreStore barrier exists after the array copy.
4526 alloc->initialization()->set_complete_with_arraycopy();
4527 }
4528
4529 // Copy the fastest available way.
4530   // TODO: generate field copies for small objects instead.
4531 Node* size = _gvn.transform(obj_size);
4532
4533   // Exclude the header but include the array length to copy by 8-byte words.
4534 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4535 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4536 instanceOopDesc::base_offset_in_bytes();
4537 // base_off:
4538 // 8 - 32-bit VM
4539 // 12 - 64-bit VM, compressed klass
4540 // 16 - 64-bit VM, normal klass
4541 if (base_off % BytesPerLong != 0) {
4542 assert(UseCompressedClassPointers, "");
4543 if (is_array) {
4544       // Exclude the length to copy by 8-byte words.
4545 base_off += sizeof(int);
4546 } else {
4547       // Include the klass to copy by 8-byte words.
4548 base_off = instanceOopDesc::klass_offset_in_bytes();
4549 }
4550     assert(base_off % BytesPerLong == 0, "expect 8-byte alignment");
4551 }
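  // Worked example, assuming a 64-bit VM with compressed class pointers: for an
  // instance, base_off starts at 12, is not 8-byte aligned, and is moved back to
  // klass_offset_in_bytes() == 8 so the klass word is copied too; for an array,
  // base_off starts at the length field (12) and moves forward to 16, skipping
  // the length word. Either way the copy below runs over whole 8-byte words:
  // countx = (size - base_off) >> LogBytesPerLong.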
4552 Node* src_base = basic_plus_adr(obj, base_off);
4553 Node* dst_base = basic_plus_adr(alloc_obj, base_off);
4554
4555 // Compute the length also, if needed:
4556 Node* countx = size;
4557 countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
4558 countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
4559
4560 access_clone(src_base, dst_base, countx, is_array);
4561
4562 // Do not let reads from the cloned object float above the arraycopy.
4563 if (alloc != NULL) {
4564 // Do not let stores that initialize this object be reordered with
4565 // a subsequent store that would make this object accessible by
4566 // other threads.
4567 // Record what AllocateNode this StoreStore protects so that
4568 // escape analysis can go from the MemBarStoreStoreNode to the
4569 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4570 // based on the escape status of the AllocateNode.
4571 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
4572 } else {
4573 insert_mem_bar(Op_MemBarCPUOrder);
4574 }
4575 }
4576
4577 //------------------------inline_native_clone----------------------------
4578 // protected native Object java.lang.Object.clone();
4579 //
4580 // Here are the simple edge cases:
4583 // not cloneable or finalizer => slow path to out-of-line Object.clone
4584 //
4585 // The general case has two steps, allocation and copying.
4586 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4587 //
4588 // Copying also has two cases, oop arrays and everything else.
4589 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4590 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4591 //
4592 // These steps fold up nicely if and when the cloned object's klass
4593 // can be sharply typed as an object array, a type array, or an instance.
4594 //
4595 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4596 PhiNode* result_val;
4597
4598 // Set the reexecute bit for the interpreter to reexecute
4599 // the bytecode that invokes Object.clone if deoptimization happens.
4600 { PreserveReexecuteState preexecs(this);
4601 jvms()->set_should_reexecute(true);
4602
4603 Node* obj = argument(0);
4604 if (obj->is_ValueType()) {
4605 return false;
4606 }
4607
4608 obj = null_check_receiver();
4609 if (stopped()) return true;
4610
4611 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4612
4613 // If we are going to clone an instance, we need its exact type to
4614 // know the number and types of fields to convert the clone to
4615 // loads/stores. Maybe a speculative type can help us.
4616 if (!obj_type->klass_is_exact() &&
4617 obj_type->speculative_type() != NULL &&
4618 obj_type->speculative_type()->is_instance_klass() &&
4619 !obj_type->speculative_type()->is_valuetype()) {
4620 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4621 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4622 !spec_ik->has_injected_fields()) {
4623 ciKlass* k = obj_type->klass();
4624 if (!k->is_instance_klass() ||
4625 k->as_instance_klass()->is_interface() ||
4626 k->as_instance_klass()->has_subklass()) {
4627 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4628 }
4629 }
4630 }
4631
4632 Node* obj_klass = load_object_klass(obj);
4633 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4634 const TypeOopPtr* toop = ((tklass != NULL)
4635 ? tklass->as_instance_type()
4636 : TypeInstPtr::NOTNULL);
4637
4638 // Conservatively insert a memory barrier on all memory slices.
4639 // Do not let writes into the original float below the clone.
4640 insert_mem_bar(Op_MemBarCPUOrder);
4641
4642 // paths into result_reg:
4643 enum {
4644 _slow_path = 1, // out-of-line call to clone method (virtual or not)
4645 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4646 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4647 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4648 PATH_LIMIT
4649 };
4650 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4651 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4652 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
4653 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4654 record_for_igvn(result_reg);
4655
4656 // We only go to the fast case code if we pass a number of guards.
4657 // The paths which do not pass are accumulated in the slow_region.
4658 RegionNode* slow_region = new RegionNode(1);
4659 record_for_igvn(slow_region);
4660
4661 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4662 if (array_ctl != NULL) {
4663 // It's an array.
4664 PreserveJVMState pjvms(this);
4665 set_control(array_ctl);
4666
4667 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4668 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
4669         // A value type array may have object fields that would require a
4670         // write barrier. Conservatively, go to the slow path.
4671 generate_valueArray_guard(obj_klass, slow_region);
4672 }
4673
4674 if (!stopped()) {
4675 Node* obj_length = load_array_length(obj);
4676 Node* obj_size = NULL;
4677 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
4678
4679 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4680 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
4681 // If it is an oop array, it requires very special treatment,
4682 // because gc barriers are required when accessing the array.
4683 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4684 if (is_obja != NULL) {
4685 PreserveJVMState pjvms2(this);
4686 set_control(is_obja);
4687 // Generate a direct call to the right arraycopy function(s).
4688 Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4689 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
4690 ac->set_cloneoop();
4691 Node* n = _gvn.transform(ac);
4692 assert(n == ac, "cannot disappear");
4693 ac->connect_outputs(this);
4694
4695 result_reg->init_req(_objArray_path, control());
4696 result_val->init_req(_objArray_path, alloc_obj);
4697 result_i_o ->set_req(_objArray_path, i_o());
4698 result_mem ->set_req(_objArray_path, reset_memory());
4699 }
4700 }
4701
4702 // Otherwise, there are no barriers to worry about.
4703 // (We can dispense with card marks if we know the allocation
4704 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
4705 // causes the non-eden paths to take compensating steps to
4706 // simulate a fresh allocation, so that no further
4707 // card marks are required in compiled code to initialize
4708 // the object.)
4709
4710 if (!stopped()) {
4711 copy_to_clone(obj, alloc_obj, obj_size, true);
4712
4713 // Present the results of the copy.
4714 result_reg->init_req(_array_path, control());
4715 result_val->init_req(_array_path, alloc_obj);
4716 result_i_o ->set_req(_array_path, i_o());
4717 result_mem ->set_req(_array_path, reset_memory());
4718 }
4719 }
4720 }
4721
4722 if (!stopped()) {
4723 // It's an instance (we did array above). Make the slow-path tests.
4724 // If this is a virtual call, we generate a funny guard. We grab
4725 // the vtable entry corresponding to clone() from the target object.
4726 // If the target method which we are calling happens to be the
4727 // Object clone() method, we pass the guard. We do not need this
4728 // guard for non-virtual calls; the caller is known to be the native
4729 // Object clone().
4730 if (is_virtual) {
4731 generate_virtual_guard(obj_klass, slow_region);
4732 }
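// (Roughly, per generate_virtual_guard() elsewhere in this file: the guard loads
//  the Method* from obj_klass's vtable slot for clone() and compares it against
//  Object.clone(); if they differ, the receiver class overrides clone() and we
//  must take the slow path and really dispatch the call.)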
4733
4734 // The object must be easily cloneable and must not have a finalizer.
4735 // Both of these conditions may be checked in a single test.
4736 // We could optimize the test further, but we don't care.
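// (Concretely, hedged from generate_access_flags_guard()'s implementation: the
//  fast path requires
//      (access_flags & (JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER))
//          == JVM_ACC_IS_CLONEABLE_FAST
//  i.e. the cloneable-fast bit set and the finalizer bit clear; every other
//  combination falls into slow_region.)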
4737 generate_access_flags_guard(obj_klass,
4738 // Test both conditions:
4739 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4740 // Must be cloneable but not finalizer:
4741 JVM_ACC_IS_CLONEABLE_FAST,
4862 // array in the heap that GCs wouldn't expect. Move the allocation
4863 // after the traps so we don't allocate the array if we
4864 // deoptimize. This is possible because tightly_coupled_allocation()
4865 // guarantees there's no observer of the allocated array at this point
4866 // and the control flow is simple enough.
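// In essence (illustrative): the subgraph
//     alloc(dest) -> guards/uncommon traps -> ArrayCopy
// is rewritten to
//     guards/uncommon traps -> alloc(dest) -> ArrayCopy
// so a deoptimizing guard never leaves a freshly allocated, uninitialized
// destination array in the heap.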
4867 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4868 int saved_reexecute_sp, uint new_idx) {
4869 if (saved_jvms != NULL && !stopped()) {
4870 assert(alloc != NULL, "only with a tightly coupled allocation");
4871 // restore JVM state to the state at the arraycopy
4872 saved_jvms->map()->set_control(map()->control());
4873 assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4874 assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4875 // If we've improved the types of some nodes (null check) while
4876 // emitting the guards, propagate them to the current state
4877 map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4878 set_jvms(saved_jvms);
4879 _reexecute_sp = saved_reexecute_sp;
4880
4881 // Remove the allocation from above the guards
4882 CallProjections* callprojs = alloc->extract_projections(true);
4883 InitializeNode* init = alloc->initialization();
4884 Node* alloc_mem = alloc->in(TypeFunc::Memory);
4885 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4886 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4887 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
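// (What the three replacements above accomplish, roughly: users of the
//  allocation's I/O, memory and control projections are rerouted to the values
//  that flowed *into* the allocation, so the allocation and its InitializeNode
//  are spliced out of the graph above the guards before being rewired below.)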
4888
4889 // move the allocation here (after the guards)
4890 _gvn.hash_delete(alloc);
4891 alloc->set_req(TypeFunc::Control, control());
4892 alloc->set_req(TypeFunc::I_O, i_o());
4893 Node* mem = reset_memory();
4894 set_all_memory(mem);
4895 alloc->set_req(TypeFunc::Memory, mem);
4896 set_control(init->proj_out_or_null(TypeFunc::Control));
4897 set_i_o(callprojs->fallthrough_ioproj);
4898
4899 // Update memory as done in GraphKit::set_output_for_allocation()
4900 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4901 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4902 if (ary_type->isa_aryptr() && length_type != NULL) {
4903 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4904 }
4905 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4906 int elemidx = C->get_alias_index(telemref);
4907 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4908 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
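// (Illustrative: the InitializeNode's memory projection is installed both on the
//  raw slice and on the alias slice of the destination's element type, so later
//  loads and stores in this compilation see memory that includes the array
//  initialization, mirroring GraphKit::set_output_for_allocation().)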
4909
4910 Node* allocx = _gvn.transform(alloc);
4911 assert(allocx == alloc, "where has the allocation gone?");
4912 assert(dest->is_CheckCastPP(), "not an allocation result?");
4913
4914 _gvn.hash_delete(dest);
4915 dest->set_req(0, control());
4916 Node* destx = _gvn.transform(dest);
4917 assert(destx == dest, "where has the allocation result gone?");
5121 // This is also checked in generate_arraycopy() during macro expansion, but
5122 // we also have to check it here for the case where the ArrayCopyNode will
5123 // be eliminated by Escape Analysis.
5124 if (EliminateAllocations) {
5125 generate_negative_guard(length, slow_region);
5126 negative_length_guard_generated = true;
5127 }
5128
5129 // (9) each element of an oop array must be assignable
5130 Node* src_klass = load_object_klass(src);
5131 Node* dest_klass = load_object_klass(dest);
5132 Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
5133
5134 if (not_subtype_ctrl != top()) {
5135 PreserveJVMState pjvms(this);
5136 set_control(not_subtype_ctrl);
5137 uncommon_trap(Deoptimization::Reason_intrinsic,
5138 Deoptimization::Action_make_not_entrant);
5139 assert(stopped(), "Should be stopped");
5140 }
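// (Java-level illustration, hypothetical: System.arraycopy(stringArr, 0, objArr, 0, n)
//  passes this check because String[] is a subtype of Object[], so every element
//  is trivially assignable; copying an array whose klass is not a subtype of the
//  destination's klass (e.g. an Object[] into a String[]) takes the trap, and the
//  copy is redone outside this intrinsic with per-element store checks.)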
5141
5142 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5143 const Type* toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
5144 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5145
5146 src_type = _gvn.type(src);
5147 top_src = src_type->isa_aryptr();
5148
5149 if (top_dest != NULL &&
5150 top_dest->elem()->make_oopptr() != NULL &&
5151 top_dest->elem()->make_oopptr()->can_be_value_type()) {
5152 generate_valueArray_guard(load_object_klass(dest), slow_region);
5153 }
5154
5155 if (top_src != NULL &&
5156 top_src->elem()->make_oopptr() != NULL &&
5157 top_src->elem()->make_oopptr()->can_be_value_type()) {
5158 generate_valueArray_guard(load_object_klass(src), slow_region);
5159 }
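// (Hedged note: can_be_value_type() means the element type may be an inline/value
//  type; arrays of such elements may be flattened and cannot be handled by this
//  plain oop/primitive arraycopy fast path, so they are routed to slow_region.)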
5160
5161 {
5162 PreserveJVMState pjvms(this);
5163 set_control(_gvn.transform(slow_region));
5164 uncommon_trap(Deoptimization::Reason_intrinsic,
5165 Deoptimization::Action_make_not_entrant);
5166 assert(stopped(), "Should be stopped");
5167 }
5168 }
5169
5170 arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5171
5172 if (stopped()) {
5173 return true;
5174 }
5175
5176 Node* new_src = access_resolve(src, ACCESS_READ);
5177 Node* new_dest = access_resolve(dest, ACCESS_WRITE);
5178
5179 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, new_src, src_offset, new_dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5180 // Create LoadRange and LoadKlass nodes for use during macro expansion here
5181 // so the compiler has a chance to eliminate them: during macro expansion,
5182 // we have to set their control (CastPP nodes are eliminated).
5183 load_object_klass(src), load_object_klass(dest),
5184 load_array_length(src), load_array_length(dest));
5185
5186 ac->set_arraycopy(validated);
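// ('validated' records whether the guards emitted above already proved the copy
//  safe, so that macro expansion of the ArrayCopyNode need not re-emit those
//  runtime checks.)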
5187