  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_multiplyHigh();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_min_max(vmIntrinsics::ID id);
  bool inline_notify(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, Type::RawPtr, or Type::OopPtr.
  int classify_unsafe_addr(Node*& base, Node*& offset, BasicType type);
  Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);

  typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
  DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
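  // A sketch of the AccessKind -> DecoratorSet mapping this helper is
  // assumed to perform (inferred from the access API naming, not verified
  // against the definition):
  //   Relaxed  -> MO_UNORDERED
  //   Opaque   -> MO_RELAXED
  //   Acquire  -> MO_ACQUIRE
  //   Release  -> MO_RELEASE
  //   Volatile -> MO_SEQ_CST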
  bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_newArray(bool uninitialized);
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();

  bool inline_native_time_funcs(address method, const char* funcName);
#ifdef JFR_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_getEventWriter();
#endif
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  // Check if a null path was taken unconditionally.
  src = null_check(src);
  dst = null_check(dst);
  if (stopped()) {
    return true;
  }

  // Get length and convert char[] offset to byte[] offset
  Node* length = _gvn.transform(new SubINode(src_end, src_begin));
  src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
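  // (The left shift by one scales the char index by sizeof(jchar) == 2,
  // since the source array is addressed in bytes.)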

  // Range checks
  generate_string_range_check(src, src_begin, length, true);
  generate_string_range_check(dst, dst_begin, length, false);
  if (stopped()) {
    return true;
  }

  if (!stopped()) {
    src = access_resolve(src, ACCESS_READ);
    dst = access_resolve(dst, ACCESS_WRITE);

    // Calculate starting addresses.
    Node* src_start = array_element_address(src, src_begin, T_BYTE);
    Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);

    // Check if array addresses are aligned to HeapWordSize
    const TypeInt* tsrc = gvn().type(src_begin)->is_int();
    const TypeInt* tdst = gvn().type(dst_begin)->is_int();
    bool aligned = tsrc->is_con() && ((tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
                   tdst->is_con() && ((tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
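    // (Example: with 8-byte HeapWords, a constant byte offset src_begin == 8
    // and a constant char offset dst_begin == 4 (i.e. 8 bytes) are both
    // HeapWord-aligned, so the aligned stub entry point is selected below.)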

    // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
    const char* copyfunc_name = "arraycopy";
    address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                   OptoRuntime::fast_arraycopy_Type(),
                                   copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
                                   src_start, dst_start, ConvI2X(length) XTOP);
    // Do not let reads from the cloned object float above the arraycopy.
    if (alloc != NULL) {
    // Offset is small => always a heap address.
    const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
    if (offset_type != NULL &&
        base_type->offset() == 0 &&  // (should always be?)
        offset_type->_lo >= 0 &&
        !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
      return Type::OopPtr;
    } else if (type == T_OBJECT) {
      // Off-heap access to an oop doesn't make any sense; it has to be
      // on-heap.
      return Type::OopPtr;
    }
    // Otherwise, it might be either oop+off or NULL+addr.
    return Type::AnyPtr;
  } else {
    // No information:
    return Type::AnyPtr;
  }
}

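// make_unsafe_address() folds a base (oop or raw pointer) and a byte offset
// into a single address node, keyed off classify_unsafe_addr():
//  - Type::RawPtr: a pure off-heap address; simply add base and offset.
//  - Type::AnyPtr: ambiguous; speculate from profiling when can_cast allows,
//    otherwise fall back to a raw-memory view of the base.
//  - Type::OopPtr: a known on-heap access; null-check the base if needed.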
inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type, bool can_cast) {
  Node* uncasted_base = base;
  int kind = classify_unsafe_addr(uncasted_base, offset, type);
  if (kind == Type::RawPtr) {
    return basic_plus_adr(top(), uncasted_base, offset);
  } else if (kind == Type::AnyPtr) {
    assert(base == uncasted_base, "unexpected base change");
    if (can_cast) {
      if (!_gvn.type(base)->speculative_maybe_null() &&
          !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
        // According to profiling, this access is always on-heap. Casting
        // the base to non-null, and thus avoiding membars around the
        // access, should allow better optimizations.
        Node* null_ctl = top();
        base = null_check_oop(base, &null_ctl, true, true, true);
        assert(null_ctl->is_top(), "no null control here");
        return basic_plus_adr(base, offset);
      } else if (_gvn.type(base)->speculative_always_null() &&
                 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
        // According to profiling, this access is always off-heap.
        base = null_assert(base);
        Node* raw_base = _gvn.transform(new CastX2PNode(offset));
        offset = MakeConX(0);
        return basic_plus_adr(top(), raw_base, offset);
      }
    }
    // We don't know whether it's an on-heap or off-heap access. Fall back
    // to a raw memory access.
    base = access_resolve(base, decorators);
    Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
    return basic_plus_adr(top(), raw, offset);
  } else {
    assert(base == uncasted_base, "unexpected base change");
    // We know it's an on-heap access, so the base can't be null.
    if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
      base = must_be_not_null(base, true);
    }
    return basic_plus_adr(base, offset);
  }
}
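
// A hypothetical caller-side sketch (argument slots assumed, mirroring the
// unsafe intrinsics below; not an actual call site from this file):
//
//   Node* base   = argument(1);           // oop
//   Node* offset = ConvL2X(argument(2));  // plain byte-offset cookie
//   Node* adr    = make_unsafe_address(base, offset, ACCESS_READ, T_INT);
//
// The decorators describe how the resolved base will be accessed if the
// raw-memory fallback path above is taken.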

//--------------------------inline_number_methods-----------------------------
// inline int Integer.numberOfLeadingZeros(int)
// inline int Long.numberOfLeadingZeros(long)
//
// inline int Integer.numberOfTrailingZeros(int)
// inline int Long.numberOfTrailingZeros(long)
//
// inline int Integer.bitCount(int)

  Node* receiver = argument(0);  // type: oop

  // Build address expression.
  Node* adr;
  Node* heap_base_oop = top();
  Node* offset = top();
  Node* val;

  // The base is either a Java object or a value produced by Unsafe.staticFieldBase
  Node* base = argument(1);  // type: oop
  // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
  offset = argument(2);  // type: long
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_addr.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
         "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half!
  offset = ConvL2X(offset);
  adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);

  if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
    heap_base_oop = base;
  } else if (type == T_OBJECT) {
    return false;  // off-heap oop accesses are not supported
  }

  // Can the base be NULL? If not, this is always an on-heap access.
  bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
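  // (TypePtr::NULL_PTR->higher_equal(t) holds iff t's value set still
  // includes the null pointer, i.e. the base has not been proven non-null.)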

  if (!can_access_non_heap) {
    decorators |= IN_HEAP;
  }

  val = is_store ? argument(4) : NULL;

  const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();

  // Try to categorize the address.
  Compile::AliasType* alias_type = C->alias_type(adr_type);
    case LS_get_add:
    case LS_get_set: {
      receiver = argument(0);  // type: oop
      base     = argument(1);  // type: oop
      offset   = argument(2);  // type: long
      oldval   = NULL;
      newval   = argument(4);  // type: oop, int, or long
      break;
    }
    default:
      ShouldNotReachHere();
  }

  // Build field offset expression.
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_addr.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half of long offsets
  offset = ConvL2X(offset);
  Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false);
  const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();

  Compile::AliasType* alias_type = C->alias_type(adr_type);
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL &&
      ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
    // Don't intrinsify mismatched object accesses.
    return false;
  }

  // For CAS, unlike inline_unsafe_access, there seems to be no point in
  // trying to refine types. Just use the coarse types here.
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  const Type* value_type = Type::get_const_basic_type(type);

  switch (kind) {
    case LS_get_set:
    case LS_cmp_exchange: {
      if (type == T_OBJECT) {
        const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
    no_int_result_path   = 1,  // t == Thread.current() && !TLS._osthread._interrupted
    no_clear_result_path = 2,  // t == Thread.current() && TLS._osthread._interrupted && !clear_int
    slow_result_path     = 3,  // slow path: t.isInterrupted(clear_int)
    PATH_LIMIT
  };
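  // Each path constant above becomes one input slot of the RegionNode/PhiNode
  // pair created below: the Region merges control and the Phi merges the
  // boolean result flowing in from that path.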

  // Ensure that it's not possible to move the load of the
  // TLS._osthread._interrupted flag out of the function.
  insert_mem_bar(Op_MemBarCPUOrder);

  RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new PhiNode(result_rgn, TypeInt::BOOL);

  RegionNode* slow_region = new RegionNode(1);
  record_for_igvn(slow_region);

  // (a) The receiving thread must be the current thread.
  Node* rec_thr = argument(0);
  Node* tls_ptr = NULL;
  Node* cur_thr = generate_current_thread(tls_ptr);

  // Resolve both oops to stable values for the CmpP below.
  cur_thr = access_resolve(cur_thr, 0);
  rec_thr = access_resolve(rec_thr, 0);
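  // (A decorator set of 0 requests no particular access semantics; the
  // resolve only guarantees stable oop copies that a concurrent collector
  // such as Shenandoah will not move while we compare them.)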

  Node* cmp_thr = _gvn.transform(new CmpPNode(cur_thr, rec_thr));
  Node* bol_thr = _gvn.transform(new BoolNode(cmp_thr, BoolTest::ne));

  generate_slow_guard(bol_thr, slow_region);

  // (b) The interrupt bit on TLS must be false.
  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));

  // Set the control input on the load of the _interrupted field to prevent it
  // from floating up.
  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
  Node* cmp_bit = _gvn.transform(new CmpINode(int_bit, intcon(0)));
  Node* bol_bit = _gvn.transform(new BoolNode(cmp_bit, BoolTest::ne));

  IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
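  // (PROB_UNLIKELY_MAG(3) estimates the true branch is taken roughly once in
  // 10^3 executions, biasing block layout toward the not-interrupted fast
  // path.)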

  // First fast path: if (!TLS._interrupted) return false;
  Node* false_bit = _gvn.transform(new IfFalseNode(iff_bit));
  result_rgn->init_req(no_int_result_path, false_bit);
  Node* phi = new PhiNode(region, TypeInt::BOOL);
  record_for_igvn(region);

  const TypePtr* adr_type = TypeRawPtr::BOTTOM;  // memory type of loads
  const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
  int class_klass_offset = java_lang_Class::klass_offset_in_bytes();

  // First null-check both mirrors and load each mirror's klass metaobject.
  int which_arg;
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* arg = args[which_arg];
    arg = null_check(arg);
    if (stopped()) break;
    args[which_arg] = arg;

    Node* p = basic_plus_adr(arg, class_klass_offset);
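    // The klass field of a java.lang.Class mirror never changes once set, so
    // the load can be fed from immutable_memory() and freely commoned/hoisted.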
    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
    klasses[which_arg] = _gvn.transform(kls);
  }

  // Resolve both mirror oops to stable values for the CmpP below.
  args[0] = access_resolve(args[0], 0);
  args[1] = access_resolve(args[1], 0);

  // Having loaded both klasses, test each for null.
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* kls = klasses[which_arg];
    Node* null_ctl = top();
    kls = null_check_oop(kls, &null_ctl, never_see_null);
    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
    region->init_req(prim_path, null_ctl);
    if (stopped()) break;
    klasses[which_arg] = kls;
  }

  if (!stopped()) {
    // now we have two reference types, in klasses[0..1]
    Node* subk   = klasses[1];  // the argument to isAssignableFrom
    Node* superk = klasses[0];  // the receiver
    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
    // now we have a successful reference subtype check
    region->set_req(_ref_subtype_path, control());
  }
}

//----------------------inline_unsafe_copyMemory-------------------------
// public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
bool LibraryCallKit::inline_unsafe_copyMemory() {
  if (callee()->is_static()) return false;  // caller must have the capability!
  null_check_receiver();  // null-check receiver
  if (stopped()) return true;

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  Node* src_ptr = argument(1);           // type: oop
  Node* src_off = ConvL2X(argument(2));  // type: long
  Node* dst_ptr = argument(4);           // type: oop
  Node* dst_off = ConvL2X(argument(5));  // type: long
  Node* size    = ConvL2X(argument(7));  // type: long
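  // (The argument slots follow the JVM calling convention: slot 0 is the
  // Unsafe receiver, srcBase is slot 1, and each long takes two slots, so
  // srcOffset = 2-3, destBase = 4, destOffset = 5-6, bytes = 7-8.)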

  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
         "fieldOffset must be byte-scaled");

  src_ptr = access_resolve(src_ptr, ACCESS_READ);
  dst_ptr = access_resolve(dst_ptr, ACCESS_WRITE);
  Node* src = make_unsafe_address(src_ptr, src_off, ACCESS_READ);
  Node* dst = make_unsafe_address(dst_ptr, dst_off, ACCESS_WRITE);

  // Conservatively insert a memory barrier on all memory slices.
  // Do not let writes of the copy source or destination float below the copy.
  insert_mem_bar(Op_MemBarCPUOrder);

  // Call it.  Note that the length argument is not scaled.
  make_runtime_call(RC_LEAF|RC_NO_FP,
                    OptoRuntime::fast_arraycopy_Type(),
                    StubRoutines::unsafe_arraycopy(),
                    "unsafe_arraycopy",
                    TypeRawPtr::BOTTOM,
                    src, dst, size XTOP);

  // Do not let reads of the copy destination float above the copy.
  insert_mem_bar(Op_MemBarCPUOrder);

  return true;
}

//------------------------clone_copying-----------------------------------
  Node* obja = argument(0);
  Node* aoffset = argument(1);
  Node* objb = argument(3);
  Node* boffset = argument(4);
  Node* length = argument(6);
  Node* scale = argument(7);

  const Type* a_type = obja->Value(&_gvn);
  const Type* b_type = objb->Value(&_gvn);
  const TypeAryPtr* top_a = a_type->isa_aryptr();
  const TypeAryPtr* top_b = b_type->isa_aryptr();
  if (top_a == NULL || top_a->klass() == NULL ||
      top_b == NULL || top_b->klass() == NULL) {
    // failed array check
    return false;
  }

  Node* call;
  jvms()->set_should_reexecute(true);
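  // Marking the JVM state should_reexecute means that if we deoptimize at
  // the runtime call below, the intrinsified bytecode is re-executed from
  // the start instead of resuming with a half-computed result.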

  obja = access_resolve(obja, ACCESS_READ);
  objb = access_resolve(objb, ACCESS_READ);
  Node* obja_adr = make_unsafe_address(obja, aoffset, ACCESS_READ);
  Node* objb_adr = make_unsafe_address(objb, boffset, ACCESS_READ);

  call = make_runtime_call(RC_LEAF,
                           OptoRuntime::vectorizedMismatch_Type(),
                           stubAddr, stubName, TypePtr::BOTTOM,
                           obja_adr, objb_adr, length, scale);

  Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

/**
 * Calculate CRC32 for a byte.
 * int java.util.zip.CRC32.update(int crc, int b)
 */
bool LibraryCallKit::inline_updateCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  assert(callee()->signature()->size() == 2, "update has 2 parameters");
  // no receiver since it is a static method
  Node* crc = argument(0);  // type: int
  Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);

  // Get the AESCrypt klass for the instanceOf check. AESCrypt might not be
  // loaded yet if some other SymmetricCipher got us to this compile point;
  // it will have the same class loader as the CipherBlockChaining object.
  const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
  assert(tinst != NULL, "CBCobj is null");
  assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");

  // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  if (!klass_AESCrypt->is_loaded()) {
    // if AESCrypt is not even loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top());  // no regular fast path
    return ctrl;
  }

  src = must_be_not_null(src, true);
  dest = must_be_not_null(dest, true);

  // Resolve both oops to stable values for the CmpP below.
  src = access_resolve(src, 0);
  dest = access_resolve(dest, 0);

  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();

  Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
  Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
  Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));

  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
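  // (generate_guard() with PROB_MIN returns the unlikely projection, taken
  // when the instanceof test fails (instof != 1), and leaves control() on
  // the likely, successful side.)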

  // For encryption we are done.
  if (!decrypting)
    return instof_false;  // even if it is NULL

  // For decryption we need a further check, to avoid taking the intrinsic
  // path when cipher and plain text are the same; see the original Java
  // code for why.
  RegionNode* region = new RegionNode(3);
  region->init_req(1, instof_false);

  Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));