323 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
324 __ jcc(Assembler::notZero, L);
325 __ stop("locked object is NULL");
326 __ bind(L);
327 }
328 #endif
329 __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
330 __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
331 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
332 __ movptr(frame_map()->address_for_monitor_object(i), rbx);
333 }
334 }
335 }
336
337
338 // inline cache check; done before the frame is built.
339 int LIR_Assembler::check_icache() {
340 Register receiver = FrameMap::receiver_opr->as_register();
341 Register ic_klass = IC_Klass;
342 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
343 const bool do_post_padding = VerifyOops || UseCompressedOops;
344 if (!do_post_padding) {
345 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
346 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
347 __ nop();
348 }
349 }
350 int offset = __ offset();
351 __ inline_cache_check(receiver, IC_Klass);
352 assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
353 if (do_post_padding) {
354 // force alignment after the cache check.
355     // It's already been verified to be aligned if !do_post_padding
356 __ align(CodeEntryAlignment);
357 }
358 return offset;
359 }
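
The nop loop in check_icache() pre-pads so that the code following the inline cache compare lands on a CodeEntryAlignment boundary. Below is a minimal standalone sketch of that arithmetic, assuming illustrative values; the names code_entry_alignment, ic_cmp_size and nops_needed are mine, not HotSpot API.

#include <cstdio>

// Hypothetical stand-ins for the values used by check_icache().
static const int code_entry_alignment = 32;  // e.g. CodeEntryAlignment
static const int ic_cmp_size          = 10;  // size of the IC compare (the LP64 value above)

// Number of 1-byte nops needed so that (offset + nops + ic_cmp_size) is a
// multiple of code_entry_alignment -- mirrors the while-loop above.
static int nops_needed(int offset) {
  int n = 0;
  while ((offset + n + ic_cmp_size) % code_entry_alignment != 0) {
    n++;
  }
  return n;
}

int main() {
  for (int offset = 0; offset < 8; offset++) {
    printf("offset %d -> %d nops\n", offset, nops_needed(offset));
  }
  return 0;
}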
360
361
362 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
363 jobject o = NULL;
1245 if (dest->is_double_xmm()) {
1246 __ movdbl(dest->as_xmm_double_reg(), from_addr);
1247 } else {
1248 assert(dest->is_double_fpu(), "must be");
1249 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1250 __ fld_d(from_addr);
1251 }
1252 break;
1253 }
1254
1255 case T_OBJECT: // fall through
1256 case T_ARRAY: // fall through
1257 if (UseCompressedOops && !wide) {
1258 __ movl(dest->as_register(), from_addr);
1259 } else {
1260 __ movptr(dest->as_register(), from_addr);
1261 }
1262 break;
1263
1264 case T_ADDRESS:
1265 __ movptr(dest->as_register(), from_addr);
1266 break;
1267 case T_INT:
1268 __ movl(dest->as_register(), from_addr);
1269 break;
1270
1271 case T_LONG: {
1272 Register to_lo = dest->as_register_lo();
1273 Register to_hi = dest->as_register_hi();
1274 #ifdef _LP64
1275 __ movptr(to_lo, as_Address_lo(addr));
1276 #else
1277 Register base = addr->base()->as_register();
1278 Register index = noreg;
1279 if (addr->index()->is_register()) {
1280 index = addr->index()->as_register();
1281 }
1282 if ((base == to_lo && index == to_hi) ||
1283 (base == to_hi && index == to_lo)) {
1284 // addresses with 2 registers are only formed as a result of
1285 // array access so this code will never have to deal with
1347 __ sarl(dest_reg, 16);
1348 }
1349 break;
1350 }
1351
1352 default:
1353 ShouldNotReachHere();
1354 }
1355
1356 if (patch != NULL) {
1357 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1358 }
1359
1360 if (type == T_ARRAY || type == T_OBJECT) {
1361 #ifdef _LP64
1362 if (UseCompressedOops && !wide) {
1363 __ decode_heap_oop(dest->as_register());
1364 }
1365 #endif
1366 __ verify_oop(dest->as_register());
1367 }
1368 }
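
In the T_OBJECT/T_ARRAY path above, a narrow (32-bit) oop is loaded with movl and widened by decode_heap_oop before verify_oop runs. A minimal sketch of that decoding under the usual base-plus-shift scheme follows; heap_base and oop_shift are illustrative parameters, not the VM's actual configuration.

#include <cstdint>
#include <cassert>

// Illustrative compressed-oop parameters; the real base and shift depend on
// heap size and placement and are chosen by the VM at startup.
static const uintptr_t heap_base = 0x0000000800000000ULL;
static const int       oop_shift = 3;

// Widen a 32-bit compressed oop to a full pointer, preserving NULL --
// the same idea as decode_heap_oop() after the narrow movl above.
static uintptr_t decode_heap_oop_sketch(uint32_t narrow) {
  if (narrow == 0) return 0;  // NULL stays NULL
  return heap_base + (uintptr_t(narrow) << oop_shift);
}

int main() {
  assert(decode_heap_oop_sketch(0) == 0);
  uint32_t narrow = 0x12345u;
  assert(decode_heap_oop_sketch(narrow) == heap_base + (uintptr_t(narrow) << oop_shift));
  return 0;
}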
1369
1370
1371 void LIR_Assembler::prefetchr(LIR_Opr src) {
1372 LIR_Address* addr = src->as_address_ptr();
1373 Address from_addr = as_Address(addr);
1374
1375 if (VM_Version::supports_sse()) {
1376 switch (ReadPrefetchInstr) {
1377 case 0:
1378 __ prefetchnta(from_addr); break;
1379 case 1:
1380 __ prefetcht0(from_addr); break;
1381 case 2:
1382 __ prefetcht2(from_addr); break;
1383 default:
1384 ShouldNotReachHere(); break;
1385 }
1386 } else if (VM_Version::supports_3dnow_prefetch()) {
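
The ReadPrefetchInstr switch above picks between a non-temporal hint and the T0/T2 cache hints when SSE is available. A rough user-level analogue using the SSE prefetch intrinsics is sketched below; only the mapping of flag values to hints is taken from the switch, everything else is illustrative.

#include <xmmintrin.h>

// Issue a read prefetch for 'p' using a hint chosen the same way the switch
// above chooses among prefetchnta / prefetcht0 / prefetcht2.
static void read_prefetch(const void* p, int read_prefetch_instr) {
  switch (read_prefetch_instr) {
    case 0: _mm_prefetch(static_cast<const char*>(p), _MM_HINT_NTA); break;
    case 1: _mm_prefetch(static_cast<const char*>(p), _MM_HINT_T0);  break;
    case 2: _mm_prefetch(static_cast<const char*>(p), _MM_HINT_T2);  break;
    default: break;  // unknown flag value: do nothing in this sketch
  }
}

int main() {
  int data[64] = {0};
  read_prefetch(data, 0);   // non-temporal hint
  read_prefetch(data, 1);   // prefetch into all cache levels
  return data[0];
}
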
1688
1689 if (op->should_profile()) {
1690 ciMethod* method = op->profiled_method();
1691 assert(method != NULL, "Should have method");
1692 int bci = op->profiled_bci();
1693 md = method->method_data_or_null();
1694 assert(md != NULL, "Sanity");
1695 data = md->bci_to_data(bci);
1696 assert(data != NULL, "need data for type check");
1697 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1698 }
1699 Label profile_cast_success, profile_cast_failure;
1700 Label *success_target = op->should_profile() ? &profile_cast_success : success;
1701 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1702
1703 if (obj == k_RInfo) {
1704 k_RInfo = dst;
1705 } else if (obj == klass_RInfo) {
1706 klass_RInfo = dst;
1707 }
1708 if (k->is_loaded() && !UseCompressedOops) {
1709 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1710 } else {
1711 Rtmp1 = op->tmp3()->as_register();
1712 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1713 }
1714
1715 assert_different_registers(obj, k_RInfo, klass_RInfo);
1716 if (!k->is_loaded()) {
1717 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1718 } else {
1719 #ifdef _LP64
1720 __ mov_metadata(k_RInfo, k->constant_encoding());
1721 #endif // _LP64
1722 }
1723 assert(obj != k_RInfo, "must be different");
1724
1725 __ cmpptr(obj, (int32_t)NULL_WORD);
1726 if (op->should_profile()) {
1727 Label not_null;
1728 __ jccb(Assembler::notEqual, not_null);
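
Before the type check, the code above remaps k_RInfo and klass_RInfo so they never alias obj, then asserts the registers are pairwise distinct. A toy model of that kind of conflict-free selection is sketched below; registers are just small integers here, and none of the names are HotSpot API.

#include <cassert>
#include <initializer_list>

// Pick the lowest "register" id not already in use -- a toy analogue of
// select_different_registers() resolving aliases such as obj == k_RInfo.
static int pick_different(std::initializer_list<int> in_use) {
  for (int candidate = 0; ; candidate++) {
    bool taken = false;
    for (int r : in_use) {
      if (r == candidate) { taken = true; break; }
    }
    if (!taken) return candidate;
  }
}

int main() {
  int obj = 2, dst = 5;
  int k_RInfo = 2;                    // aliases obj, as in the guard above
  if (k_RInfo == obj) k_RInfo = dst;  // remap, mirroring "if (obj == k_RInfo) k_RInfo = dst;"
  int klass_RInfo = pick_different({obj, dst, k_RInfo});
  assert(obj != k_RInfo && obj != klass_RInfo && k_RInfo != klass_RInfo);
  return 0;
}
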
3429
3430 __ bind(cont);
3431 __ pop(dst);
3432 __ pop(src);
3433 }
3434 }
3435
3436 #ifdef ASSERT
3437 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3438 // Sanity check the known type with the incoming class. For the
3439 // primitive case the types must match exactly with src.klass and
3440 // dst.klass each exactly matching the default type. For the
3441 // object array case, if no type check is needed then either the
3442 // dst type is exactly the expected type and the src type is a
3443 // subtype which we can't check or src is the same array as dst
3444 // but not necessarily exactly of type default_type.
3445 Label known_ok, halt;
3446 __ mov_metadata(tmp, default_type->constant_encoding());
3447 #ifdef _LP64
3448 if (UseCompressedKlassPointers) {
3449 __ encode_heap_oop(tmp);
3450 }
3451 #endif
3452
3453 if (basic_type != T_OBJECT) {
3454
3455 if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
3456 else __ cmpptr(tmp, dst_klass_addr);
3457 __ jcc(Assembler::notEqual, halt);
3458 if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr);
3459 else __ cmpptr(tmp, src_klass_addr);
3460 __ jcc(Assembler::equal, known_ok);
3461 } else {
3462 if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
3463 else __ cmpptr(tmp, dst_klass_addr);
3464 __ jcc(Assembler::equal, known_ok);
3465 __ cmpptr(src, dst);
3466 __ jcc(Assembler::equal, known_ok);
3467 }
3468 __ bind(halt);
3469 __ stop("incorrect type information in arraycopy");
|
323 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
324 __ jcc(Assembler::notZero, L);
325 __ stop("locked object is NULL");
326 __ bind(L);
327 }
328 #endif
329 __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
330 __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
331 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
332 __ movptr(frame_map()->address_for_monitor_object(i), rbx);
333 }
334 }
335 }
336
337
338 // inline cache check; done before the frame is built.
339 int LIR_Assembler::check_icache() {
340 Register receiver = FrameMap::receiver_opr->as_register();
341 Register ic_klass = IC_Klass;
342 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
343 const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
344 if (!do_post_padding) {
345 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
346 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
347 __ nop();
348 }
349 }
350 int offset = __ offset();
351 __ inline_cache_check(receiver, IC_Klass);
352 assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
353 if (do_post_padding) {
354 // force alignment after the cache check.
355     // It's already been verified to be aligned if !do_post_padding
356 __ align(CodeEntryAlignment);
357 }
358 return offset;
359 }
360
361
362 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
363 jobject o = NULL;
1245 if (dest->is_double_xmm()) {
1246 __ movdbl(dest->as_xmm_double_reg(), from_addr);
1247 } else {
1248 assert(dest->is_double_fpu(), "must be");
1249 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1250 __ fld_d(from_addr);
1251 }
1252 break;
1253 }
1254
1255 case T_OBJECT: // fall through
1256 case T_ARRAY: // fall through
1257 if (UseCompressedOops && !wide) {
1258 __ movl(dest->as_register(), from_addr);
1259 } else {
1260 __ movptr(dest->as_register(), from_addr);
1261 }
1262 break;
1263
1264 case T_ADDRESS:
1265 if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1266 __ movl(dest->as_register(), from_addr);
1267 } else {
1268 __ movptr(dest->as_register(), from_addr);
1269 }
1270 break;
1271 case T_INT:
1272 __ movl(dest->as_register(), from_addr);
1273 break;
1274
1275 case T_LONG: {
1276 Register to_lo = dest->as_register_lo();
1277 Register to_hi = dest->as_register_hi();
1278 #ifdef _LP64
1279 __ movptr(to_lo, as_Address_lo(addr));
1280 #else
1281 Register base = addr->base()->as_register();
1282 Register index = noreg;
1283 if (addr->index()->is_register()) {
1284 index = addr->index()->as_register();
1285 }
1286 if ((base == to_lo && index == to_hi) ||
1287 (base == to_hi && index == to_lo)) {
1288 // addresses with 2 registers are only formed as a result of
1289 // array access so this code will never have to deal with
1351 __ sarl(dest_reg, 16);
1352 }
1353 break;
1354 }
1355
1356 default:
1357 ShouldNotReachHere();
1358 }
1359
1360 if (patch != NULL) {
1361 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1362 }
1363
1364 if (type == T_ARRAY || type == T_OBJECT) {
1365 #ifdef _LP64
1366 if (UseCompressedOops && !wide) {
1367 __ decode_heap_oop(dest->as_register());
1368 }
1369 #endif
1370 __ verify_oop(dest->as_register());
1371 } else if (type == T_ADDRESS && UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1372 __ decode_klass_not_null(dest->as_register());
1373 }
1374 }
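
New in this version: when a T_ADDRESS load targets the object's klass field and compressed klass pointers are in use, only 32 bits are loaded (movl) and decode_klass_not_null widens them afterwards. Below is a minimal sketch of that narrow-load-plus-decode pairing, assuming a simple base-plus-shift encoding; klass_base and klass_shift are illustrative stand-ins, not the VM's real parameters.

#include <cstdint>
#include <cassert>

// Illustrative compressed-klass parameters; the VM derives the real base and
// shift from where class metadata is mapped.
static const uintptr_t klass_base  = 0x0000000100000000ULL;
static const int       klass_shift = 3;

// Widen a 32-bit compressed klass word; unlike the oop case, this variant may
// assume the value is non-NULL, matching decode_klass_not_null above.
static uintptr_t decode_klass_not_null_sketch(uint32_t narrow_klass) {
  return klass_base + (uintptr_t(narrow_klass) << klass_shift);
}

int main() {
  uint32_t narrow = 0x40u;  // as if read with a 32-bit movl from the klass field
  uintptr_t klass = decode_klass_not_null_sketch(narrow);
  assert(klass == klass_base + (uintptr_t(narrow) << klass_shift));
  return 0;
}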
1375
1376
1377 void LIR_Assembler::prefetchr(LIR_Opr src) {
1378 LIR_Address* addr = src->as_address_ptr();
1379 Address from_addr = as_Address(addr);
1380
1381 if (VM_Version::supports_sse()) {
1382 switch (ReadPrefetchInstr) {
1383 case 0:
1384 __ prefetchnta(from_addr); break;
1385 case 1:
1386 __ prefetcht0(from_addr); break;
1387 case 2:
1388 __ prefetcht2(from_addr); break;
1389 default:
1390 ShouldNotReachHere(); break;
1391 }
1392 } else if (VM_Version::supports_3dnow_prefetch()) {
1694
1695 if (op->should_profile()) {
1696 ciMethod* method = op->profiled_method();
1697 assert(method != NULL, "Should have method");
1698 int bci = op->profiled_bci();
1699 md = method->method_data_or_null();
1700 assert(md != NULL, "Sanity");
1701 data = md->bci_to_data(bci);
1702 assert(data != NULL, "need data for type check");
1703 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1704 }
1705 Label profile_cast_success, profile_cast_failure;
1706 Label *success_target = op->should_profile() ? &profile_cast_success : success;
1707 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1708
1709 if (obj == k_RInfo) {
1710 k_RInfo = dst;
1711 } else if (obj == klass_RInfo) {
1712 klass_RInfo = dst;
1713 }
1714 if (k->is_loaded() && !UseCompressedKlassPointers) {
1715 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1716 } else {
1717 Rtmp1 = op->tmp3()->as_register();
1718 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1719 }
1720
1721 assert_different_registers(obj, k_RInfo, klass_RInfo);
1722 if (!k->is_loaded()) {
1723 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1724 } else {
1725 #ifdef _LP64
1726 __ mov_metadata(k_RInfo, k->constant_encoding());
1727 #endif // _LP64
1728 }
1729 assert(obj != k_RInfo, "must be different");
1730
1731 __ cmpptr(obj, (int32_t)NULL_WORD);
1732 if (op->should_profile()) {
1733 Label not_null;
1734 __ jccb(Assembler::notEqual, not_null);
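
The should_profile() path above fetches the MethodData row for this bci and expects ReceiverTypeData, which records the receiver classes seen at the check. A simplified model of that per-call-site receiver table is sketched below (fixed row count, overflow ignored); nothing here is the real MDO layout.

#include <cstdint>
#include <cstdio>

// A toy stand-in for ReceiverTypeData: a few (klass, count) rows per site.
struct ReceiverRow { uintptr_t klass; uint64_t count; };

struct ReceiverTypeDataSketch {
  static const int kRows = 2;  // real MDOs use TypeProfileWidth rows
  ReceiverRow rows[kRows] = {};

  void record(uintptr_t klass) {
    for (int i = 0; i < kRows; i++) {
      if (rows[i].klass == klass) { rows[i].count++; return; }                            // existing row
      if (rows[i].klass == 0)     { rows[i].klass = klass; rows[i].count = 1; return; }   // claim empty row
    }
    // table full: a real MDO bumps a polymorphic/overflow counter instead
  }
};

int main() {
  ReceiverTypeDataSketch data;
  data.record(0x1000);
  data.record(0x1000);
  data.record(0x2000);
  for (int i = 0; i < ReceiverTypeDataSketch::kRows; i++) {
    printf("row %d: klass=%#lx count=%llu\n", i,
           (unsigned long)data.rows[i].klass, (unsigned long long)data.rows[i].count);
  }
  return 0;
}
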
3435
3436 __ bind(cont);
3437 __ pop(dst);
3438 __ pop(src);
3439 }
3440 }
3441
3442 #ifdef ASSERT
3443 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3444 // Sanity check the known type with the incoming class. For the
3445 // primitive case the types must match exactly with src.klass and
3446 // dst.klass each exactly matching the default type. For the
3447 // object array case, if no type check is needed then either the
3448 // dst type is exactly the expected type and the src type is a
3449 // subtype which we can't check or src is the same array as dst
3450 // but not necessarily exactly of type default_type.
3451 Label known_ok, halt;
3452 __ mov_metadata(tmp, default_type->constant_encoding());
3453 #ifdef _LP64
3454 if (UseCompressedKlassPointers) {
3455 __ encode_klass_not_null(tmp);
3456 }
3457 #endif
3458
3459 if (basic_type != T_OBJECT) {
3460
3461 if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
3462 else __ cmpptr(tmp, dst_klass_addr);
3463 __ jcc(Assembler::notEqual, halt);
3464 if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr);
3465 else __ cmpptr(tmp, src_klass_addr);
3466 __ jcc(Assembler::equal, known_ok);
3467 } else {
3468 if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
3469 else __ cmpptr(tmp, dst_klass_addr);
3470 __ jcc(Assembler::equal, known_ok);
3471 __ cmpptr(src, dst);
3472 __ jcc(Assembler::equal, known_ok);
3473 }
3474 __ bind(halt);
3475 __ stop("incorrect type information in arraycopy");
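
The ASSERT block above cross-checks the statically known array type against the runtime klass words of src and dst, using 32-bit compares when klass pointers are compressed. A small standalone sketch of the same decision logic follows; the mock header layout and names are mine, only the comparison structure mirrors the code above.

#include <cassert>
#include <cstdint>

// Mock object header carrying either a full or a compressed klass word.
struct MockHeader { uint64_t klass_word; };

static bool compressed_klass_pointers = true;  // stands in for UseCompressedKlassPointers

// Compare the expected (possibly encoded) klass against a header's klass word,
// using a 32-bit compare when compressed -- like the cmpl/cmpptr pairs above.
static bool klass_matches(uint64_t expected, const MockHeader& h) {
  if (compressed_klass_pointers) {
    return uint32_t(expected) == uint32_t(h.klass_word);
  }
  return expected == h.klass_word;
}

// Primitive arraycopy: both dst and src must match the expected type exactly.
static bool primitive_copy_ok(uint64_t expected,
                              const MockHeader& src, const MockHeader& dst) {
  return klass_matches(expected, dst) && klass_matches(expected, src);
}

// Object-array case: dst must match, or src and dst are the same array.
static bool object_copy_ok(uint64_t expected,
                           const MockHeader* src, const MockHeader* dst) {
  return klass_matches(expected, *dst) || src == dst;
}

int main() {
  MockHeader int_array  = { 0x00000042 };
  MockHeader long_array = { 0x00000043 };
  assert(primitive_copy_ok(0x42, int_array, int_array));
  assert(!primitive_copy_ok(0x42, long_array, int_array));
  assert(object_copy_ok(0x99, &int_array, &int_array));  // same array, type unknown
  return 0;
}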