          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
                              (appendix.not_null() && appendix->is_scavengable()))) {
    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END
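
// Illustrative sketch of why the scavenge root list above matters
// (hypothetical closure name; not code from this file): a young
// ("scavenge") collection can then walk just the nmethods known to
// contain scavengable oops rather than the entire code cache, e.g.:
//
//   for (nmethod* nm = CodeCache::scavenge_root_nmethods();
//        nm != NULL; nm = nm->scavenge_root_link()) {
//     nm->oops_do(&scavenge_root_closure); // visit embedded oops
//   }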

#else // DEOPTIMIZE_WHEN_PATCHING

JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
  RegisterMap reg_map(thread, false);

  NOT_PRODUCT(_patch_code_slowcase_cnt++;)
  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // It's possible the nmethod was invalidated in the last
  // safepoint, but if it's still alive then make it not_entrant.
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  if (nm != NULL) {
  // ... (remainder of this function and intervening code elided) ...

  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END


// Array copy return codes.
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok     = 0   // arraycopy succeeded
};
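
// Hedged usage sketch (hypothetical call site, not from this file): a
// checked arraycopy path can test the code and fall back on failure:
//
//   if (obj_arraycopy_work(src, src_addr, dst, dst_addr, length) == ac_failed) {
//     // take the fully checked slow path instead
//   }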


// Below length is the # elements copied.
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                          oopDesc* dst, T* dst_addr,
                                          int length) {

  // For performance reasons, we assume we are using a card marking write
  // barrier. The assert will fail if this is not the case.
  // Note that we use the non-virtual inlineable variant of write_ref_array.
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (src == dst) {
    // same object, no check
    bs->write_ref_array_pre(dst_addr, length);
    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
    bs->write_ref_array((HeapWord*)dst_addr, length);
    return ac_ok;
  } else {
    Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
    Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || stype->is_subtype_of(bound)) {
      // Elements are guaranteed to be subtypes, so no check necessary
      bs->write_ref_array_pre(dst_addr, length);
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
      bs->write_ref_array((HeapWord*)dst_addr, length);
      return ac_ok;
    }
  }
  return ac_failed;
}
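
// A minimal illustrative sketch of the card-marking post-barrier assumed
// above; this is NOT HotSpot's implementation, and every name below
// (SKETCH_CARD_SHIFT, sketch_card_table, sketch_write_ref_array) is
// hypothetical. After storing references into [start, start + count),
// each 512-byte "card" covering that range is marked dirty so the next
// young GC rescans only dirty cards instead of the whole heap.

static const int SKETCH_CARD_SHIFT = 9;    // log2 of the 512-byte card size
static jbyte*    sketch_card_table = NULL; // one byte per card

static void sketch_write_ref_array(HeapWord* start, size_t count) {
  if (count == 0) return;
  uintptr_t first = (uintptr_t)start >> SKETCH_CARD_SHIFT;
  uintptr_t last  = ((uintptr_t)(start + count) - 1) >> SKETCH_CARD_SHIFT;
  for (uintptr_t c = first; c <= last; c++) {
    sketch_card_table[c] = 0;              // 0 == dirty in this sketch
  }
}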

// ... (intervening code elided) ...
JRT_END


JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
#ifndef PRODUCT
  _primitive_arraycopy_cnt++;
#endif

  if (length == 0) return;
  // Not guaranteed to be word atomic, but that doesn't matter
  // for anything but an oop array, which is covered by oop_arraycopy.
  Copy::conjoint_jbytes(src, dst, length);
JRT_END
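
// Explanatory note: a torn (half-written) primitive element is never
// inspected by the GC, so byte-granularity copying is harmless here; a
// torn oop, by contrast, could be misread as a pointer, which is why
// oop_arraycopy below uses the word-atomic variant.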

JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
#ifndef PRODUCT
  _oop_arraycopy_cnt++;
#endif

  if (num == 0) return;
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (UseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)dst, num);
    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
  } else {
    bs->write_ref_array_pre((oop*)dst, num);
    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
  }
  bs->write_ref_array(dst, num);
JRT_END
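
// An illustrative sketch of why the element width differs above; NOT
// HotSpot's implementation. With UseCompressedOops each array slot holds
// a 32-bit narrowOop that encodes a 64-bit heap address relative to a
// base, so the copy must move 32-bit units. The names below
// (sketch_encode_oop, sketch_decode_oop, heap_base, shift) are hypothetical.

static inline juint sketch_encode_oop(void* p, uintptr_t heap_base, int shift) {
  // Compress: rebase against the heap start and drop the alignment bits.
  return (juint)(((uintptr_t)p - heap_base) >> shift);
}

static inline void* sketch_decode_oop(juint n, uintptr_t heap_base, int shift) {
  // Decompress: shift the index back up and add the heap base.
  return (void*)(heap_base + ((uintptr_t)n << shift));
}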


JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We have to return an int instead of a bool; otherwise there can be a
  // mismatch between the C calling convention and the Java one.
  // E.g., on x86, GCC may clear only %al when returning a bool false, while
  // the JVM reads the whole %eax as the return value and would misinterpret
  // it as a boolean true.

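
// Illustrative example of the hazard described above (sketched assembly,
// not actual generated code): a bool-returning callee may end with
//
//   setne %al              ; only %al is defined, bits 8..31 are stale
//
// while the JVM-side caller tests the full register:
//
//   test %eax, %eax
//   jnz  took_branch       ; stale upper bits can make false look true
//
// Returning an int forces the callee to define all 32 bits of %eax.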