1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)instanceKlass.cpp 1.324 08/11/24 12:22:48 JVM"
3 #endif
4 /*
5 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
777 }
778 }
779
780
// Applies f to the fieldDescriptor of every static field declared directly
// in this class (superclasses are not visited).  f may throw; a pending
// exception propagates to the caller via CHECK.
void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  // Handleize this klass before calling code that may safepoint/GC.
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}
785
786
// Implementation helper: walks the fields array (one field record every
// next_offset slots), initializing a fieldDescriptor per record and
// invoking f on the static ones.  Bails out via CHECK if f throws.
void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}
795
796
797 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
798 fieldDescriptor fd;
799 instanceKlass* super = superklass();
800 if (super != NULL) {
801 super->do_nonstatic_fields(cl);
802 }
803 int length = fields()->length();
804 for (int i = 0; i < length; i += next_offset) {
805 fd.initialize(as_klassOop(), i);
806 if (!(fd.is_static())) cl->do_field(&fd);
807 }
808 }
809
810
811 void instanceKlass::array_klasses_do(void f(klassOop k)) {
812 if (array_klasses() != NULL)
813 arrayKlass::cast(array_klasses())->array_klasses_do(f);
814 }
815
816
817 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
818 f(as_klassOop());
819 array_klasses_do(f);
820 }
821
822 #ifdef ASSERT
823 static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
824 int len = methods->length();
825 for (int index = 0; index < len; index++) {
826 methodOop m = (methodOop)(methods->obj_at(index));
827 assert(m->is_method(), "must be method");
936
937
938 // Lookup or create a jmethodID.
939 // This code can be called by the VM thread. For this reason it is critical that
940 // there are no blocking operations (safepoints) while the lock is held -- or a
941 // deadlock can occur.
// Returns the jmethodID for method_h in ik_h, creating (and publishing) it
// if necessary.  All allocations (the id and, if needed, a grown id array)
// are done BEFORE taking JmethodIdCreation_lock, because no safepoint may
// occur while the lock is held; losers of the publish race free their
// speculative allocations afterwards.
jmethodID instanceKlass::jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;
  // array length stored in first element, other elements offset by one
  if (jmeths == NULL ||                          // If there is no jmethodID array,
      (length = (size_t)jmeths[0]) <= idnum ||   // or if it is too short,
      (id = jmeths[idnum+1]) == NULL) {          // or if this jmethodID isn't allocated

    // Do all the safepointing things (allocations) before grabbing the lock.
    // These allocations will have to be freed if they are unused.

    // Allocate a new array of methods.
    jmethodID* to_dealloc_jmeths = NULL;
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // A new array will be needed (unless some other thread beats us to it)
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      new_jmeths[0] =(jmethodID)size;  // array size held in the first element
    }

    // Allocate a new method ID.
    jmethodID to_dealloc_id = NULL;
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old and but not obsolete, so should exist");
      // Defensive fallback to method_h() despite the assert above.
      methodHandle current_method_h(current_method == NULL? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    {
      MutexLocker ml(JmethodIdCreation_lock);

      // We must not go to a safepoint while holding this lock.
      debug_only(No_Safepoint_Verifier nosafepoints;)

      // Retry lookup after we got the lock
      jmeths = ik_h->methods_jmethod_ids_acquire();
      if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
        if (jmeths != NULL) {
          // We have grown the array: copy the existing entries, and delete the old array
          for (size_t index = 0; index < length; index++) {
            new_jmeths[index+1] = jmeths[index+1];
          }
          to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one
        }
        ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
      } else {
        // Another thread installed a big-enough array while we were
        // allocating; reread the id from it.
        id = jmeths[idnum+1];
        to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one
      }
      if (id == NULL) {
        id = new_id;
        jmeths[idnum+1] = id;  // install the new method ID
      } else {
        to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation
      }
    }

    // Free up unneeded or no longer needed resources
    // NOTE(review): this relies on FreeHeap accepting NULL when neither
    // array needed deallocation -- confirm against allocation.hpp.
    FreeHeap(to_dealloc_jmeths);
    if (to_dealloc_id != NULL) {
      JNIHandles::destroy_jmethod_id(to_dealloc_id);
    }
  }
  return id;
}
1018
1019
1020 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
1021 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1022 size_t idnum = (size_t)method->method_idnum();
1023 jmethodID* jmeths = methods_jmethod_ids_acquire();
1024 size_t length; // length assigned as debugging crumb
1025 jmethodID id = NULL;
1026 if (jmeths != NULL && // If there is a jmethodID array,
1027 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough,
1028 id = jmeths[idnum+1]; // Look up the id (may be NULL)
1029 }
1030 return id;
1031 }
1032
1033
1034 // Cache an itable index
1035 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1210 tty->print_cr("--- } ");
1211 }
1212 b = b->next();
1213 }
1214 }
1215
1216
1217 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1218 nmethodBucket* b = _dependencies;
1219 while (b != NULL) {
1220 if (nm == b->get_nmethod()) {
1221 return true;
1222 }
1223 b = b->next();
1224 }
1225 return false;
1226 }
1227 #endif //PRODUCT
1228
1229
1230 void instanceKlass::follow_static_fields() {
1231 oop* start = start_of_static_fields();
1232 oop* end = start + static_oop_field_size();
1233 while (start < end) {
1234 if (*start != NULL) {
1235 assert(Universe::heap()->is_in_closed_subset(*start),
1236 "should be in heap");
1237 MarkSweep::mark_and_push(start);
1238 }
1239 start++;
1240 }
1241 }
1242
1243 #ifndef SERIALGC
1244 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1245 oop* start = start_of_static_fields();
1246 oop* end = start + static_oop_field_size();
1247 while (start < end) {
1248 if (*start != NULL) {
1249 assert(Universe::heap()->is_in(*start), "should be in heap");
1250 PSParallelCompact::mark_and_push(cm, start);
1251 }
1252 start++;
1253 }
1254 }
1255 #endif // SERIALGC
1256
1257
1258 void instanceKlass::adjust_static_fields() {
1259 oop* start = start_of_static_fields();
1260 oop* end = start + static_oop_field_size();
1261 while (start < end) {
1262 MarkSweep::adjust_pointer(start);
1263 start++;
1264 }
1265 }
1266
1267 #ifndef SERIALGC
1268 void instanceKlass::update_static_fields() {
1269 oop* const start = start_of_static_fields();
1270 oop* const beg_oop = start;
1271 oop* const end_oop = start + static_oop_field_size();
1272 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1273 PSParallelCompact::adjust_pointer(cur_oop);
1274 }
1275 }
1276
1277 void
1278 instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
1279 oop* const start = start_of_static_fields();
1280 oop* const beg_oop = MAX2((oop*)beg_addr, start);
1281 oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
1282 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1283 PSParallelCompact::adjust_pointer(cur_oop);
1284 }
1285 }
1286 #endif // SERIALGC
1287
// MarkSweep: follows (marks and pushes) the header and every non-NULL oop
// instance field of obj, using the klass's nonstatic oop-map blocks.
void instanceKlass::oop_follow_contents(oop obj) {
  assert (obj!=NULL, "can't follow the content of NULL object");
  obj->follow_header();
  // Each OopMapBlock describes a contiguous run of oop fields
  // as an (offset, length) pair.
  OopMapBlock* map = start_of_nonstatic_oop_maps();
  OopMapBlock* end_map = map + nonstatic_oop_map_size();
  while (map < end_map) {
    oop* start = obj->obj_field_addr(map->offset());
    oop* end = start + map->length();
    while (start < end) {
      if (*start != NULL) {
        assert(Universe::heap()->is_in_closed_subset(*start),
               "should be in heap");
        MarkSweep::mark_and_push(start);
      }
      start++;
    }
    map++;
  }
}
1307
1308 #ifndef SERIALGC
// Parallel-compact variant: follows the header and every non-NULL oop
// instance field of obj via the given compaction manager.
void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                        oop obj) {
  assert (obj!=NULL, "can't follow the content of NULL object");
  obj->follow_header(cm);
  // Walk each (offset, length) oop-map block of the klass.
  OopMapBlock* map = start_of_nonstatic_oop_maps();
  OopMapBlock* end_map = map + nonstatic_oop_map_size();
  while (map < end_map) {
    oop* start = obj->obj_field_addr(map->offset());
    oop* end = start + map->length();
    while (start < end) {
      if (*start != NULL) {
        assert(Universe::heap()->is_in(*start), "should be in heap");
        PSParallelCompact::mark_and_push(cm, start);
      }
      start++;
    }
    map++;
  }
}
1328 #endif // SERIALGC
1329
// Applies (closure)->do_oop<nv_suffix> to the slot at start iff the oop it
// holds is non-NULL.  This must be a macro (not an inline function) so the
// nv_suffix token can be pasted into the do_oop call name.
#define invoke_closure_on(start, closure, nv_suffix) {                          \
  oop obj = *(start);                                                           \
  if (obj != NULL) {                                                            \
    assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap");    \
    (closure)->do_oop##nv_suffix(start);                                        \
  }                                                                             \
}
1337
// closure's do_header() method dictates whether the given closure should be
// applied to the klass ptr in the object header.

// Generates one oop_oop_iterate<nv_suffix> body per closure type: applies
// the closure to the object's header (if requested) and to every field
// described by the klass's oop-map blocks, optionally prefetching ahead
// when PrefetchFieldsAhead > 0.  Returns the object size in words.
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
                                                                                \
int instanceKlass::oop_oop_iterate##nv_suffix(oop obj,                          \
                                              OopClosureType* closure) {        \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  /* header */                                                                  \
  if (closure->do_header()) {                                                   \
    obj->oop_iterate_header(closure);                                           \
  }                                                                             \
  /* instance variables */                                                      \
  OopMapBlock* map = start_of_nonstatic_oop_maps();                             \
  OopMapBlock* const end_map = map + nonstatic_oop_map_size();                  \
  const intx field_offset = PrefetchFieldsAhead;                                \
  if (field_offset > 0) {                                                       \
    while (map < end_map) {                                                     \
      oop* start = obj->obj_field_addr(map->offset());                          \
      oop* const end = start + map->length();                                   \
      while (start < end) {                                                     \
        prefetch_beyond(start, (oop*)end, field_offset,                         \
                        closure->prefetch_style());                             \
        SpecializationStats::                                                   \
          record_do_oop_call##nv_suffix(SpecializationStats::ik);               \
        invoke_closure_on(start, closure, nv_suffix);                           \
        start++;                                                                \
      }                                                                         \
      map++;                                                                    \
    }                                                                           \
  } else {                                                                      \
    while (map < end_map) {                                                     \
      oop* start = obj->obj_field_addr(map->offset());                          \
      oop* const end = start + map->length();                                   \
      while (start < end) {                                                     \
        SpecializationStats::                                                   \
          record_do_oop_call##nv_suffix(SpecializationStats::ik);               \
        invoke_closure_on(start, closure, nv_suffix);                           \
        start++;                                                                \
      }                                                                         \
      map++;                                                                    \
    }                                                                           \
  }                                                                             \
  return size_helper();                                                         \
}
1383
// Generates the MemRegion-bounded variant oop_oop_iterate<nv_suffix>_m:
// like the unbounded form, but applies the closure only to field slots
// inside mr.  It first skips oop-map blocks that end before mr, then skips
// leading slots of the first overlapping block, and stops once a block
// starts at or past mr's end.  Returns the object size in words.
#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)         \
                                                                                \
int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                      \
                                                  OopClosureType* closure,      \
                                                  MemRegion mr) {               \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  /* header */                                                                  \
  if (closure->do_header()) {                                                   \
    obj->oop_iterate_header(closure, mr);                                       \
  }                                                                             \
  /* instance variables */                                                      \
  OopMapBlock* map = start_of_nonstatic_oop_maps();                             \
  OopMapBlock* const end_map = map + nonstatic_oop_map_size();                  \
  HeapWord* bot = mr.start();                                                   \
  HeapWord* top = mr.end();                                                     \
  oop* start = obj->obj_field_addr(map->offset());                              \
  HeapWord* end = MIN2((HeapWord*)(start + map->length()), top);                \
  /* Find the first map entry that extends onto mr. */                          \
  while (map < end_map && end <= bot) {                                         \
    map++;                                                                      \
    start = obj->obj_field_addr(map->offset());                                 \
    end = MIN2((HeapWord*)(start + map->length()), top);                        \
  }                                                                             \
  if (map != end_map) {                                                         \
    /* The current map's end is past the start of "mr".  Skip up to the first   \
       entry on "mr". */                                                        \
    while ((HeapWord*)start < bot) {                                            \
      start++;                                                                  \
    }                                                                           \
    const intx field_offset = PrefetchFieldsAhead;                              \
    for (;;) {                                                                  \
      if (field_offset > 0) {                                                   \
        while ((HeapWord*)start < end) {                                        \
          prefetch_beyond(start, (oop*)end, field_offset,                       \
                          closure->prefetch_style());                           \
          invoke_closure_on(start, closure, nv_suffix);                         \
          start++;                                                              \
        }                                                                       \
      } else {                                                                  \
        while ((HeapWord*)start < end) {                                        \
          invoke_closure_on(start, closure, nv_suffix);                         \
          start++;                                                              \
        }                                                                       \
      }                                                                         \
      /* Go to the next map. */                                                 \
      map++;                                                                    \
      if (map == end_map) {                                                     \
        break;                                                                  \
      }                                                                         \
      /* Otherwise,  */                                                         \
      start = obj->obj_field_addr(map->offset());                               \
      if ((HeapWord*)start >= top) {                                            \
        break;                                                                  \
      }                                                                         \
      end = MIN2((HeapWord*)(start + map->length()), top);                      \
    }                                                                           \
  }                                                                             \
  return size_helper();                                                         \
}
1443
// Instantiate oop_oop_iterate (and the _m variants) for every closure type
// in the ALL_OOP_OOP_ITERATE_CLOSURES_{1,3} lists.
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1448
1449
1450 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1451 oop* start = start_of_static_fields();
1452 oop* end = start + static_oop_field_size();
1453 while (start < end) {
1454 assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap");
1455 closure->do_oop(start);
1456 start++;
1457 }
1458 }
1459
// Applies closure only to the static oop field slots that fall within mr.
void instanceKlass::iterate_static_fields(OopClosure* closure,
                                          MemRegion mr) {
  oop* start = start_of_static_fields();
  oop* end   = start + static_oop_field_size();
  // I gather that the static fields of reference types come first,
  // hence the name of "oop_field_size", and that is what makes this safe.
  assert((intptr_t)mr.start() ==
         align_size_up((intptr_t)mr.start(), sizeof(oop)) &&
         (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
         "Memregion must be oop-aligned.");
  // Clip the field range to mr before iterating.
  if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
  if ((HeapWord*)end > mr.end()) end = (oop*)mr.end();
  while (start < end) {
    invoke_closure_on(start, closure,_v);
    start++;
  }
}
1477
1478
// MarkSweep pointer-adjustment pass over obj: adjusts every nonstatic oop
// field (per the klass's oop maps) and then the header.  Returns the
// object size in words.
int instanceKlass::oop_adjust_pointers(oop obj) {
  int size = size_helper();

  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
  OopMapBlock* map = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
  // Iterate over oopmap blocks
  while (map < end_map) {
    // Compute oop range for this block
    oop* start = obj->obj_field_addr(map->offset());
    oop* end = start + map->length();
    // Iterate over oops
    while (start < end) {
      assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
      MarkSweep::adjust_pointer(start);
      start++;
    }
    map++;
  }

  obj->adjust_header();
  return size;
}
1502
1503 #ifndef SERIALGC
// Breadth-first scavenge of obj's nonstatic oop fields: claims or forwards
// each field that should be scavenged.  Blocks and the slots within each
// block are walked back-to-front (--map / --curr).
void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(!pm->depth_first(), "invariant");
  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
  OopMapBlock* start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map = start_map + nonstatic_oop_map_size();

  // Iterate over oopmap blocks
  while (start_map < map) {
    --map;
    // Compute oop range for this block
    oop* start = obj->obj_field_addr(map->offset());
    oop* curr = start + map->length();
    // Iterate over oops
    while (start < curr) {
      --curr;
      if (PSScavenge::should_scavenge(*curr)) {
        assert(Universe::heap()->is_in(*curr), "should be in heap");
        pm->claim_or_forward_breadth(curr);
      }
    }
  }
}
1526
// Depth-first scavenge of obj's nonstatic oop fields: claims or forwards
// each field that should be scavenged.  Mirrors oop_copy_contents but uses
// the depth-first promotion queue; walks blocks/slots back-to-front.
void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(pm->depth_first(), "invariant");
  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
  OopMapBlock* start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map = start_map + nonstatic_oop_map_size();

  // Iterate over oopmap blocks
  while (start_map < map) {
    --map;
    // Compute oop range for this block
    oop* start = obj->obj_field_addr(map->offset());
    oop* curr = start + map->length();
    // Iterate over oops
    while (start < curr) {
      --curr;
      if (PSScavenge::should_scavenge(*curr)) {
        assert(Universe::heap()->is_in(*curr), "should be in heap");
        pm->claim_or_forward_depth(curr);
      }
    }
  }
}
1549
1550 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1551 // Compute oopmap block range. The common case is nonstatic_oop_map_size==1.
1552 OopMapBlock* map = start_of_nonstatic_oop_maps();
1553 OopMapBlock* const end_map = map + nonstatic_oop_map_size();
1554 // Iterate over oopmap blocks
1555 while (map < end_map) {
1556 // Compute oop range for this oopmap block.
1557 oop* const map_start = obj->obj_field_addr(map->offset());
1558 oop* const beg_oop = map_start;
1559 oop* const end_oop = map_start + map->length();
1560 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1561 PSParallelCompact::adjust_pointer(cur_oop);
1562 }
1563 ++map;
1564 }
1565
1566 return size_helper();
1567 }
1568
1569 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
1570 HeapWord* beg_addr, HeapWord* end_addr) {
1571 // Compute oopmap block range. The common case is nonstatic_oop_map_size==1.
1572 OopMapBlock* map = start_of_nonstatic_oop_maps();
1573 OopMapBlock* const end_map = map + nonstatic_oop_map_size();
1574 // Iterate over oopmap blocks
1575 while (map < end_map) {
1576 // Compute oop range for this oopmap block.
1577 oop* const map_start = obj->obj_field_addr(map->offset());
1578 oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
1579 oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
1580 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1581 PSParallelCompact::adjust_pointer(cur_oop);
1582 }
1583 ++map;
1584 }
1585
1586 return size_helper();
1587 }
1588
1589 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
1590 assert(!pm->depth_first(), "invariant");
1591 // Compute oop range
1592 oop* start = start_of_static_fields();
1593 oop* end = start + static_oop_field_size();
1594 // Iterate over oops
1595 while (start < end) {
1596 if (PSScavenge::should_scavenge(*start)) {
1597 assert(Universe::heap()->is_in(*start), "should be in heap");
1598 pm->claim_or_forward_breadth(start);
1599 }
1600 start++;
1601 }
1602 }
1603
1604 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1605 assert(pm->depth_first(), "invariant");
1606 // Compute oop range
1607 oop* start = start_of_static_fields();
1608 oop* end = start + static_oop_field_size();
1609 // Iterate over oops
1610 while (start < end) {
1611 if (PSScavenge::should_scavenge(*start)) {
1612 assert(Universe::heap()->is_in(*start), "should be in heap");
1613 pm->claim_or_forward_depth(start);
1614 }
1615 start++;
1616 }
1617 }
1618
// Parallel-compact pass over the static oop fields.  Note: despite the
// name, the body performs pointer adjustment (adjust_pointer), not
// copying -- see the retained commented-out line below.
void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
  // Compute oop range
  oop* start = start_of_static_fields();
  oop* end = start + static_oop_field_size();
  // Iterate over oops
  while (start < end) {
    if (*start != NULL) {
      assert(Universe::heap()->is_in(*start), "should be in heap");
      // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
      PSParallelCompact::adjust_pointer(start);
    }
    start++;
  }
}
1633 #endif // SERIALGC
1634
1635 // This klass is alive but the implementor link is not followed/updated.
1636 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1637
// Weak-reference processing for the implementor list.  With ClassUnloading
// enabled, dead implementors are removed by overwriting each dead slot with
// the list tail (compacting in place); otherwise all implementor slots are
// kept alive.  Finishes with Klass's generic weak-link handling.
void instanceKlass::follow_weak_klass_links(
  BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
  if (ClassUnloading) {
    for (int i = 0; i < implementors_limit; i++) {
      klassOop impl = _implementors[i];
      if (impl == NULL)  break;  // no more in the list
      if (!is_alive->do_object_b(impl)) {
        // remove this guy from the list by overwriting him with the tail
        int lasti = --_nof_implementors;
        assert(lasti >= i && lasti < implementors_limit, "just checking");
        _implementors[i] = _implementors[lasti];
        _implementors[lasti] = NULL;
        --i; // rerun the loop at this index
      }
    }
  } else {
    for (int i = 0; i < implementors_limit; i++) {
      keep_alive->do_oop(&adr_implementors()[i]);
    }
  }
  Klass::follow_weak_klass_links(is_alive, keep_alive);
}
1661
1662
// CDS support: strips state that must not be written to the shared
// archive -- resets the implementor list on top of Klass's generic cleanup.
void instanceKlass::remove_unshareable_info() {
  Klass::remove_unshareable_info();
  init_implementor();
}
1667
1668
// File-local wrapper with plain function-pointer shape f(methodOop);
// clears all breakpoints set in m.  Presumably used as a methods_do
// callback elsewhere in this file -- confirm at the call site.
static void clear_all_breakpoints(methodOop m) {
  m->clear_all_breakpoints();
}
1672
1673
1674 void instanceKlass::release_C_heap_structures() {
1675 // Deallocate oop map cache
1676 if (_oop_map_cache != NULL) {
1677 delete _oop_map_cache;
1678 _oop_map_cache = NULL;
1679 }
1680
1681 // Deallocate JNI identifiers for jfieldIDs
1682 JNIid::deallocate(jni_ids());
1683 set_jni_ids(NULL);
1684
1685 jmethodID* jmeths = methods_jmethod_ids_acquire();
1686 if (jmeths != (jmethodID*)NULL) {
1687 release_set_methods_jmethod_ids(NULL);
1688 FreeHeap(jmeths);
1689 }
1690
1691 int* indices = methods_cached_itable_indices_acquire();
1692 if (indices != (int*)NULL) {
1693 release_set_methods_cached_itable_indices(NULL);
2002 st->print(" - fake entry for mirror: ");
2003 mirrored_klass->print_value_on(st);
2004 st->cr();
2005 st->print(" - fake entry resolved_constructor: ");
2006 methodOop ctor = java_lang_Class::resolved_constructor(obj);
2007 ctor->print_value_on(st);
2008 klassOop array_klass = java_lang_Class::array_klass(obj);
2009 st->print(" - fake entry for array: ");
2010 array_klass->print_value_on(st);
2011 st->cr();
2012 st->cr();
2013 }
2014 }
2015
// Prints a one-line value description of obj: "a <ClassName><address>".
void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
  st->print("a ");
  name()->print_value_on(st);
  obj->print_address_on(st);
}
2021
2022 #endif
2023
// For instance klasses the internal name is the same as the external
// (Java-level) name.
const char* instanceKlass::internal_name() const {
  return external_name();
}
2027
2028
2029
2030 // Verification
2031
// Closure used by oop_verify_on: for each oop field slot, checks that the
// slot address itself is in the heap and that the value it holds is a
// well-formed oop or NULL; dumps the universe and aborts otherwise.
class VerifyFieldClosure: public OopClosure {
 public:
  void do_oop(oop* p) {
    // Note: checks the field address p, not the referent.
    guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
    if (!(*p)->is_oop_or_null()) {
      tty->print_cr("Failed: %p -> %p",p,(address)*p);
      Universe::print();
      guarantee(false, "boom");
    }
  }
};
2043
2044
// Verifies obj: generic Klass checks, then every oop field via
// VerifyFieldClosure.
void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
  Klass::oop_verify_on(obj, st);
  VerifyFieldClosure blk;
  oop_oop_iterate(obj, &blk);
}
2050
2051 #ifndef PRODUCT
2052
// Verifies the fake oop-field layout of java.lang.Class instances.
// DISABLED: the early return below is intentional -- see the comments;
// the code after it is kept for reference only and is unreachable.
void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
  // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
  // cannot be called since this function is called before the VM is
  // able to determine what JDK version is running with.
  // The check below always is false since 1.4.
  return;

  // This verification code temporarily disabled for the 1.4
  // reflection implementation since java.lang.Class now has
  // Java-level instance fields. Should rewrite this to handle this
  // case.
  if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
    // Verify that java.lang.Class instances have a fake oop field added.
    instanceKlass* ik = instanceKlass::cast(k);

    // Check that we have the right class
    static bool first_time = true;
    guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps");
    first_time = false;
    const int extra = java_lang_Class::number_of_fake_oop_fields;
    guarantee(ik->nonstatic_field_size() == extra, "just checking");
    guarantee(ik->nonstatic_oop_map_size() == 1, "just checking");
    guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");

    // Check that the map is (2,extra)
    int offset = java_lang_Class::klass_offset;

    OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
    guarantee(map->offset() == offset && map->length() == extra, "just checking");
  }
}
2084
2085 #endif
2086
2087
2088 /* JNIid class for jfieldIDs only */
// Constructs a jfieldID node for the field at the given offset in holder,
// prepending it to the chain headed by next.
JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
  _holder = holder;
  _offset = offset;
  _next = next;
  debug_only(_is_static_field_id = false;)  // flipped later for static ids
}
2095
2096
2097 JNIid* JNIid::find(int offset) {
2098 JNIid* current = this;
2099 while (current != NULL) {
2100 if (current->offset() == offset) return current;
2101 current = current->next();
2102 }
2103 return NULL;
2104 }
2105
2106 void JNIid::oops_do(OopClosure* f) {
2107 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2108 f->do_oop(cur->holder_addr());
2109 }
2110 }
2111
2112 void JNIid::deallocate(JNIid* current) {
2113 while (current != NULL) {
2114 JNIid* next = current->next();
2115 delete current;
2116 current = next;
2117 }
2118 }
2119
2120
// Verifies the chain: every node must reference holder, and (debug only)
// static-field ids must have offsets within holder's static-field area.
void JNIid::verify(klassOop holder) {
  int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
  int end_field_offset;
  end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);

  JNIid* current = this;
  while (current != NULL) {
    guarantee(current->holder() == holder, "Invalid klass in JNIid");
#ifdef ASSERT
    int o = current->offset();
    if (current->is_static_field_id()) {
      guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
    }
#endif
    current = current->next();
  }
}
2138
2139
2140 #ifdef ASSERT
// Debug-only setter that checks initialization state only moves forward;
// shared (CDS) classes may legally re-set the same state.  Transition to
// 'allocated' is always permitted.
void instanceKlass::set_init_state(ClassState state) {
  bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
                                               : (_init_state < state);
  assert(good_state || state == allocated, "illegal state transition");
  _init_state = state;
}
2147 #endif
2148
2149
2150 // RedefineClasses() support for previous versions:
2151
2152 // Add an information node that contains weak references to the
2153 // interesting parts of the previous version of the_class.
2154 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2155 BitMap * emcp_methods, int emcp_method_count) {
2156 assert(Thread::current()->is_VM_thread(),
2157 "only VMThread can add previous versions");
2158
2159 if (_previous_versions == NULL) {
2160 // This is the first previous version so make some space.
2161 // Start with 2 elements under the assumption that the class
2162 // won't be redefined much.
2163 _previous_versions = new (ResourceObj::C_HEAP)
2164 GrowableArray<PreviousVersionNode *>(2, true);
2165 }
2166
2167 // RC_TRACE macro has an embedded ResourceMark
2168 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2169 ikh->external_name(), _previous_versions->length(), emcp_method_count));
2170 constantPoolHandle cp_h(ikh->constants());
2171 jobject cp_ref;
2172 if (cp_h->is_shared()) {
2173 // a shared ConstantPool requires a regular reference; a weak
2174 // reference would be collectible
2175 cp_ref = JNIHandles::make_global(cp_h);
|
1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)instanceKlass.cpp 1.324 08/11/24 12:22:48 JVM"
3 #endif
4 /*
5 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
777 }
778 }
779
780
// Applies f to the fieldDescriptor of every static field declared directly
// in this class; f may throw (propagated via CHECK).
void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  // Handleize this klass before calling code that may safepoint/GC.
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}
785
786
// Implementation helper: walks the fields array (one record every
// next_offset slots) and invokes f on each static field's descriptor.
void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}
795
796
// qsort comparator for (offset, fields-array-index) int pairs: negative,
// zero or positive as a's offset (a[0]) is below, equal to or above b's.
static int compare_fields_by_offset(int* a, int* b) {
  int lhs = a[0];
  int rhs = b[0];
  return lhs - rhs;
}
800
// Applies cl to every nonstatic field of this class and its superclasses,
// superclass fields first.  Within a class the fields are visited in
// increasing offset order: (offset, index) pairs are collected and sorted
// with qsort before invoking the closure.
void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  // Two ints per field: [offset, fields-array index].
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}
831
832
// Applies f to every array klass whose element type is this class (and,
// recursively, higher-dimension arrays of it); no-op if none exist yet.
void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}
837
838
// Apply f to this klass itself and then to all of its array klasses.
void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}
843
844 #ifdef ASSERT
845 static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
846 int len = methods->length();
847 for (int index = 0; index < len; index++) {
848 methodOop m = (methodOop)(methods->obj_at(index));
849 assert(m->is_method(), "must be method");
958
959
960 // Lookup or create a jmethodID.
961 // This code can be called by the VM thread. For this reason it is critical that
962 // there are no blocking operations (safepoints) while the lock is held -- or a
963 // deadlock can occur.
jmethodID instanceKlass::jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;
  // array length stored in first element, other elements offset by one
  // Note: the condition below deliberately assigns 'length' and 'id' as
  // side effects of the short-circuit evaluation.
  if (jmeths == NULL ||                        // If there is no jmethodID array,
      (length = (size_t)jmeths[0]) <= idnum || // or if it is too short,
      (id = jmeths[idnum+1]) == NULL) {        // or if this jmethodID isn't allocated

    // Do all the safepointing things (allocations) before grabbing the lock.
    // These allocations will have to be freed if they are unused.

    // Allocate a new array of methods.
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // A new array will be needed (unless some other thread beats us to it)
      // Size it for all currently allocated idnums to avoid repeated growth.
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      new_jmeths[0] =(jmethodID)size; // array size held in the first element
    }

    // Allocate a new method ID.
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old and but not obsolete, so should exist");
      // Defensive: fall back to the passed-in method if the lookup failed.
      methodHandle current_method_h(current_method == NULL? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    // get_jmethod_id publishes (or discards) new_id/new_jmeths and frees
    // whichever allocations lost the race.
    if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
      // No need and unsafe to lock the JmethodIdCreation_lock at safepoint.
      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
    }
  }
  return id;
}
1011
1012
// Second half of jmethod_id_for_impl: runs under JmethodIdCreation_lock (or
// at a safepoint) and either installs the speculatively allocated
// new_id/new_jmeths or discards them if another thread won the race.
// Exactly one of {old array, new array} survives, and exactly one of
// {existing id, new_id} is returned; the losers are freed before returning.
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
                                        jmethodID new_id, jmethodID* new_jmeths) {
  // Retry lookup after we got the lock or ensured we are at safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID id = NULL;
  jmethodID to_dealloc_id = NULL;
  jmethodID* to_dealloc_jmeths = NULL;
  size_t length;

  if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
    if (jmeths != NULL) {
      // We have grown the array: copy the existing entries, and delete the old array
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // Array is already big enough: some other thread may have installed an id.
    id = jmeths[idnum+1];
    to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one
  }
  if (id == NULL) {
    id = new_id;
    jmeths[idnum+1] = id; // install the new method ID
  } else {
    to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation
  }

  // Free up unneeded or no longer needed resources
  FreeHeap(to_dealloc_jmeths);
  if (to_dealloc_id != NULL) {
    JNIHandles::destroy_jmethod_id(to_dealloc_id);
  }
  return id;
}
1049
1050
1051 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
1052 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1053 size_t idnum = (size_t)method->method_idnum();
1054 jmethodID* jmeths = methods_jmethod_ids_acquire();
1055 size_t length; // length assigned as debugging crumb
1056 jmethodID id = NULL;
1057 if (jmeths != NULL && // If there is a jmethodID array,
1058 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough,
1059 id = jmeths[idnum+1]; // Look up the id (may be NULL)
1060 }
1061 return id;
1062 }
1063
1064
1065 // Cache an itable index
1066 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1241 tty->print_cr("--- } ");
1242 }
1243 b = b->next();
1244 }
1245 }
1246
1247
1248 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1249 nmethodBucket* b = _dependencies;
1250 while (b != NULL) {
1251 if (nm == b->get_nmethod()) {
1252 return true;
1253 }
1254 b = b->next();
1255 }
1256 return false;
1257 }
1258 #endif //PRODUCT
1259
1260
#ifdef ASSERT
// Debug-build checkers used as the assert_fn argument of the iteration
// macros below.  Each loads the (possibly narrow) oop at p, and when it is
// non-NULL checks it against a particular heap region.
template <class T> void assert_is_in(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(o), "should be in heap");
  }
}
template <class T> void assert_is_in_closed_subset(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
  }
}
template <class T> void assert_is_in_reserved(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  }
}
// No-op checker for call sites where no validity constraint applies.
template <class T> void assert_nothing(T *p) {}

#else
// Product builds: every checker compiles away to nothing.
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT
1291
1292 //
1293 // Macros that iterate over areas of oops which are specialized on type of
1294 // oop pointer either narrow or wide, depending on UseCompressedOops
1295 //
1296 // Parameters are:
1297 // T - type of oop to point to (either oop or narrowOop)
1298 // start_p - starting pointer for region to iterate over
1299 // count - number of oops or narrowOops to iterate over
1300 // do_oop - action to perform on each oop (it's arbitrary C code which
1301 // makes it more efficient to put in a macro rather than making
1302 // it a template function)
1303 // assert_fn - assert function which is template function because performance
1304 // doesn't matter when enabled.
// Forward iteration over a raw run of oop slots: applies assert_fn and then
// do_oop to each slot in [start_p, start_p + count).
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                \
  assert_fn)                                \
{                                           \
  T* p = (T*)(start_p);                     \
  T* const end = p + (count);               \
  while (p < end) {                         \
    (assert_fn)(p);                         \
    do_oop;                                 \
    ++p;                                    \
  }                                         \
}

// Same as above, but walks the run from the last slot back to the first.
#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  T, start_p, count, do_oop,                \
  assert_fn)                                \
{                                           \
  T* const start = (T*)(start_p);           \
  T* p = start + (count);                   \
  while (start < p) {                       \
    --p;                                    \
    (assert_fn)(p);                         \
    do_oop;                                 \
  }                                         \
}

// Bounded variant: clips the [start_p, start_p + count) run to the
// [low, high) address range before iterating forward.
#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,             \
  do_oop, assert_fn)                        \
{                                           \
  T* const l = (T*)(low);                   \
  T* const h = (T*)(high);                  \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
         "bounded region must be properly aligned"); \
  T* p = (T*)(start_p);                     \
  T* end = p + (count);                     \
  if (p < l) p = l;                         \
  if (end > h) end = h;                     \
  while (p < end) {                         \
    (assert_fn)(p);                         \
    do_oop;                                 \
    ++p;                                    \
  }                                         \
}
1350
1351
1352 // The following macros call specialized macros, passing either oop or
1353 // narrowOop as the specialization type. These test the UseCompressedOops
1354 // flag.
// UseCompressedOops dispatch: expands the specialized iterator with
// narrowOop or oop as T depending on the runtime flag.
#define InstanceKlass_OOP_ITERATE(start_p, count,    \
                                  do_oop, assert_fn) \
{                                                    \
  if (UseCompressedOops) {                           \
    InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
      start_p, count,                                \
      do_oop, assert_fn)                             \
  } else {                                           \
    InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
      start_p, count,                                \
      do_oop, assert_fn)                             \
  }                                                  \
}

// Bounded twin of InstanceKlass_OOP_ITERATE (clips to [low, high)).
#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high,    \
                                          do_oop, assert_fn) \
{                                                            \
  if (UseCompressedOops) {                                   \
    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
      start_p, count,                                        \
      low, high,                                             \
      do_oop, assert_fn)                                     \
  } else {                                                   \
    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,       \
      start_p, count,                                        \
      low, high,                                             \
      do_oop, assert_fn)                                     \
  }                                                          \
}
1384
// Iterates every nonstatic oop field of obj as described by this klass'
// OopMapBlocks, applying do_oop to each slot.
#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
{                                                                        \
  /* Compute oopmap block range. The common case                         \
     is nonstatic_oop_map_size == 1. */                                  \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}

// As above, but visits the maps (and the slots within each map) in reverse.
#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
{                                                                        \
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
  OopMapBlock* map             = start_map + nonstatic_oop_map_size();   \
  if (UseCompressedOops) {                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
        do_oop, assert_fn)                                               \
    }                                                                    \
  } else {                                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
        do_oop, assert_fn)                                               \
    }                                                                    \
  }                                                                      \
}

// Bounded map iteration: only slots inside [low, high) are visited.
#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
                                              assert_fn)                 \
{                                                                        \
  /* Compute oopmap block range. The common case is                      \
     nonstatic_oop_map_size == 1, so we accept the                       \
     usually non-existent extra overhead of examining                    \
     all the maps. */                                                    \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
        low, high,                                                       \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
        low, high,                                                       \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}
1456
// Serial mark phase: mark and push every oop referenced from this class'
// static fields.
void instanceKlass::follow_static_fields() {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    MarkSweep::mark_and_push(p), \
    assert_is_in_closed_subset)
}
1463
#ifndef SERIALGC
// Parallel-compact mark phase: mark and push every oop referenced from this
// class' static fields via the given compaction manager.
void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    PSParallelCompact::mark_and_push(cm, p), \
    assert_is_in)
}
#endif // SERIALGC
1472
// Serial compaction: update every static-field oop to its forwarded address.
void instanceKlass::adjust_static_fields() {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    MarkSweep::adjust_pointer(p), \
    assert_nothing)
}
1479
#ifndef SERIALGC
// Parallel compaction: update every static-field oop to its new address.
void instanceKlass::update_static_fields() {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
}

// As above, but only touches static-field slots inside [beg_addr, end_addr).
void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
  InstanceKlass_BOUNDED_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    beg_addr, end_addr, \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing )
}
#endif // SERIALGC
1496
// Serial mark phase for one instance: follows the header, then marks and
// pushes every oop field described by this class' nonstatic oop maps.
void instanceKlass::oop_follow_contents(oop obj) {
  assert(obj != NULL, "can't follow the content of NULL object");
  obj->follow_header();
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    MarkSweep::mark_and_push(p), \
    assert_is_in_closed_subset)
}
1505
#ifndef SERIALGC
// Parallel-compact twin of oop_follow_contents: marks header and fields
// through the given compaction manager.
void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                        oop obj) {
  assert(obj != NULL, "can't follow the content of NULL object");
  obj->follow_header(cm);
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    PSParallelCompact::mark_and_push(cm, p), \
    assert_is_in)
}
#endif // SERIALGC
1517
// The closure's do_header() method dictates whether the given closure should be
// applied to the klass ptr in the object header.
1520
// Generates one oop_oop_iterate method per closure type: optionally visits
// the header, then applies the closure to every nonstatic oop field, and
// returns the instance size in words.
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)             \
                                                                                  \
int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);   \
  /* header */                                                                    \
  if (closure->do_header()) {                                                     \
    obj->oop_iterate_header(closure);                                             \
  }                                                                               \
  InstanceKlass_OOP_MAP_ITERATE(                                                  \
    obj,                                                                          \
    SpecializationStats::                                                         \
      record_do_oop_call##nv_suffix(SpecializationStats::ik);                     \
    (closure)->do_oop##nv_suffix(p),                                              \
    assert_is_in_closed_subset)                                                   \
  return size_helper();                                                           \
}
1537
#ifndef SERIALGC
// Like InstanceKlass_OOP_OOP_ITERATE_DEFN, but visits the oop maps in
// reverse order; only generated for the parallel collectors.
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)  \
                                                                                 \
int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                 \
                                              OopClosureType* closure) {         \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);  \
  /* header */                                                                   \
  if (closure->do_header()) {                                                    \
    obj->oop_iterate_header(closure);                                            \
  }                                                                              \
  /* instance variables */                                                       \
  InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                         \
    obj,                                                                         \
    SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik); \
    (closure)->do_oop##nv_suffix(p),                                             \
    assert_is_in_closed_subset)                                                  \
  return size_helper();                                                          \
}
#endif // !SERIALGC
1557
// MemRegion-bounded variant of InstanceKlass_OOP_OOP_ITERATE_DEFN: only
// fields whose slots lie in mr are visited.
#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)         \
                                                                                \
int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                      \
                                                  OopClosureType* closure,      \
                                                  MemRegion mr) {               \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  if (closure->do_header()) {                                                   \
    obj->oop_iterate_header(closure, mr);                                       \
  }                                                                             \
  InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                        \
    obj, mr.start(), mr.end(),                                                  \
    (closure)->do_oop##nv_suffix(p),                                            \
    assert_is_in_closed_subset)                                                 \
  return size_helper();                                                         \
}
1573
// Instantiate the iterate methods for every statically dispatched closure
// type; the backwards variants are only built for the parallel collectors.
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
#ifndef SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC
1582
// Apply closure->do_oop to every static-field oop slot of this class.
void instanceKlass::iterate_static_fields(OopClosure* closure) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    closure->do_oop(p), \
    assert_is_in_reserved)
}
1589
// As above, restricted to static-field slots inside mr.
// NOTE(review): this variant dispatches through do_oop_v while the
// unbounded variant calls do_oop — presumably intentional for this closure
// hierarchy; confirm against OopClosure's declaration.
void instanceKlass::iterate_static_fields(OopClosure* closure,
                                          MemRegion mr) {
  InstanceKlass_BOUNDED_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    mr.start(), mr.end(), \
    (closure)->do_oop_v(p), \
    assert_is_in_closed_subset)
}
1598
// Serial compaction: adjust all nonstatic oop fields and the header of obj;
// returns the instance size in words.
int instanceKlass::oop_adjust_pointers(oop obj) {
  int size = size_helper();
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    MarkSweep::adjust_pointer(p), \
    assert_is_in)
  obj->adjust_header();
  return size;
}
1608
#ifndef SERIALGC
// ParallelScavenge breadth-first copy phase: claim or forward every
// scavengeable nonstatic oop field of obj.
void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(!pm->depth_first(), "invariant");
  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
    obj, \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_breadth(p); \
    }, \
    assert_nothing )
}

// Depth-first twin of oop_copy_contents.
void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(pm->depth_first(), "invariant");
  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
    obj, \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_depth(p); \
    }, \
    assert_nothing )
}

// Parallel compaction: adjust all nonstatic oop fields of obj; returns the
// instance size in words.
int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
  return size_helper();
}

// As above, restricted to field slots inside [beg_addr, end_addr).
int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
                                       HeapWord* beg_addr, HeapWord* end_addr) {
  InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
    obj, beg_addr, end_addr, \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
  return size_helper();
}

// Breadth-first scavenge of this class' static fields.
void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
  assert(!pm->depth_first(), "invariant");
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_breadth(p); \
    }, \
    assert_nothing )
}

// Depth-first scavenge of this class' static fields.
void instanceKlass::push_static_fields(PSPromotionManager* pm) {
  assert(pm->depth_first(), "invariant");
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_depth(p); \
    }, \
    assert_nothing )
}

// NOTE(review): despite its name this adjusts static-field pointers for
// parallel compaction rather than copying — confirm against the
// ParCompactionManager call sites before renaming.
void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    PSParallelCompact::adjust_pointer(p), \
    assert_is_in)
}
#endif // SERIALGC
1674
1675 // This klass is alive but the implementor link is not followed/updated.
1676 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1677
// Weak-reference processing for this klass' implementor list.  With class
// unloading enabled, dead implementors are compacted out of the fixed-size
// array in place; otherwise all implementor slots are kept alive as strong
// roots.  Finishes by delegating subklass/sibling links to the superclass.
void instanceKlass::follow_weak_klass_links(
  BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
  if (ClassUnloading) {
    for (int i = 0; i < implementors_limit; i++) {
      klassOop impl = _implementors[i];
      if (impl == NULL) break; // no more in the list
      if (!is_alive->do_object_b(impl)) {
        // remove this guy from the list by overwriting him with the tail
        int lasti = --_nof_implementors;
        assert(lasti >= i && lasti < implementors_limit, "just checking");
        _implementors[i] = _implementors[lasti];
        _implementors[lasti] = NULL;
        --i; // rerun the loop at this index
      }
    }
  } else {
    // Class unloading disabled: treat every implementor slot as strong.
    for (int i = 0; i < implementors_limit; i++) {
      keep_alive->do_oop(&adr_implementors()[i]);
    }
  }
  Klass::follow_weak_klass_links(is_alive, keep_alive);
}
1701
// Shared-archive (CDS) support: strip per-run state before dumping.
// Resets the implementor list in addition to the Klass-level cleanup
// (presumably re-established lazily after the archive is mapped — TODO confirm).
void instanceKlass::remove_unshareable_info() {
  Klass::remove_unshareable_info();
  init_implementor();
}
1706
// Function-pointer helper: clears every breakpoint set in method m.
static void clear_all_breakpoints(methodOop m) {
  m->clear_all_breakpoints();
}
1710
1711 void instanceKlass::release_C_heap_structures() {
1712 // Deallocate oop map cache
1713 if (_oop_map_cache != NULL) {
1714 delete _oop_map_cache;
1715 _oop_map_cache = NULL;
1716 }
1717
1718 // Deallocate JNI identifiers for jfieldIDs
1719 JNIid::deallocate(jni_ids());
1720 set_jni_ids(NULL);
1721
1722 jmethodID* jmeths = methods_jmethod_ids_acquire();
1723 if (jmeths != (jmethodID*)NULL) {
1724 release_set_methods_jmethod_ids(NULL);
1725 FreeHeap(jmeths);
1726 }
1727
1728 int* indices = methods_cached_itable_indices_acquire();
1729 if (indices != (int*)NULL) {
1730 release_set_methods_cached_itable_indices(NULL);
2039 st->print(" - fake entry for mirror: ");
2040 mirrored_klass->print_value_on(st);
2041 st->cr();
2042 st->print(" - fake entry resolved_constructor: ");
2043 methodOop ctor = java_lang_Class::resolved_constructor(obj);
2044 ctor->print_value_on(st);
2045 klassOop array_klass = java_lang_Class::array_klass(obj);
2046 st->print(" - fake entry for array: ");
2047 array_klass->print_value_on(st);
2048 st->cr();
2049 st->cr();
2050 }
2051 }
2052
// Prints a one-line value description: "a ", the class name, and the
// object's address.
void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
  st->print("a ");
  name()->print_value_on(st);
  obj->print_address_on(st);
}
2058
2059 #endif // ndef PRODUCT
2060
// Instance klasses display their external (Java-source form) name.
const char* instanceKlass::internal_name() const {
  return external_name();
}
2064
2065 // Verification
2066
// Closure used by oop_verify_on: checks that each oop field slot lies
// inside the heap and that the value stored there is a valid oop or NULL;
// on failure prints diagnostics and aborts via guarantee.
class VerifyFieldClosure: public OopClosure {
 protected:
  template <class T> void do_oop_work(T* p) {
    guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (!obj->is_oop_or_null()) {
      tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
      Universe::print();
      guarantee(false, "boom");
    }
  }
 public:
  virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
2082
2083 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2084 Klass::oop_verify_on(obj, st);
2085 VerifyFieldClosure blk;
2086 oop_oop_iterate(obj, &blk);
2087 }
2088
2089 #ifndef PRODUCT
2090
void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
  // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
  // cannot be called since this function is called before the VM is
  // able to determine what JDK version is running with.
  // The check below always is false since 1.4.
  return;
  // Everything below this point is intentionally unreachable, kept in case
  // the version check above can be re-enabled.

  // This verification code temporarily disabled for the 1.4
  // reflection implementation since java.lang.Class now has
  // Java-level instance fields. Should rewrite this to handle this
  // case.
  if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
    // Verify that java.lang.Class instances have a fake oop field added.
    instanceKlass* ik = instanceKlass::cast(k);

    // Check that we have the right class
    static bool first_time = true;
    guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps");
    first_time = false;
    const int extra = java_lang_Class::number_of_fake_oop_fields;
    guarantee(ik->nonstatic_field_size() == extra, "just checking");
    guarantee(ik->nonstatic_oop_map_size() == 1, "just checking");
    guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");

    // Check that the map is (2,extra)
    int offset = java_lang_Class::klass_offset;

    OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
    guarantee(map->offset() == offset && map->length() == extra, "just checking");
  }
}
2122
2123 #endif // ndef PRODUCT
2124
2125 // JNIid class for jfieldIDs only
2126 // Note to reviewers:
2127 // These JNI functions are just moved over to column 1 and not changed
2128 // in the compressed oops workspace.
2129 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2130 _holder = holder;
2131 _offset = offset;
2132 _next = next;
2133 debug_only(_is_static_field_id = false;)
2134 }
2135
2136
2137 JNIid* JNIid::find(int offset) {
2138 JNIid* current = this;
2139 while (current != NULL) {
2140 if (current->offset() == offset) return current;
2141 current = current->next();
2142 }
2143 return NULL;
2144 }
2145
2146 void JNIid::oops_do(OopClosure* f) {
2147 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2148 f->do_oop(cur->holder_addr());
2149 }
2150 }
2151
2152 void JNIid::deallocate(JNIid* current) {
2153 while (current != NULL) {
2154 JNIid* next = current->next();
2155 delete current;
2156 current = next;
2157 }
2158 }
2159
2160
// Walk the chain starting at this node, checking that every id belongs to
// holder, and (debug only) that static-field ids carry an offset inside
// holder's static-field block.
void JNIid::verify(klassOop holder) {
  int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
  int end_field_offset;
  end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);

  JNIid* current = this;
  while (current != NULL) {
    guarantee(current->holder() == holder, "Invalid klass in JNIid");
#ifdef ASSERT
    int o = current->offset();
    if (current->is_static_field_id()) {
      guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
    }
#endif
    current = current->next();
  }
}
2178
2179
2180 #ifdef ASSERT
2181 void instanceKlass::set_init_state(ClassState state) {
2182 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2183 : (_init_state < state);
2184 assert(good_state || state == allocated, "illegal state transition");
2185 _init_state = state;
2186 }
2187 #endif
2188
2189
2190 // RedefineClasses() support for previous versions:
2191
2192 // Add an information node that contains weak references to the
2193 // interesting parts of the previous version of the_class.
2194 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2195 BitMap* emcp_methods, int emcp_method_count) {
2196 assert(Thread::current()->is_VM_thread(),
2197 "only VMThread can add previous versions");
2198
2199 if (_previous_versions == NULL) {
2200 // This is the first previous version so make some space.
2201 // Start with 2 elements under the assumption that the class
2202 // won't be redefined much.
2203 _previous_versions = new (ResourceObj::C_HEAP)
2204 GrowableArray<PreviousVersionNode *>(2, true);
2205 }
2206
2207 // RC_TRACE macro has an embedded ResourceMark
2208 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2209 ikh->external_name(), _previous_versions->length(), emcp_method_count));
2210 constantPoolHandle cp_h(ikh->constants());
2211 jobject cp_ref;
2212 if (cp_h->is_shared()) {
2213 // a shared ConstantPool requires a regular reference; a weak
2214 // reference would be collectible
2215 cp_ref = JNIHandles::make_global(cp_h);
|