/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_instanceKlass.cpp.incl"

#ifdef DTRACE_ENABLED

HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  char*, intptr_t, oop, intptr_t);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
  char*, intptr_t, oop, intptr_t, int);

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)            \
  {                                                                \
    char* data = NULL;                                             \
    int len = 0;                                                   \
    symbolOop name = (clss)->name();                               \
    if (name != NULL) {                                            \
      data = (char*)name->bytes();                                 \
      len = name->utf8_length();                                   \
    }                                                              \
    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,       \
      data, len, (clss)->class_loader(), thread_type);             \
  }

#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
  {                                                                \
    char* data = NULL;                                             \
    int len = 0;                                                   \
    symbolOop name = (clss)->name();                               \
    if (name != NULL) {                                            \
      data = (char*)name->bytes();                                 \
      len = name->utf8_length();                                   \
    }                                                              \
    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,       \
      data, len, (clss)->class_loader(), thread_type, wait);       \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)

#endif //  ndef DTRACE_ENABLED

bool instanceKlass::should_be_initialized() const {
  return !is_initialized();
}

klassVtable* instanceKlass::vtable() const {
  return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
}

klassItable* instanceKlass::itable() const {
  return new klassItable(as_klassOop());
}

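// Note on the fast path below: with -XX:+EagerInitialization, a class that
// has no <clinit> and whose superclass is already initialized is marked
// initialized at link time, skipping the full initialization protocol in
// initialize_impl(). The guards in eager_initialize() check, in order: the
// flag itself, the absence of a class initializer, a non-NULL super
// (java.lang.Object is handled in genesis), and an initialized super.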
void instanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    klassOop super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!instanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this->as_klassOop());
    eager_initialize_impl(this_oop);
  }
}


void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  ObjectLocker ol(this_oop, THREAD);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->_init_state;
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change. set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if (old_state != this_oop->_init_state)
      this_oop->set_init_state(old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state(fully_initialized);
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}


// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refer to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}


bool instanceKlass::verify_code(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode =
    throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
  return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
}


// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.

void instanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}

void instanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, true, CHECK);
  }
}

// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool instanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, false, CHECK_false);
  }
  return is_linked();
}

bool instanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // trace only the link time for this klass that includes
  // the verification time
  PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
                             ClassLoader::perf_class_link_selftime(),
                             ClassLoader::perf_classes_linked(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_LINK);

  // verification & rewriting
  {
    ObjectLocker ol(this_oop, THREAD);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten
    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
                                   ClassLoader::perf_class_verify_selftime(),
                                   ClassLoader::perf_classes_verified(),
                                   jt->get_thread_stat()->perf_recursion_counts_addr(),
                                   jt->get_thread_stat()->perf_timers_addr(),
                                   PerfClassTraceTime::CLASS_VERIFY);
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new methodOops.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}


// Rewrite the byte codes of all of the methods of a class.
// Three cases:
//    During the link of a newly loaded class.
//    During the preloading of classes to be written to the shared spaces.
//      - Rewrite the methods and update the method entry points.
//
//    During the link of a class in the shared spaces.
//      - The methods were already rewritten, update the method entry points.
//
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.

void instanceKlass::rewrite_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
  if (this_oop->is_rewritten()) {
    assert(this_oop()->is_shared(), "rewriting an unshared class?");
    return;
  }
  Rewriter::rewrite(this_oop, CHECK);  // No exception can happen here
  this_oop->set_rewritten();
}


void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);

  bool wait = false;

  // refer to the JVM book page 47 for description of steps
  // Step 1
  { ObjectLocker ol(this_oop, THREAD);

    Thread *self = THREAD; // it's passed the current thread

    // Step 2
    // If we were to use wait() instead of waitInterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw. This would wreak havoc. See 6320309.
    while (this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
      wait = true;
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
      DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 4
    if (this_oop->is_initialized()) {
      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 5
    if (this_oop->is_in_error_state()) {
      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1, wait);
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_RESOURCE_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }

  // Step 7
  klassOop super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
    Klass::cast(super_klass)->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
        CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
      }
      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1, wait);
      THROW_OOP(e());
    }
  }

  // Step 8
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1, wait);
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
                             ClassLoader::perf_class_init_selftime(),
                             ClassLoader::perf_classes_inited(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_CLINIT);
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    { ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  }
  else {
    // Step 10 and 11
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
    }
    DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1, wait);
    if (e->is_a(SystemDictionary::Error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbolHandles::java_lang_ExceptionInInitializerError(),
                vmSymbolHandles::throwable_void_signature(),
                &args);
    }
  }
  DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1, wait);
}
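
// For orientation, the protocol above drives _init_state through the
// following transitions (assuming the ClassState enum declared in
// instanceKlass.hpp):
//
//   loaded -> linked -> being_initialized -+-> fully_initialized     (Step 9)
//                                          `-> initialization_error  (Steps 10/11)
//
// Threads parked in Step 2 are released by the notify_all() inside
// set_initialization_state_and_notify() and re-examine the state.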


// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  instanceKlassHandle kh(THREAD, this->as_klassOop());
  set_initialization_state_and_notify_impl(kh, state, CHECK);
}

void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  ObjectLocker ol(this_oop, THREAD);
  this_oop->set_init_state(state);
  ol.notify_all(CHECK);
}

void instanceKlass::add_implementor(klassOop k) {
  assert(Compile_lock->owned_by_self(), "");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (instanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  klassOop sk = instanceKlass::cast(k)->super();
  if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  // Update number of implementors
  int i = _nof_implementors++;

  // Record this implementor, if there are not too many already
  if (i < implementors_limit) {
    assert(_implementors[i] == NULL, "should be exactly one implementor");
    oop_store_without_check((oop*)&_implementors[i], k);
  } else if (i == implementors_limit) {
    // clear out the list on first overflow
    for (int i2 = 0; i2 < implementors_limit; i2++)
      oop_store_without_check((oop*)&_implementors[i2], NULL);
  }

  // The implementor also implements the transitive_interfaces
  for (int index = 0; index < local_interfaces()->length(); index++) {
    instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
  }
}

void instanceKlass::init_implementor() {
  for (int i = 0; i < implementors_limit; i++)
    oop_store_without_check((oop*)&_implementors[i], NULL);
  _nof_implementors = 0;
}


void instanceKlass::process_interfaces(Thread *thread) {
  // link this class into the implementors list of every interface it implements
  KlassHandle this_as_oop (thread, this->as_klassOop());
  for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
    assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
    instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
    assert(interf->is_interface(), "expected interface");
    interf->add_implementor(this_as_oop());
  }
}

bool instanceKlass::can_be_primary_super_slow() const {
  if (is_interface())
    return false;
  else
    return Klass::can_be_primary_super_slow();
}

objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
  // The secondaries are the implemented interfaces.
  instanceKlass* ik = instanceKlass::cast(as_klassOop());
  objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
  int num_secondaries = num_extra_slots + interfaces->length();
  if (num_secondaries == 0) {
    return Universe::the_empty_system_obj_array();
  } else if (num_extra_slots == 0) {
    return interfaces();
  } else {
    // a mix of both
    objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
    for (int i = 0; i < interfaces->length(); i++) {
      secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
    }
    return secondaries;
  }
}

bool instanceKlass::compute_is_subtype_of(klassOop k) {
  if (Klass::cast(k)->is_interface()) {
    return implements_interface(k);
  } else {
    return Klass::compute_is_subtype_of(k);
  }
}

bool instanceKlass::implements_interface(klassOop k) const {
  if (as_klassOop() == k) return true;
  assert(Klass::cast(k)->is_interface(), "should be an interface class");
  for (int i = 0; i < transitive_interfaces()->length(); i++) {
    if (transitive_interfaces()->obj_at(i) == k) {
      return true;
    }
  }
  return false;
}

objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    report_java_out_of_memory("Requested array size exceeds VM limit");
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);
  klassOop ak = array_klass(n, CHECK_NULL);
  KlassHandle h_ak (THREAD, ak);
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}

instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument, JavaCalls::call expects oop as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();
}

instanceOop instanceKlass::allocate_instance(TRAPS) {
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, as_klassOop());

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}

instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
  // Finalizer registration occurs in the Object.<init> constructor
  // and constructors normally aren't run when allocating perm
  // instances so simply disallow finalizable perm objects. This can
  // be relaxed if a need for it is found.
  assert(!has_finalizer(), "perm objects not allowed to have finalizers");
  int size = size_helper();  // Query before forming handle.
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i = (instanceOop)
    CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  return i;
}

void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
  if (is_interface() || is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
              : vmSymbols::java_lang_InstantiationException(), external_name());
  }
  if (as_klassOop() == SystemDictionary::Class_klass()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
              : vmSymbols::java_lang_IllegalAccessException(), external_name());
  }
}

klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_oop(THREAD, as_klassOop());
  return array_klass_impl(this_oop, or_null, n, THREAD);
}

klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
  if (this_oop->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_oop->array_klasses() == NULL) {
        objArrayKlassKlass* oakk =
          (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();

        klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
        this_oop->set_array_klasses(k);
      }
    }
  }
  // _this will always be set at this point
  objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
  if (or_null) {
    return oak->array_klass_or_null(n);
  }
  return oak->array_klass(n, CHECK_NULL);
}

klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void instanceKlass::call_class_initializer(TRAPS) {
  instanceKlassHandle ik (THREAD, as_klassOop());
  call_class_initializer_impl(ik, THREAD);
}

static int call_class_initializer_impl_counter = 0;   // for debugging

methodOop instanceKlass::class_initializer() {
  return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
}

void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
  methodHandle h_method(THREAD, this_oop->class_initializer());
  assert(!this_oop->is_initialized(), "we cannot initialize twice");
  if (TraceClassInitialization) {
    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
    this_oop->name()->print_value();
    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}

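
// mask_for(): compute, or fetch from the per-klass OopMapCache, the
// interpreter oop map describing which locals and stack slots hold oops in
// `method` at bytecode index `bci`. The cache is allocated lazily under
// OopMapCacheAlloc_lock using the double-checked pattern visible below.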
void instanceKlass::mask_for(methodHandle method, int bci,
  InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}


bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    symbolOop f_name = constants()->symbol_at(name_index);
    symbolOop f_sig  = constants()->symbol_at(sig_index);
    if (f_name == name && f_sig == sig) {
      fd->initialize(as_klassOop(), i);
      return true;
    }
  }
  return false;
}


void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    symbolOop name = constants()->symbol_at(name_index);
    closure->do_oop((oop*)&name);

    int sig_index = fields()->ushort_at(i + signature_index_offset);
    symbolOop sig = constants()->symbol_at(sig_index);
    closure->do_oop((oop*)&sig);
  }
}


klassOop instanceKlass::find_interface_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  const int n = local_interfaces()->length();
  for (int i = 0; i < n; i++) {
    klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
    assert(Klass::cast(intf1)->is_interface(), "just checking type");
    // search for field in current interface
    if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
      assert(fd->is_static(), "interface field must be static");
      return intf1;
    }
    // search for field in direct superinterfaces
    klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
    if (intf2 != NULL) return intf2;
  }
  // otherwise field lookup fails
  return NULL;
}


klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  { klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}


klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    if (fd->is_static() == is_static) return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  if (is_static) {
    klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}


bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    if (offset_from_fields( i ) == offset) {
      fd->initialize(as_klassOop(), i);
      if (fd->is_static() == is_static) return true;
    }
  }
  return false;
}


bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
      return true;
    }
    klass = Klass::cast(klass)->super();
  }
  return false;
}


void instanceKlass::methods_do(void f(methodOop method)) {
  int len = methods()->length();
  for (int index = 0; index < len; index++) {
    methodOop m = methodOop(methods()->obj_at(index));
    assert(m->is_method(), "must be method");
    f(m);
  }
}

void instanceKlass::do_local_static_fields(FieldClosure* cl) {
  fieldDescriptor fd;
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (fd.is_static()) cl->do_field(&fd);
  }
}


void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}


void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}


static int compare_fields_by_offset(int* a, int* b) {
  return a[0] - b[0];
}

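// Iterate over nonstatic fields in ascending offset order, visiting
// superclass fields first. Offsets are gathered into a scratch array of
// (offset, fields()-index) pairs and sorted with compare_fields_by_offset()
// above, since the fields() array is ordered by declaration, not by layout.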
void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}


void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}


void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}

#ifdef ASSERT
static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
  int len = methods->length();
  for (int index = 0; index < len; index++) {
    methodOop m = (methodOop)(methods->obj_at(index));
    assert(m->is_method(), "must be method");
    if (m->signature() == signature && m->name() == name) {
      return index;
    }
  }
  return -1;
}
#endif

methodOop instanceKlass::find_method(symbolOop name, symbolOop signature) const {
  return instanceKlass::find_method(methods(), name, signature);
}

methodOop instanceKlass::find_method(objArrayOop methods, symbolOop name, symbolOop signature) {
  int len = methods->length();
  // methods are sorted, so do binary search
  int l = 0;
  int h = len - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    methodOop m = (methodOop)methods->obj_at(mid);
    assert(m->is_method(), "must be method");
    int res = m->name()->fast_compare(name);
    if (res == 0) {
      // found matching name; do linear search to find matching signature
      // first, quick check for common case
      if (m->signature() == signature) return m;
      // search downwards through overloaded methods
      int i;
      for (i = mid - 1; i >= l; i--) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // search upwards
      for (i = mid + 1; i <= h; i++) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // not found
#ifdef ASSERT
      int index = linear_search(methods, name, signature);
      assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
      return NULL;
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
#ifdef ASSERT
  int index = linear_search(methods, name, signature);
  assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
  return NULL;
}
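
// A note on the search strategy above: methods() is kept sorted by name
// (fast_compare on the symbolOop values), so find_method() binary-searches
// on the name and then scans the contiguous run of same-named overloads in
// both directions for a matching signature. Under ASSERT, every miss is
// cross-checked against the brute-force linear_search() defined earlier.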

methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
    if (method != NULL) return method;
    klass = instanceKlass::cast(klass)->super();
  }
  return NULL;
}

// lookup a method in all the interfaces that this class implements
methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name,
                                                         symbolOop signature) const {
  objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
  int num_ifs = all_ifs->length();
  instanceKlass *ik = NULL;
  for (int i = 0; i < num_ifs; i++) {
    ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
    methodOop m = ik->lookup_method(name, signature);
    if (m != NULL) {
      return m;
    }
  }
  return NULL;
}

/* jni_id_for_impl for jfieldIds only */
JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}


/* jni_id_for for jfieldIds only */
JNIid* instanceKlass::jni_id_for(int offset) {
  JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
  if (probe == NULL) {
    probe = jni_id_for_impl(this->as_klassOop(), offset);
  }
  return probe;
}


// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
// and/or other cache consistency problems.
//
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
  // thread. We also use release_store_ptr() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID require uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.
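  //
  // Cache layout, as implied by the reads and writes below (a sketch):
  //
  //   jmeths: [ (jmethodID)size | id_0 | id_1 | ... | id_(size-1) ]
  //
  // i.e. element [0] holds the capacity and the entry for
  // method_idnum() == i lives at [i+1].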

  if (jmeths != NULL) {
    // the cache already exists
    if (!ik_h->idnum_can_increment()) {
      // the cache can't grow so we can just get the current values
      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
    } else {
      // cache can grow so we have to be more careful
      if (Threads::number_of_threads() == 0 ||
          SafepointSynchronize::is_at_safepoint()) {
        // we're single threaded or at a safepoint - no locking needed
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      } else {
        MutexLocker ml(JmethodIdCreation_lock);
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      }
    }
  }
  // implied else:
  // we need to allocate a cache so default length and id values are good

  if (jmeths == NULL ||   // no cache yet
      length <= idnum ||  // cache is too short
      id == NULL) {       // cache doesn't contain entry

    // This function can be called by the VMThread so we have to do all
    // things that might block on a safepoint before grabbing the lock.
    // Otherwise, we can deadlock with the VMThread or have a cache
    // consistency issue. These vars keep track of what we might have
    // to free after the lock is dropped.
    jmethodID  to_dealloc_id     = NULL;
    jmethodID* to_dealloc_jmeths = NULL;

    // may not allocate new_jmeths or use it if we allocate it
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // allocate a new cache that might be used
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      // cache size is stored in element[0], other elements offset by one
      new_jmeths[0] = (jmethodID)size;
    }

    // allocate a new jmethodID that might be used
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), so we need to
      // use the current version.
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old but not obsolete, so should exist");
      methodHandle current_method_h(current_method == NULL ? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in.
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    if (Threads::number_of_threads() == 0 ||
        SafepointSynchronize::is_at_safepoint()) {
      // we're single threaded or at a safepoint - no locking needed
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    }

    // The lock has been dropped so we can free resources.
    // Free up either the old cache or the new cache if we allocated one.
    if (to_dealloc_jmeths != NULL) {
      FreeHeap(to_dealloc_jmeths);
    }
    // free up the new ID since it wasn't needed
    if (to_dealloc_id != NULL) {
      JNIHandles::destroy_jmethod_id(to_dealloc_id);
    }
  }
  return id;
}

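
// Illustrative call site (a sketch, not code from this file): JNI-style
// method-ID resolution would funnel through the cache above roughly as
//
//   instanceKlassHandle ik_h(THREAD, klass);        // hypothetical locals
//   methodHandle m_h(THREAD, resolved_method);
//   jmethodID id = instanceKlass::get_jmethod_id(ik_h, m_h);
//
// The double-check locking above makes this safe to call from JavaThreads
// and from the VMThread.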

// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
    instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
    jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
    jmethodID** to_dealloc_jmeths_p) {
  assert(new_id != NULL, "sanity check");
  assert(to_dealloc_id_p != NULL, "sanity check");
  assert(to_dealloc_jmeths_p != NULL, "sanity check");
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JmethodIdCreation_lock->owned_by_self(), "sanity check");

  // reacquire the cache - we are locked, single threaded or at a safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID  id     = NULL;
  size_t     length = 0;

  if (jmeths == NULL ||                         // no cache yet
      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
    if (jmeths != NULL) {
      // copy any existing entries from the old cache
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache or we have a new
    // cache or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id;  // save new id for later delete
  }
  return id;
}


// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
    size_t idnum, size_t *length_p, jmethodID* id_p) {
  assert(cache != NULL, "sanity check");
  assert(length_p != NULL, "sanity check");
  assert(id_p != NULL, "sanity check");

  // cache size is stored in element[0], other elements offset by one
  *length_p = (size_t)cache[0];
  if (*length_p <= idnum) {  // cache is too short
    *id_p = NULL;
  } else {
    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
  }
}


// Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
  size_t idnum = (size_t)method->method_idnum();
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  size_t length;  // length assigned as debugging crumb
  jmethodID id = NULL;
  if (jmeths != NULL &&                         // If there is a cache
      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
    id = jmeths[idnum+1];                       // Look up the id (may be NULL)
  }
  return id;
}


// Cache an itable index
void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
  int* indices = methods_cached_itable_indices_acquire();
  int* to_dealloc_indices = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_cached_itable_indices() to advertise the
  // new cache. A partially constructed cache should never be seen
  // by a racing thread. Cache reads and writes proceed without a
  // lock, but creation of the cache itself requires no leaks so a
  // lock is generally acquired in that case.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (indices == NULL || idnum_can_increment()) {
    // we need a cache or the cache can grow
    MutexLocker ml(JNICachedItableIndex_lock);
    // reacquire the cache to see if another thread already did the work
    indices = methods_cached_itable_indices_acquire();
    size_t length = 0;
    // cache size is stored in element[0], other elements offset by one
    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
      new_indices[0] = (int)size;
      // copy any existing entries
      size_t i;
      for (i = 0; i < length; i++) {
        new_indices[i+1] = indices[i+1];
      }
      // Set all the rest to -1
      for (i = length; i < size; i++) {
        new_indices[i+1] = -1;
      }
      if (indices != NULL) {
        // We have an old cache to delete so save it for after we
        // drop the lock.
        to_dealloc_indices = indices;
      }
      release_set_methods_cached_itable_indices(indices = new_indices);
    }

    if (idnum_can_increment()) {
      // this cache can grow so we have to write to it safely
      indices[idnum+1] = index;
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  if (!idnum_can_increment()) {
    // The cache cannot grow and this JNI itable index value does not
    // have to be unique like a jmethodID. If there is a race to set it,
    // it doesn't matter.
    indices[idnum+1] = index;
  }

  if (to_dealloc_indices != NULL) {
    // we allocated a new cache so free the old one
    FreeHeap(to_dealloc_indices);
  }
}


// Retrieve a cached itable index
int instanceKlass::cached_itable_index(size_t idnum) {
  int* indices = methods_cached_itable_indices_acquire();
  if (indices != NULL && ((size_t)indices[0]) > idnum) {
    // indices exist and are long enough, retrieve possible cached
    return indices[idnum+1];
  }
  return -1;
}


//
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized. Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept. This ensures that any recording errors are
// noticed since an nmethod should be removed as many times as it's
// added.
//
class nmethodBucket {
 private:
  nmethod*       _nmethod;
  int            _count;
  nmethodBucket* _next;

 public:
  nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
    _nmethod = nmethod;
    _next = next;
    _count = 1;
  }
  int count()                      { return _count; }
  int increment()                  { _count += 1; return _count; }
  int decrement()                  { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
  nmethodBucket* next()            { return _next; }
  void set_next(nmethodBucket* b)  { _next = b; }
  nmethod* get_nmethod()           { return _nmethod; }
};


//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the klassOop that was passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
  assert_locked_or_safepoint(CodeCache_lock);
  int found = 0;
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        tty->print_cr("  context = %s", this->external_name());
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      nm->mark_for_deoptimization();
      found++;
    }
    b = b->next();
  }
  return found;
}

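
// The bucket counts above keep add/remove strictly symmetric. A sketch of
// the expected discipline for a compiled method nm that depends on this
// klass:
//
//   ik->add_dependent_nmethod(nm);      // when nm is registered
//   ...
//   ik->remove_dependent_nmethod(nm);   // when nm is flushed/zombified
//
// An unmatched remove trips the ShouldNotReachHere() in
// remove_dependent_nmethod() below.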

//
// Add an nmethodBucket to the list of dependencies for this klass.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void instanceKlass::add_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
    b = b->next();
  }
  _dependencies = new nmethodBucket(nm, _dependencies);
}


//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
//
void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0) {
        if (last == NULL) {
          _dependencies = b->next();
        } else {
          last->set_next(b->next());
        }
        delete b;
      }
      return;
    }
    last = b;
    b = b->next();
  }
#ifdef ASSERT
  tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
  nm->print();
#endif // ASSERT
  ShouldNotReachHere();
}


#ifndef PRODUCT
void instanceKlass::print_dependent_nmethods(bool verbose) {
  nmethodBucket* b = _dependencies;
  int idx = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
    b = b->next();
  }
}


bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      return true;
    }
    b = b->next();
  }
  return false;
}
#endif //PRODUCT


#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(o), "should be in heap");
  }
}
template <class T> void assert_is_in_closed_subset(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
  }
}
template <class T> void assert_is_in_reserved(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  }
}
template <class T> void assert_nothing(T *p) {}

#else
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT

//
// Macros that iterate over areas of oops which are specialized on type of
// oop pointer either narrow or wide, depending on UseCompressedOops
//
// Parameters are:
//   T         - type of oop to point to (either oop or narrowOop)
//   start_p   - starting pointer for region to iterate over
//   count     - number of oops or narrowOops to iterate over
//   do_oop    - action to perform on each oop (it's arbitrary C code which
//               makes it more efficient to put in a macro rather than making
//               it a template function)
//   assert_fn - assert function which is template function because performance
//               doesn't matter when enabled.
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                   \
  assert_fn)                                   \
{                                              \
  T* p         = (T*)(start_p);                \
  T* const end = p + (count);                  \
  while (p < end) {                            \
    (assert_fn)(p);                            \
    do_oop;                                    \
    ++p;                                       \
  }                                            \
}

#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  T, start_p, count, do_oop,                   \
  assert_fn)                                   \
{                                              \
  T* const start = (T*)(start_p);              \
  T*       p     = start + (count);            \
  while (start < p) {                          \
    --p;                                       \
    (assert_fn)(p);                            \
    do_oop;                                    \
  }                                            \
}

#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,                \
  do_oop, assert_fn)                           \
{                                              \
  T* const l = (T*)(low);                      \
  T* const h = (T*)(high);                     \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
         "bounded region must be properly aligned"); \
  T* p   = (T*)(start_p);                      \
  T* end = p + (count);                        \
  if (p < l) p = l;                            \
  if (end > h) end = h;                        \
  while (p < end) {                            \
    (assert_fn)(p);                            \
    do_oop;                                    \
    ++p;                                       \
  }                                            \
}


// The following macros call specialized macros, passing either oop or
// narrowOop as the specialization type. These test the UseCompressedOops
// flag.
#define InstanceKlass_OOP_ITERATE(start_p, count,    \
                                  do_oop, assert_fn) \
{                                                    \
  if (UseCompressedOops) {                           \
    InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
      start_p, count,                                \
      do_oop, assert_fn)                             \
  } else {                                           \
    InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
      start_p, count,                                \
      do_oop, assert_fn)                             \
  }                                                  \
}

#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
                                          do_oop, assert_fn)         \
{                                                                    \
  if (UseCompressedOops) {                                           \
    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,         \
      start_p, count,                                                \
      low, high,                                                     \
      do_oop, assert_fn)                                             \
  } else {                                                           \
    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,               \
      start_p, count,                                                \
      low, high,                                                     \
      do_oop, assert_fn)                                             \
  }                                                                  \
}

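// For illustration, a use such as
//   InstanceKlass_OOP_ITERATE(start, count, f->do_oop(p), assert_nothing)
// expands (roughly, with UseCompressedOops on) to:
//
//   narrowOop* p = (narrowOop*)(start);
//   narrowOop* const end = p + (count);
//   while (p < end) { assert_nothing(p); f->do_oop(p); ++p; }
//
// i.e. `do_oop` is spliced in verbatim with `p` in scope, which is why
// callers below pass expressions such as MarkSweep::mark_and_push(p).
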
*/ \ 1591 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1592 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ 1593 if (UseCompressedOops) { \ 1594 while (map < end_map) { \ 1595 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ 1596 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1597 do_oop, assert_fn) \ 1598 ++map; \ 1599 } \ 1600 } else { \ 1601 while (map < end_map) { \ 1602 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ 1603 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1604 do_oop, assert_fn) \ 1605 ++map; \ 1606 } \ 1607 } \ 1608 } 1609 1610 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ 1611 { \ 1612 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ 1613 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \ 1614 if (UseCompressedOops) { \ 1615 while (start_map < map) { \ 1616 --map; \ 1617 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ 1618 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1619 do_oop, assert_fn) \ 1620 } \ 1621 } else { \ 1622 while (start_map < map) { \ 1623 --map; \ 1624 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ 1625 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1626 do_oop, assert_fn) \ 1627 } \ 1628 } \ 1629 } 1630 1631 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ 1632 assert_fn) \ 1633 { \ 1634 /* Compute oopmap block range. The common case is \ 1635 nonstatic_oop_map_size == 1, so we accept the \ 1636 usually non-existent extra overhead of examining \ 1637 all the maps. */ \ 1638 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1639 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ 1640 if (UseCompressedOops) { \ 1641 while (map < end_map) { \ 1642 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ 1643 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1644 low, high, \ 1645 do_oop, assert_fn) \ 1646 ++map; \ 1647 } \ 1648 } else { \ 1649 while (map < end_map) { \ 1650 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ 1651 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1652 low, high, \ 1653 do_oop, assert_fn) \ 1654 ++map; \ 1655 } \ 1656 } \ 1657 } 1658 1659 void instanceKlass::follow_static_fields() { 1660 InstanceKlass_OOP_ITERATE( \ 1661 start_of_static_fields(), static_oop_field_size(), \ 1662 MarkSweep::mark_and_push(p), \ 1663 assert_is_in_closed_subset) 1664 } 1665 1666 #ifndef SERIALGC 1667 void instanceKlass::follow_static_fields(ParCompactionManager* cm) { 1668 InstanceKlass_OOP_ITERATE( \ 1669 start_of_static_fields(), static_oop_field_size(), \ 1670 PSParallelCompact::mark_and_push(cm, p), \ 1671 assert_is_in) 1672 } 1673 #endif // SERIALGC 1674 1675 void instanceKlass::adjust_static_fields() { 1676 InstanceKlass_OOP_ITERATE( \ 1677 start_of_static_fields(), static_oop_field_size(), \ 1678 MarkSweep::adjust_pointer(p), \ 1679 assert_nothing) 1680 } 1681 1682 #ifndef SERIALGC 1683 void instanceKlass::update_static_fields() { 1684 InstanceKlass_OOP_ITERATE( \ 1685 start_of_static_fields(), static_oop_field_size(), \ 1686 PSParallelCompact::adjust_pointer(p), \ 1687 assert_nothing) 1688 } 1689 1690 void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) { 1691 InstanceKlass_BOUNDED_OOP_ITERATE( \ 1692 start_of_static_fields(), static_oop_field_size(), \ 1693 beg_addr, end_addr, \ 1694 PSParallelCompact::adjust_pointer(p), \ 1695 assert_nothing ) 1696 } 1697 #endif // SERIALGC 1698 1699 void 
instanceKlass::oop_follow_contents(oop obj) { 1700 assert(obj != NULL, "can't follow the content of a NULL object"); 1701 obj->follow_header(); 1702 InstanceKlass_OOP_MAP_ITERATE( \ 1703 obj, \ 1704 MarkSweep::mark_and_push(p), \ 1705 assert_is_in_closed_subset) 1706 } 1707 1708 #ifndef SERIALGC 1709 void instanceKlass::oop_follow_contents(ParCompactionManager* cm, 1710 oop obj) { 1711 assert(obj != NULL, "can't follow the content of a NULL object"); 1712 obj->follow_header(cm); 1713 InstanceKlass_OOP_MAP_ITERATE( \ 1714 obj, \ 1715 PSParallelCompact::mark_and_push(cm, p), \ 1716 assert_is_in) 1717 } 1718 #endif // SERIALGC 1719 1720 // The closure's do_header() method dictates whether the given closure should be 1721 // applied to the klass ptr in the object header. 1722 1723 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ 1724 \ 1725 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ 1726 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ 1727 /* header */ \ 1728 if (closure->do_header()) { \ 1729 obj->oop_iterate_header(closure); \ 1730 } \ 1731 InstanceKlass_OOP_MAP_ITERATE( \ 1732 obj, \ 1733 SpecializationStats:: \ 1734 record_do_oop_call##nv_suffix(SpecializationStats::ik); \ 1735 (closure)->do_oop##nv_suffix(p), \ 1736 assert_is_in_closed_subset) \ 1737 return size_helper(); \ 1738 } 1739 1740 #ifndef SERIALGC 1741 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ 1742 \ 1743 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ 1744 OopClosureType* closure) { \ 1745 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ 1746 /* header */ \ 1747 if (closure->do_header()) { \ 1748 obj->oop_iterate_header(closure); \ 1749 } \ 1750 /* instance variables */ \ 1751 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ 1752 obj, \ 1753 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\ 1754 (closure)->do_oop##nv_suffix(p), \ 1755 assert_is_in_closed_subset) \ 1756 return size_helper(); \ 1757 } 1758 #endif // !SERIALGC 1759 1760 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ 1761 \ 1762 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ 1763 OopClosureType* closure, \ 1764 MemRegion mr) { \ 1765 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ 1766 if (closure->do_header()) { \ 1767 obj->oop_iterate_header(closure, mr); \ 1768 } \ 1769 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ 1770 obj, mr.start(), mr.end(), \ 1771 (closure)->do_oop##nv_suffix(p), \ 1772 assert_is_in_closed_subset) \ 1773 return size_helper(); \ 1774 } 1775 1776 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1777 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1778 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1779 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1780 #ifndef SERIALGC 1781 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 1782 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 1783 #endif // !SERIALGC 1784 1785 void instanceKlass::iterate_static_fields(OopClosure* closure) { 1786 InstanceKlass_OOP_ITERATE( \ 1787 start_of_static_fields(), static_oop_field_size(), \ 1788 closure->do_oop(p), \ 1789 assert_is_in_reserved) 1790 } 1791 1792 void instanceKlass::iterate_static_fields(OopClosure* closure, 1793 MemRegion mr) { 1794
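// Visit only the static-field oops that fall within mr; the bounded
// iterate macro clips the [start, start + count) range against it.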
InstanceKlass_BOUNDED_OOP_ITERATE( \ 1795 start_of_static_fields(), static_oop_field_size(), \ 1796 mr.start(), mr.end(), \ 1797 (closure)->do_oop_v(p), \ 1798 assert_is_in_closed_subset) 1799 } 1800 1801 int instanceKlass::oop_adjust_pointers(oop obj) { 1802 int size = size_helper(); 1803 InstanceKlass_OOP_MAP_ITERATE( \ 1804 obj, \ 1805 MarkSweep::adjust_pointer(p), \ 1806 assert_is_in) 1807 obj->adjust_header(); 1808 return size; 1809 } 1810 1811 #ifndef SERIALGC 1812 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 1813 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ 1814 obj, \ 1815 if (PSScavenge::should_scavenge(p)) { \ 1816 pm->claim_or_forward_depth(p); \ 1817 }, \ 1818 assert_nothing ) 1819 } 1820 1821 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 1822 InstanceKlass_OOP_MAP_ITERATE( \ 1823 obj, \ 1824 PSParallelCompact::adjust_pointer(p), \ 1825 assert_nothing) 1826 return size_helper(); 1827 } 1828 1829 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, 1830 HeapWord* beg_addr, HeapWord* end_addr) { 1831 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ 1832 obj, beg_addr, end_addr, \ 1833 PSParallelCompact::adjust_pointer(p), \ 1834 assert_nothing) 1835 return size_helper(); 1836 } 1837 1838 void instanceKlass::push_static_fields(PSPromotionManager* pm) { 1839 InstanceKlass_OOP_ITERATE( \ 1840 start_of_static_fields(), static_oop_field_size(), \ 1841 if (PSScavenge::should_scavenge(p)) { \ 1842 pm->claim_or_forward_depth(p); \ 1843 }, \ 1844 assert_nothing ) 1845 } 1846 1847 void instanceKlass::copy_static_fields(ParCompactionManager* cm) { 1848 InstanceKlass_OOP_ITERATE( \ 1849 start_of_static_fields(), static_oop_field_size(), \ 1850 PSParallelCompact::adjust_pointer(p), \ 1851 assert_is_in) 1852 } 1853 #endif // SERIALGC 1854 1855 // This klass is alive but the implementor link is not followed/updated. 
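// When ClassUnloading is on, dead implementors are pruned from the
// _implementors list below; otherwise every implementor slot is kept alive.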
1856 // Subklass and sibling links are handled by Klass::follow_weak_klass_links 1857 1858 void instanceKlass::follow_weak_klass_links( 1859 BoolObjectClosure* is_alive, OopClosure* keep_alive) { 1860 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live"); 1861 if (ClassUnloading) { 1862 for (int i = 0; i < implementors_limit; i++) { 1863 klassOop impl = _implementors[i]; 1864 if (impl == NULL) break; // no more in the list 1865 if (!is_alive->do_object_b(impl)) { 1866 // remove this entry from the list by overwriting it with the tail 1867 int lasti = --_nof_implementors; 1868 assert(lasti >= i && lasti < implementors_limit, "just checking"); 1869 _implementors[i] = _implementors[lasti]; 1870 _implementors[lasti] = NULL; 1871 --i; // rerun the loop at this index 1872 } 1873 } 1874 } else { 1875 for (int i = 0; i < implementors_limit; i++) { 1876 keep_alive->do_oop(&adr_implementors()[i]); 1877 } 1878 } 1879 Klass::follow_weak_klass_links(is_alive, keep_alive); 1880 } 1881 1882 void instanceKlass::remove_unshareable_info() { 1883 Klass::remove_unshareable_info(); 1884 init_implementor(); 1885 } 1886 1887 static void clear_all_breakpoints(methodOop m) { 1888 m->clear_all_breakpoints(); 1889 } 1890 1891 void instanceKlass::release_C_heap_structures() { 1892 // Deallocate oop map cache 1893 if (_oop_map_cache != NULL) { 1894 delete _oop_map_cache; 1895 _oop_map_cache = NULL; 1896 } 1897 1898 // Deallocate JNI identifiers for jfieldIDs 1899 JNIid::deallocate(jni_ids()); 1900 set_jni_ids(NULL); 1901 1902 jmethodID* jmeths = methods_jmethod_ids_acquire(); 1903 if (jmeths != (jmethodID*)NULL) { 1904 release_set_methods_jmethod_ids(NULL); 1905 FreeHeap(jmeths); 1906 } 1907 1908 int* indices = methods_cached_itable_indices_acquire(); 1909 if (indices != (int*)NULL) { 1910 release_set_methods_cached_itable_indices(NULL); 1911 FreeHeap(indices); 1912 } 1913 1914 // release dependencies 1915 nmethodBucket* b = _dependencies; 1916 _dependencies = NULL; 1917 while (b != NULL) { 1918 nmethodBucket* next = b->next(); 1919 delete b; 1920 b = next; 1921 } 1922 1923 // Deallocate breakpoint records 1924 if (breakpoints() != 0x0) { 1925 methods_do(clear_all_breakpoints); 1926 assert(breakpoints() == 0x0, "should have cleared breakpoints"); 1927 } 1928 1929 // deallocate information about previous versions 1930 if (_previous_versions != NULL) { 1931 for (int i = _previous_versions->length() - 1; i >= 0; i--) { 1932 PreviousVersionNode * pv_node = _previous_versions->at(i); 1933 delete pv_node; 1934 } 1935 delete _previous_versions; 1936 _previous_versions = NULL; 1937 } 1938 1939 // deallocate the cached class file 1940 if (_cached_class_file_bytes != NULL) { 1941 os::free(_cached_class_file_bytes); 1942 _cached_class_file_bytes = NULL; 1943 _cached_class_file_len = 0; 1944 } 1945 } 1946 1947 const char* instanceKlass::signature_name() const { 1948 const char* src = (const char*) (name()->as_C_string()); 1949 const int src_length = (int)strlen(src); 1950 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3); 1951 int src_index = 0; 1952 int dest_index = 0; 1953 dest[dest_index++] = 'L'; 1954 while (src_index < src_length) { 1955 dest[dest_index++] = src[src_index++]; 1956 } 1957 dest[dest_index++] = ';'; 1958 dest[dest_index] = '\0'; 1959 return dest; 1960 } 1961 1962 // Different versions of is_same_class_package 1963 bool instanceKlass::is_same_class_package(klassOop class2) { 1964 klassOop class1 = as_klassOop(); 1965 oop classloader1 = instanceKlass::cast(class1)->class_loader(); 1966
symbolOop classname1 = Klass::cast(class1)->name(); 1967 1968 if (Klass::cast(class2)->oop_is_objArray()) { 1969 class2 = objArrayKlass::cast(class2)->bottom_klass(); 1970 } 1971 oop classloader2; 1972 if (Klass::cast(class2)->oop_is_instance()) { 1973 classloader2 = instanceKlass::cast(class2)->class_loader(); 1974 } else { 1975 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array"); 1976 classloader2 = NULL; 1977 } 1978 symbolOop classname2 = Klass::cast(class2)->name(); 1979 1980 return instanceKlass::is_same_class_package(classloader1, classname1, 1981 classloader2, classname2); 1982 } 1983 1984 bool instanceKlass::is_same_class_package(oop classloader2, symbolOop classname2) { 1985 klassOop class1 = as_klassOop(); 1986 oop classloader1 = instanceKlass::cast(class1)->class_loader(); 1987 symbolOop classname1 = Klass::cast(class1)->name(); 1988 1989 return instanceKlass::is_same_class_package(classloader1, classname1, 1990 classloader2, classname2); 1991 } 1992 1993 // Return true if two classes are in the same package; classloader 1994 // and classname information is enough to determine a class's package 1995 bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1, 1996 oop class_loader2, symbolOop class_name2) { 1997 if (class_loader1 != class_loader2) { 1998 return false; 1999 } else if (class_name1 == class_name2) { 2000 return true; // skip painful bytewise comparison 2001 } else { 2002 ResourceMark rm; 2003 2004 // The symbolOops are in UTF8 encoding. Since we only need to check explicitly 2005 // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding. 2006 // Otherwise, we just compare jbyte values between the strings. 2007 jbyte *name1 = class_name1->base(); 2008 jbyte *name2 = class_name2->base(); 2009 2010 jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/'); 2011 jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/'); 2012 2013 if ((last_slash1 == NULL) || (last_slash2 == NULL)) { 2014 // One of the two doesn't have a package. Only return true 2015 // if the other one also doesn't have a package. 2016 return last_slash1 == last_slash2; 2017 } else { 2018 // Skip over '['s 2019 if (*name1 == '[') { 2020 do { 2021 name1++; 2022 } while (*name1 == '['); 2023 if (*name1 != 'L') { 2024 // Something is terribly wrong. Shouldn't be here. 2025 return false; 2026 } 2027 } 2028 if (*name2 == '[') { 2029 do { 2030 name2++; 2031 } while (*name2 == '['); 2032 if (*name2 != 'L') { 2033 // Something is terribly wrong. Shouldn't be here.
2034 return false; 2035 } 2036 } 2037 2038 // Check that package part is identical 2039 int length1 = last_slash1 - name1; 2040 int length2 = last_slash2 - name2; 2041 2042 return UTF8::equal(name1, length1, name2, length2); 2043 } 2044 } 2045 } 2046 2047 // Returns true iff super_method can be overridden by a method in targetclassname 2048 // See JLS 3rd edition 8.4.6.1 2049 // Assumes name-signature match 2050 // "this" is the instanceKlass of super_method, which must exist 2051 // note that the instanceKlass of the method in targetclassname may not have been created yet 2052 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, symbolHandle targetclassname, TRAPS) { 2053 // Private methods cannot be overridden 2054 if (super_method->is_private()) { 2055 return false; 2056 } 2057 // If super method is accessible, then override 2058 if ((super_method->is_protected()) || 2059 (super_method->is_public())) { 2060 return true; 2061 } 2062 // Package-private methods are not inherited outside of package 2063 assert(super_method->is_package_private(), "must be package private"); 2064 return(is_same_class_package(targetclassloader(), targetclassname())); 2065 } 2066 2067 /* defined for now in jvm.cpp, for historical reasons *-- 2068 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self, 2069 symbolOop& simple_name_result, TRAPS) { 2070 ... 2071 } 2072 */ 2073 2074 // tell if two classes have the same enclosing class (at package level) 2075 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1, 2076 klassOop class2_oop, TRAPS) { 2077 if (class2_oop == class1->as_klassOop()) return true; 2078 if (!Klass::cast(class2_oop)->oop_is_instance()) return false; 2079 instanceKlassHandle class2(THREAD, class2_oop); 2080 2081 // must be in same package before we try anything else 2082 if (!class1->is_same_class_package(class2->class_loader(), class2->name())) 2083 return false; 2084 2085 // As long as there is an outer1.getEnclosingClass, 2086 // shift the search outward. 2087 instanceKlassHandle outer1 = class1; 2088 for (;;) { 2089 // As we walk along, look for equalities between outer1 and class2. 2090 // Eventually, the walks will terminate as outer1 stops 2091 // at the top-level class around the original class. 2092 bool ignore_inner_is_member; 2093 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member, 2094 CHECK_false); 2095 if (next == NULL) break; 2096 if (next == class2()) return true; 2097 outer1 = instanceKlassHandle(THREAD, next); 2098 } 2099 2100 // Now do the same for class2. 2101 instanceKlassHandle outer2 = class2; 2102 for (;;) { 2103 bool ignore_inner_is_member; 2104 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member, 2105 CHECK_false); 2106 if (next == NULL) break; 2107 // Might as well check the new outer against all available values. 2108 if (next == class1()) return true; 2109 if (next == outer1()) return true; 2110 outer2 = instanceKlassHandle(THREAD, next); 2111 } 2112 2113 // If by this point we have not found an equality between the 2114 // two classes, we know they are in separate package members. 2115 return false; 2116 } 2117 2118 2119 jint instanceKlass::compute_modifier_flags(TRAPS) const { 2120 klassOop k = as_klassOop(); 2121 jint access = access_flags().as_int(); 2122 2123 // But check if it happens to be a member class. 2124 typeArrayOop inner_class_list = inner_classes(); 2125 int length = (inner_class_list == NULL) ?
0 : inner_class_list->length(); 2126 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking"); 2127 if (length > 0) { 2128 typeArrayHandle inner_class_list_h(THREAD, inner_class_list); 2129 instanceKlassHandle ik(THREAD, k); 2130 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) { 2131 int ioff = inner_class_list_h->ushort_at( 2132 i + instanceKlass::inner_class_inner_class_info_offset); 2133 2134 // Inner class attribute can be zero, skip it. 2135 // Strange but true: JVM spec. allows null inner class refs. 2136 if (ioff == 0) continue; 2137 2138 // only look at classes that are already loaded 2139 // since we are looking for our own flags. 2140 symbolOop inner_name = ik->constants()->klass_name_at(ioff); 2141 if ((ik->name() == inner_name)) { 2142 // This is really a member class. 2143 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset); 2144 break; 2145 } 2146 } 2147 } 2148 // Remember to strip ACC_SUPER bit 2149 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS; 2150 } 2151 2152 jint instanceKlass::jvmti_class_status() const { 2153 jint result = 0; 2154 2155 if (is_linked()) { 2156 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED; 2157 } 2158 2159 if (is_initialized()) { 2160 assert(is_linked(), "Class status is not consistent"); 2161 result |= JVMTI_CLASS_STATUS_INITIALIZED; 2162 } 2163 if (is_in_error_state()) { 2164 result |= JVMTI_CLASS_STATUS_ERROR; 2165 } 2166 return result; 2167 } 2168 2169 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) { 2170 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable(); 2171 int method_table_offset_in_words = ioe->offset()/wordSize; 2172 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words()) 2173 / itableOffsetEntry::size(); 2174 2175 for (int cnt = 0 ; ; cnt ++, ioe ++) { 2176 // If the interface isn't implemented by the receiver class, 2177 // the VM should throw IncompatibleClassChangeError. 2178 if (cnt >= nof_interfaces) { 2179 THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError()); 2180 } 2181 2182 klassOop ik = ioe->interface_klass(); 2183 if (ik == holder) break; 2184 } 2185 2186 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop()); 2187 methodOop m = ime[index].method(); 2188 if (m == NULL) { 2189 THROW_OOP_0(vmSymbols::java_lang_AbstractMethodError()); 2190 } 2191 return m; 2192 } 2193 2194 // On-stack replacement stuff 2195 void instanceKlass::add_osr_nmethod(nmethod* n) { 2196 // only one compilation can be active 2197 NEEDS_CLEANUP 2198 // This is a short non-blocking critical region, so the no safepoint check is ok. 2199 OsrList_lock->lock_without_safepoint_check(); 2200 assert(n->is_osr_method(), "wrong kind of nmethod"); 2201 n->set_osr_link(osr_nmethods_head()); 2202 set_osr_nmethods_head(n); 2203 // Raise the highest osr level if necessary 2204 if (TieredCompilation) { 2205 methodOop m = n->method(); 2206 m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level())); 2207 } 2208 // Remember to unlock again 2209 OsrList_lock->unlock(); 2210 2211 // Get rid of the osr methods for the same bci that have lower levels.
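// With tiered compilation, the new OSR nmethod supersedes any lower-level
// OSR nmethods for the same method and entry bci; those are made
// not-entrant here.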
2212 if (TieredCompilation) { 2213 for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) { 2214 nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true); 2215 if (inv != NULL && inv->is_in_use()) { 2216 inv->make_not_entrant(); 2217 } 2218 } 2219 } 2220 } 2221 2222 2223 void instanceKlass::remove_osr_nmethod(nmethod* n) { 2224 // This is a short non-blocking critical region, so the no safepoint check is ok. 2225 OsrList_lock->lock_without_safepoint_check(); 2226 assert(n->is_osr_method(), "wrong kind of nmethod"); 2227 nmethod* last = NULL; 2228 nmethod* cur = osr_nmethods_head(); 2229 int max_level = CompLevel_none; // Find the max comp level excluding n 2230 methodOop m = n->method(); 2231 // Search for match 2232 while (cur != NULL && cur != n) { 2233 if (TieredCompilation) { 2234 // Find max level before n 2235 max_level = MAX2(max_level, cur->comp_level()); 2236 } 2237 last = cur; 2238 cur = cur->osr_link(); 2239 } 2240 nmethod* next = NULL; 2241 if (cur == n) { 2242 next = cur->osr_link(); 2243 if (last == NULL) { 2244 // Remove first element 2245 set_osr_nmethods_head(next); 2246 } else { 2247 last->set_osr_link(next); 2248 } 2249 } 2250 n->set_osr_link(NULL); 2251 if (TieredCompilation) { 2252 cur = next; 2253 while (cur != NULL) { 2254 // Find max level after n 2255 max_level = MAX2(max_level, cur->comp_level()); 2256 cur = cur->osr_link(); 2257 } 2258 m->set_highest_osr_comp_level(max_level); 2259 } 2260 // Remember to unlock again 2261 OsrList_lock->unlock(); 2262 } 2263 2264 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const { 2265 // This is a short non-blocking critical region, so the no safepoint check is ok. 2266 OsrList_lock->lock_without_safepoint_check(); 2267 nmethod* osr = osr_nmethods_head(); 2268 nmethod* best = NULL; 2269 while (osr != NULL) { 2270 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); 2271 // There can be a time when a c1 osr method exists but we are waiting 2272 // for a c2 version. When c2 completes its osr nmethod we will trash 2273 // the c1 version and only be able to find the c2 version. However, 2274 // while we are overflowing in the c1 code at back branches, we don't 2275 // want to try to switch to the same code we are already running. 2276 2277 if (osr->method() == m && 2278 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) { 2279 if (match_level) { 2280 if (osr->comp_level() == comp_level) { 2281 // Found a match - return it. 2282 OsrList_lock->unlock(); 2283 return osr; 2284 } 2285 } else { 2286 if (best == NULL || (osr->comp_level() > best->comp_level())) { 2287 if (osr->comp_level() == CompLevel_highest_tier) { 2288 // Found the best possible - return it.
2289 OsrList_lock->unlock(); 2290 return osr; 2291 } 2292 best = osr; 2293 } 2294 } 2295 } 2296 osr = osr->osr_link(); 2297 } 2298 OsrList_lock->unlock(); 2299 if (best != NULL && best->comp_level() >= comp_level && match_level == false) { 2300 return best; 2301 } 2302 return NULL; 2303 } 2304 2305 // ----------------------------------------------------------------------------------------------------- 2306 #ifndef PRODUCT 2307 2308 // Printing 2309 2310 #define BULLET " - " 2311 2312 void FieldPrinter::do_field(fieldDescriptor* fd) { 2313 _st->print(BULLET); 2314 if (fd->is_static() || (_obj == NULL)) { 2315 fd->print_on(_st); 2316 _st->cr(); 2317 } else { 2318 fd->print_on_for(_st, _obj); 2319 _st->cr(); 2320 } 2321 } 2322 2323 2324 void instanceKlass::oop_print_on(oop obj, outputStream* st) { 2325 Klass::oop_print_on(obj, st); 2326 2327 if (as_klassOop() == SystemDictionary::String_klass()) { 2328 typeArrayOop value = java_lang_String::value(obj); 2329 juint offset = java_lang_String::offset(obj); 2330 juint length = java_lang_String::length(obj); 2331 if (value != NULL && 2332 value->is_typeArray() && 2333 offset <= (juint) value->length() && 2334 offset + length <= (juint) value->length()) { 2335 st->print(BULLET"string: "); 2336 Handle h_obj(obj); 2337 java_lang_String::print(h_obj, st); 2338 st->cr(); 2339 if (!WizardMode) return; // that is enough 2340 } 2341 } 2342 2343 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj)); 2344 FieldPrinter print_nonstatic_field(st, obj); 2345 do_nonstatic_fields(&print_nonstatic_field); 2346 2347 if (as_klassOop() == SystemDictionary::Class_klass()) { 2348 st->print(BULLET"signature: "); 2349 java_lang_Class::print_signature(obj, st); 2350 st->cr(); 2351 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj); 2352 st->print(BULLET"fake entry for mirror: "); 2353 mirrored_klass->print_value_on(st); 2354 st->cr(); 2355 st->print(BULLET"fake entry resolved_constructor: "); 2356 methodOop ctor = java_lang_Class::resolved_constructor(obj); 2357 ctor->print_value_on(st); 2358 klassOop array_klass = java_lang_Class::array_klass(obj); 2359 st->cr(); 2360 st->print(BULLET"fake entry for array: "); 2361 array_klass->print_value_on(st); 2362 st->cr(); 2363 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 2364 st->print(BULLET"signature: "); 2365 java_dyn_MethodType::print_signature(obj, st); 2366 st->cr(); 2367 } 2368 } 2369 2370 #endif //PRODUCT 2371 2372 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) { 2373 st->print("a "); 2374 name()->print_value_on(st); 2375 obj->print_address_on(st); 2376 if (as_klassOop() == SystemDictionary::String_klass() 2377 && java_lang_String::value(obj) != NULL) { 2378 ResourceMark rm; 2379 int len = java_lang_String::length(obj); 2380 int plen = (len < 24 ? len : 12); 2381 char* str = java_lang_String::as_utf8_string(obj, 0, plen); 2382 st->print(" = \"%s\"", str); 2383 if (len > plen) 2384 st->print("...[%d]", len); 2385 } else if (as_klassOop() == SystemDictionary::Class_klass()) { 2386 klassOop k = java_lang_Class::as_klassOop(obj); 2387 st->print(" = "); 2388 if (k != NULL) { 2389 k->print_value_on(st); 2390 } else { 2391 const char* tname = type2name(java_lang_Class::primitive_type(obj)); 2392 st->print("%s", tname ? 
tname : "type?"); 2393 } 2394 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 2395 st->print(" = "); 2396 java_dyn_MethodType::print_signature(obj, st); 2397 } else if (java_lang_boxing_object::is_instance(obj)) { 2398 st->print(" = "); 2399 java_lang_boxing_object::print(obj, st); 2400 } 2401 } 2402 2403 const char* instanceKlass::internal_name() const { 2404 return external_name(); 2405 } 2406 2407 // Verification 2408 2409 class VerifyFieldClosure: public OopClosure { 2410 protected: 2411 template <class T> void do_oop_work(T* p) { 2412 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap"); 2413 oop obj = oopDesc::load_decode_heap_oop(p); 2414 if (!obj->is_oop_or_null()) { 2415 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj); 2416 Universe::print(); 2417 guarantee(false, "boom"); 2418 } 2419 } 2420 public: 2421 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); } 2422 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); } 2423 }; 2424 2425 void instanceKlass::oop_verify_on(oop obj, outputStream* st) { 2426 Klass::oop_verify_on(obj, st); 2427 VerifyFieldClosure blk; 2428 oop_oop_iterate(obj, &blk); 2429 } 2430 2431 #ifndef PRODUCT 2432 2433 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) { 2434 // This verification code is disabled. JDK_Version::is_gte_jdk14x_version() 2435 // cannot be called since this function is called before the VM is 2436 // able to determine what JDK version it is running with. 2437 // The check below is always false since 1.4. 2438 return; 2439 2440 // This verification code was temporarily disabled for the 1.4 2441 // reflection implementation since java.lang.Class now has 2442 // Java-level instance fields. Should rewrite this to handle this 2443 // case. 2444 if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) { 2445 // Verify that java.lang.Class instances have a fake oop field added. 2446 instanceKlass* ik = instanceKlass::cast(k); 2447 2448 // Check that we have the right class 2449 static bool first_time = true; 2450 guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps"); 2451 first_time = false; 2452 const int extra = java_lang_Class::number_of_fake_oop_fields; 2453 guarantee(ik->nonstatic_field_size() == extra, "just checking"); 2454 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking"); 2455 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking"); 2456 2457 // Check that the map is (2,extra) 2458 int offset = java_lang_Class::klass_offset; 2459 2460 OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); 2461 guarantee(map->offset() == offset && map->count() == (unsigned int) extra, 2462 "sanity"); 2463 } 2464 } 2465 2466 #endif // ndef PRODUCT 2467 2468 // JNIid class for jfieldIDs only 2469 // Note to reviewers: 2470 // These JNI functions are just moved over to column 1 and not changed 2471 // in the compressed oops workspace.
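// A JNIid records the (holder klass, field offset) pair behind a jfieldID.
// The JNIids of a klass form a singly-linked list hanging off the
// instanceKlass; find() walks that list looking for a matching offset.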
2472 JNIid::JNIid(klassOop holder, int offset, JNIid* next) { 2473 _holder = holder; 2474 _offset = offset; 2475 _next = next; 2476 debug_only(_is_static_field_id = false;) 2477 } 2478 2479 2480 JNIid* JNIid::find(int offset) { 2481 JNIid* current = this; 2482 while (current != NULL) { 2483 if (current->offset() == offset) return current; 2484 current = current->next(); 2485 } 2486 return NULL; 2487 } 2488 2489 void JNIid::oops_do(OopClosure* f) { 2490 for (JNIid* cur = this; cur != NULL; cur = cur->next()) { 2491 f->do_oop(cur->holder_addr()); 2492 } 2493 } 2494 2495 void JNIid::deallocate(JNIid* current) { 2496 while (current != NULL) { 2497 JNIid* next = current->next(); 2498 delete current; 2499 current = next; 2500 } 2501 } 2502 2503 2504 void JNIid::verify(klassOop holder) { 2505 int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields(); 2506 int end_field_offset; 2507 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); 2508 2509 JNIid* current = this; 2510 while (current != NULL) { 2511 guarantee(current->holder() == holder, "Invalid klass in JNIid"); 2512 #ifdef ASSERT 2513 int o = current->offset(); 2514 if (current->is_static_field_id()) { 2515 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid"); 2516 } 2517 #endif 2518 current = current->next(); 2519 } 2520 } 2521 2522 2523 #ifdef ASSERT 2524 void instanceKlass::set_init_state(ClassState state) { 2525 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state) 2526 : (_init_state < state); 2527 assert(good_state || state == allocated, "illegal state transition"); 2528 _init_state = state; 2529 } 2530 #endif 2531 2532 2533 // RedefineClasses() support for previous versions: 2534 2535 // Add an information node that contains weak references to the 2536 // interesting parts of the previous version of the_class. 2537 // This is also where we clean out any unused weak references. 2538 // Note that while we delete nodes from the _previous_versions 2539 // array, we never delete the array itself until the klass is 2540 // unloaded. The has_been_redefined() query depends on that fact. 2541 // 2542 void instanceKlass::add_previous_version(instanceKlassHandle ikh, 2543 BitMap* emcp_methods, int emcp_method_count) { 2544 assert(Thread::current()->is_VM_thread(), 2545 "only VMThread can add previous versions"); 2546 2547 if (_previous_versions == NULL) { 2548 // This is the first previous version so make some space. 2549 // Start with 2 elements under the assumption that the class 2550 // won't be redefined much. 
2551 _previous_versions = new (ResourceObj::C_HEAP) 2552 GrowableArray<PreviousVersionNode *>(2, true); 2553 } 2554 2555 // RC_TRACE macro has an embedded ResourceMark 2556 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d", 2557 ikh->external_name(), _previous_versions->length(), emcp_method_count)); 2558 constantPoolHandle cp_h(ikh->constants()); 2559 jobject cp_ref; 2560 if (cp_h->is_shared()) { 2561 // a shared ConstantPool requires a regular reference; a weak 2562 // reference would be collectible 2563 cp_ref = JNIHandles::make_global(cp_h); 2564 } else { 2565 cp_ref = JNIHandles::make_weak_global(cp_h); 2566 } 2567 PreviousVersionNode * pv_node = NULL; 2568 objArrayOop old_methods = ikh->methods(); 2569 2570 if (emcp_method_count == 0) { 2571 // non-shared ConstantPool gets a weak reference 2572 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL); 2573 RC_TRACE(0x00000400, 2574 ("add: all methods are obsolete; flushing any EMCP weak refs")); 2575 } else { 2576 int local_count = 0; 2577 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP) 2578 GrowableArray<jweak>(emcp_method_count, true); 2579 for (int i = 0; i < old_methods->length(); i++) { 2580 if (emcp_methods->at(i)) { 2581 // this old method is EMCP so save a weak ref 2582 methodOop old_method = (methodOop) old_methods->obj_at(i); 2583 methodHandle old_method_h(old_method); 2584 jweak method_ref = JNIHandles::make_weak_global(old_method_h); 2585 method_refs->append(method_ref); 2586 if (++local_count >= emcp_method_count) { 2587 // no more EMCP methods so bail out now 2588 break; 2589 } 2590 } 2591 } 2592 // non-shared ConstantPool gets a weak reference 2593 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs); 2594 } 2595 2596 _previous_versions->append(pv_node); 2597 2598 // Using weak references allows the interesting parts of previous 2599 // classes to be GC'ed when they are no longer needed. Since the 2600 // caller is the VMThread and we are at a safepoint, this is a good 2601 // time to clear out unused weak references. 2602 2603 RC_TRACE(0x00000400, ("add: previous version length=%d", 2604 _previous_versions->length())); 2605 2606 // skip the last entry since we just added it 2607 for (int i = _previous_versions->length() - 2; i >= 0; i--) { 2608 // check the previous versions array for GC'ed weak refs 2609 pv_node = _previous_versions->at(i); 2610 cp_ref = pv_node->prev_constant_pool(); 2611 assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); 2612 if (cp_ref == NULL) { 2613 delete pv_node; 2614 _previous_versions->remove_at(i); 2615 // Since we are traversing the array backwards, we don't have to 2616 // do anything special with the index. 2617 continue; // robustness 2618 } 2619 2620 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2621 if (cp == NULL) { 2622 // this entry has been GC'ed so remove it 2623 delete pv_node; 2624 _previous_versions->remove_at(i); 2625 // Since we are traversing the array backwards, we don't have to 2626 // do anything special with the index.
2627 continue; 2628 } else { 2629 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i)); 2630 } 2631 2632 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2633 if (method_refs != NULL) { 2634 RC_TRACE(0x00000400, ("add: previous methods length=%d", 2635 method_refs->length())); 2636 for (int j = method_refs->length() - 1; j >= 0; j--) { 2637 jweak method_ref = method_refs->at(j); 2638 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); 2639 if (method_ref == NULL) { 2640 method_refs->remove_at(j); 2641 // Since we are traversing the array backwards, we don't have to 2642 // do anything special with the index. 2643 continue; // robustness 2644 } 2645 2646 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2647 if (method == NULL || emcp_method_count == 0) { 2648 // This method entry has been GC'ed or the current 2649 // RedefineClasses() call has made all methods obsolete 2650 // so remove it. 2651 JNIHandles::destroy_weak_global(method_ref); 2652 method_refs->remove_at(j); 2653 } else { 2654 // RC_TRACE macro has an embedded ResourceMark 2655 RC_TRACE(0x00000400, 2656 ("add: %s(%s): previous method @%d in version @%d is alive", 2657 method->name()->as_C_string(), method->signature()->as_C_string(), 2658 j, i)); 2659 } 2660 } 2661 } 2662 } 2663 2664 int obsolete_method_count = old_methods->length() - emcp_method_count; 2665 2666 if (emcp_method_count != 0 && obsolete_method_count != 0 && 2667 _previous_versions->length() > 1) { 2668 // We have a mix of obsolete and EMCP methods. If there are more 2669 // previous versions than the one we just added, then we have to 2670 // clear out any matching EMCP method entries the hard way. 2671 int local_count = 0; 2672 for (int i = 0; i < old_methods->length(); i++) { 2673 if (!emcp_methods->at(i)) { 2674 // only obsolete methods are interesting 2675 methodOop old_method = (methodOop) old_methods->obj_at(i); 2676 symbolOop m_name = old_method->name(); 2677 symbolOop m_signature = old_method->signature(); 2678 2679 // skip the last entry since we just added it 2680 for (int j = _previous_versions->length() - 2; j >= 0; j--) { 2681 // check the previous versions array for GC'ed weak refs 2682 pv_node = _previous_versions->at(j); 2683 cp_ref = pv_node->prev_constant_pool(); 2684 assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); 2685 if (cp_ref == NULL) { 2686 delete pv_node; 2687 _previous_versions->remove_at(j); 2688 // Since we are traversing the array backwards, we don't have to 2689 // do anything special with the index. 2690 continue; // robustness 2691 } 2692 2693 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2694 if (cp == NULL) { 2695 // this entry has been GC'ed so remove it 2696 delete pv_node; 2697 _previous_versions->remove_at(j); 2698 // Since we are traversing the array backwards, we don't have to 2699 // do anything special with the index. 2700 continue; 2701 } 2702 2703 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2704 if (method_refs == NULL) { 2705 // We have run into a PreviousVersion generation where 2706 // all methods were made obsolete during that generation's 2707 // RedefineClasses() operation. At the time of that 2708 // operation, all EMCP methods were flushed so we don't 2709 // have to go back any further. 2710 // 2711 // A NULL method_refs is different from an empty method_refs. 2712 // We cannot infer any optimizations about older generations 2713 // from an empty method_refs for the current generation.
2714 break; 2715 } 2716 2717 for (int k = method_refs->length() - 1; k >= 0; k--) { 2718 jweak method_ref = method_refs->at(k); 2719 assert(method_ref != NULL, 2720 "weak method ref was unexpectedly cleared"); 2721 if (method_ref == NULL) { 2722 method_refs->remove_at(k); 2723 // Since we are traversing the array backwards, we don't 2724 // have to do anything special with the index. 2725 continue; // robustness 2726 } 2727 2728 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2729 if (method == NULL) { 2730 // this method entry has been GC'ed so skip it 2731 JNIHandles::destroy_weak_global(method_ref); 2732 method_refs->remove_at(k); 2733 continue; 2734 } 2735 2736 if (method->name() == m_name && 2737 method->signature() == m_signature) { 2738 // The current RedefineClasses() call has made all EMCP 2739 // versions of this method obsolete so mark it as obsolete 2740 // and remove the weak ref. 2741 RC_TRACE(0x00000400, 2742 ("add: %s(%s): flush obsolete method @%d in version @%d", 2743 m_name->as_C_string(), m_signature->as_C_string(), k, j)); 2744 2745 method->set_is_obsolete(); 2746 JNIHandles::destroy_weak_global(method_ref); 2747 method_refs->remove_at(k); 2748 break; 2749 } 2750 } 2751 2752 // The previous loop may not find a matching EMCP method, but 2753 // that doesn't mean that we can optimize and not go any 2754 // further back in the PreviousVersion generations. The EMCP 2755 // method for this generation could have already been GC'ed, 2756 // but there still may be an older EMCP method that has not 2757 // been GC'ed. 2758 } 2759 2760 if (++local_count >= obsolete_method_count) { 2761 // no more obsolete methods so bail out now 2762 break; 2763 } 2764 } 2765 } 2766 } 2767 } // end add_previous_version() 2768 2769 2770 // Determine if instanceKlass has a previous version. 2771 bool instanceKlass::has_previous_version() const { 2772 if (_previous_versions == NULL) { 2773 // no previous versions array so answer is easy 2774 return false; 2775 } 2776 2777 for (int i = _previous_versions->length() - 1; i >= 0; i--) { 2778 // Check the previous versions array for an info node that hasn't 2779 // been GC'ed 2780 PreviousVersionNode * pv_node = _previous_versions->at(i); 2781 2782 jobject cp_ref = pv_node->prev_constant_pool(); 2783 assert(cp_ref != NULL, "cp reference was unexpectedly cleared"); 2784 if (cp_ref == NULL) { 2785 continue; // robustness 2786 } 2787 2788 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2789 if (cp != NULL) { 2790 // we have at least one previous version 2791 return true; 2792 } 2793 2794 // We don't have to check the method refs. If the constant pool has 2795 // been GC'ed then so have the methods. 2796 } 2797 2798 // all of the underlying nodes' info has been GC'ed 2799 return false; 2800 } // end has_previous_version() 2801 2802 methodOop instanceKlass::method_with_idnum(int idnum) { 2803 methodOop m = NULL; 2804 if (idnum < methods()->length()) { 2805 m = (methodOop) methods()->obj_at(idnum); 2806 } 2807 if (m == NULL || m->method_idnum() != idnum) { 2808 for (int index = 0; index < methods()->length(); ++index) { 2809 m = (methodOop) methods()->obj_at(index); 2810 if (m->method_idnum() == idnum) { 2811 return m; 2812 } 2813 } 2814 } 2815 return m; 2816 } 2817 2818 2819 // Set the annotation at 'idnum' to 'anno'. 2820 // We don't want to create or extend the array if 'anno' is NULL, since that is the 2821 // default value. However, if the array exists and is long enough, we must set NULL values. 
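// When the array must be created, it is sized to at least
// _idnum_allocated_count entries so later idnums fit without another copy.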
2822 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) { 2823 objArrayOop md = *md_p; 2824 if (md != NULL && md->length() > idnum) { 2825 md->obj_at_put(idnum, anno); 2826 } else if (anno != NULL) { 2827 // create the array 2828 int length = MAX2(idnum+1, (int)_idnum_allocated_count); 2829 md = oopFactory::new_system_objArray(length, Thread::current()); 2830 if (*md_p != NULL) { 2831 // copy the existing entries 2832 for (int index = 0; index < (*md_p)->length(); index++) { 2833 md->obj_at_put(index, (*md_p)->obj_at(index)); 2834 } 2835 } 2836 set_annotations(md, md_p); 2837 md->obj_at_put(idnum, anno); 2838 } // if no array and idnum isn't included there is nothing to do 2839 } 2840 2841 // Construct a PreviousVersionNode entry for the array hung off 2842 // the instanceKlass. 2843 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool, 2844 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) { 2845 2846 _prev_constant_pool = prev_constant_pool; 2847 _prev_cp_is_weak = prev_cp_is_weak; 2848 _prev_EMCP_methods = prev_EMCP_methods; 2849 } 2850 2851 2852 // Destroy a PreviousVersionNode 2853 PreviousVersionNode::~PreviousVersionNode() { 2854 if (_prev_constant_pool != NULL) { 2855 if (_prev_cp_is_weak) { 2856 JNIHandles::destroy_weak_global(_prev_constant_pool); 2857 } else { 2858 JNIHandles::destroy_global(_prev_constant_pool); 2859 } 2860 } 2861 2862 if (_prev_EMCP_methods != NULL) { 2863 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) { 2864 jweak method_ref = _prev_EMCP_methods->at(i); 2865 if (method_ref != NULL) { 2866 JNIHandles::destroy_weak_global(method_ref); 2867 } 2868 } 2869 delete _prev_EMCP_methods; 2870 } 2871 } 2872 2873 2874 // Construct a PreviousVersionInfo entry 2875 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) { 2876 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle 2877 _prev_EMCP_method_handles = NULL; 2878 2879 jobject cp_ref = pv_node->prev_constant_pool(); 2880 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared"); 2881 if (cp_ref == NULL) { 2882 return; // robustness 2883 } 2884 2885 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2886 if (cp == NULL) { 2887 // Weak reference has been GC'ed. Since the constant pool has been 2888 // GC'ed, the methods have also been GC'ed. 2889 return; 2890 } 2891 2892 // make the constantPoolOop safe to return 2893 _prev_constant_pool_handle = constantPoolHandle(cp); 2894 2895 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2896 if (method_refs == NULL) { 2897 // the instanceKlass did not have any EMCP methods 2898 return; 2899 } 2900 2901 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10); 2902 2903 int n_methods = method_refs->length(); 2904 for (int i = 0; i < n_methods; i++) { 2905 jweak method_ref = method_refs->at(i); 2906 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); 2907 if (method_ref == NULL) { 2908 continue; // robustness 2909 } 2910 2911 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2912 if (method == NULL) { 2913 // this entry has been GC'ed so skip it 2914 continue; 2915 } 2916 2917 // make the methodOop safe to return 2918 _prev_EMCP_method_handles->append(methodHandle(method)); 2919 } 2920 } 2921 2922 2923 // Destroy a PreviousVersionInfo 2924 PreviousVersionInfo::~PreviousVersionInfo() { 2925 // Since _prev_EMCP_method_handles is not C-heap allocated, we 2926 // don't have to delete it. 
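// (The method handles themselves are reclaimed when the owning
// PreviousVersionWalker's HandleMark is destroyed.)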
2927 } 2928 2929 2930 // Construct a helper for walking the previous versions array 2931 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) { 2932 _previous_versions = ik->previous_versions(); 2933 _current_index = 0; 2934 // _hm needs no initialization 2935 _current_p = NULL; 2936 } 2937 2938 2939 // Destroy a PreviousVersionWalker 2940 PreviousVersionWalker::~PreviousVersionWalker() { 2941 // Delete the current info just in case the caller didn't walk to 2942 // the end of the previous versions list. No harm if _current_p is 2943 // already NULL. 2944 delete _current_p; 2945 2946 // When _hm is destroyed, all the Handles returned in 2947 // PreviousVersionInfo objects will be destroyed. 2948 // Also, after this destructor is finished it will be 2949 // safe to delete the GrowableArray allocated in the 2950 // PreviousVersionInfo objects. 2951 } 2952 2953 2954 // Return the interesting information for the next previous version 2955 // of the klass. Returns NULL if there are no more previous versions. 2956 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() { 2957 if (_previous_versions == NULL) { 2958 // no previous versions so nothing to return 2959 return NULL; 2960 } 2961 2962 delete _current_p; // clean up the previous info for the caller 2963 _current_p = NULL; // reset to NULL so we don't delete same object twice 2964 2965 int length = _previous_versions->length(); 2966 2967 while (_current_index < length) { 2968 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); 2969 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP) 2970 PreviousVersionInfo(pv_node); 2971 2972 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle(); 2973 if (cp_h.is_null()) { 2974 delete pv_info; 2975 2976 // The underlying node's info has been GC'ed so try the next one. 2977 // We don't have to check the methods. If the constant pool has been 2978 // GC'ed then so have the methods. 2979 continue; 2980 } 2981 2982 // Found a node with non-GC'ed info so return it. The walker owns pv_info 2983 // and deletes it on the next call or in its destructor; do not delete it. 2984 _current_p = pv_info; 2985 return pv_info; 2986 } 2987 2988 // all of the underlying nodes' info has been GC'ed 2989 return NULL; 2990 } // end next_previous_version()
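//
// A minimal usage sketch for PreviousVersionWalker (hypothetical caller):
// the walker owns each PreviousVersionInfo it returns and frees it on the
// next call or in its destructor, so the caller only iterates:
//
//   PreviousVersionWalker pvw(ik);
//   for (PreviousVersionInfo* pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     constantPoolHandle prev_cp = pv_info->prev_constant_pool_handle();
//     // ... examine prev_cp and the EMCP method handles here ...
//   }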