--- old/src/share/vm/oops/instanceKlass.cpp
+++ new/src/share/vm/oops/instanceKlass.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/javaClasses.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/verifier.hpp"
29 29 #include "classfile/vmSymbols.hpp"
30 30 #include "compiler/compileBroker.hpp"
31 31 #include "gc_implementation/shared/markSweep.inline.hpp"
32 32 #include "gc_interface/collectedHeap.inline.hpp"
33 33 #include "interpreter/oopMapCache.hpp"
34 34 #include "interpreter/rewriter.hpp"
35 35 #include "jvmtifiles/jvmti.h"
36 36 #include "memory/genOopClosures.inline.hpp"
37 37 #include "memory/oopFactory.hpp"
38 38 #include "memory/permGen.hpp"
39 39 #include "oops/instanceKlass.hpp"
40 40 #include "oops/instanceMirrorKlass.hpp"
41 41 #include "oops/instanceOop.hpp"
42 42 #include "oops/methodOop.hpp"
43 43 #include "oops/objArrayKlassKlass.hpp"
44 44 #include "oops/oop.inline.hpp"
45 45 #include "oops/symbol.hpp"
46 46 #include "prims/jvmtiExport.hpp"
47 47 #include "prims/jvmtiRedefineClassesTrace.hpp"
48 48 #include "runtime/fieldDescriptor.hpp"
49 49 #include "runtime/handles.inline.hpp"
50 50 #include "runtime/javaCalls.hpp"
51 51 #include "runtime/mutexLocker.hpp"
52 52 #include "services/threadService.hpp"
53 53 #include "utilities/dtrace.hpp"
54 54 #ifdef TARGET_OS_FAMILY_linux
55 55 # include "thread_linux.inline.hpp"
56 56 #endif
57 57 #ifdef TARGET_OS_FAMILY_solaris
58 58 # include "thread_solaris.inline.hpp"
59 59 #endif
60 60 #ifdef TARGET_OS_FAMILY_windows
61 61 # include "thread_windows.inline.hpp"
62 62 #endif
63 63 #ifndef SERIALGC
64 64 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
65 65 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
66 66 #include "gc_implementation/g1/g1RemSet.inline.hpp"
67 67 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
68 68 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
69 69 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
70 70 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
71 71 #include "oops/oop.pcgc.inline.hpp"
72 72 #endif
73 73 #ifdef COMPILER1
74 74 #include "c1/c1_Compiler.hpp"
75 75 #endif
76 76
77 77 #ifdef DTRACE_ENABLED
78 78
79 79 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
80 80 char*, intptr_t, oop, intptr_t);
81 81 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
82 82 char*, intptr_t, oop, intptr_t, int);
83 83 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
84 84 char*, intptr_t, oop, intptr_t, int);
85 85 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
86 86 char*, intptr_t, oop, intptr_t, int);
87 87 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
88 88 char*, intptr_t, oop, intptr_t, int);
89 89 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
90 90 char*, intptr_t, oop, intptr_t, int);
91 91 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
92 92 char*, intptr_t, oop, intptr_t, int);
93 93 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
94 94 char*, intptr_t, oop, intptr_t, int);
95 95
96 96 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type) \
97 97 { \
98 98 char* data = NULL; \
99 99 int len = 0; \
100 100 Symbol* name = (clss)->name(); \
101 101 if (name != NULL) { \
102 102 data = (char*)name->bytes(); \
103 103 len = name->utf8_length(); \
104 104 } \
105 105 HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \
106 106 data, len, (clss)->class_loader(), thread_type); \
107 107 }
108 108
109 109 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
110 110 { \
111 111 char* data = NULL; \
112 112 int len = 0; \
113 113 Symbol* name = (clss)->name(); \
114 114 if (name != NULL) { \
115 115 data = (char*)name->bytes(); \
116 116 len = name->utf8_length(); \
117 117 } \
118 118 HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \
119 119 data, len, (clss)->class_loader(), thread_type, wait); \
120 120 }
121 121
122 122 #else // ndef DTRACE_ENABLED
123 123
124 124 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
125 125 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
126 126
127 127 #endif // ndef DTRACE_ENABLED
128 128
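
A self-contained sketch of how the probe macros above expand, for readers without a DTrace build handy. HS_DTRACE_PROBE5 is stubbed with printf, and Symbol/klass are simplified stand-ins, so this is an off-line illustration of the macro plumbing, not HotSpot code:

#include <cstdio>
#include <cstring>

// Stub: real HotSpot routes this to a DTrace USDT probe point.
#define HS_DTRACE_PROBE5(provider, name, a0, a1, a2, a3, a4)  \
  printf(#provider " " #name ": class=%.*s wait=%d\n",        \
         (int)(a1), (const char*)(a0), (int)(a4))

struct Symbol {
  const char* _body;
  char* bytes()             { return (char*)_body; }
  int   utf8_length() const { return (int)strlen(_body); }
};

struct FakeKlass {
  Symbol* _name;
  Symbol* name()         { return _name; }
  void*   class_loader() { return NULL; }
};

#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
  {                                                                \
    char* data = NULL;                                             \
    int len = 0;                                                   \
    Symbol* name = (clss)->name();                                 \
    if (name != NULL) {                                            \
      data = (char*)name->bytes();                                 \
      len = name->utf8_length();                                   \
    }                                                              \
    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,       \
      data, len, (clss)->class_loader(), thread_type, wait);       \
  }

int main() {
  Symbol s = { "java/lang/String" };
  FakeKlass k = { &s };
  // Token pasting yields the probe name class__initialization__end:
  DTRACE_CLASSINIT_PROBE_WAIT(end, &k, -1, 1);
  return 0;
}
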
129 129 bool instanceKlass::should_be_initialized() const {
130 130 return !is_initialized();
131 131 }
132 132
133 133 klassVtable* instanceKlass::vtable() const {
134 134 return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
135 135 }
136 136
137 137 klassItable* instanceKlass::itable() const {
138 138 return new klassItable(as_klassOop());
139 139 }
140 140
141 141 void instanceKlass::eager_initialize(Thread *thread) {
142 142 if (!EagerInitialization) return;
143 143
144 144 if (this->is_not_initialized()) {
145 145 // abort if the class has a class initializer
146 146 if (this->class_initializer() != NULL) return;
147 147
148 148 // abort if it is java.lang.Object (initialization is handled in genesis)
149 149 klassOop super = this->super();
150 150 if (super == NULL) return;
151 151
152 152 // abort if the super class should be initialized
153 153 if (!instanceKlass::cast(super)->is_initialized()) return;
154 154
155 155 // call body to expose the this pointer
156 156 instanceKlassHandle this_oop(thread, this->as_klassOop());
157 157 eager_initialize_impl(this_oop);
158 158 }
159 159 }
160 160
161 161
162 162 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
163 163 EXCEPTION_MARK;
164 164 ObjectLocker ol(this_oop, THREAD);
165 165
166 166 // abort if someone beat us to the initialization
167 167 if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized()
168 168
169 169 ClassState old_state = this_oop->_init_state;
170 170 link_class_impl(this_oop, true, THREAD);
171 171 if (HAS_PENDING_EXCEPTION) {
172 172 CLEAR_PENDING_EXCEPTION;
173 173 // Abort if linking the class throws an exception.
174 174
175 175 // Use a test to avoid redundantly resetting the state if there's
176 176 // no change. Set_init_state() asserts that state changes make
177 177 // progress, whereas here we might just be spinning in place.
178 178 if( old_state != this_oop->_init_state )
179 179 this_oop->set_init_state (old_state);
180 180 } else {
181 181 // linking successful, mark class as initialized
182 182 this_oop->set_init_state (fully_initialized);
183 183 // trace
184 184 if (TraceClassInitialization) {
185 185 ResourceMark rm(THREAD);
186 186 tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
187 187 }
188 188 }
189 189 }
190 190
191 191
192 192 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
193 193 // process. The step comments refer to the procedure described in that section.
194 194 // Note: implementation moved to static method to expose the this pointer.
195 195 void instanceKlass::initialize(TRAPS) {
196 196 if (this->should_be_initialized()) {
197 197 HandleMark hm(THREAD);
198 198 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
199 199 initialize_impl(this_oop, CHECK);
200 200 // Note: at this point the class may be initialized
201 201 // OR it may be in the state of being initialized
202 202 // in case of recursive initialization!
203 203 } else {
204 204 assert(is_initialized(), "sanity check");
205 205 }
206 206 }
207 207
208 208
209 209 bool instanceKlass::verify_code(
210 210 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
211 211 // 1) Verify the bytecodes
212 212 Verifier::Mode mode =
213 213 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
214 214 return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
215 215 }
216 216
217 217
218 218 // Used exclusively by the shared spaces dump mechanism to prevent
219 219 // classes mapped into the shared regions in new VMs from appearing linked.
220 220
221 221 void instanceKlass::unlink_class() {
222 222 assert(is_linked(), "must be linked");
223 223 _init_state = loaded;
224 224 }
225 225
226 226 void instanceKlass::link_class(TRAPS) {
227 227 assert(is_loaded(), "must be loaded");
228 228 if (!is_linked()) {
229 229 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
230 230 link_class_impl(this_oop, true, CHECK);
231 231 }
232 232 }
233 233
234 234 // Called to verify that a class can link during initialization, without
235 235 // throwing a VerifyError.
236 236 bool instanceKlass::link_class_or_fail(TRAPS) {
237 237 assert(is_loaded(), "must be loaded");
238 238 if (!is_linked()) {
239 239 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
240 240 link_class_impl(this_oop, false, CHECK_false);
241 241 }
242 242 return is_linked();
243 243 }
244 244
245 245 bool instanceKlass::link_class_impl(
246 246 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
247 247 // check for error state
248 248 if (this_oop->is_in_error_state()) {
249 249 ResourceMark rm(THREAD);
250 250 THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
251 251 this_oop->external_name(), false);
252 252 }
253 253 // return if already verified
254 254 if (this_oop->is_linked()) {
255 255 return true;
256 256 }
257 257
258 258 // Timing
259 259 // timer handles recursion
260 260 assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
261 261 JavaThread* jt = (JavaThread*)THREAD;
262 262
263 263 // link super class before linking this class
264 264 instanceKlassHandle super(THREAD, this_oop->super());
265 265 if (super.not_null()) {
266 266 if (super->is_interface()) { // check if super class is an interface
267 267 ResourceMark rm(THREAD);
268 268 Exceptions::fthrow(
269 269 THREAD_AND_LOCATION,
270 270 vmSymbols::java_lang_IncompatibleClassChangeError(),
271 271 "class %s has interface %s as super class",
272 272 this_oop->external_name(),
273 273 super->external_name()
274 274 );
275 275 return false;
276 276 }
277 277
278 278 link_class_impl(super, throw_verifyerror, CHECK_false);
279 279 }
280 280
281 281 // link all interfaces implemented by this class before linking this class
282 282 objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
283 283 int num_interfaces = interfaces->length();
284 284 for (int index = 0; index < num_interfaces; index++) {
285 285 HandleMark hm(THREAD);
286 286 instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
287 287 link_class_impl(ih, throw_verifyerror, CHECK_false);
288 288 }
289 289
290 290 // in case the class is linked in the process of linking its superclasses
291 291 if (this_oop->is_linked()) {
292 292 return true;
293 293 }
294 294
295 295 // trace the link time for this klass, which includes
296 296 // the verification time
297 297 PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
298 298 ClassLoader::perf_class_link_selftime(),
299 299 ClassLoader::perf_classes_linked(),
300 300 jt->get_thread_stat()->perf_recursion_counts_addr(),
301 301 jt->get_thread_stat()->perf_timers_addr(),
302 302 PerfClassTraceTime::CLASS_LINK);
303 303
304 304 // verification & rewriting
305 305 {
306 306 ObjectLocker ol(this_oop, THREAD);
307 307 // rewritten will have been set if loader constraint error found
308 308 // on an earlier link attempt
309 309 // don't verify or rewrite if already rewritten
310 310 if (!this_oop->is_linked()) {
311 311 if (!this_oop->is_rewritten()) {
312 312 {
313 313 // Timer includes any side effects of class verification (resolution,
314 314 // etc), but not recursive entry into verify_code().
315 315 PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
316 316 ClassLoader::perf_class_verify_selftime(),
317 317 ClassLoader::perf_classes_verified(),
318 318 jt->get_thread_stat()->perf_recursion_counts_addr(),
319 319 jt->get_thread_stat()->perf_timers_addr(),
320 320 PerfClassTraceTime::CLASS_VERIFY);
321 321 bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
322 322 if (!verify_ok) {
323 323 return false;
324 324 }
325 325 }
326 326
327 327 // Just in case a side-effect of verify linked this class already
328 328 // (which can sometimes happen since the verifier loads classes
329 329 // using custom class loaders, which are free to initialize things)
330 330 if (this_oop->is_linked()) {
331 331 return true;
332 332 }
333 333
334 334 // also sets rewritten
335 335 this_oop->rewrite_class(CHECK_false);
336 336 }
337 337
338 338 // relocate jsrs and link methods after they are all rewritten
339 339 this_oop->relocate_and_link_methods(CHECK_false);
340 340
341 341 // Initialize the vtable and interface table after
342 342 // methods have been rewritten since rewrite may
343 343 // fabricate new methodOops.
344 344 // also does loader constraint checking
345 345 if (!this_oop()->is_shared()) {
346 346 ResourceMark rm(THREAD);
347 347 this_oop->vtable()->initialize_vtable(true, CHECK_false);
348 348 this_oop->itable()->initialize_itable(true, CHECK_false);
349 349 }
350 350 #ifdef ASSERT
351 351 else {
352 352 ResourceMark rm(THREAD);
353 353 this_oop->vtable()->verify(tty, true);
354 354 // In case itable verification is ever added.
355 355 // this_oop->itable()->verify(tty, true);
356 356 }
357 357 #endif
358 358 this_oop->set_init_state(linked);
359 359 if (JvmtiExport::should_post_class_prepare()) {
360 360 Thread *thread = THREAD;
361 361 assert(thread->is_Java_thread(), "thread->is_Java_thread()");
362 362 JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
363 363 }
364 364 }
365 365 }
366 366 return true;
367 367 }
368 368
369 369
370 370 // Rewrite the byte codes of all of the methods of a class.
371 371 // The rewriter must be called exactly once. Rewriting must happen after
372 372 // verification but before the first method of the class is executed.
373 373 void instanceKlass::rewrite_class(TRAPS) {
374 374 assert(is_loaded(), "must be loaded");
375 375 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
376 376 if (this_oop->is_rewritten()) {
377 377 assert(this_oop()->is_shared(), "rewriting an unshared class?");
378 378 return;
379 379 }
380 380 Rewriter::rewrite(this_oop, CHECK);
381 381 this_oop->set_rewritten();
382 382 }
383 383
384 384 // Now relocate and link method entry points after class is rewritten.
385 385 // This is outside is_rewritten flag. In case of an exception, it can be
386 386 // executed more than once.
387 387 void instanceKlass::relocate_and_link_methods(TRAPS) {
388 388 assert(is_loaded(), "must be loaded");
389 389 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
390 390 Rewriter::relocate_and_link(this_oop, CHECK);
391 391 }
392 392
393 393
394 394 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
395 395 // Make sure klass is linked (verified) before initialization
396 396 // A class could already be verified, since it has been reflected upon.
397 397 this_oop->link_class(CHECK);
398 398
399 399 DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
400 400
401 401 bool wait = false;
402 402
403 403 // refer to the JVM book page 47 for description of steps
404 404 // Step 1
405 405 { ObjectLocker ol(this_oop, THREAD);
406 406
407 407 Thread *self = THREAD; // it's passed the current thread
408 408
409 409 // Step 2
410 410 // If we were to use wait() instead of waitUninterruptibly() then
411 411 // we might end up throwing IE from link/symbol resolution sites
412 412 // that aren't expected to throw. This would wreak havoc. See 6320309.
413 413 while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
414 414 wait = true;
415 415 ol.waitUninterruptibly(CHECK);
416 416 }
417 417
418 418 // Step 3
419 419 if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
420 420 DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1,wait);
421 421 return;
422 422 }
423 423
424 424 // Step 4
425 425 if (this_oop->is_initialized()) {
426 426 DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1,wait);
427 427 return;
428 428 }
429 429
430 430 // Step 5
431 431 if (this_oop->is_in_error_state()) {
432 432 DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1,wait);
433 433 ResourceMark rm(THREAD);
434 434 const char* desc = "Could not initialize class ";
435 435 const char* className = this_oop->external_name();
436 436 size_t msglen = strlen(desc) + strlen(className) + 1;
437 437 char* message = NEW_RESOURCE_ARRAY(char, msglen);
438 438 if (NULL == message) {
439 439 // Out of memory: can't create detailed error message
440 440 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
441 441 } else {
442 442 jio_snprintf(message, msglen, "%s%s", desc, className);
443 443 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
444 444 }
445 445 }
446 446
447 447 // Step 6
448 448 this_oop->set_init_state(being_initialized);
449 449 this_oop->set_init_thread(self);
450 450 }
451 451
452 452 // Step 7
453 453 klassOop super_klass = this_oop->super();
454 454 if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
455 455 Klass::cast(super_klass)->initialize(THREAD);
456 456
457 457 if (HAS_PENDING_EXCEPTION) {
458 458 Handle e(THREAD, PENDING_EXCEPTION);
459 459 CLEAR_PENDING_EXCEPTION;
460 460 {
461 461 EXCEPTION_MARK;
462 462 this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
463 463 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
464 464 }
465 465 DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1,wait);
466 466 THROW_OOP(e());
467 467 }
468 468 }
469 469
470 470 // Step 8
471 471 {
472 472 assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
473 473 JavaThread* jt = (JavaThread*)THREAD;
474 474 DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1,wait);
475 475 // Timer includes any side effects of class initialization (resolution,
476 476 // etc), but not recursive entry into call_class_initializer().
477 477 PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
478 478 ClassLoader::perf_class_init_selftime(),
479 479 ClassLoader::perf_classes_inited(),
480 480 jt->get_thread_stat()->perf_recursion_counts_addr(),
481 481 jt->get_thread_stat()->perf_timers_addr(),
482 482 PerfClassTraceTime::CLASS_CLINIT);
483 483 this_oop->call_class_initializer(THREAD);
484 484 }
485 485
486 486 // Step 9
487 487 if (!HAS_PENDING_EXCEPTION) {
488 488 this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
489 489 { ResourceMark rm(THREAD);
490 490 debug_only(this_oop->vtable()->verify(tty, true);)
491 491 }
492 492 }
493 493 else {
494 494 // Step 10 and 11
495 495 Handle e(THREAD, PENDING_EXCEPTION);
496 496 CLEAR_PENDING_EXCEPTION;
497 497 {
498 498 EXCEPTION_MARK;
499 499 this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
500 500 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
501 501 }
502 502 DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1,wait);
503 503 if (e->is_a(SystemDictionary::Error_klass())) {
504 504 THROW_OOP(e());
505 505 } else {
506 506 JavaCallArguments args(e);
507 507 THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
508 508 vmSymbols::throwable_void_signature(),
509 509 &args);
510 510 }
511 511 }
512 512 DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1,wait);
513 513 }
514 514
515 515
516 516 // Note: implementation moved to static method to expose the this pointer.
517 517 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
518 518 instanceKlassHandle kh(THREAD, this->as_klassOop());
519 519 set_initialization_state_and_notify_impl(kh, state, CHECK);
520 520 }
521 521
522 522 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
523 523 ObjectLocker ol(this_oop, THREAD);
524 524 this_oop->set_init_state(state);
525 525 ol.notify_all(CHECK);
526 526 }
527 527
528 528 void instanceKlass::add_implementor(klassOop k) {
529 529 assert(Compile_lock->owned_by_self(), "");
530 530 // Filter out my subinterfaces.
531 531 // (Note: Interfaces are never on the subklass list.)
532 532 if (instanceKlass::cast(k)->is_interface()) return;
533 533
534 534 // Filter out subclasses whose supers already implement me.
535 535 // (Note: CHA must walk subclasses of direct implementors
536 536 // in order to locate indirect implementors.)
537 537 klassOop sk = instanceKlass::cast(k)->super();
538 538 if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
539 539 // We only need to check one immediate superclass, since the
540 540 // implements_interface query looks at transitive_interfaces.
541 541 // Any supers of the super have the same (or fewer) transitive_interfaces.
542 542 return;
543 543
544 544 // Update number of implementors
545 545 int i = _nof_implementors++;
546 546
547 547 // Record this implementor, if there are not too many already
548 548 if (i < implementors_limit) {
549 549 assert(_implementors[i] == NULL, "should be exactly one implementor");
550 550 oop_store_without_check((oop*)&_implementors[i], k);
551 551 } else if (i == implementors_limit) {
552 552 // clear out the list on first overflow
553 553 for (int i2 = 0; i2 < implementors_limit; i2++)
554 554 oop_store_without_check((oop*)&_implementors[i2], NULL);
555 555 }
556 556
557 557 // The implementor also implements the transitive_interfaces
558 558 for (int index = 0; index < local_interfaces()->length(); index++) {
559 559 instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
560 560 }
561 561 }
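
A toy model of the encoding used by add_implementor/init_implementor: up to implementors_limit implementors are recorded exactly, and the first overflow clears the slots while the count keeps growing, so CHA can still distinguish zero, a few known implementors, and "many". The limit of 2 is an assumption matching this era of instanceKlass.hpp:

#include <cassert>

const int implementors_limit = 2;  // assumed, per this vintage of the VM

struct ImplementorList {
  void* _implementors[implementors_limit] = { nullptr, nullptr };
  int   _nof_implementors = 0;

  void add(void* k) {
    int i = _nof_implementors++;
    if (i < implementors_limit) {
      assert(_implementors[i] == nullptr && "slot must be empty");
      _implementors[i] = k;                       // record exactly
    } else if (i == implementors_limit) {
      for (int j = 0; j < implementors_limit; j++)
        _implementors[j] = nullptr;               // clear on first overflow
    }
  }
};

int main() {
  ImplementorList l;
  int a, b, c;
  l.add(&a); l.add(&b);   // both recorded exactly
  l.add(&c);              // overflow: slots cleared, count still advances
  assert(l._nof_implementors == 3 && l._implementors[0] == nullptr);
  return 0;
}
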
562 562
563 563 void instanceKlass::init_implementor() {
564 564 for (int i = 0; i < implementors_limit; i++)
565 565 oop_store_without_check((oop*)&_implementors[i], NULL);
566 566 _nof_implementors = 0;
567 567 }
568 568
569 569
570 570 void instanceKlass::process_interfaces(Thread *thread) {
571 571 // link this class into the implementors list of every interface it implements
572 572 KlassHandle this_as_oop (thread, this->as_klassOop());
573 573 for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
574 574 assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
575 575 instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
576 576 assert(interf->is_interface(), "expected interface");
577 577 interf->add_implementor(this_as_oop());
578 578 }
579 579 }
580 580
581 581 bool instanceKlass::can_be_primary_super_slow() const {
582 582 if (is_interface())
583 583 return false;
584 584 else
585 585 return Klass::can_be_primary_super_slow();
586 586 }
587 587
588 588 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
589 589 // The secondaries are the implemented interfaces.
590 590 instanceKlass* ik = instanceKlass::cast(as_klassOop());
591 591 objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
592 592 int num_secondaries = num_extra_slots + interfaces->length();
593 593 if (num_secondaries == 0) {
594 594 return Universe::the_empty_system_obj_array();
595 595 } else if (num_extra_slots == 0) {
596 596 return interfaces();
597 597 } else {
598 598 // a mix of both
599 599 objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
600 600 for (int i = 0; i < interfaces->length(); i++) {
601 601 secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
602 602 }
603 603 return secondaries;
604 604 }
605 605 }
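
A sketch of the three shapes compute_secondary_supers chooses between, on plain containers: a shared empty array when there is nothing to store, the interface array reused as-is when no extra slots are needed, and a fresh array with num_extra_slots reserved up front otherwise. Klass here is an opaque stand-in:

#include <vector>

struct Klass;  // opaque stand-in

std::vector<Klass*> compute_secondary_supers(const std::vector<Klass*>& interfaces,
                                             int num_extra_slots) {
  int num_secondaries = num_extra_slots + (int)interfaces.size();
  if (num_secondaries == 0) return std::vector<Klass*>();  // "the empty array"
  if (num_extra_slots == 0) return interfaces;             // reuse interfaces as-is
  std::vector<Klass*> secondaries(num_secondaries, nullptr);
  for (size_t i = 0; i < interfaces.size(); i++)
    secondaries[num_extra_slots + i] = interfaces[i];      // tail holds interfaces
  return secondaries;
}
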
606 606
607 607 bool instanceKlass::compute_is_subtype_of(klassOop k) {
608 608 if (Klass::cast(k)->is_interface()) {
609 609 return implements_interface(k);
610 610 } else {
611 611 return Klass::compute_is_subtype_of(k);
612 612 }
613 613 }
614 614
615 615 bool instanceKlass::implements_interface(klassOop k) const {
616 616 if (as_klassOop() == k) return true;
617 617 assert(Klass::cast(k)->is_interface(), "should be an interface class");
618 618 for (int i = 0; i < transitive_interfaces()->length(); i++) {
619 619 if (transitive_interfaces()->obj_at(i) == k) {
620 620 return true;
621 621 }
622 622 }
623 623 return false;
624 624 }
625 625
626 626 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
627 627 if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
628 628 if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
629 629 report_java_out_of_memory("Requested array size exceeds VM limit");
630 630 THROW_OOP_0(Universe::out_of_memory_error_array_size());
631 631 }
632 632 int size = objArrayOopDesc::object_size(length);
633 633 klassOop ak = array_klass(n, CHECK_NULL);
634 634 KlassHandle h_ak (THREAD, ak);
635 635 objArrayOop o =
636 636 (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
637 637 return o;
638 638 }
639 639
640 640 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
641 641 if (TraceFinalizerRegistration) {
642 642 tty->print("Registered ");
643 643 i->print_value_on(tty);
644 644 tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
645 645 }
646 646 instanceHandle h_i(THREAD, i);
647 647 // Pass the handle as argument; JavaCalls::call expects oops as jobjects
648 648 JavaValue result(T_VOID);
649 649 JavaCallArguments args(h_i);
650 650 methodHandle mh (THREAD, Universe::finalizer_register_method());
651 651 JavaCalls::call(&result, mh, &args, CHECK_NULL);
652 652 return h_i();
653 653 }
654 654
655 655 instanceOop instanceKlass::allocate_instance(TRAPS) {
656 656 assert(!oop_is_instanceMirror(), "wrong allocation path");
657 657 bool has_finalizer_flag = has_finalizer(); // Query before possible GC
658 658 int size = size_helper(); // Query before forming handle.
659 659
660 660 KlassHandle h_k(THREAD, as_klassOop());
661 661
662 662 instanceOop i;
663 663
664 664 i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
665 665 if (has_finalizer_flag && !RegisterFinalizersAtInit) {
666 666 i = register_finalizer(i, CHECK_NULL);
667 667 }
668 668 return i;
669 669 }
670 670
671 671 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
672 672 // Finalizer registration occurs in the Object.<init> constructor
673 673 // and constructors normally aren't run when allocating perm
674 674 // instances so simply disallow finalizable perm objects. This can
675 675 // be relaxed if a need for it is found.
676 676 assert(!has_finalizer(), "perm objects not allowed to have finalizers");
677 677 assert(!oop_is_instanceMirror(), "wrong allocation path");
678 678 int size = size_helper(); // Query before forming handle.
679 679 KlassHandle h_k(THREAD, as_klassOop());
680 680 instanceOop i = (instanceOop)
681 681 CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
682 682 return i;
683 683 }
684 684
685 685 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
686 686 if (is_interface() || is_abstract()) {
687 687 ResourceMark rm(THREAD);
688 688 THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
689 689 : vmSymbols::java_lang_InstantiationException(), external_name());
690 690 }
691 691 if (as_klassOop() == SystemDictionary::Class_klass()) {
692 692 ResourceMark rm(THREAD);
693 693 THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
694 694 : vmSymbols::java_lang_IllegalAccessException(), external_name());
695 695 }
696 696 }
697 697
698 698 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
699 699 instanceKlassHandle this_oop(THREAD, as_klassOop());
700 700 return array_klass_impl(this_oop, or_null, n, THREAD);
701 701 }
702 702
703 703 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
704 704 if (this_oop->array_klasses() == NULL) {
705 705 if (or_null) return NULL;
706 706
707 707 ResourceMark rm;
708 708 JavaThread *jt = (JavaThread *)THREAD;
709 709 {
710 710 // Atomic creation of array_klasses
711 711 MutexLocker mc(Compile_lock, THREAD); // for vtables
712 712 MutexLocker ma(MultiArray_lock, THREAD);
713 713
714 714 // Check if update has already taken place
715 715 if (this_oop->array_klasses() == NULL) {
716 716 objArrayKlassKlass* oakk =
717 717 (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
718 718
719 719 klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
720 720 this_oop->set_array_klasses(k);
721 721 }
722 722 }
723 723 }
724 724 // array_klasses() will always be set at this point
725 725 objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
726 726 if (or_null) {
727 727 return oak->array_klass_or_null(n);
728 728 }
729 729 return oak->array_klass(n, CHECK_NULL);
730 730 }
731 731
732 732 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
733 733 return array_klass_impl(or_null, 1, THREAD);
734 734 }
735 735
736 736 void instanceKlass::call_class_initializer(TRAPS) {
737 737 instanceKlassHandle ik (THREAD, as_klassOop());
738 738 call_class_initializer_impl(ik, THREAD);
739 739 }
740 740
741 741 static int call_class_initializer_impl_counter = 0; // for debugging
742 742
743 743 methodOop instanceKlass::class_initializer() {
744 744 methodOop clinit = find_method(
745 745 vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
746 746 if (clinit != NULL && clinit->has_valid_initializer_flags()) {
747 747 return clinit;
748 748 }
749 749 return NULL;
750 750 }
751 751
752 752 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
753 753 methodHandle h_method(THREAD, this_oop->class_initializer());
754 754 assert(!this_oop->is_initialized(), "we cannot initialize twice");
755 755 if (TraceClassInitialization) {
756 756 tty->print("%d Initializing ", call_class_initializer_impl_counter++);
757 757 this_oop->name()->print_value();
758 758 tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
759 759 }
760 760 if (h_method() != NULL) {
761 761 JavaCallArguments args; // No arguments
762 762 JavaValue result(T_VOID);
763 763 JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
764 764 }
765 765 }
766 766
767 767
768 768 void instanceKlass::mask_for(methodHandle method, int bci,
769 769 InterpreterOopMap* entry_for) {
770 770 // Dirty read, then double-check under a lock.
771 771 if (_oop_map_cache == NULL) {
772 772 // Otherwise, allocate a new one.
773 773 MutexLocker x(OopMapCacheAlloc_lock);
774 774 // First time use. Allocate a cache in C heap
775 775 if (_oop_map_cache == NULL) {
776 776 _oop_map_cache = new OopMapCache();
777 777 }
778 778 }
779 779 // _oop_map_cache is constant after init; lookup below does its own locking.
780 780 _oop_map_cache->lookup(method, bci, entry_for);
781 781 }
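
The lazy-allocation idiom in mask_for (dirty read, then re-check under a lock, then publish a fully built cache) rendered in portable C++11. The VM uses its own MutexLocker and relies on pointer-store ordering; std::atomic and std::mutex are stand-ins here, not HotSpot APIs:

#include <atomic>
#include <mutex>

struct OopMapCache { /* ... */ };

static std::atomic<OopMapCache*> _oop_map_cache{nullptr};
static std::mutex OopMapCacheAlloc_lock;

OopMapCache* oop_map_cache() {
  OopMapCache* c = _oop_map_cache.load(std::memory_order_acquire);  // dirty read
  if (c == nullptr) {
    std::lock_guard<std::mutex> x(OopMapCacheAlloc_lock);
    c = _oop_map_cache.load(std::memory_order_relaxed);             // re-check under lock
    if (c == nullptr) {
      c = new OopMapCache();
      _oop_map_cache.store(c, std::memory_order_release);           // publish built cache
    }
  }
  return c;  // constant after first publication; lookups lock internally
}
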
782 782
783 783
784 784 bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
785 785 const int n = fields()->length();
786 786 for (int i = 0; i < n; i += next_offset ) {
787 787 int name_index = fields()->ushort_at(i + name_index_offset);
788 788 int sig_index = fields()->ushort_at(i + signature_index_offset);
789 789 Symbol* f_name = constants()->symbol_at(name_index);
790 790 Symbol* f_sig = constants()->symbol_at(sig_index);
791 791 if (f_name == name && f_sig == sig) {
792 792 fd->initialize(as_klassOop(), i);
793 793 return true;
794 794 }
795 795 }
796 796 return false;
797 797 }
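
In this vintage of the VM, fields() is a flat short array where each field record occupies next_offset slots, with the name and signature constant-pool indices at fixed positions inside a record. A minimal sketch of the scan above; the slot constants below are assumptions chosen to match the loop's shape, not authoritative values:

#include <cstdint>
#include <vector>

const int name_index_offset      = 1;  // assumed slot positions
const int signature_index_offset = 2;
const int next_offset            = 6;  // shorts per field record (assumed)

// Returns the record start index for a (name, sig) constant-pool pair, or -1.
int find_local_field(const std::vector<uint16_t>& fields,
                     uint16_t name_index, uint16_t sig_index) {
  const int n = (int)fields.size();
  for (int i = 0; i < n; i += next_offset) {
    if (fields[i + name_index_offset] == name_index &&
        fields[i + signature_index_offset] == sig_index) {
      return i;  // caller would then do fd->initialize(as_klassOop(), i)
    }
  }
  return -1;
}
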
798 798
799 799
800 800 void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
801 801 Klass::shared_symbols_iterate(closure);
802 802 closure->do_symbol(&_generic_signature);
803 803 closure->do_symbol(&_source_file_name);
804 804 closure->do_symbol(&_source_debug_extension);
805 805
806 806 const int n = fields()->length();
807 807 for (int i = 0; i < n; i += next_offset ) {
808 808 int name_index = fields()->ushort_at(i + name_index_offset);
809 809 closure->do_symbol(constants()->symbol_at_addr(name_index));
810 810 int sig_index = fields()->ushort_at(i + signature_index_offset);
811 811 closure->do_symbol(constants()->symbol_at_addr(sig_index));
812 812 }
813 813 }
814 814
815 815
816 816 klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
817 817 const int n = local_interfaces()->length();
818 818 for (int i = 0; i < n; i++) {
819 819 klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
820 820 assert(Klass::cast(intf1)->is_interface(), "just checking type");
821 821 // search for field in current interface
822 822 if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
823 823 assert(fd->is_static(), "interface field must be static");
824 824 return intf1;
825 825 }
826 826 // search for field in direct superinterfaces
827 827 klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
828 828 if (intf2 != NULL) return intf2;
829 829 }
830 830 // otherwise field lookup fails
831 831 return NULL;
832 832 }
833 833
834 834
835 835 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
836 836 // search order according to newest JVM spec (5.4.3.2, p.167).
837 837 // 1) search for field in current klass
838 838 if (find_local_field(name, sig, fd)) {
839 839 return as_klassOop();
840 840 }
841 841 // 2) search for field recursively in direct superinterfaces
842 842 { klassOop intf = find_interface_field(name, sig, fd);
843 843 if (intf != NULL) return intf;
844 844 }
845 845 // 3) apply field lookup recursively if superclass exists
846 846 { klassOop supr = super();
847 847 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
848 848 }
849 849 // 4) otherwise field lookup fails
850 850 return NULL;
851 851 }
852 852
853 853
854 854 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
855 855 // search order according to newest JVM spec (5.4.3.2, p.167).
856 856 // 1) search for field in current klass
857 857 if (find_local_field(name, sig, fd)) {
858 858 if (fd->is_static() == is_static) return as_klassOop();
859 859 }
860 860 // 2) search for field recursively in direct superinterfaces
861 861 if (is_static) {
862 862 klassOop intf = find_interface_field(name, sig, fd);
863 863 if (intf != NULL) return intf;
864 864 }
865 865 // 3) apply field lookup recursively if superclass exists
866 866 { klassOop supr = super();
867 867 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
868 868 }
869 869 // 4) otherwise field lookup fails
870 870 return NULL;
871 871 }
872 872
873 873
874 874 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
875 875 int length = fields()->length();
876 876 for (int i = 0; i < length; i += next_offset) {
877 877 if (offset_from_fields( i ) == offset) {
878 878 fd->initialize(as_klassOop(), i);
879 879 if (fd->is_static() == is_static) return true;
880 880 }
881 881 }
882 882 return false;
883 883 }
884 884
885 885
886 886 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
887 887 klassOop klass = as_klassOop();
888 888 while (klass != NULL) {
889 889 if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
890 890 return true;
891 891 }
892 892 klass = Klass::cast(klass)->super();
893 893 }
894 894 return false;
895 895 }
896 896
897 897
898 898 void instanceKlass::methods_do(void f(methodOop method)) {
899 899 int len = methods()->length();
900 900 for (int index = 0; index < len; index++) {
901 901 methodOop m = methodOop(methods()->obj_at(index));
902 902 assert(m->is_method(), "must be method");
903 903 f(m);
904 904 }
905 905 }
906 906
907 907
908 908 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
909 909 fieldDescriptor fd;
910 910 int length = fields()->length();
911 911 for (int i = 0; i < length; i += next_offset) {
912 912 fd.initialize(as_klassOop(), i);
913 913 if (fd.is_static()) cl->do_field(&fd);
914 914 }
915 915 }
916 916
917 917
918 918 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
919 919 instanceKlassHandle h_this(THREAD, as_klassOop());
920 920 do_local_static_fields_impl(h_this, f, CHECK);
921 921 }
922 922
923 923
924 924 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
925 925 fieldDescriptor fd;
926 926 int length = this_oop->fields()->length();
927 927 for (int i = 0; i < length; i += next_offset) {
928 928 fd.initialize(this_oop(), i);
929 929 if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
930 930 }
931 931 }
932 932
933 933
934 934 static int compare_fields_by_offset(int* a, int* b) {
935 935 return a[0] - b[0];
936 936 }
937 937
938 938 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
939 939 instanceKlass* super = superklass();
940 940 if (super != NULL) {
941 941 super->do_nonstatic_fields(cl);
942 942 }
943 943 fieldDescriptor fd;
944 944 int length = fields()->length();
945 945 // In DebugInfo nonstatic fields are sorted by offset.
946 946 int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
947 947 int j = 0;
948 948 for (int i = 0; i < length; i += next_offset) {
949 949 fd.initialize(as_klassOop(), i);
950 950 if (!fd.is_static()) {
951 951 fields_sorted[j + 0] = fd.offset();
952 952 fields_sorted[j + 1] = i;
953 953 j += 2;
954 954 }
955 955 }
956 956 if (j > 0) {
957 957 length = j;
958 958 // _sort_Fn is defined in growableArray.hpp.
959 959 qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
960 960 for (int i = 0; i < length; i += 2) {
961 961 fd.initialize(as_klassOop(), fields_sorted[i + 1]);
962 962 assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
963 963 cl->do_field(&fd);
964 964 }
965 965 }
966 966 FREE_C_HEAP_ARRAY(int, fields_sorted);
967 967 }
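
A standalone sketch of the pair-sort used by do_nonstatic_fields: (offset, field_index) pairs are packed into one flat int array, and qsort is told each element is two ints wide, so comparing a[0] orders by offset while the record index rides along in a[1]:

#include <cstdio>
#include <cstdlib>

static int compare_fields_by_offset(const void* a, const void* b) {
  return ((const int*)a)[0] - ((const int*)b)[0];
}

int main() {
  int fields_sorted[] = { 12, 2,  0, 0,  4, 1 };  // (offset, index) pairs
  int j = 6;                                       // ints in use
  qsort(fields_sorted, j / 2, 2 * sizeof(int), compare_fields_by_offset);
  for (int i = 0; i < j; i += 2)
    printf("offset %d -> field record %d\n", fields_sorted[i], fields_sorted[i + 1]);
  return 0;  // prints offsets 0, 4, 12 in ascending order
}
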
968 968
969 969
970 970 void instanceKlass::array_klasses_do(void f(klassOop k)) {
971 971 if (array_klasses() != NULL)
972 972 arrayKlass::cast(array_klasses())->array_klasses_do(f);
973 973 }
974 974
975 975
976 976 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
977 977 f(as_klassOop());
978 978 array_klasses_do(f);
979 979 }
980 980
981 981 #ifdef ASSERT
982 982 static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
983 983 int len = methods->length();
984 984 for (int index = 0; index < len; index++) {
985 985 methodOop m = (methodOop)(methods->obj_at(index));
986 986 assert(m->is_method(), "must be method");
987 987 if (m->signature() == signature && m->name() == name) {
988 988 return index;
989 989 }
990 990 }
991 991 return -1;
992 992 }
993 993 #endif
994 994
995 995 methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
996 996 return instanceKlass::find_method(methods(), name, signature);
997 997 }
998 998
999 999 methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
1000 1000 int len = methods->length();
1001 1001 // methods are sorted, so do binary search
1002 1002 int l = 0;
1003 1003 int h = len - 1;
1004 1004 while (l <= h) {
1005 1005 int mid = (l + h) >> 1;
1006 1006 methodOop m = (methodOop)methods->obj_at(mid);
1007 1007 assert(m->is_method(), "must be method");
1008 1008 int res = m->name()->fast_compare(name);
1009 1009 if (res == 0) {
1010 1010 // found matching name; do linear search to find matching signature
1011 1011 // first, quick check for common case
1012 1012 if (m->signature() == signature) return m;
1013 1013 // search downwards through overloaded methods
1014 1014 int i;
1015 1015 for (i = mid - 1; i >= l; i--) {
1016 1016 methodOop m = (methodOop)methods->obj_at(i);
1017 1017 assert(m->is_method(), "must be method");
1018 1018 if (m->name() != name) break;
1019 1019 if (m->signature() == signature) return m;
1020 1020 }
1021 1021 // search upwards
1022 1022 for (i = mid + 1; i <= h; i++) {
1023 1023 methodOop m = (methodOop)methods->obj_at(i);
1024 1024 assert(m->is_method(), "must be method");
1025 1025 if (m->name() != name) break;
1026 1026 if (m->signature() == signature) return m;
1027 1027 }
1028 1028 // not found
1029 1029 #ifdef ASSERT
1030 1030 int index = linear_search(methods, name, signature);
1031 1031 assert(index == -1, err_msg("binary search should have found entry %d", index));
1032 1032 #endif
1033 1033 return NULL;
1034 1034 } else if (res < 0) {
1035 1035 l = mid + 1;
1036 1036 } else {
1037 1037 h = mid - 1;
1038 1038 }
1039 1039 }
1040 1040 #ifdef ASSERT
1041 1041 int index = linear_search(methods, name, signature);
1042 1042 assert(index == -1, err_msg("binary search should have found entry %d", index));
1043 1043 #endif
1044 1044 return NULL;
1045 1045 }
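
The lookup strategy above on plain data: binary-search a name-sorted array, and on a name hit scan outward in both directions to cover overloads that share the name but differ in signature. Note HotSpot's fast_compare orders Symbols by address, not lexicographically; the sketch uses string order, which preserves the same structure:

#include <string>
#include <vector>

struct Method { std::string name, sig; };

// methods must be sorted by name (overloads adjacent, any order within a name)
const Method* find_method(const std::vector<Method>& methods,
                          const std::string& name, const std::string& sig) {
  int l = 0, h = (int)methods.size() - 1;
  while (l <= h) {
    int mid = (l + h) / 2;
    int res = methods[mid].name.compare(name);
    if (res == 0) {
      if (methods[mid].sig == sig) return &methods[mid];           // common case
      for (int i = mid - 1; i >= l && methods[i].name == name; i--) // scan down
        if (methods[i].sig == sig) return &methods[i];
      for (int i = mid + 1; i <= h && methods[i].name == name; i++) // scan up
        if (methods[i].sig == sig) return &methods[i];
      return nullptr;  // name present, no matching signature
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
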
1046 1046
1047 1047 methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
1048 1048 klassOop klass = as_klassOop();
1049 1049 while (klass != NULL) {
1050 1050 methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
1051 1051 if (method != NULL) return method;
1052 1052 klass = instanceKlass::cast(klass)->super();
1053 1053 }
1054 1054 return NULL;
1055 1055 }
1056 1056
1057 1057 // lookup a method in all the interfaces that this class implements
1058 1058 methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
1059 1059 Symbol* signature) const {
1060 1060 objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
1061 1061 int num_ifs = all_ifs->length();
1062 1062 instanceKlass *ik = NULL;
1063 1063 for (int i = 0; i < num_ifs; i++) {
1064 1064 ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
1065 1065 methodOop m = ik->lookup_method(name, signature);
1066 1066 if (m != NULL) {
1067 1067 return m;
1068 1068 }
1069 1069 }
1070 1070 return NULL;
1071 1071 }
1072 1072
1073 1073 /* jni_id_for_impl for jfieldIds only */
1074 1074 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
1075 1075 MutexLocker ml(JfieldIdCreation_lock);
1076 1076 // Retry lookup after we got the lock
1077 1077 JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
1078 1078 if (probe == NULL) {
1079 1079 // Slow case, allocate new static field identifier
1080 1080 probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
1081 1081 this_oop->set_jni_ids(probe);
1082 1082 }
1083 1083 return probe;
1084 1084 }
1085 1085
1086 1086
1087 1087 /* jni_id_for for jfieldIds only */
1088 1088 JNIid* instanceKlass::jni_id_for(int offset) {
1089 1089 JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
1090 1090 if (probe == NULL) {
1091 1091 probe = jni_id_for_impl(this->as_klassOop(), offset);
1092 1092 }
1093 1093 return probe;
1094 1094 }
1095 1095
1096 1096
1097 1097 // Lookup or create a jmethodID.
1098 1098 // This code is called by the VMThread and JavaThreads so the
1099 1099 // locking has to be done very carefully to avoid deadlocks
1100 1100 // and/or other cache consistency problems.
1101 1101 //
1102 1102 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
1103 1103 size_t idnum = (size_t)method_h->method_idnum();
1104 1104 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1105 1105 size_t length = 0;
1106 1106 jmethodID id = NULL;
1107 1107
1108 1108 // We use a double-check locking idiom here because this cache is
1109 1109 // performance sensitive. In the normal system, this cache only
1110 1110 // transitions from NULL to non-NULL which is safe because we use
1111 1111 // release_set_methods_jmethod_ids() to advertise the new cache.
1112 1112 // A partially constructed cache should never be seen by a racing
1113 1113 // thread. We also use release_store_ptr() to save a new jmethodID
1114 1114 // in the cache so a partially constructed jmethodID should never be
1115 1115 // seen either. Cache reads of existing jmethodIDs proceed without a
1116 1116 // lock, but cache writes of a new jmethodID require uniqueness and
1117 1117 // creation of the cache itself requires no leaks so a lock is
1118 1118 // generally acquired in those two cases.
1119 1119 //
1120 1120 // If the RedefineClasses() API has been used, then this cache can
1121 1121 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1122 1122 // Cache creation requires no leaks and we require safety between all
1123 1123 // cache accesses and freeing of the old cache so a lock is generally
1124 1124 // acquired when the RedefineClasses() API has been used.
1125 1125
1126 1126 if (jmeths != NULL) {
1127 1127 // the cache already exists
1128 1128 if (!ik_h->idnum_can_increment()) {
1129 1129 // the cache can't grow so we can just get the current values
1130 1130 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1131 1131 } else {
1132 1132 // cache can grow so we have to be more careful
1133 1133 if (Threads::number_of_threads() == 0 ||
1134 1134 SafepointSynchronize::is_at_safepoint()) {
1135 1135 // we're single threaded or at a safepoint - no locking needed
1136 1136 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1137 1137 } else {
1138 1138 MutexLocker ml(JmethodIdCreation_lock);
1139 1139 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1140 1140 }
1141 1141 }
1142 1142 }
1143 1143 // implied else:
1144 1144 // we need to allocate a cache so default length and id values are good
1145 1145
1146 1146 if (jmeths == NULL || // no cache yet
1147 1147 length <= idnum || // cache is too short
1148 1148 id == NULL) { // cache doesn't contain entry
1149 1149
1150 1150 // This function can be called by the VMThread so we have to do all
1151 1151 // things that might block on a safepoint before grabbing the lock.
1152 1152 // Otherwise, we can deadlock with the VMThread or have a cache
1153 1153 // consistency issue. These vars keep track of what we might have
1154 1154 // to free after the lock is dropped.
1155 1155 jmethodID to_dealloc_id = NULL;
1156 1156 jmethodID* to_dealloc_jmeths = NULL;
1157 1157
1158 1158 // may not allocate new_jmeths or use it if we allocate it
1159 1159 jmethodID* new_jmeths = NULL;
1160 1160 if (length <= idnum) {
1161 1161 // allocate a new cache that might be used
1162 1162 size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1163 1163 new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
1164 1164 memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1165 1165 // cache size is stored in element[0], other elements offset by one
1166 1166 new_jmeths[0] = (jmethodID)size;
1167 1167 }
1168 1168
1169 1169 // allocate a new jmethodID that might be used
1170 1170 jmethodID new_id = NULL;
1171 1171 if (method_h->is_old() && !method_h->is_obsolete()) {
1172 1172 // The method passed in is old (but not obsolete), we need to use the current version
1173 1173 methodOop current_method = ik_h->method_with_idnum((int)idnum);
1174 1174 assert(current_method != NULL, "old but not obsolete, so should exist");
1175 1175 methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1176 1176 new_id = JNIHandles::make_jmethod_id(current_method_h);
1177 1177 } else {
1178 1178 // It is the current version of the method or an obsolete method,
1179 1179 // use the version passed in
1180 1180 new_id = JNIHandles::make_jmethod_id(method_h);
1181 1181 }
1182 1182
1183 1183 if (Threads::number_of_threads() == 0 ||
1184 1184 SafepointSynchronize::is_at_safepoint()) {
1185 1185 // we're single threaded or at a safepoint - no locking needed
1186 1186 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1187 1187 &to_dealloc_id, &to_dealloc_jmeths);
1188 1188 } else {
1189 1189 MutexLocker ml(JmethodIdCreation_lock);
1190 1190 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1191 1191 &to_dealloc_id, &to_dealloc_jmeths);
1192 1192 }
1193 1193
1194 1194 // The lock has been dropped so we can free resources.
1195 1195 // Free up either the old cache or the new cache if we allocated one.
1196 1196 if (to_dealloc_jmeths != NULL) {
1197 1197 FreeHeap(to_dealloc_jmeths);
1198 1198 }
1199 1199 // free up the new ID since it wasn't needed
1200 1200 if (to_dealloc_id != NULL) {
1201 1201 JNIHandles::destroy_jmethod_id(to_dealloc_id);
1202 1202 }
1203 1203 }
1204 1204 return id;
1205 1205 }
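
A compact sketch of the discipline get_jmethod_id follows: anything that could block (allocation) is built before the lock, installation happens under the lock with a re-check, and losers are freed only after the lock drops. std::mutex stands in for JmethodIdCreation_lock, and publication ordering (the VM's release stores) is elided:

#include <cstring>
#include <mutex>

static std::mutex JmethodIdCreation_lock;

// Cache layout mirrors the VM's: slot 0 stores the length, ids start at slot 1.
// Returns the winning id; if it differs from new_id, the caller should free
// new_id (the VM hands it back via to_dealloc_id for exactly that reason).
void* get_or_create_id(void**& cache, size_t idnum, void* new_id, size_t count) {
  void** new_cache = nullptr;
  if (cache == nullptr || (size_t)cache[0] <= idnum) {
    size_t size = (idnum + 1 > count) ? idnum + 1 : count;
    new_cache = new void*[size + 1]();        // built BEFORE locking: may block
    new_cache[0] = (void*)size;
  }
  void* id;
  void** to_free = nullptr;
  {
    std::lock_guard<std::mutex> ml(JmethodIdCreation_lock);
    if (cache == nullptr || (size_t)cache[0] <= idnum) {  // re-check under lock
      if (cache != nullptr) {
        memcpy(new_cache + 1, cache + 1, (size_t)cache[0] * sizeof(void*));
        to_free = cache;                      // old cache freed after unlock
      }
      cache = new_cache;
      new_cache = nullptr;                    // ownership passed to the cache
    }
    id = cache[idnum + 1];
    if (id == nullptr) {
      cache[idnum + 1] = id = new_id;         // first thread to get here wins
    }
  }
  delete[] to_free;                           // deallocate only after unlocking
  delete[] new_cache;                         // speculative cache that lost the race
  return id;
}
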
1206 1206
1207 1207
1208 1208 // Common code to fetch the jmethodID from the cache or update the
1209 1209 // cache with the new jmethodID. This function should never do anything
1210 1210 // that causes the caller to go to a safepoint or we can deadlock with
1211 1211 // the VMThread or have cache consistency issues.
1212 1212 //
1213 1213 jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
1214 1214 instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
1215 1215 jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
1216 1216 jmethodID** to_dealloc_jmeths_p) {
1217 1217 assert(new_id != NULL, "sanity check");
1218 1218 assert(to_dealloc_id_p != NULL, "sanity check");
1219 1219 assert(to_dealloc_jmeths_p != NULL, "sanity check");
1220 1220 assert(Threads::number_of_threads() == 0 ||
1221 1221 SafepointSynchronize::is_at_safepoint() ||
1222 1222 JmethodIdCreation_lock->owned_by_self(), "sanity check");
1223 1223
1224 1224 // reacquire the cache - we are locked, single threaded or at a safepoint
1225 1225 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1226 1226 jmethodID id = NULL;
1227 1227 size_t length = 0;
1228 1228
1229 1229 if (jmeths == NULL || // no cache yet
1230 1230 (length = (size_t)jmeths[0]) <= idnum) { // cache is too short
1231 1231 if (jmeths != NULL) {
1232 1232 // copy any existing entries from the old cache
1233 1233 for (size_t index = 0; index < length; index++) {
1234 1234 new_jmeths[index+1] = jmeths[index+1];
1235 1235 }
1236 1236 *to_dealloc_jmeths_p = jmeths; // save old cache for later delete
1237 1237 }
1238 1238 ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
1239 1239 } else {
1240 1240 // fetch jmethodID (if any) from the existing cache
1241 1241 id = jmeths[idnum+1];
1242 1242 *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete
1243 1243 }
1244 1244 if (id == NULL) {
1245 1245 // No matching jmethodID in the existing cache or we have a new
1246 1246 // cache or we just grew the cache. This cache write is done here
1247 1247 // by the first thread to win the foot race because a jmethodID
1248 1248 // needs to be unique once it is generally available.
1249 1249 id = new_id;
1250 1250
1251 1251 // The jmethodID cache can be read while unlocked so we have to
1252 1252 // make sure the new jmethodID is complete before installing it
1253 1253 // in the cache.
1254 1254 OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1255 1255 } else {
1256 1256 *to_dealloc_id_p = new_id; // save new id for later delete
1257 1257 }
1258 1258 return id;
1259 1259 }
1260 1260
1261 1261
1262 1262 // Common code to get the jmethodID cache length and the jmethodID
1263 1263 // value at index idnum if there is one.
1264 1264 //
1265 1265 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1266 1266 size_t idnum, size_t *length_p, jmethodID* id_p) {
1267 1267 assert(cache != NULL, "sanity check");
1268 1268 assert(length_p != NULL, "sanity check");
1269 1269 assert(id_p != NULL, "sanity check");
1270 1270
1271 1271 // cache size is stored in element[0], other elements offset by one
1272 1272 *length_p = (size_t)cache[0];
1273 1273 if (*length_p <= idnum) { // cache is too short
1274 1274 *id_p = NULL;
1275 1275 } else {
1276 1276 *id_p = cache[idnum+1]; // fetch jmethodID (if any)
1277 1277 }
1278 1278 }
1279 1279
1280 1280
1281 1281 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
1282 1282 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1283 1283 size_t idnum = (size_t)method->method_idnum();
1284 1284 jmethodID* jmeths = methods_jmethod_ids_acquire();
1285 1285 size_t length; // length assigned as debugging crumb
1286 1286 jmethodID id = NULL;
1287 1287 if (jmeths != NULL && // If there is a cache
1288 1288 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough,
1289 1289 id = jmeths[idnum+1]; // Look up the id (may be NULL)
1290 1290 }
1291 1291 return id;
1292 1292 }
1293 1293
1294 1294
1295 1295 // Cache an itable index
1296 1296 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1297 1297 int* indices = methods_cached_itable_indices_acquire();
1298 1298 int* to_dealloc_indices = NULL;
1299 1299
1300 1300 // We use a double-check locking idiom here because this cache is
1301 1301 // performance sensitive. In the normal system, this cache only
1302 1302 // transitions from NULL to non-NULL which is safe because we use
1303 1303 // release_set_methods_cached_itable_indices() to advertise the
1304 1304 // new cache. A partially constructed cache should never be seen
1305 1305 // by a racing thread. Cache reads and writes proceed without a
1306 1306 // lock, but creation of the cache itself requires no leaks so a
1307 1307 // lock is generally acquired in that case.
1308 1308 //
1309 1309 // If the RedefineClasses() API has been used, then this cache can
1310 1310 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1311 1311 // Cache creation requires no leaks and we require safety between all
1312 1312 // cache accesses and freeing of the old cache so a lock is generally
1313 1313 // acquired when the RedefineClasses() API has been used.
1314 1314
1315 1315 if (indices == NULL || idnum_can_increment()) {
1316 1316 // we need a cache or the cache can grow
1317 1317 MutexLocker ml(JNICachedItableIndex_lock);
1318 1318 // reacquire the cache to see if another thread already did the work
1319 1319 indices = methods_cached_itable_indices_acquire();
1320 1320 size_t length = 0;
1321 1321 // cache size is stored in element[0], other elements offset by one
1322 1322 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1323 1323 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1324 1324 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
1325 1325 new_indices[0] = (int)size;
1326 1326 // copy any existing entries
1327 1327 size_t i;
1328 1328 for (i = 0; i < length; i++) {
1329 1329 new_indices[i+1] = indices[i+1];
1330 1330 }
1331 1331 // Set all the rest to -1
1332 1332 for (i = length; i < size; i++) {
1333 1333 new_indices[i+1] = -1;
1334 1334 }
1335 1335 if (indices != NULL) {
1336 1336 // We have an old cache to delete so save it for after we
1337 1337 // drop the lock.
1338 1338 to_dealloc_indices = indices;
1339 1339 }
1340 1340 release_set_methods_cached_itable_indices(indices = new_indices);
1341 1341 }
1342 1342
1343 1343 if (idnum_can_increment()) {
1344 1344 // this cache can grow so we have to write to it safely
1345 1345 indices[idnum+1] = index;
1346 1346 }
1347 1347 } else {
1348 1348 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1349 1349 }
1350 1350
1351 1351 if (!idnum_can_increment()) {
1352 1352 // The cache cannot grow and this JNI itable index value does not
1353 1353 // have to be unique like a jmethodID. If there is a race to set it,
1354 1354 // it doesn't matter.
1355 1355 indices[idnum+1] = index;
1356 1356 }
1357 1357
1358 1358 if (to_dealloc_indices != NULL) {
1359 1359 // we allocated a new cache so free the old one
1360 1360 FreeHeap(to_dealloc_indices);
1361 1361 }
1362 1362 }
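
// A standalone sketch of the publication idiom used above (illustrative
// only; it uses C++11 atomics where HotSpot uses its release_set/acquire
// accessors, and 'g_cache'/'read_cached' are hypothetical names):
//
//   #include <atomic>
//   #include <cstddef>
//
//   static std::atomic<int*> g_cache(nullptr);   // element [0] = length
//
//   int read_cached(std::size_t idnum) {         // lock-free reader
//     int* c = g_cache.load(std::memory_order_acquire);
//     if (c != nullptr && (std::size_t)c[0] > idnum) {
//       return c[idnum + 1];                     // may still be the -1 default
//     }
//     return -1;
//   }
//
// A writer fills in a fresh array first and publishes it last with a
// release store, so no reader can observe a partially built cache.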
1363 1363
1364 1364
1365 1365 // Retrieve a cached itable index
1366 1366 int instanceKlass::cached_itable_index(size_t idnum) {
1367 1367 int* indices = methods_cached_itable_indices_acquire();
1368 1368 if (indices != NULL && ((size_t)indices[0]) > idnum) {
1369 1369 // indices exist and are long enough; retrieve the possibly cached index
1370 1370 return indices[idnum+1];
1371 1371 }
1372 1372 return -1;
1373 1373 }
1374 1374
1375 1375
1376 1376 //
1377 1377 // nmethodBucket is used to record dependent nmethods for
1378 1378 // deoptimization. nmethod dependencies are actually <klass, method>
1379 1379 // pairs but we really only care about the klass part for purposes of
1380 1380 // finding nmethods which might need to be deoptimized. Instead of
1381 1381 // recording the method, a count of how many times a particular nmethod
1382 1382 // was recorded is kept. This ensures that any recording errors are
1383 1383 // noticed since an nmethod should be removed as many times as it is
1384 1384 // added.
1385 1385 //
1386 1386 class nmethodBucket {
1387 1387 private:
1388 1388 nmethod* _nmethod;
1389 1389 int _count;
1390 1390 nmethodBucket* _next;
1391 1391
1392 1392 public:
1393 1393 nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1394 1394 _nmethod = nmethod;
1395 1395 _next = next;
1396 1396 _count = 1;
1397 1397 }
1398 1398 int count() { return _count; }
1399 1399 int increment() { _count += 1; return _count; }
1400 1400 int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1401 1401 nmethodBucket* next() { return _next; }
1402 1402 void set_next(nmethodBucket* b) { _next = b; }
1403 1403 nmethod* get_nmethod() { return _nmethod; }
1404 1404 };
1405 1405
1406 1406
1407 1407 //
1408 1408 // Walk the list of dependent nmethods searching for nmethods which
1409 -// are dependent on the klassOop that was passed in and mark them for
1409 +// are dependent on the changes that were passed in and mark them for
1410 1410 // deoptimization. Returns the number of nmethods found.
1411 1411 //
1412 1412 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1413 1413 assert_locked_or_safepoint(CodeCache_lock);
1414 1414 int found = 0;
1415 1415 nmethodBucket* b = _dependencies;
1416 1416 while (b != NULL) {
1417 1417 nmethod* nm = b->get_nmethod();
1418 1418 // since dependencies aren't removed until an nmethod becomes a zombie,
1419 1419 // the dependency list may contain nmethods which aren't alive.
1420 1420 if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1421 1421 if (TraceDependencies) {
1422 1422 ResourceMark rm;
1423 1423 tty->print_cr("Marked for deoptimization");
1424 1424 tty->print_cr(" context = %s", this->external_name());
1425 1425 changes.print();
1426 1426 nm->print();
1427 1427 nm->print_dependencies();
1428 1428 }
1429 1429 nm->mark_for_deoptimization();
1430 1430 found++;
1431 1431 }
1432 1432 b = b->next();
1433 1433 }
1434 1434 return found;
1435 1435 }
1436 1436
1437 1437
1438 1438 //
1439 1439 // Add an nmethodBucket to the list of dependencies for this nmethod.
1440 1440 // It's possible that an nmethod has multiple dependencies on this klass
1441 1441 // so a count is kept for each bucket to guarantee that creation and
1442 1442 // deletion of dependencies is consistent.
1443 1443 //
1444 1444 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1445 1445 assert_locked_or_safepoint(CodeCache_lock);
1446 1446 nmethodBucket* b = _dependencies;
1447 1447 nmethodBucket* last = NULL;
1448 1448 while (b != NULL) {
1449 1449 if (nm == b->get_nmethod()) {
1450 1450 b->increment();
1451 1451 return;
1452 1452 }
1453 1453 b = b->next();
1454 1454 }
1455 1455 _dependencies = new nmethodBucket(nm, _dependencies);
1456 1456 }
1457 1457
1458 1458
1459 1459 //
1460 1460 // Decrement count of the nmethod in the dependency list and remove
1461 1461 // the bucket completely when the count goes to 0. This method must
1462 1462 // find a corresponding bucket otherwise there's a bug in the
1463 1463 // recording of dependencies.
1464 1464 //
1465 1465 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1466 1466 assert_locked_or_safepoint(CodeCache_lock);
1467 1467 nmethodBucket* b = _dependencies;
1468 1468 nmethodBucket* last = NULL;
1469 1469 while (b != NULL) {
1470 1470 if (nm == b->get_nmethod()) {
1471 1471 if (b->decrement() == 0) {
1472 1472 if (last == NULL) {
1473 1473 _dependencies = b->next();
1474 1474 } else {
1475 1475 last->set_next(b->next());
1476 1476 }
1477 1477 delete b;
1478 1478 }
1479 1479 return;
1480 1480 }
1481 1481 last = b;
1482 1482 b = b->next();
1483 1483 }
1484 1484 #ifdef ASSERT
1485 1485 tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1486 1486 nm->print();
1487 1487 #endif // ASSERT
1488 1488 ShouldNotReachHere();
1489 1489 }
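
// Taken together, the add/remove pair keeps a per-nmethod reference count;
// an illustrative sequence (assuming a valid instanceKlass* 'ik' and
// nmethod* 'nm', with CodeCache_lock held):
//
//   ik->add_dependent_nmethod(nm);     // new bucket, count == 1
//   ik->add_dependent_nmethod(nm);     // same bucket, count == 2
//   ik->remove_dependent_nmethod(nm);  // count == 1, bucket kept
//   ik->remove_dependent_nmethod(nm);  // count == 0, bucket deleted
//
// A third remove would fall off the list and hit ShouldNotReachHere(),
// which is exactly the recording error the counts are meant to expose.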
1490 1490
1491 1491
1492 1492 #ifndef PRODUCT
1493 1493 void instanceKlass::print_dependent_nmethods(bool verbose) {
1494 1494 nmethodBucket* b = _dependencies;
1495 1495 int idx = 0;
1496 1496 while (b != NULL) {
1497 1497 nmethod* nm = b->get_nmethod();
1498 1498 tty->print("[%d] count=%d { ", idx++, b->count());
1499 1499 if (!verbose) {
1500 1500 nm->print_on(tty, "nmethod");
1501 1501 tty->print_cr(" } ");
1502 1502 } else {
1503 1503 nm->print();
1504 1504 nm->print_dependencies();
1505 1505 tty->print_cr("--- } ");
1506 1506 }
1507 1507 b = b->next();
1508 1508 }
1509 1509 }
1510 1510
1511 1511
1512 1512 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1513 1513 nmethodBucket* b = _dependencies;
1514 1514 while (b != NULL) {
1515 1515 if (nm == b->get_nmethod()) {
1516 1516 return true;
1517 1517 }
1518 1518 b = b->next();
1519 1519 }
1520 1520 return false;
1521 1521 }
1522 1522 #endif //PRODUCT
1523 1523
1524 1524
1525 1525 #ifdef ASSERT
1526 1526 template <class T> void assert_is_in(T *p) {
1527 1527 T heap_oop = oopDesc::load_heap_oop(p);
1528 1528 if (!oopDesc::is_null(heap_oop)) {
1529 1529 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1530 1530 assert(Universe::heap()->is_in(o), "should be in heap");
1531 1531 }
1532 1532 }
1533 1533 template <class T> void assert_is_in_closed_subset(T *p) {
1534 1534 T heap_oop = oopDesc::load_heap_oop(p);
1535 1535 if (!oopDesc::is_null(heap_oop)) {
1536 1536 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1537 1537 assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1538 1538 }
1539 1539 }
1540 1540 template <class T> void assert_is_in_reserved(T *p) {
1541 1541 T heap_oop = oopDesc::load_heap_oop(p);
1542 1542 if (!oopDesc::is_null(heap_oop)) {
1543 1543 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1544 1544 assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1545 1545 }
1546 1546 }
1547 1547 template <class T> void assert_nothing(T *p) {}
1548 1548
1549 1549 #else
1550 1550 template <class T> void assert_is_in(T *p) {}
1551 1551 template <class T> void assert_is_in_closed_subset(T *p) {}
1552 1552 template <class T> void assert_is_in_reserved(T *p) {}
1553 1553 template <class T> void assert_nothing(T *p) {}
1554 1554 #endif // ASSERT
1555 1555
1556 1556 //
1557 1557 // Macros that iterate over areas of oops, specialized on the type of
1558 1558 // oop pointer (either narrow or wide) depending on UseCompressedOops
1559 1559 //
1560 1560 // Parameters are:
1561 1561 // T - type of oop to point to (either oop or narrowOop)
1562 1562 // start_p - starting pointer for region to iterate over
1563 1563 // count - number of oops or narrowOops to iterate over
1564 1564 // do_oop - action to perform on each oop (it's arbitrary C code which
1565 1565 // makes it more efficient to put in a macro rather than making
1566 1566 // it a template function)
1567 1567 // assert_fn - assert function; a template function because performance
1568 1568 // doesn't matter when asserts are enabled.
1569 1569 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1570 1570 T, start_p, count, do_oop, \
1571 1571 assert_fn) \
1572 1572 { \
1573 1573 T* p = (T*)(start_p); \
1574 1574 T* const end = p + (count); \
1575 1575 while (p < end) { \
1576 1576 (assert_fn)(p); \
1577 1577 do_oop; \
1578 1578 ++p; \
1579 1579 } \
1580 1580 }
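
// For reference, an InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, base, cnt,
// closure->do_oop(p), assert_is_in) use expands roughly to this sketch
// ('base', 'cnt' and 'closure' stand in for the actual arguments):
//
//   {
//     oop* p = (oop*)(base);
//     oop* const end = p + (cnt);
//     while (p < end) {
//       (assert_is_in)(p);    // no-op template in product builds
//       closure->do_oop(p);   // the do_oop action, with 'p' in scope
//       ++p;
//     }
//   }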
1581 1581
1582 1582 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1583 1583 T, start_p, count, do_oop, \
1584 1584 assert_fn) \
1585 1585 { \
1586 1586 T* const start = (T*)(start_p); \
1587 1587 T* p = start + (count); \
1588 1588 while (start < p) { \
1589 1589 --p; \
1590 1590 (assert_fn)(p); \
1591 1591 do_oop; \
1592 1592 } \
1593 1593 }
1594 1594
1595 1595 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1596 1596 T, start_p, count, low, high, \
1597 1597 do_oop, assert_fn) \
1598 1598 { \
1599 1599 T* const l = (T*)(low); \
1600 1600 T* const h = (T*)(high); \
1601 1601 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1602 1602 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
1603 1603 "bounded region must be properly aligned"); \
1604 1604 T* p = (T*)(start_p); \
1605 1605 T* end = p + (count); \
1606 1606 if (p < l) p = l; \
1607 1607 if (end > h) end = h; \
1608 1608 while (p < end) { \
1609 1609 (assert_fn)(p); \
1610 1610 do_oop; \
1611 1611 ++p; \
1612 1612 } \
1613 1613 }
1614 1614
1615 1615
1616 1616 // The following macros call specialized macros, passing either oop or
1617 1617 // narrowOop as the specialization type. These test the UseCompressedOops
1618 1618 // flag.
1619 1619 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
1620 1620 { \
1621 1621 /* Compute oopmap block range. The common case \
1622 1622 is nonstatic_oop_map_size == 1. */ \
1623 1623 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1624 1624 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1625 1625 if (UseCompressedOops) { \
1626 1626 while (map < end_map) { \
1627 1627 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1628 1628 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1629 1629 do_oop, assert_fn) \
1630 1630 ++map; \
1631 1631 } \
1632 1632 } else { \
1633 1633 while (map < end_map) { \
1634 1634 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1635 1635 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1636 1636 do_oop, assert_fn) \
1637 1637 ++map; \
1638 1638 } \
1639 1639 } \
1640 1640 }
1641 1641
1642 1642 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
1643 1643 { \
1644 1644 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
1645 1645 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \
1646 1646 if (UseCompressedOops) { \
1647 1647 while (start_map < map) { \
1648 1648 --map; \
1649 1649 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
1650 1650 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1651 1651 do_oop, assert_fn) \
1652 1652 } \
1653 1653 } else { \
1654 1654 while (start_map < map) { \
1655 1655 --map; \
1656 1656 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
1657 1657 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1658 1658 do_oop, assert_fn) \
1659 1659 } \
1660 1660 } \
1661 1661 }
1662 1662
1663 1663 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
1664 1664 assert_fn) \
1665 1665 { \
1666 1666 /* Compute oopmap block range. The common case is \
1667 1667 nonstatic_oop_map_size == 1, so we accept the \
1668 1668 usually non-existent extra overhead of examining \
1669 1669 all the maps. */ \
1670 1670 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1671 1671 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1672 1672 if (UseCompressedOops) { \
1673 1673 while (map < end_map) { \
1674 1674 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1675 1675 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1676 1676 low, high, \
1677 1677 do_oop, assert_fn) \
1678 1678 ++map; \
1679 1679 } \
1680 1680 } else { \
1681 1681 while (map < end_map) { \
1682 1682 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1683 1683 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1684 1684 low, high, \
1685 1685 do_oop, assert_fn) \
1686 1686 ++map; \
1687 1687 } \
1688 1688 } \
1689 1689 }
1690 1690
1691 1691 void instanceKlass::oop_follow_contents(oop obj) {
1692 1692 assert(obj != NULL, "can't follow the content of NULL object");
1693 1693 obj->follow_header();
1694 1694 InstanceKlass_OOP_MAP_ITERATE( \
1695 1695 obj, \
1696 1696 MarkSweep::mark_and_push(p), \
1697 1697 assert_is_in_closed_subset)
1698 1698 }
1699 1699
1700 1700 #ifndef SERIALGC
1701 1701 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1702 1702 oop obj) {
1703 1703 assert(obj != NULL, "can't follow the content of NULL object");
1704 1704 obj->follow_header(cm);
1705 1705 InstanceKlass_OOP_MAP_ITERATE( \
1706 1706 obj, \
1707 1707 PSParallelCompact::mark_and_push(cm, p), \
1708 1708 assert_is_in)
1709 1709 }
1710 1710 #endif // SERIALGC
1711 1711
1712 1712 // closure's do_header() method dictates whether the given closure should be
1713 1713 // applied to the klass ptr in the object header.
1714 1714
1715 1715 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
1716 1716 \
1717 1717 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1718 1718 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1719 1719 /* header */ \
1720 1720 if (closure->do_header()) { \
1721 1721 obj->oop_iterate_header(closure); \
1722 1722 } \
1723 1723 InstanceKlass_OOP_MAP_ITERATE( \
1724 1724 obj, \
1725 1725 SpecializationStats:: \
1726 1726 record_do_oop_call##nv_suffix(SpecializationStats::ik); \
1727 1727 (closure)->do_oop##nv_suffix(p), \
1728 1728 assert_is_in_closed_subset) \
1729 1729 return size_helper(); \
1730 1730 }
1731 1731
1732 1732 #ifndef SERIALGC
1733 1733 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1734 1734 \
1735 1735 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \
1736 1736 OopClosureType* closure) { \
1737 1737 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1738 1738 /* header */ \
1739 1739 if (closure->do_header()) { \
1740 1740 obj->oop_iterate_header(closure); \
1741 1741 } \
1742 1742 /* instance variables */ \
1743 1743 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1744 1744 obj, \
1745 1745 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1746 1746 (closure)->do_oop##nv_suffix(p), \
1747 1747 assert_is_in_closed_subset) \
1748 1748 return size_helper(); \
1749 1749 }
1750 1750 #endif // !SERIALGC
1751 1751
1752 1752 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1753 1753 \
1754 1754 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
1755 1755 OopClosureType* closure, \
1756 1756 MemRegion mr) { \
1757 1757 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1758 1758 if (closure->do_header()) { \
1759 1759 obj->oop_iterate_header(closure, mr); \
1760 1760 } \
1761 1761 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1762 1762 obj, mr.start(), mr.end(), \
1763 1763 (closure)->do_oop##nv_suffix(p), \
1764 1764 assert_is_in_closed_subset) \
1765 1765 return size_helper(); \
1766 1766 }
1767 1767
1768 1768 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1769 1769 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1770 1770 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1771 1771 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1772 1772 #ifndef SERIALGC
1773 1773 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1774 1774 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1775 1775 #endif // !SERIALGC
1776 1776
1777 1777 int instanceKlass::oop_adjust_pointers(oop obj) {
1778 1778 int size = size_helper();
1779 1779 InstanceKlass_OOP_MAP_ITERATE( \
1780 1780 obj, \
1781 1781 MarkSweep::adjust_pointer(p), \
1782 1782 assert_is_in)
1783 1783 obj->adjust_header();
1784 1784 return size;
1785 1785 }
1786 1786
1787 1787 #ifndef SERIALGC
1788 1788 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1789 1789 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1790 1790 obj, \
1791 1791 if (PSScavenge::should_scavenge(p)) { \
1792 1792 pm->claim_or_forward_depth(p); \
1793 1793 }, \
1794 1794 assert_nothing )
1795 1795 }
1796 1796
1797 1797 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1798 1798 InstanceKlass_OOP_MAP_ITERATE( \
1799 1799 obj, \
1800 1800 PSParallelCompact::adjust_pointer(p), \
1801 1801 assert_nothing)
1802 1802 return size_helper();
1803 1803 }
1804 1804
1805 1805 #endif // SERIALGC
1806 1806
1807 1807 // This klass is alive but the implementor link is not followed/updated.
1808 1808 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1809 1809
1810 1810 void instanceKlass::follow_weak_klass_links(
1811 1811 BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1812 1812 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1813 1813 if (ClassUnloading) {
1814 1814 for (int i = 0; i < implementors_limit; i++) {
1815 1815 klassOop impl = _implementors[i];
1816 1816 if (impl == NULL) break; // no more in the list
1817 1817 if (!is_alive->do_object_b(impl)) {
1818 1818 // remove this guy from the list by overwriting him with the tail
1819 1819 int lasti = --_nof_implementors;
1820 1820 assert(lasti >= i && lasti < implementors_limit, "just checking");
1821 1821 _implementors[i] = _implementors[lasti];
1822 1822 _implementors[lasti] = NULL;
1823 1823 --i; // rerun the loop at this index
1824 1824 }
1825 1825 }
1826 1826 } else {
1827 1827 for (int i = 0; i < implementors_limit; i++) {
1828 1828 keep_alive->do_oop(&adr_implementors()[i]);
1829 1829 }
1830 1830 }
1831 1831 Klass::follow_weak_klass_links(is_alive, keep_alive);
1832 1832 }
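
// The unordered removal above (overwrite slot i with the tail, then re-run
// the loop at i) is the usual O(1) compaction for a small array whose
// element order doesn't matter; as a standalone sketch (hypothetical names):
//
//   void remove_at(klassOop* arr, int& n, int i) {
//     arr[i] = arr[--n];   // move the last element into the hole
//     arr[n] = NULL;       // clear the vacated tail slot
//     // the caller re-examines index i, which now holds a new element
//   }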
1833 1833
1834 1834 void instanceKlass::remove_unshareable_info() {
1835 1835 Klass::remove_unshareable_info();
1836 1836 init_implementor();
1837 1837 }
1838 1838
1839 1839 static void clear_all_breakpoints(methodOop m) {
1840 1840 m->clear_all_breakpoints();
1841 1841 }
1842 1842
1843 1843 void instanceKlass::release_C_heap_structures() {
1844 1844 // Deallocate oop map cache
1845 1845 if (_oop_map_cache != NULL) {
1846 1846 delete _oop_map_cache;
1847 1847 _oop_map_cache = NULL;
1848 1848 }
1849 1849
1850 1850 // Deallocate JNI identifiers for jfieldIDs
1851 1851 JNIid::deallocate(jni_ids());
1852 1852 set_jni_ids(NULL);
1853 1853
1854 1854 jmethodID* jmeths = methods_jmethod_ids_acquire();
1855 1855 if (jmeths != (jmethodID*)NULL) {
1856 1856 release_set_methods_jmethod_ids(NULL);
1857 1857 FreeHeap(jmeths);
1858 1858 }
1859 1859
1860 1860 int* indices = methods_cached_itable_indices_acquire();
1861 1861 if (indices != (int*)NULL) {
1862 1862 release_set_methods_cached_itable_indices(NULL);
1863 1863 FreeHeap(indices);
1864 1864 }
1865 1865
1866 1866 // release dependencies
1867 1867 nmethodBucket* b = _dependencies;
1868 1868 _dependencies = NULL;
1869 1869 while (b != NULL) {
1870 1870 nmethodBucket* next = b->next();
1871 1871 delete b;
1872 1872 b = next;
1873 1873 }
1874 1874
1875 1875 // Deallocate breakpoint records
1876 1876 if (breakpoints() != 0x0) {
1877 1877 methods_do(clear_all_breakpoints);
1878 1878 assert(breakpoints() == 0x0, "should have cleared breakpoints");
1879 1879 }
1880 1880
1881 1881 // deallocate information about previous versions
1882 1882 if (_previous_versions != NULL) {
1883 1883 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1884 1884 PreviousVersionNode * pv_node = _previous_versions->at(i);
1885 1885 delete pv_node;
1886 1886 }
1887 1887 delete _previous_versions;
1888 1888 _previous_versions = NULL;
1889 1889 }
1890 1890
1891 1891 // deallocate the cached class file
1892 1892 if (_cached_class_file_bytes != NULL) {
1893 1893 os::free(_cached_class_file_bytes);
1894 1894 _cached_class_file_bytes = NULL;
1895 1895 _cached_class_file_len = 0;
1896 1896 }
1897 1897
1898 1898 // Decrement symbol reference counts associated with the unloaded class.
1899 1899 if (_name != NULL) _name->decrement_refcount();
1900 1900 // unreference array name derived from this class name (arrays of an unloaded
1901 1901 // class can't be referenced anymore).
1902 1902 if (_array_name != NULL) _array_name->decrement_refcount();
1903 1903 if (_source_file_name != NULL) _source_file_name->decrement_refcount();
1904 1904 if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount();
1905 1905 // walk constant pool and decrement symbol reference counts
1906 1906 _constants->unreference_symbols();
1907 1907 }
1908 1908
1909 1909 void instanceKlass::set_source_file_name(Symbol* n) {
1910 1910 _source_file_name = n;
1911 1911 if (_source_file_name != NULL) _source_file_name->increment_refcount();
1912 1912 }
1913 1913
1914 1914 void instanceKlass::set_source_debug_extension(Symbol* n) {
1915 1915 _source_debug_extension = n;
1916 1916 if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount();
1917 1917 }
1918 1918
1919 1919 address instanceKlass::static_field_addr(int offset) {
1920 1920 return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
1921 1921 }
1922 1922
1923 1923
1924 1924 const char* instanceKlass::signature_name() const {
1925 1925 const char* src = (const char*) (name()->as_C_string());
1926 1926 const int src_length = (int)strlen(src);
1927 1927 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
1928 1928 int src_index = 0;
1929 1929 int dest_index = 0;
1930 1930 dest[dest_index++] = 'L';
1931 1931 while (src_index < src_length) {
1932 1932 dest[dest_index++] = src[src_index++];
1933 1933 }
1934 1934 dest[dest_index++] = ';';
1935 1935 dest[dest_index] = '\0';
1936 1936 return dest;
1937 1937 }
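
// For example, the class name "java/lang/String" yields the type
// descriptor "Ljava/lang/String;" (hence the src_length + 3 allocation:
// 'L', ';' and the terminating NUL).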
1938 1938
1939 1939 // different versions of is_same_class_package
1940 1940 bool instanceKlass::is_same_class_package(klassOop class2) {
1941 1941 klassOop class1 = as_klassOop();
1942 1942 oop classloader1 = instanceKlass::cast(class1)->class_loader();
1943 1943 Symbol* classname1 = Klass::cast(class1)->name();
1944 1944
1945 1945 if (Klass::cast(class2)->oop_is_objArray()) {
1946 1946 class2 = objArrayKlass::cast(class2)->bottom_klass();
1947 1947 }
1948 1948 oop classloader2;
1949 1949 if (Klass::cast(class2)->oop_is_instance()) {
1950 1950 classloader2 = instanceKlass::cast(class2)->class_loader();
1951 1951 } else {
1952 1952 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
1953 1953 classloader2 = NULL;
1954 1954 }
1955 1955 Symbol* classname2 = Klass::cast(class2)->name();
1956 1956
1957 1957 return instanceKlass::is_same_class_package(classloader1, classname1,
1958 1958 classloader2, classname2);
1959 1959 }
1960 1960
1961 1961 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
1962 1962 klassOop class1 = as_klassOop();
1963 1963 oop classloader1 = instanceKlass::cast(class1)->class_loader();
1964 1964 Symbol* classname1 = Klass::cast(class1)->name();
1965 1965
1966 1966 return instanceKlass::is_same_class_package(classloader1, classname1,
1967 1967 classloader2, classname2);
1968 1968 }
1969 1969
1970 1970 // Return true if two classes are in the same package; classloader
1971 1971 // and classname information is enough to determine a class's package.
1972 1972 bool instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
1973 1973 oop class_loader2, Symbol* class_name2) {
1974 1974 if (class_loader1 != class_loader2) {
1975 1975 return false;
1976 1976 } else if (class_name1 == class_name2) {
1977 1977 return true; // skip painful bytewise comparison
1978 1978 } else {
1979 1979 ResourceMark rm;
1980 1980
1981 1981 // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
1982 1982 // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
1983 1983 // Otherwise, we just compare jbyte values between the strings.
1984 1984 const jbyte *name1 = class_name1->base();
1985 1985 const jbyte *name2 = class_name2->base();
1986 1986
1987 1987 const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
1988 1988 const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
1989 1989
1990 1990 if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
1991 1991 // One of the two doesn't have a package. Only return true
1992 1992 // if the other one also doesn't have a package.
1993 1993 return last_slash1 == last_slash2;
1994 1994 } else {
1995 1995 // Skip over '['s
1996 1996 if (*name1 == '[') {
1997 1997 do {
1998 1998 name1++;
1999 1999 } while (*name1 == '[');
2000 2000 if (*name1 != 'L') {
2001 2001 // Something is terribly wrong. Shouldn't be here.
2002 2002 return false;
2003 2003 }
2004 2004 }
2005 2005 if (*name2 == '[') {
2006 2006 do {
2007 2007 name2++;
2008 2008 } while (*name2 == '[');
2009 2009 if (*name2 != 'L') {
2010 2010 // Something is terribly wrong. Shouldn't be here.
2011 2011 return false;
2012 2012 }
2013 2013 }
2014 2014
2015 2015 // Check that package part is identical
2016 2016 int length1 = last_slash1 - name1;
2017 2017 int length2 = last_slash2 - name2;
2018 2018
2019 2019 return UTF8::equal(name1, length1, name2, length2);
2020 2020 }
2021 2021 }
2022 2022 }
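
// Worked examples for the comparison above (illustrative; both names are
// assumed to come from the same class loader):
//
//   "java/lang/String" vs "java/lang/Object"  -> true  (package "java/lang")
//   "Foo"              vs "Bar"               -> true  (both have no package)
//   "Foo"              vs "java/lang/Object"  -> false (only one has one)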
2023 2023
2024 2024 // Returns true iff super_method can be overridden by a method in targetclassname
2025 2025 // See JLS 3rd edition 8.4.6.1
2026 2026 // Assumes name-signature match
2027 2027 // "this" is instanceKlass of super_method which must exist
2028 2028 // note that the instanceKlass of the class named targetclassname may not have been created yet
2029 2029 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
2030 2030 // Private methods can not be overridden
2031 2031 if (super_method->is_private()) {
2032 2032 return false;
2033 2033 }
2034 2034 // If super method is accessible, then override
2035 2035 if ((super_method->is_protected()) ||
2036 2036 (super_method->is_public())) {
2037 2037 return true;
2038 2038 }
2039 2039 // Package-private methods are not inherited outside of package
2040 2040 assert(super_method->is_package_private(), "must be package private");
2041 2041 return(is_same_class_package(targetclassloader(), targetclassname));
2042 2042 }
2043 2043
2044 2044 /* defined for now in jvm.cpp, for historical reasons *--
2045 2045 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2046 2046 Symbol*& simple_name_result, TRAPS) {
2047 2047 ...
2048 2048 }
2049 2049 */
2050 2050
2051 2051 // tell if two classes have the same enclosing class (at package level)
2052 2052 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2053 2053 klassOop class2_oop, TRAPS) {
2054 2054 if (class2_oop == class1->as_klassOop()) return true;
2055 2055 if (!Klass::cast(class2_oop)->oop_is_instance()) return false;
2056 2056 instanceKlassHandle class2(THREAD, class2_oop);
2057 2057
2058 2058 // must be in same package before we try anything else
2059 2059 if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2060 2060 return false;
2061 2061
2062 2062 // As long as there is an outer1.getEnclosingClass,
2063 2063 // shift the search outward.
2064 2064 instanceKlassHandle outer1 = class1;
2065 2065 for (;;) {
2066 2066 // As we walk along, look for equalities between outer1 and class2.
2067 2067 // Eventually, the walks will terminate as outer1 stops
2068 2068 // at the top-level class around the original class.
2069 2069 bool ignore_inner_is_member;
2070 2070 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2071 2071 CHECK_false);
2072 2072 if (next == NULL) break;
2073 2073 if (next == class2()) return true;
2074 2074 outer1 = instanceKlassHandle(THREAD, next);
2075 2075 }
2076 2076
2077 2077 // Now do the same for class2.
2078 2078 instanceKlassHandle outer2 = class2;
2079 2079 for (;;) {
2080 2080 bool ignore_inner_is_member;
2081 2081 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2082 2082 CHECK_false);
2083 2083 if (next == NULL) break;
2084 2084 // Might as well check the new outer against all available values.
2085 2085 if (next == class1()) return true;
2086 2086 if (next == outer1()) return true;
2087 2087 outer2 = instanceKlassHandle(THREAD, next);
2088 2088 }
2089 2089
2090 2090 // If by this point we have not found an equality between the
2091 2091 // two classes, we know they are in separate package members.
2092 2092 return false;
2093 2093 }
2094 2094
2095 2095
2096 2096 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2097 2097 klassOop k = as_klassOop();
2098 2098 jint access = access_flags().as_int();
2099 2099
2100 2100 // But check if it happens to be member class.
2101 2101 typeArrayOop inner_class_list = inner_classes();
2102 2102 int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
2103 2103 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
2104 2104 if (length > 0) {
2105 2105 typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
2106 2106 instanceKlassHandle ik(THREAD, k);
2107 2107 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
2108 2108 int ioff = inner_class_list_h->ushort_at(
2109 2109 i + instanceKlass::inner_class_inner_class_info_offset);
2110 2110
2111 2111 // Inner class attribute can be zero, skip it.
2112 2112 // Strange but true: JVM spec. allows null inner class refs.
2113 2113 if (ioff == 0) continue;
2114 2114
2115 2115 // only look at classes that are already loaded
2116 2116 // since we are looking for the flags for ourselves.
2117 2117 Symbol* inner_name = ik->constants()->klass_name_at(ioff);
2118 2118 if ((ik->name() == inner_name)) {
2119 2119 // This is really a member class.
2120 2120 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
2121 2121 break;
2122 2122 }
2123 2123 }
2124 2124 }
2125 2125 // Remember to strip ACC_SUPER bit
2126 2126 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2127 2127 }
2128 2128
2129 2129 jint instanceKlass::jvmti_class_status() const {
2130 2130 jint result = 0;
2131 2131
2132 2132 if (is_linked()) {
2133 2133 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2134 2134 }
2135 2135
2136 2136 if (is_initialized()) {
2137 2137 assert(is_linked(), "Class status is not consistent");
2138 2138 result |= JVMTI_CLASS_STATUS_INITIALIZED;
2139 2139 }
2140 2140 if (is_in_error_state()) {
2141 2141 result |= JVMTI_CLASS_STATUS_ERROR;
2142 2142 }
2143 2143 return result;
2144 2144 }
2145 2145
2146 2146 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2147 2147 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2148 2148 int method_table_offset_in_words = ioe->offset()/wordSize;
2149 2149 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2150 2150 / itableOffsetEntry::size();
2151 2151
2152 2152 for (int cnt = 0 ; ; cnt ++, ioe ++) {
2153 2153 // If the interface isn't implemented by the receiver class,
2154 2154 // the VM should throw IncompatibleClassChangeError.
2155 2155 if (cnt >= nof_interfaces) {
2156 2156 THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2157 2157 }
2158 2158
2159 2159 klassOop ik = ioe->interface_klass();
2160 2160 if (ik == holder) break;
2161 2161 }
2162 2162
2163 2163 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2164 2164 methodOop m = ime[index].method();
2165 2165 if (m == NULL) {
2166 2166 THROW_0(vmSymbols::java_lang_AbstractMethodError());
2167 2167 }
2168 2168 return m;
2169 2169 }
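
// An illustrative call (names assumed): resolve slot 0 of interface
// 'iface' against a receiver class 'recv_ik':
//
//   methodOop target = recv_ik->method_at_itable(iface, 0, CHECK_NULL);
//
// This throws IncompatibleClassChangeError if recv_ik does not implement
// iface at all, and AbstractMethodError if the itable slot is empty.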
2170 2170
2171 2171 // On-stack replacement stuff
2172 2172 void instanceKlass::add_osr_nmethod(nmethod* n) {
2173 2173 // only one compilation can be active
2174 2174 NEEDS_CLEANUP
2175 2175 // This is a short non-blocking critical region, so the no safepoint check is ok.
2176 2176 OsrList_lock->lock_without_safepoint_check();
2177 2177 assert(n->is_osr_method(), "wrong kind of nmethod");
2178 2178 n->set_osr_link(osr_nmethods_head());
2179 2179 set_osr_nmethods_head(n);
2180 2180 // Raise the highest osr level if necessary
2181 2181 if (TieredCompilation) {
2182 2182 methodOop m = n->method();
2183 2183 m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
2184 2184 }
2185 2185 // Remember to unlock again
2186 2186 OsrList_lock->unlock();
2187 2187
2188 2188 // Get rid of the osr methods for the same bci that have lower levels.
2189 2189 if (TieredCompilation) {
2190 2190 for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
2191 2191 nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
2192 2192 if (inv != NULL && inv->is_in_use()) {
2193 2193 inv->make_not_entrant();
2194 2194 }
2195 2195 }
2196 2196 }
2197 2197 }
2198 2198
2199 2199
2200 2200 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2201 2201 // This is a short non-blocking critical region, so the no safepoint check is ok.
2202 2202 OsrList_lock->lock_without_safepoint_check();
2203 2203 assert(n->is_osr_method(), "wrong kind of nmethod");
2204 2204 nmethod* last = NULL;
2205 2205 nmethod* cur = osr_nmethods_head();
2206 2206 int max_level = CompLevel_none; // Find the max comp level excluding n
2207 2207 methodOop m = n->method();
2208 2208 // Search for match
2209 2209 while(cur != NULL && cur != n) {
2210 2210 if (TieredCompilation) {
2211 2211 // Find max level before n
2212 2212 max_level = MAX2(max_level, cur->comp_level());
2213 2213 }
2214 2214 last = cur;
2215 2215 cur = cur->osr_link();
2216 2216 }
2217 2217 nmethod* next = NULL;
2218 2218 if (cur == n) {
2219 2219 next = cur->osr_link();
2220 2220 if (last == NULL) {
2221 2221 // Remove first element
2222 2222 set_osr_nmethods_head(next);
2223 2223 } else {
2224 2224 last->set_osr_link(next);
2225 2225 }
2226 2226 }
2227 2227 n->set_osr_link(NULL);
2228 2228 if (TieredCompilation) {
2229 2229 cur = next;
2230 2230 while (cur != NULL) {
2231 2231 // Find max level after n
2232 2232 max_level = MAX2(max_level, cur->comp_level());
2233 2233 cur = cur->osr_link();
2234 2234 }
2235 2235 m->set_highest_osr_comp_level(max_level);
2236 2236 }
2237 2237 // Remember to unlock again
2238 2238 OsrList_lock->unlock();
2239 2239 }
2240 2240
2241 2241 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
2242 2242 // This is a short non-blocking critical region, so the no safepoint check is ok.
2243 2243 OsrList_lock->lock_without_safepoint_check();
2244 2244 nmethod* osr = osr_nmethods_head();
2245 2245 nmethod* best = NULL;
2246 2246 while (osr != NULL) {
2247 2247 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
2248 2248 // There can be a time when a c1 osr method exists but we are waiting
2249 2249 // for a c2 version. When c2 completes its osr nmethod we will trash
2250 2250 // the c1 version and only be able to find the c2 version. However,
2251 2251 // while we overflow in the c1 code at back branches we don't want to
2252 2252 // try to switch to the same code as we are already running.
2253 2253
2254 2254 if (osr->method() == m &&
2255 2255 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2256 2256 if (match_level) {
2257 2257 if (osr->comp_level() == comp_level) {
2258 2258 // Found a match - return it.
2259 2259 OsrList_lock->unlock();
2260 2260 return osr;
2261 2261 }
2262 2262 } else {
2263 2263 if (best == NULL || (osr->comp_level() > best->comp_level())) {
2264 2264 if (osr->comp_level() == CompLevel_highest_tier) {
2265 2265 // Found the best possible - return it.
2266 2266 OsrList_lock->unlock();
2267 2267 return osr;
2268 2268 }
2269 2269 best = osr;
2270 2270 }
2271 2271 }
2272 2272 }
2273 2273 osr = osr->osr_link();
2274 2274 }
2275 2275 OsrList_lock->unlock();
2276 2276 if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
2277 2277 return best;
2278 2278 }
2279 2279 return NULL;
2280 2280 }
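
// Illustrative lookups (names assumed):
//
//   // exact level match only:
//   nmethod* n1 = ik->lookup_osr_nmethod(m, bci, CompLevel_full_profile, true);
//
//   // best available, but only if it is at or above the requested level:
//   nmethod* n2 = ik->lookup_osr_nmethod(m, bci, CompLevel_simple, false);
//
// With match_level == false the walk tracks the highest-level match and can
// return early once it sees CompLevel_highest_tier.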
2281 2281
2282 2282 // -----------------------------------------------------------------------------------------------------
2283 2283 #ifndef PRODUCT
2284 2284
2285 2285 // Printing
2286 2286
2287 2287 #define BULLET " - "
2288 2288
2289 2289 void FieldPrinter::do_field(fieldDescriptor* fd) {
2290 2290 _st->print(BULLET);
2291 2291 if (_obj == NULL) {
2292 2292 fd->print_on(_st);
2293 2293 _st->cr();
2294 2294 } else {
2295 2295 fd->print_on_for(_st, _obj);
2296 2296 _st->cr();
2297 2297 }
2298 2298 }
2299 2299
2300 2300
2301 2301 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2302 2302 Klass::oop_print_on(obj, st);
2303 2303
2304 2304 if (as_klassOop() == SystemDictionary::String_klass()) {
2305 2305 typeArrayOop value = java_lang_String::value(obj);
2306 2306 juint offset = java_lang_String::offset(obj);
2307 2307 juint length = java_lang_String::length(obj);
2308 2308 if (value != NULL &&
2309 2309 value->is_typeArray() &&
2310 2310 offset <= (juint) value->length() &&
2311 2311 offset + length <= (juint) value->length()) {
2312 2312 st->print(BULLET"string: ");
2313 2313 Handle h_obj(obj);
2314 2314 java_lang_String::print(h_obj, st);
2315 2315 st->cr();
2316 2316 if (!WizardMode) return; // that is enough
2317 2317 }
2318 2318 }
2319 2319
2320 2320 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2321 2321 FieldPrinter print_field(st, obj);
2322 2322 do_nonstatic_fields(&print_field);
2323 2323
2324 2324 if (as_klassOop() == SystemDictionary::Class_klass()) {
2325 2325 st->print(BULLET"signature: ");
2326 2326 java_lang_Class::print_signature(obj, st);
2327 2327 st->cr();
2328 2328 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2329 2329 st->print(BULLET"fake entry for mirror: ");
2330 2330 mirrored_klass->print_value_on(st);
2331 2331 st->cr();
2332 2332 st->print(BULLET"fake entry resolved_constructor: ");
2333 2333 methodOop ctor = java_lang_Class::resolved_constructor(obj);
2334 2334 ctor->print_value_on(st);
2335 2335 klassOop array_klass = java_lang_Class::array_klass(obj);
2336 2336 st->cr();
2337 2337 st->print(BULLET"fake entry for array: ");
2338 2338 array_klass->print_value_on(st);
2339 2339 st->cr();
2340 2340 st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj));
2341 2341 st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
2342 2342 klassOop real_klass = java_lang_Class::as_klassOop(obj);
2343 2343 if (real_klass != NULL && real_klass->klass_part()->oop_is_instance()) {
2344 2344 instanceKlass::cast(real_klass)->do_local_static_fields(&print_field);
2345 2345 }
2346 2346 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2347 2347 st->print(BULLET"signature: ");
2348 2348 java_lang_invoke_MethodType::print_signature(obj, st);
2349 2349 st->cr();
2350 2350 }
2351 2351 }
2352 2352
2353 2353 #endif //PRODUCT
2354 2354
2355 2355 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2356 2356 st->print("a ");
2357 2357 name()->print_value_on(st);
2358 2358 obj->print_address_on(st);
2359 2359 if (as_klassOop() == SystemDictionary::String_klass()
2360 2360 && java_lang_String::value(obj) != NULL) {
2361 2361 ResourceMark rm;
2362 2362 int len = java_lang_String::length(obj);
2363 2363 int plen = (len < 24 ? len : 12);
2364 2364 char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2365 2365 st->print(" = \"%s\"", str);
2366 2366 if (len > plen)
2367 2367 st->print("...[%d]", len);
2368 2368 } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2369 2369 klassOop k = java_lang_Class::as_klassOop(obj);
2370 2370 st->print(" = ");
2371 2371 if (k != NULL) {
2372 2372 k->print_value_on(st);
2373 2373 } else {
2374 2374 const char* tname = type2name(java_lang_Class::primitive_type(obj));
2375 2375 st->print("%s", tname ? tname : "type?");
2376 2376 }
2377 2377 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2378 2378 st->print(" = ");
2379 2379 java_lang_invoke_MethodType::print_signature(obj, st);
2380 2380 } else if (java_lang_boxing_object::is_instance(obj)) {
2381 2381 st->print(" = ");
2382 2382 java_lang_boxing_object::print(obj, st);
2383 2383 }
2384 2384 }
2385 2385
2386 2386 const char* instanceKlass::internal_name() const {
2387 2387 return external_name();
2388 2388 }
2389 2389
2390 2390 // Verification
2391 2391
2392 2392 class VerifyFieldClosure: public OopClosure {
2393 2393 protected:
2394 2394 template <class T> void do_oop_work(T* p) {
2395 2395 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2396 2396 oop obj = oopDesc::load_decode_heap_oop(p);
2397 2397 if (!obj->is_oop_or_null()) {
2398 2398 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2399 2399 Universe::print();
2400 2400 guarantee(false, "boom");
2401 2401 }
2402 2402 }
2403 2403 public:
2404 2404 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
2405 2405 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2406 2406 };
2407 2407
2408 2408 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2409 2409 Klass::oop_verify_on(obj, st);
2410 2410 VerifyFieldClosure blk;
2411 2411 oop_oop_iterate(obj, &blk);
2412 2412 }
2413 2413
2414 2414 #ifndef PRODUCT
2415 2415
2416 2416 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
2417 2417 // This verification code is disabled. JDK_Version::is_gte_jdk14x_version()
2418 2418 // cannot be called since this function is called before the VM is
2419 2419 // able to determine which JDK version it is running with.
2420 2420 // The check below is always false since 1.4.
2421 2421 return;
2422 2422
2423 2423 // This verification code temporarily disabled for the 1.4
2424 2424 // reflection implementation since java.lang.Class now has
2425 2425 // Java-level instance fields. Should rewrite this to handle this
2426 2426 // case.
2427 2427 if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
2428 2428 // Verify that java.lang.Class instances have a fake oop field added.
2429 2429 instanceKlass* ik = instanceKlass::cast(k);
2430 2430
2431 2431 // Check that we have the right class
2432 2432 static bool first_time = true;
2433 2433 guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
2434 2434 first_time = false;
2435 2435 const int extra = java_lang_Class::number_of_fake_oop_fields;
2436 2436 guarantee(ik->nonstatic_field_size() == extra, "just checking");
2437 2437 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
2438 2438 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
2439 2439
2440 2440 // Check that the map is (2,extra)
2441 2441 int offset = java_lang_Class::klass_offset;
2442 2442
2443 2443 OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2444 2444 guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
2445 2445 "sanity");
2446 2446 }
2447 2447 }
2448 2448
2449 2449 #endif // ndef PRODUCT
2450 2450
2451 2451 // JNIid class for jfieldIDs only
2452 2452 // Note to reviewers:
2453 2453 // These JNI functions are just moved over to column 1 and not changed
2454 2454 // in the compressed oops workspace.
2455 2455 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2456 2456 _holder = holder;
2457 2457 _offset = offset;
2458 2458 _next = next;
2459 2459 debug_only(_is_static_field_id = false;)
2460 2460 }
2461 2461
2462 2462
2463 2463 JNIid* JNIid::find(int offset) {
2464 2464 JNIid* current = this;
2465 2465 while (current != NULL) {
2466 2466 if (current->offset() == offset) return current;
2467 2467 current = current->next();
2468 2468 }
2469 2469 return NULL;
2470 2470 }
2471 2471
2472 2472 void JNIid::oops_do(OopClosure* f) {
2473 2473 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2474 2474 f->do_oop(cur->holder_addr());
2475 2475 }
2476 2476 }
2477 2477
2478 2478 void JNIid::deallocate(JNIid* current) {
2479 2479 while (current != NULL) {
2480 2480 JNIid* next = current->next();
2481 2481 delete current;
2482 2482 current = next;
2483 2483 }
2484 2484 }
2485 2485
2486 2486
2487 2487 void JNIid::verify(klassOop holder) {
2488 2488 int first_field_offset = instanceMirrorKlass::offset_of_static_fields();
2489 2489 int end_field_offset;
2490 2490 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2491 2491
2492 2492 JNIid* current = this;
2493 2493 while (current != NULL) {
2494 2494 guarantee(current->holder() == holder, "Invalid klass in JNIid");
2495 2495 #ifdef ASSERT
2496 2496 int o = current->offset();
2497 2497 if (current->is_static_field_id()) {
2498 2498 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
2499 2499 }
2500 2500 #endif
2501 2501 current = current->next();
2502 2502 }
2503 2503 }
2504 2504
2505 2505
2506 2506 #ifdef ASSERT
2507 2507 void instanceKlass::set_init_state(ClassState state) {
2508 2508 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2509 2509 : (_init_state < state);
2510 2510 assert(good_state || state == allocated, "illegal state transition");
2511 2511 _init_state = state;
2512 2512 }
2513 2513 #endif
2514 2514
2515 2515
2516 2516 // RedefineClasses() support for previous versions:
2517 2517
2518 2518 // Add an information node that contains weak references to the
2519 2519 // interesting parts of the previous version of the_class.
2520 2520 // This is also where we clean out any unused weak references.
2521 2521 // Note that while we delete nodes from the _previous_versions
2522 2522 // array, we never delete the array itself until the klass is
2523 2523 // unloaded. The has_been_redefined() query depends on that fact.
2524 2524 //
2525 2525 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2526 2526 BitMap* emcp_methods, int emcp_method_count) {
2527 2527 assert(Thread::current()->is_VM_thread(),
2528 2528 "only VMThread can add previous versions");
2529 2529
2530 2530 if (_previous_versions == NULL) {
2531 2531 // This is the first previous version so make some space.
2532 2532 // Start with 2 elements under the assumption that the class
2533 2533 // won't be redefined much.
2534 2534 _previous_versions = new (ResourceObj::C_HEAP)
2535 2535 GrowableArray<PreviousVersionNode *>(2, true);
2536 2536 }
2537 2537
2538 2538 // RC_TRACE macro has an embedded ResourceMark
2539 2539 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2540 2540 ikh->external_name(), _previous_versions->length(), emcp_method_count));
2541 2541 constantPoolHandle cp_h(ikh->constants());
2542 2542 jobject cp_ref;
2543 2543 if (cp_h->is_shared()) {
2544 2544 // a shared ConstantPool requires a regular reference; a weak
2545 2545 // reference would be collectible
2546 2546 cp_ref = JNIHandles::make_global(cp_h);
2547 2547 } else {
2548 2548 cp_ref = JNIHandles::make_weak_global(cp_h);
2549 2549 }
2550 2550 PreviousVersionNode * pv_node = NULL;
2551 2551 objArrayOop old_methods = ikh->methods();
2552 2552
2553 2553 if (emcp_method_count == 0) {
2554 2554 // non-shared ConstantPool gets a weak reference
2555 2555 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2556 2556 RC_TRACE(0x00000400,
2557 2557 ("add: all methods are obsolete; flushing any EMCP weak refs"));
2558 2558 } else {
2559 2559 int local_count = 0;
2560 2560 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
2561 2561 GrowableArray<jweak>(emcp_method_count, true);
2562 2562 for (int i = 0; i < old_methods->length(); i++) {
2563 2563 if (emcp_methods->at(i)) {
2564 2564 // this old method is EMCP so save a weak ref
2565 2565 methodOop old_method = (methodOop) old_methods->obj_at(i);
2566 2566 methodHandle old_method_h(old_method);
2567 2567 jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2568 2568 method_refs->append(method_ref);
2569 2569 if (++local_count >= emcp_method_count) {
2570 2570 // no more EMCP methods so bail out now
2571 2571 break;
2572 2572 }
2573 2573 }
2574 2574 }
2575 2575 // non-shared ConstantPool gets a weak reference
2576 2576 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2577 2577 }
2578 2578
2579 2579 _previous_versions->append(pv_node);
2580 2580
2581 2581 // Using weak references allows the interesting parts of previous
2582 2582 // classes to be GC'ed when they are no longer needed. Since the
2583 2583 // caller is the VMThread and we are at a safepoint, this is a good
2584 2584 // time to clear out unused weak references.
2585 2585
2586 2586 RC_TRACE(0x00000400, ("add: previous version length=%d",
2587 2587 _previous_versions->length()));
2588 2588
2589 2589 // skip the last entry since we just added it
2590 2590 for (int i = _previous_versions->length() - 2; i >= 0; i--) {
2591 2591 // check the previous versions array for GC'ed weak refs
2592 2592 pv_node = _previous_versions->at(i);
2593 2593 cp_ref = pv_node->prev_constant_pool();
2594 2594 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2595 2595 if (cp_ref == NULL) {
2596 2596 delete pv_node;
2597 2597 _previous_versions->remove_at(i);
2598 2598 // Since we are traversing the array backwards, we don't have to
2599 2599 // do anything special with the index.
2600 2600 continue; // robustness
2601 2601 }
2602 2602
2603 2603 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2604 2604 if (cp == NULL) {
2605 2605 // this entry has been GC'ed so remove it
2606 2606 delete pv_node;
2607 2607 _previous_versions->remove_at(i);
2608 2608 // Since we are traversing the array backwards, we don't have to
2609 2609 // do anything special with the index.
2610 2610 continue;
2611 2611 } else {
2612 2612 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
2613 2613 }
2614 2614
2615 2615 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2616 2616 if (method_refs != NULL) {
2617 2617 RC_TRACE(0x00000400, ("add: previous methods length=%d",
2618 2618 method_refs->length()));
2619 2619 for (int j = method_refs->length() - 1; j >= 0; j--) {
2620 2620 jweak method_ref = method_refs->at(j);
2621 2621 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2622 2622 if (method_ref == NULL) {
2623 2623 method_refs->remove_at(j);
2624 2624 // Since we are traversing the array backwards, we don't have to
2625 2625 // do anything special with the index.
2626 2626 continue; // robustness
2627 2627 }
2628 2628
2629 2629 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2630 2630 if (method == NULL || emcp_method_count == 0) {
2631 2631 // This method entry has been GC'ed or the current
2632 2632 // RedefineClasses() call has made all methods obsolete
2633 2633 // so remove it.
2634 2634 JNIHandles::destroy_weak_global(method_ref);
2635 2635 method_refs->remove_at(j);
2636 2636 } else {
2637 2637 // RC_TRACE macro has an embedded ResourceMark
2638 2638 RC_TRACE(0x00000400,
2639 2639 ("add: %s(%s): previous method @%d in version @%d is alive",
2640 2640 method->name()->as_C_string(), method->signature()->as_C_string(),
2641 2641 j, i));
2642 2642 }
2643 2643 }
2644 2644 }
2645 2645 }
2646 2646
2647 2647 int obsolete_method_count = old_methods->length() - emcp_method_count;
2648 2648
2649 2649 if (emcp_method_count != 0 && obsolete_method_count != 0 &&
2650 2650 _previous_versions->length() > 1) {
2651 2651 // We have a mix of obsolete and EMCP methods. If there is more
2652 2652 // than the previous version that we just added, then we have to
2653 2653 // clear out any matching EMCP method entries the hard way.
2654 2654 int local_count = 0;
2655 2655 for (int i = 0; i < old_methods->length(); i++) {
2656 2656 if (!emcp_methods->at(i)) {
2657 2657 // only obsolete methods are interesting
2658 2658 methodOop old_method = (methodOop) old_methods->obj_at(i);
2659 2659 Symbol* m_name = old_method->name();
2660 2660 Symbol* m_signature = old_method->signature();
2661 2661
2662 2662 // skip the last entry since we just added it
2663 2663 for (int j = _previous_versions->length() - 2; j >= 0; j--) {
2664 2664 // check the previous versions array for GC'ed weak refs
2665 2665 pv_node = _previous_versions->at(j);
2666 2666 cp_ref = pv_node->prev_constant_pool();
2667 2667 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2668 2668 if (cp_ref == NULL) {
2669 2669 delete pv_node;
2670 2670 _previous_versions->remove_at(j);
2671 2671 // Since we are traversing the array backwards, we don't have to
2672 2672 // do anything special with the index.
2673 2673 continue; // robustness
2674 2674 }
2675 2675
2676 2676 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2677 2677 if (cp == NULL) {
2678 2678 // this entry has been GC'ed so remove it
2679 2679 delete pv_node;
2680 2680 _previous_versions->remove_at(j);
2681 2681 // Since we are traversing the array backwards, we don't have to
2682 2682 // do anything special with the index.
2683 2683 continue;
2684 2684 }
2685 2685
2686 2686 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2687 2687 if (method_refs == NULL) {
2688 2688 // We have run into a PreviousVersion generation where
2689 2689 // all methods were made obsolete during that generation's
2690 2690 // RedefineClasses() operation. At the time of that
2691 2691 // operation, all EMCP methods were flushed so we don't
2692 2692 // have to go back any further.
2693 2693 //
2694 2694 // A NULL method_refs is different from an empty method_refs.
2695 2695 // We cannot infer any optimizations about older generations
2696 2696 // from an empty method_refs for the current generation.
2697 2697 break;
2698 2698 }
2699 2699
2700 2700 for (int k = method_refs->length() - 1; k >= 0; k--) {
2701 2701 jweak method_ref = method_refs->at(k);
2702 2702 assert(method_ref != NULL,
2703 2703 "weak method ref was unexpectedly cleared");
2704 2704 if (method_ref == NULL) {
2705 2705 method_refs->remove_at(k);
2706 2706 // Since we are traversing the array backwards, we don't
2707 2707 // have to do anything special with the index.
2708 2708 continue; // robustness
2709 2709 }
2710 2710
2711 2711 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2712 2712 if (method == NULL) {
2713 2713 // this method entry has been GC'ed so skip it
2714 2714 JNIHandles::destroy_weak_global(method_ref);
2715 2715 method_refs->remove_at(k);
2716 2716 continue;
2717 2717 }
2718 2718
2719 2719 if (method->name() == m_name &&
2720 2720 method->signature() == m_signature) {
2721 2721 // The current RedefineClasses() call has made all EMCP
2722 2722 // versions of this method obsolete so mark it as obsolete
2723 2723 // and remove the weak ref.
2724 2724 RC_TRACE(0x00000400,
2725 2725 ("add: %s(%s): flush obsolete method @%d in version @%d",
2726 2726 m_name->as_C_string(), m_signature->as_C_string(), k, j));
2727 2727
2728 2728 method->set_is_obsolete();
2729 2729 JNIHandles::destroy_weak_global(method_ref);
2730 2730 method_refs->remove_at(k);
2731 2731 break;
2732 2732 }
2733 2733 }
2734 2734
2735 2735 // The previous loop may not find a matching EMCP method, but
2736 2736 // that doesn't mean that we can optimize and not go any
2737 2737 // further back in the PreviousVersion generations. The EMCP
2738 2738 // method for this generation could have already been GC'ed,
2739 2739 // but there still may be an older EMCP method that has not
2740 2740 // been GC'ed.
2741 2741 }
2742 2742
2743 2743 if (++local_count >= obsolete_method_count) {
2744 2744 // no more obsolete methods so bail out now
2745 2745 break;
2746 2746 }
2747 2747 }
2748 2748 }
2749 2749 }
2750 2750 } // end add_previous_version()
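
// The backwards index traversal used throughout add_previous_version() is
// what makes the in-place remove_at() calls safe; as a standalone sketch
// (hypothetical 'arr' and 'is_dead'):
//
//   for (int i = arr->length() - 1; i >= 0; i--) {
//     if (is_dead(arr->at(i))) {
//       arr->remove_at(i);  // shifts only elements already visited
//     }
//   }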
2751 2751
2752 2752
2753 2753 // Determine if instanceKlass has a previous version.
2754 2754 bool instanceKlass::has_previous_version() const {
2755 2755 if (_previous_versions == NULL) {
2756 2756 // no previous versions array so answer is easy
2757 2757 return false;
2758 2758 }
2759 2759
2760 2760 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
2761 2761 // Check the previous versions array for an info node that hasn't
2762 2762 // been GC'ed
2763 2763 PreviousVersionNode * pv_node = _previous_versions->at(i);
2764 2764
2765 2765 jobject cp_ref = pv_node->prev_constant_pool();
2766 2766 assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
2767 2767 if (cp_ref == NULL) {
2768 2768 continue; // robustness
2769 2769 }
2770 2770
2771 2771 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2772 2772 if (cp != NULL) {
2773 2773 // we have at least one previous version
2774 2774 return true;
2775 2775 }
2776 2776
2777 2777 // We don't have to check the method refs. If the constant pool has
2778 2778 // been GC'ed then so have the methods.
2779 2779 }
2780 2780
2781 2781 // all of the underlying nodes' info has been GC'ed
2782 2782 return false;
2783 2783 } // end has_previous_version()
2784 2784
2785 2785 methodOop instanceKlass::method_with_idnum(int idnum) {
2786 2786 methodOop m = NULL;
2787 2787 if (idnum < methods()->length()) {
2788 2788 m = (methodOop) methods()->obj_at(idnum);
2789 2789 }
2790 2790 if (m == NULL || m->method_idnum() != idnum) {
2791 2791 for (int index = 0; index < methods()->length(); ++index) {
2792 2792 m = (methodOop) methods()->obj_at(index);
2793 2793 if (m->method_idnum() == idnum) {
2794 2794 return m;
2795 2795 }
2796 2796 }
2797 2797 }
2798 2798 return m;
2799 2799 }
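// A usage sketch for method_with_idnum() (a hypothetical caller; 'ik'
// and 'idnum' are assumed). The fast path treats the idnum as an array
// index, which is only an optimization; the linear search is the
// fallback. Note that on a failed search the last method examined is
// returned, so a careful caller re-checks the idnum:
//
//   methodOop m = ik->method_with_idnum(idnum);
//   if (m == NULL || m->method_idnum() != idnum) {
//     // no method with this idnum in the current methods array
//   }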
2800 2800
2801 2801
2802 2802 // Set the annotation at 'idnum' to 'anno'.
2803 2803 // We don't want to create or extend the array if 'anno' is NULL, since NULL is the
2804 2804 // default value. However, if the array already exists and is long enough, we must still store NULL values so they overwrite any previous entries.
2805 2805 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
2806 2806 objArrayOop md = *md_p;
2807 2807 if (md != NULL && md->length() > idnum) {
2808 2808 md->obj_at_put(idnum, anno);
2809 2809 } else if (anno != NULL) {
2810 2810 // create the array
2811 2811 int length = MAX2(idnum+1, (int)_idnum_allocated_count);
2812 2812 md = oopFactory::new_system_objArray(length, Thread::current());
2813 2813 if (*md_p != NULL) {
2814 2814 // copy the existing entries
2815 2815 for (int index = 0; index < (*md_p)->length(); index++) {
2816 2816 md->obj_at_put(index, (*md_p)->obj_at(index));
2817 2817 }
2818 2818 }
2819 2819 set_annotations(md, md_p);
2820 2820 md->obj_at_put(idnum, anno);
2821 2821 } // if there is no array and 'idnum' is out of range, there is nothing to do
2822 2822 }
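// The three cases handled by set_methods_annotations_of() above,
// summarized as a sketch (the names 'ik', 'md', 'anno' and 'idnum' are
// assumed, not from this file):
//
//   ik->set_methods_annotations_of(idnum, anno, &md);
//   // md != NULL and long enough        -> md[idnum] = anno (even if NULL)
//   // md missing/short and anno != NULL -> allocate, copy old entries, store
//   // md missing/short and anno == NULL -> no-op; NULL is the default anyway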
2823 2823
2824 2824 // Construct a PreviousVersionNode entry for the array hung off
2825 2825 // the instanceKlass.
2826 2826 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
2827 2827 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
2828 2828
2829 2829 _prev_constant_pool = prev_constant_pool;
2830 2830 _prev_cp_is_weak = prev_cp_is_weak;
2831 2831 _prev_EMCP_methods = prev_EMCP_methods;
2832 2832 }
2833 2833
2834 2834
2835 2835 // Destroy a PreviousVersionNode
2836 2836 PreviousVersionNode::~PreviousVersionNode() {
2837 2837 if (_prev_constant_pool != NULL) {
2838 2838 if (_prev_cp_is_weak) {
2839 2839 JNIHandles::destroy_weak_global(_prev_constant_pool);
2840 2840 } else {
2841 2841 JNIHandles::destroy_global(_prev_constant_pool);
2842 2842 }
2843 2843 }
2844 2844
2845 2845 if (_prev_EMCP_methods != NULL) {
2846 2846 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
2847 2847 jweak method_ref = _prev_EMCP_methods->at(i);
2848 2848 if (method_ref != NULL) {
2849 2849 JNIHandles::destroy_weak_global(method_ref);
2850 2850 }
2851 2851 }
2852 2852 delete _prev_EMCP_methods;
2853 2853 }
2854 2854 }
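// The destructor above depends on the constructor's handle bookkeeping:
// _prev_constant_pool is either a strong or a weak JNI global, and
// _prev_cp_is_weak records which destroy call matches. A sketch of how
// the two kinds of handles are made ('cp_h' is an assumed Handle):
//
//   jobject strong = JNIHandles::make_global(cp_h);      // keeps the cp alive
//   jweak   weak   = JNIHandles::make_weak_global(cp_h); // lets the cp be GC'ed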
2855 2855
2856 2856
2857 2857 // Construct a PreviousVersionInfo entry
2858 2858 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
2859 2859 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle
2860 2860 _prev_EMCP_method_handles = NULL;
2861 2861
2862 2862 jobject cp_ref = pv_node->prev_constant_pool();
2863 2863 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
2864 2864 if (cp_ref == NULL) {
2865 2865 return; // robustness
2866 2866 }
2867 2867
2868 2868 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2869 2869 if (cp == NULL) {
2870 2870 // Weak reference has been GC'ed. Since the constant pool has been
2871 2871 // GC'ed, the methods have also been GC'ed.
2872 2872 return;
2873 2873 }
2874 2874
2875 2875 // make the constantPoolOop safe to return
2876 2876 _prev_constant_pool_handle = constantPoolHandle(cp);
2877 2877
2878 2878 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2879 2879 if (method_refs == NULL) {
2880 2880 // the instanceKlass did not have any EMCP methods
2881 2881 return;
2882 2882 }
2883 2883
2884 2884 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
2885 2885
2886 2886 int n_methods = method_refs->length();
2887 2887 for (int i = 0; i < n_methods; i++) {
2888 2888 jweak method_ref = method_refs->at(i);
2889 2889 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2890 2890 if (method_ref == NULL) {
2891 2891 continue; // robustness
2892 2892 }
2893 2893
2894 2894 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2895 2895 if (method == NULL) {
2896 2896 // this entry has been GC'ed so skip it
2897 2897 continue;
2898 2898 }
2899 2899
2900 2900 // make the methodOop safe to return
2901 2901 _prev_EMCP_method_handles->append(methodHandle(method));
2902 2902 }
2903 2903 }
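// A recap of the conversion pattern used above: a jweak must first be
// resolved to a raw oop and then wrapped in a Handle before it is safe
// to hold across a safepoint, since the raw oop may move during GC:
//
//   methodOop method = (methodOop)JNIHandles::resolve(method_ref);
//   if (method != NULL) {
//     methodHandle mh(method);  // registered with the thread's HandleArea
//   }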
2904 2904
2905 2905
2906 2906 // Destroy a PreviousVersionInfo
2907 2907 PreviousVersionInfo::~PreviousVersionInfo() {
2908 2908 // Since _prev_EMCP_method_handles is not C-heap allocated, we
2909 2909 // don't have to delete it.
2910 2910 }
2911 2911
2912 2912
2913 2913 // Construct a helper for walking the previous versions array
2914 2914 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
2915 2915 _previous_versions = ik->previous_versions();
2916 2916 _current_index = 0;
2917 2917 // _hm needs no initialization
2918 2918 _current_p = NULL;
2919 2919 }
2920 2920
2921 2921
2922 2922 // Destroy a PreviousVersionWalker
2923 2923 PreviousVersionWalker::~PreviousVersionWalker() {
2924 2924 // Delete the current info just in case the caller didn't walk to
2925 2925 // the end of the previous versions list. No harm if _current_p is
2926 2926 // already NULL.
2927 2927 delete _current_p;
2928 2928
2929 2929 // When _hm is destroyed, all the Handles returned in
2930 2930 // PreviousVersionInfo objects will be destroyed.
2931 2931 // Also, after this destructor is finished it will be
2932 2932 // safe to delete the GrowableArray allocated in the
2933 2933 // PreviousVersionInfo objects.
2934 2934 }
2935 2935
2936 2936
2937 2937 // Return the interesting information for the next previous version
2938 2938 // of the klass. Returns NULL if there are no more previous versions.
2939 2939 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
2940 2940 if (_previous_versions == NULL) {
2941 2941 // no previous versions so nothing to return
2942 2942 return NULL;
2943 2943 }
2944 2944
2945 2945 delete _current_p; // cleanup the previous info for the caller
2946 2946 _current_p = NULL; // reset to NULL so we don't delete same object twice
2947 2947
2948 2948 int length = _previous_versions->length();
2949 2949
2950 2950 while (_current_index < length) {
2951 2951 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
2952 2952 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
2953 2953 PreviousVersionInfo(pv_node);
2954 2954
2955 2955 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
2956 2956 if (cp_h.is_null()) {
2957 2957 delete pv_info;
2958 2958
2959 2959 // The underlying node's info has been GC'ed so try the next one.
2960 2960 // We don't have to check the methods. If the constant pool has
2961 2961 // been GC'ed then so have the methods.
2962 2962 continue;
2963 2963 }
2964 2964
2965 2965 // Found a node with non-GC'ed info so return it. The walker will
2966 2966 // delete pv_info on the next call or in its own destructor.
2967 2967 _current_p = pv_info;
2968 2968 return pv_info;
2969 2969 }
2970 2970
2971 2971 // all of the underlying nodes' info has been GC'ed
2972 2972 return NULL;
2973 2973 } // end next_previous_version()
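// Typical walker usage, as a sketch (an assumed caller; real callers in
// the RedefineClasses code may differ in detail). The walker owns each
// PreviousVersionInfo it returns: next_previous_version() deletes the
// prior one and the destructor deletes the last one.
//
//   PreviousVersionWalker pvw(ik);  // 'ik' is an assumed instanceKlass*
//   for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     constantPoolHandle prev_cp = pv_info->prev_constant_pool_handle();
//     // ... examine prev_cp and the EMCP method handles ...
//   }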
1554 lines elided