--- old/src/share/vm/code/codeCache.cpp
+++ new/src/share/vm/code/codeCache.cpp
1 1 /*
2 2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 # include "incls/_precompiled.incl"
26 26 # include "incls/_codeCache.cpp.incl"
27 27
28 28 // Helper class for printing in CodeCache
29 29
30 30 class CodeBlob_sizes {
31 31 private:
32 32 int count;
33 33 int total_size;
34 34 int header_size;
35 35 int code_size;
36 36 int stub_size;
37 37 int relocation_size;
38 38 int scopes_oop_size;
39 39 int scopes_data_size;
40 40 int scopes_pcs_size;
41 41
42 42 public:
43 43 CodeBlob_sizes() {
44 44 count = 0;
45 45 total_size = 0;
46 46 header_size = 0;
47 47 code_size = 0;
48 48 stub_size = 0;
49 49 relocation_size = 0;
50 50 scopes_oop_size = 0;
51 51 scopes_data_size = 0;
52 52 scopes_pcs_size = 0;
53 53 }
54 54
55 55 int total() { return total_size; }
56 56 bool is_empty() { return count == 0; }
57 57
58 58 void print(const char* title) {
59 59 tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
60 60 count,
61 61 title,
62 62 total() / K,
63 63 header_size * 100 / total_size,
64 64 relocation_size * 100 / total_size,
65 65 code_size * 100 / total_size,
66 66 stub_size * 100 / total_size,
67 67 scopes_oop_size * 100 / total_size,
68 68 scopes_data_size * 100 / total_size,
69 69 scopes_pcs_size * 100 / total_size);
70 70 }
71 71
72 72 void add(CodeBlob* cb) {
73 73 count++;
74 74 total_size += cb->size();
75 75 header_size += cb->header_size();
76 76 relocation_size += cb->relocation_size();
77 - scopes_oop_size += cb->oops_size();
78 77 if (cb->is_nmethod()) {
79 - nmethod *nm = (nmethod*)cb;
78 + nmethod* nm = cb->as_nmethod_or_null();
80 79 code_size += nm->code_size();
81 80 stub_size += nm->stub_size();
82 81
82 + scopes_oop_size += nm->oops_size();
83 83 scopes_data_size += nm->scopes_data_size();
84 84 scopes_pcs_size += nm->scopes_pcs_size();
85 85 } else {
86 86 code_size += cb->instructions_size();
87 87 }
88 88 }
89 89 };
90 90
91 91
92 92 // CodeCache implementation
93 93
94 94 CodeHeap * CodeCache::_heap = new CodeHeap();
95 95 int CodeCache::_number_of_blobs = 0;
96 96 int CodeCache::_number_of_nmethods_with_dependencies = 0;
97 97 bool CodeCache::_needs_cache_clean = false;
98 98 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
99 99 nmethod* CodeCache::_saved_nmethods = NULL;
100 100
101 101
102 102 CodeBlob* CodeCache::first() {
103 103 assert_locked_or_safepoint(CodeCache_lock);
104 104 return (CodeBlob*)_heap->first();
105 105 }
106 106
107 107
108 108 CodeBlob* CodeCache::next(CodeBlob* cb) {
109 109 assert_locked_or_safepoint(CodeCache_lock);
110 110 return (CodeBlob*)_heap->next(cb);
111 111 }
112 112
113 113
114 114 CodeBlob* CodeCache::alive(CodeBlob *cb) {
115 115 assert_locked_or_safepoint(CodeCache_lock);
116 116 while (cb != NULL && !cb->is_alive()) cb = next(cb);
117 117 return cb;
118 118 }
119 119
120 120
121 121 nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
122 122 assert_locked_or_safepoint(CodeCache_lock);
123 123 while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
124 124 return (nmethod*)cb;
125 125 }
126 126
127 127
128 128 CodeBlob* CodeCache::allocate(int size) {
129 129 // Do not seize the CodeCache lock here--if the caller has not
130 130 // already done so, we are going to lose bigtime, since the code
131 131 // cache will contain a garbage CodeBlob until the caller can
132 132 // run the constructor for the CodeBlob subclass he is busy
133 133 // instantiating.
134 134 guarantee(size >= 0, "allocation request must be reasonable");
135 135 assert_locked_or_safepoint(CodeCache_lock);
136 136 CodeBlob* cb = NULL;
137 137 _number_of_blobs++;
138 138 while (true) {
139 139 cb = (CodeBlob*)_heap->allocate(size);
140 140 if (cb != NULL) break;
141 141 if (!_heap->expand_by(CodeCacheExpansionSize)) {
142 142 // Expansion failed
143 143 return NULL;
144 144 }
145 145 if (PrintCodeCacheExtension) {
146 146 ResourceMark rm;
147 147 tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
148 148 (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
149 149 (address)_heap->end() - (address)_heap->begin());
150 150 }
151 151 }
152 152 verify_if_often();
153 153 print_trace("allocation", cb, size);
154 154 return cb;
155 155 }
156 156
157 157 void CodeCache::free(CodeBlob* cb) {
158 158 assert_locked_or_safepoint(CodeCache_lock);
159 159 verify_if_often();
160 160
161 161 print_trace("free", cb);
162 162 if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
163 163 _number_of_nmethods_with_dependencies--;
164 164 }
165 165 _number_of_blobs--;
166 166
167 167 _heap->deallocate(cb);
168 168
169 169 verify_if_often();
170 170 assert(_number_of_blobs >= 0, "sanity check");
171 171 }
172 172
173 173
174 174 void CodeCache::commit(CodeBlob* cb) {
175 175 // this is called by nmethod::nmethod, which must already own CodeCache_lock
176 176 assert_locked_or_safepoint(CodeCache_lock);
177 177 if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
178 178 _number_of_nmethods_with_dependencies++;
179 179 }
180 180 // flush the hardware I-cache
181 181 ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
182 182 }
183 183
184 184
185 185 void CodeCache::flush() {
186 186 assert_locked_or_safepoint(CodeCache_lock);
187 187 Unimplemented();
188 188 }
189 189
190 190
191 191 // Iteration over CodeBlobs
192 192
193 193 #define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )
194 194 #define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
195 195 #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
196 196
197 197
198 198 bool CodeCache::contains(void *p) {
199 199 // It should be ok to call contains without holding a lock
200 200 return _heap->contains(p);
201 201 }
202 202
203 203
204 204 // This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
205 205 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
206 206 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
207 207 CodeBlob* CodeCache::find_blob(void* start) {
208 208 CodeBlob* result = find_blob_unsafe(start);
209 209 if (result == NULL) return NULL;
210 210   // We could potentially look up non_entrant methods
211 211 guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
212 212 return result;
213 213 }
214 214
215 215 nmethod* CodeCache::find_nmethod(void* start) {
216 216 CodeBlob *cb = find_blob(start);
217 217 assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
218 218 return (nmethod*)cb;
219 219 }
220 220
221 221
222 222 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
223 223 assert_locked_or_safepoint(CodeCache_lock);
224 224 FOR_ALL_BLOBS(p) {
225 225 f(p);
226 226 }
227 227 }
228 228
229 229
230 230 void CodeCache::nmethods_do(void f(nmethod* nm)) {
231 231 assert_locked_or_safepoint(CodeCache_lock);
232 232 FOR_ALL_BLOBS(nm) {
233 233 if (nm->is_nmethod()) f((nmethod*)nm);
234 234 }
235 235 }
236 236
237 237
238 238 int CodeCache::alignment_unit() {
239 239 return (int)_heap->alignment_unit();
240 240 }
241 241
242 242
243 243 int CodeCache::alignment_offset() {
244 244 return (int)_heap->alignment_offset();
245 245 }
246 246
247 247
248 248 // Mark code blobs for unloading if they contain otherwise
249 249 // unreachable oops.
250 250 void CodeCache::do_unloading(BoolObjectClosure* is_alive,
251 251 OopClosure* keep_alive,
252 252 bool unloading_occurred) {
253 253 assert_locked_or_safepoint(CodeCache_lock);
254 254 FOR_ALL_ALIVE_BLOBS(cb) {
255 255 cb->do_unloading(is_alive, keep_alive, unloading_occurred);
256 256 }
257 257 }
258 258
259 259 void CodeCache::blobs_do(CodeBlobClosure* f) {
260 260 assert_locked_or_safepoint(CodeCache_lock);
261 261 FOR_ALL_ALIVE_BLOBS(cb) {
262 262 f->do_code_blob(cb);
263 263
264 264 #ifdef ASSERT
265 265 if (cb->is_nmethod())
266 266 ((nmethod*)cb)->verify_scavenge_root_oops();
267 267 #endif //ASSERT
268 268 }
269 269 }
270 270
271 271 // Walk the list of methods which might contain non-perm oops.
272 272 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
273 273 assert_locked_or_safepoint(CodeCache_lock);
274 274 debug_only(mark_scavenge_root_nmethods());
275 275
276 276 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
277 277 debug_only(cur->clear_scavenge_root_marked());
278 278 assert(cur->scavenge_root_not_marked(), "");
279 279 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
280 280
281 281 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
282 282 #ifndef PRODUCT
283 283 if (TraceScavenge) {
284 284 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
285 285 }
286 286 #endif //PRODUCT
287 287 if (is_live) {
288 288 // Perform cur->oops_do(f), maybe just once per nmethod.
289 289 f->do_code_blob(cur);
290 290 cur->fix_oop_relocations();
291 291 }
292 292 }
293 293
294 294 // Check for stray marks.
295 295 debug_only(verify_perm_nmethods(NULL));
296 296 }
297 297
298 298 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
299 299 assert_locked_or_safepoint(CodeCache_lock);
300 300 nm->set_on_scavenge_root_list();
301 301 nm->set_scavenge_root_link(_scavenge_root_nmethods);
302 302 set_scavenge_root_nmethods(nm);
303 303 print_trace("add_scavenge_root", nm);
304 304 }
305 305
306 306 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
307 307 assert_locked_or_safepoint(CodeCache_lock);
308 308 print_trace("drop_scavenge_root", nm);
309 309 nmethod* last = NULL;
310 310 nmethod* cur = scavenge_root_nmethods();
311 311 while (cur != NULL) {
312 312 nmethod* next = cur->scavenge_root_link();
313 313 if (cur == nm) {
314 314 if (last != NULL)
315 315 last->set_scavenge_root_link(next);
316 316 else set_scavenge_root_nmethods(next);
317 317 nm->set_scavenge_root_link(NULL);
318 318 nm->clear_on_scavenge_root_list();
319 319 return;
320 320 }
321 321 last = cur;
322 322 cur = next;
323 323 }
324 324 assert(false, "should have been on list");
325 325 }
326 326
327 327 void CodeCache::prune_scavenge_root_nmethods() {
328 328 assert_locked_or_safepoint(CodeCache_lock);
329 329 debug_only(mark_scavenge_root_nmethods());
330 330
331 331 nmethod* last = NULL;
332 332 nmethod* cur = scavenge_root_nmethods();
333 333 while (cur != NULL) {
334 334 nmethod* next = cur->scavenge_root_link();
335 335 debug_only(cur->clear_scavenge_root_marked());
336 336 assert(cur->scavenge_root_not_marked(), "");
337 337 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
338 338
339 339 if (!cur->is_zombie() && !cur->is_unloaded()
340 340 && cur->detect_scavenge_root_oops()) {
341 341 // Keep it. Advance 'last' to prevent deletion.
342 342 last = cur;
343 343 } else {
344 344 // Prune it from the list, so we don't have to look at it any more.
345 345 print_trace("prune_scavenge_root", cur);
346 346 cur->set_scavenge_root_link(NULL);
347 347 cur->clear_on_scavenge_root_list();
348 348 if (last != NULL)
349 349 last->set_scavenge_root_link(next);
350 350 else set_scavenge_root_nmethods(next);
351 351 }
352 352 cur = next;
353 353 }
354 354
355 355 // Check for stray marks.
356 356 debug_only(verify_perm_nmethods(NULL));
357 357 }
358 358
359 359 #ifndef PRODUCT
360 360 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
361 361 // While we are here, verify the integrity of the list.
362 362 mark_scavenge_root_nmethods();
363 363 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
364 364 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
365 365 cur->clear_scavenge_root_marked();
366 366 }
367 367 verify_perm_nmethods(f);
368 368 }
369 369
370 370 // Temporarily mark nmethods that are claimed to be on the non-perm list.
371 371 void CodeCache::mark_scavenge_root_nmethods() {
372 372 FOR_ALL_ALIVE_BLOBS(cb) {
373 373 if (cb->is_nmethod()) {
374 374 nmethod *nm = (nmethod*)cb;
375 375 assert(nm->scavenge_root_not_marked(), "clean state");
376 376 if (nm->on_scavenge_root_list())
377 377 nm->set_scavenge_root_marked();
378 378 }
379 379 }
380 380 }
381 381
382 382 // If the closure is given, run it on the unlisted nmethods.
383 383 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
384 384 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
385 385 FOR_ALL_ALIVE_BLOBS(cb) {
386 386 bool call_f = (f_or_null != NULL);
387 387 if (cb->is_nmethod()) {
388 388 nmethod *nm = (nmethod*)cb;
389 389 assert(nm->scavenge_root_not_marked(), "must be already processed");
390 390 if (nm->on_scavenge_root_list())
391 391 call_f = false; // don't show this one to the client
392 392 nm->verify_scavenge_root_oops();
393 393 } else {
394 394 call_f = false; // not an nmethod
395 395 }
396 396 if (call_f) f_or_null->do_code_blob(cb);
397 397 }
398 398 }
399 399 #endif //PRODUCT
400 400
401 401
402 402 nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
403 403 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
404 404 nmethod* saved = _saved_nmethods;
405 405 nmethod* prev = NULL;
406 406 while (saved != NULL) {
407 407 if (saved->is_in_use() && saved->method() == m) {
408 408 if (prev != NULL) {
409 409 prev->set_saved_nmethod_link(saved->saved_nmethod_link());
410 410 } else {
411 411 _saved_nmethods = saved->saved_nmethod_link();
412 412 }
413 413 assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
414 414 saved->set_speculatively_disconnected(false);
415 415 saved->set_saved_nmethod_link(NULL);
416 416 if (PrintMethodFlushing) {
417 417 saved->print_on(tty, " ### nmethod is reconnected");
418 418 }
419 419 if (LogCompilation && (xtty != NULL)) {
420 420 ttyLocker ttyl;
421 421 xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
422 422 xtty->method(methodOop(m));
423 423 xtty->stamp();
424 424 xtty->end_elem();
425 425 }
426 426 return saved;
427 427 }
428 428 prev = saved;
429 429 saved = saved->saved_nmethod_link();
430 430 }
431 431 return NULL;
432 432 }
433 433
434 434 void CodeCache::remove_saved_code(nmethod* nm) {
435 435 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
436 436 assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
437 437 nmethod* saved = _saved_nmethods;
438 438 nmethod* prev = NULL;
439 439 while (saved != NULL) {
440 440 if (saved == nm) {
441 441 if (prev != NULL) {
442 442 prev->set_saved_nmethod_link(saved->saved_nmethod_link());
443 443 } else {
444 444 _saved_nmethods = saved->saved_nmethod_link();
445 445 }
446 446 if (LogCompilation && (xtty != NULL)) {
447 447 ttyLocker ttyl;
448 448 xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
449 449 xtty->stamp();
450 450 xtty->end_elem();
451 451 }
452 452 return;
453 453 }
454 454 prev = saved;
455 455 saved = saved->saved_nmethod_link();
456 456 }
457 457 ShouldNotReachHere();
458 458 }
459 459
460 460 void CodeCache::speculatively_disconnect(nmethod* nm) {
461 461 assert_locked_or_safepoint(CodeCache_lock);
462 462 assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
463 463 nm->set_saved_nmethod_link(_saved_nmethods);
464 464 _saved_nmethods = nm;
465 465 if (PrintMethodFlushing) {
466 466 nm->print_on(tty, " ### nmethod is speculatively disconnected");
467 467 }
468 468 if (LogCompilation && (xtty != NULL)) {
469 469 ttyLocker ttyl;
470 470 xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
471 471 xtty->method(methodOop(nm->method()));
472 472 xtty->stamp();
473 473 xtty->end_elem();
474 474 }
475 475 nm->method()->clear_code();
476 476 nm->set_speculatively_disconnected(true);
477 477 }
478 478
479 479
480 480 void CodeCache::gc_prologue() {
481 481 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
482 482 }
483 483
484 484
485 485 void CodeCache::gc_epilogue() {
486 486 assert_locked_or_safepoint(CodeCache_lock);
487 487 FOR_ALL_ALIVE_BLOBS(cb) {
488 488 if (cb->is_nmethod()) {
489 489 nmethod *nm = (nmethod*)cb;
490 490 assert(!nm->is_unloaded(), "Tautology");
491 491 if (needs_cache_clean()) {
492 492 nm->cleanup_inline_caches();
493 493 }
494 - debug_only(nm->verify();)
494 + DEBUG_ONLY(nm->verify());
495 + nm->fix_oop_relocations();
495 496 }
496 - cb->fix_oop_relocations();
497 497 }
498 498 set_needs_cache_clean(false);
499 499 prune_scavenge_root_nmethods();
500 500 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
501 501 }
502 502
503 503
504 504 address CodeCache::first_address() {
505 505 assert_locked_or_safepoint(CodeCache_lock);
506 506 return (address)_heap->begin();
507 507 }
508 508
509 509
510 510 address CodeCache::last_address() {
511 511 assert_locked_or_safepoint(CodeCache_lock);
512 512 return (address)_heap->end();
513 513 }
514 514
515 515
516 516 void icache_init();
517 517
518 518 void CodeCache::initialize() {
519 519 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
520 520 #ifdef COMPILER2
521 521 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
522 522 #endif
523 523 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
524 524   // This was originally just a check of the alignment, causing failure; instead, round
525 525 // the code cache to the page size. In particular, Solaris is moving to a larger
526 526 // default page size.
527 527 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
528 528 InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
529 529 ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
530 530 if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
531 531 vm_exit_during_initialization("Could not reserve enough space for code cache");
532 532 }
533 533
534 534 MemoryService::add_code_heap_memory_pool(_heap);
535 535
536 536 // Initialize ICache flush mechanism
537 537 // This service is needed for os::register_code_area
538 538 icache_init();
539 539
540 540 // Give OS a chance to register generated code area.
541 541 // This is used on Windows 64 bit platforms to register
542 542 // Structured Exception Handlers for our generated code.
543 543 os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
544 544 }
545 545
546 546
547 547 void codeCache_init() {
548 548 CodeCache::initialize();
549 549 }
550 550
551 551 //------------------------------------------------------------------------------------------------
552 552
553 553 int CodeCache::number_of_nmethods_with_dependencies() {
554 554 return _number_of_nmethods_with_dependencies;
555 555 }
556 556
557 557 void CodeCache::clear_inline_caches() {
558 558 assert_locked_or_safepoint(CodeCache_lock);
559 559 FOR_ALL_ALIVE_NMETHODS(nm) {
560 560 nm->clear_inline_caches();
561 561 }
562 562 }
563 563
564 564 #ifndef PRODUCT
565 565 // used to keep track of how much time is spent in mark_for_deoptimization
566 566 static elapsedTimer dependentCheckTime;
567 567 static int dependentCheckCount = 0;
568 568 #endif // PRODUCT
569 569
570 570
571 571 int CodeCache::mark_for_deoptimization(DepChange& changes) {
572 572 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
573 573
574 574 #ifndef PRODUCT
575 575 dependentCheckTime.start();
576 576 dependentCheckCount++;
577 577 #endif // PRODUCT
578 578
579 579 int number_of_marked_CodeBlobs = 0;
580 580
581 581 // search the hierarchy looking for nmethods which are affected by the loading of this class
582 582
583 583 // then search the interfaces this class implements looking for nmethods
584 584   // which might be dependent on the fact that an interface only had one
585 585 // implementor.
586 586
587 587 { No_Safepoint_Verifier nsv;
588 588 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
589 589 klassOop d = str.klass();
590 590 number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
591 591 }
592 592 }
593 593
594 594 if (VerifyDependencies) {
595 595 // Turn off dependency tracing while actually testing deps.
596 596 NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
597 597 FOR_ALL_ALIVE_NMETHODS(nm) {
598 598 if (!nm->is_marked_for_deoptimization() &&
599 599 nm->check_all_dependencies()) {
600 600 ResourceMark rm;
601 601 tty->print_cr("Should have been marked for deoptimization:");
602 602 changes.print();
603 603 nm->print();
604 604 nm->print_dependencies();
605 605 }
606 606 }
607 607 }
608 608
609 609 #ifndef PRODUCT
610 610 dependentCheckTime.stop();
611 611 #endif // PRODUCT
612 612
613 613 return number_of_marked_CodeBlobs;
614 614 }
615 615
616 616
617 617 #ifdef HOTSWAP
618 618 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
619 619 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
620 620 int number_of_marked_CodeBlobs = 0;
621 621
622 622 // Deoptimize all methods of the evolving class itself
623 623 objArrayOop old_methods = dependee->methods();
624 624 for (int i = 0; i < old_methods->length(); i++) {
625 625 ResourceMark rm;
626 626 methodOop old_method = (methodOop) old_methods->obj_at(i);
627 627 nmethod *nm = old_method->code();
628 628 if (nm != NULL) {
629 629 nm->mark_for_deoptimization();
630 630 number_of_marked_CodeBlobs++;
631 631 }
632 632 }
633 633
634 634 FOR_ALL_ALIVE_NMETHODS(nm) {
635 635 if (nm->is_marked_for_deoptimization()) {
636 636 // ...Already marked in the previous pass; don't count it again.
637 637 } else if (nm->is_evol_dependent_on(dependee())) {
638 638 ResourceMark rm;
639 639 nm->mark_for_deoptimization();
640 640 number_of_marked_CodeBlobs++;
641 641 } else {
642 642 // flush caches in case they refer to a redefined methodOop
643 643 nm->clear_inline_caches();
644 644 }
645 645 }
646 646
647 647 return number_of_marked_CodeBlobs;
648 648 }
649 649 #endif // HOTSWAP
650 650
651 651
652 652 // Deoptimize all methods
653 653 void CodeCache::mark_all_nmethods_for_deoptimization() {
654 654 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
655 655 FOR_ALL_ALIVE_NMETHODS(nm) {
656 656 nm->mark_for_deoptimization();
657 657 }
658 658 }
659 659
660 660
661 661 int CodeCache::mark_for_deoptimization(methodOop dependee) {
662 662 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
663 663 int number_of_marked_CodeBlobs = 0;
664 664
665 665 FOR_ALL_ALIVE_NMETHODS(nm) {
666 666 if (nm->is_dependent_on_method(dependee)) {
667 667 ResourceMark rm;
668 668 nm->mark_for_deoptimization();
669 669 number_of_marked_CodeBlobs++;
670 670 }
671 671 }
672 672
673 673 return number_of_marked_CodeBlobs;
674 674 }
675 675
676 676 void CodeCache::make_marked_nmethods_zombies() {
677 677 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
678 678 FOR_ALL_ALIVE_NMETHODS(nm) {
679 679 if (nm->is_marked_for_deoptimization()) {
680 680
681 681 // If the nmethod has already been made non-entrant and it can be converted
682 682 // then zombie it now. Otherwise make it non-entrant and it will eventually
683 683 // be zombied when it is no longer seen on the stack. Note that the nmethod
684 684 // might be "entrant" and not on the stack and so could be zombied immediately
685 685 // but we can't tell because we don't track it on stack until it becomes
686 686 // non-entrant.
687 687
688 688 if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
689 689 nm->make_zombie();
690 690 } else {
691 691 nm->make_not_entrant();
692 692 }
693 693 }
694 694 }
695 695 }
696 696
697 697 void CodeCache::make_marked_nmethods_not_entrant() {
698 698 assert_locked_or_safepoint(CodeCache_lock);
699 699 FOR_ALL_ALIVE_NMETHODS(nm) {
700 700 if (nm->is_marked_for_deoptimization()) {
701 701 nm->make_not_entrant();
702 702 }
703 703 }
704 704 }
705 705
706 706 void CodeCache::verify() {
707 707 _heap->verify();
708 708 FOR_ALL_ALIVE_BLOBS(p) {
709 709 p->verify();
710 710 }
711 711 }
712 712
713 713 //------------------------------------------------------------------------------------------------
714 714 // Non-product version
715 715
716 716 #ifndef PRODUCT
717 717
718 718 void CodeCache::verify_if_often() {
719 719 if (VerifyCodeCacheOften) {
720 720 _heap->verify();
721 721 }
722 722 }
723 723
724 724 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
725 725 if (PrintCodeCache2) { // Need to add a new flag
726 726 ResourceMark rm;
727 727 if (size == 0) size = cb->size();
728 728 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
729 729 }
730 730 }
731 731
732 732 void CodeCache::print_internals() {
733 733 int nmethodCount = 0;
734 734 int runtimeStubCount = 0;
735 735 int adapterCount = 0;
736 736 int deoptimizationStubCount = 0;
737 737 int uncommonTrapStubCount = 0;
738 738 int bufferBlobCount = 0;
739 739 int total = 0;
740 740 int nmethodAlive = 0;
741 741 int nmethodNotEntrant = 0;
742 742 int nmethodZombie = 0;
743 743 int nmethodUnloaded = 0;
744 744 int nmethodJava = 0;
745 745 int nmethodNative = 0;
746 746 int maxCodeSize = 0;
747 747 ResourceMark rm;
748 748
749 749 CodeBlob *cb;
750 750 for (cb = first(); cb != NULL; cb = next(cb)) {
751 751 total++;
752 752 if (cb->is_nmethod()) {
753 753 nmethod* nm = (nmethod*)cb;
754 754
755 755 if (Verbose && nm->method() != NULL) {
756 756 ResourceMark rm;
757 757 char *method_name = nm->method()->name_and_sig_as_C_string();
758 758 tty->print("%s", method_name);
759 759 if(nm->is_alive()) { tty->print_cr(" alive"); }
760 760 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
761 761 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
762 762 }
763 763
764 764 nmethodCount++;
765 765
766 766 if(nm->is_alive()) { nmethodAlive++; }
767 767 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
768 768 if(nm->is_zombie()) { nmethodZombie++; }
769 769 if(nm->is_unloaded()) { nmethodUnloaded++; }
770 770 if(nm->is_native_method()) { nmethodNative++; }
771 771
772 772 if(nm->method() != NULL && nm->is_java_method()) {
773 773 nmethodJava++;
774 774 if(nm->code_size() > maxCodeSize) {
775 775 maxCodeSize = nm->code_size();
776 776 }
777 777 }
778 778 } else if (cb->is_runtime_stub()) {
779 779 runtimeStubCount++;
780 780 } else if (cb->is_deoptimization_stub()) {
781 781 deoptimizationStubCount++;
782 782 } else if (cb->is_uncommon_trap_stub()) {
783 783 uncommonTrapStubCount++;
784 784 } else if (cb->is_adapter_blob()) {
785 785 adapterCount++;
786 786 } else if (cb->is_buffer_blob()) {
787 787 bufferBlobCount++;
788 788 }
789 789 }
790 790
791 791 int bucketSize = 512;
792 792 int bucketLimit = maxCodeSize / bucketSize + 1;
793 793 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
794 794 memset(buckets,0,sizeof(int) * bucketLimit);
795 795
796 796 for (cb = first(); cb != NULL; cb = next(cb)) {
797 797 if (cb->is_nmethod()) {
798 798 nmethod* nm = (nmethod*)cb;
799 799 if(nm->is_java_method()) {
800 800 buckets[nm->code_size() / bucketSize]++;
801 801 }
802 802 }
803 803 }
804 804 tty->print_cr("Code Cache Entries (total of %d)",total);
805 805 tty->print_cr("-------------------------------------------------");
806 806 tty->print_cr("nmethods: %d",nmethodCount);
807 807 tty->print_cr("\talive: %d",nmethodAlive);
808 808 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
809 809 tty->print_cr("\tzombie: %d",nmethodZombie);
810 810 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
811 811 tty->print_cr("\tjava: %d",nmethodJava);
812 812 tty->print_cr("\tnative: %d",nmethodNative);
813 813 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
814 814 tty->print_cr("adapters: %d",adapterCount);
815 815 tty->print_cr("buffer blobs: %d",bufferBlobCount);
816 816 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
817 817 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
818 818 tty->print_cr("\nnmethod size distribution (non-zombie java)");
819 819 tty->print_cr("-------------------------------------------------");
820 820
821 821 for(int i=0; i<bucketLimit; i++) {
822 822 if(buckets[i] != 0) {
823 823 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
824 824 tty->fill_to(40);
825 825 tty->print_cr("%d",buckets[i]);
826 826 }
827 827 }
828 828
829 829 FREE_C_HEAP_ARRAY(int, buckets);
830 830 }
831 831
832 832 void CodeCache::print() {
833 833 CodeBlob_sizes live;
834 834 CodeBlob_sizes dead;
835 835
836 836 FOR_ALL_BLOBS(p) {
837 837 if (!p->is_alive()) {
838 838 dead.add(p);
839 839 } else {
840 840 live.add(p);
841 841 }
842 842 }
843 843
844 844 tty->print_cr("CodeCache:");
845 845
846 846   tty->print_cr("nmethod dependency checking time %f, avg per check %f", dependentCheckTime.seconds(),
847 847 dependentCheckTime.seconds() / dependentCheckCount);
848 848
849 849 if (!live.is_empty()) {
850 850 live.print("live");
851 851 }
852 852 if (!dead.is_empty()) {
853 853 dead.print("dead");
854 854 }
855 855
856 856
857 857 if (Verbose) {
858 858 // print the oop_map usage
859 859 int code_size = 0;
860 860 int number_of_blobs = 0;
861 861 int number_of_oop_maps = 0;
862 862 int map_size = 0;
863 863 FOR_ALL_BLOBS(p) {
864 864 if (p->is_alive()) {
865 865 number_of_blobs++;
866 866 code_size += p->instructions_size();
867 867 OopMapSet* set = p->oop_maps();
868 868 if (set != NULL) {
869 869 number_of_oop_maps += set->size();
870 870 map_size += set->heap_size();
871 871 }
872 872 }
873 873 }
874 874 tty->print_cr("OopMaps");
875 875 tty->print_cr(" #blobs = %d", number_of_blobs);
876 876 tty->print_cr(" code size = %d", code_size);
877 877 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
878 878 tty->print_cr(" map size = %d", map_size);
879 879 }
880 880
881 881 }
882 882
883 883 #endif // PRODUCT