rev 2691 : [mq]: g1-reference-processing
--- old/src/share/vm/memory/genCollectedHeap.cpp
+++ new/src/share/vm/memory/genCollectedHeap.cpp
1 1 /*
2 2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/symbolTable.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "gc_implementation/shared/collectorCounters.hpp"
31 31 #include "gc_implementation/shared/vmGCOperations.hpp"
32 32 #include "gc_interface/collectedHeap.inline.hpp"
33 33 #include "memory/compactPermGen.hpp"
34 34 #include "memory/filemap.hpp"
35 35 #include "memory/gcLocker.inline.hpp"
36 36 #include "memory/genCollectedHeap.hpp"
37 37 #include "memory/genOopClosures.inline.hpp"
38 38 #include "memory/generation.inline.hpp"
39 39 #include "memory/generationSpec.hpp"
40 40 #include "memory/permGen.hpp"
41 41 #include "memory/resourceArea.hpp"
42 42 #include "memory/sharedHeap.hpp"
43 43 #include "memory/space.hpp"
44 44 #include "oops/oop.inline.hpp"
45 45 #include "oops/oop.inline2.hpp"
46 46 #include "runtime/aprofiler.hpp"
47 47 #include "runtime/biasedLocking.hpp"
48 48 #include "runtime/fprofiler.hpp"
49 49 #include "runtime/handles.hpp"
50 50 #include "runtime/handles.inline.hpp"
51 51 #include "runtime/java.hpp"
52 52 #include "runtime/vmThread.hpp"
53 53 #include "services/memoryService.hpp"
54 54 #include "utilities/vmError.hpp"
55 55 #include "utilities/workgroup.hpp"
56 56 #ifndef SERIALGC
57 57 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
58 58 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
59 59 #endif
60 60
61 61 GenCollectedHeap* GenCollectedHeap::_gch;
62 62 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
63 63
64 64 // The set of potentially parallel tasks in strong root scanning.
65 65 enum GCH_process_strong_roots_tasks {
66 66 // We probably want to parallelize both of these internally, but for now...
67 67 GCH_PS_younger_gens,
68 68 // Leave this one last.
69 69 GCH_PS_NumElements
70 70 };
71 71
72 72 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
73 73 SharedHeap(policy),
74 74 _gen_policy(policy),
75 75 _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
76 76 _full_collections_completed(0)
77 77 {
78 78 if (_gen_process_strong_tasks == NULL ||
79 79 !_gen_process_strong_tasks->valid()) {
80 80 vm_exit_during_initialization("Failed necessary allocation.");
81 81 }
82 82 assert(policy != NULL, "Sanity check");
83 83 _preloading_shared_classes = false;
84 84 }
85 85
86 86 jint GenCollectedHeap::initialize() {
87 87 CollectedHeap::pre_initialize();
88 88
89 89 int i;
90 90 _n_gens = gen_policy()->number_of_generations();
91 91
92 92 // While there are no constraints in the GC code that HeapWordSize
93 93 // be any particular value, there are multiple other areas in the
94 94 // system which believe this to be true (e.g. oop->object_size in some
95 95 // cases incorrectly returns the size in wordSize units rather than
96 96 // HeapWordSize).
97 97 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
98 98
99 99 // The heap must be at least as aligned as generations.
100 100 size_t alignment = Generation::GenGrain;
101 101
102 102 _gen_specs = gen_policy()->generations();
103 103 PermanentGenerationSpec *perm_gen_spec =
104 104 collector_policy()->permanent_generation();
105 105
106 106 // Make sure the sizes are all aligned.
107 107 for (i = 0; i < _n_gens; i++) {
108 108 _gen_specs[i]->align(alignment);
109 109 }
110 110 perm_gen_spec->align(alignment);
111 111
112 112 // If we are dumping the heap, then allocate a wasted block of address
113 113 // space in order to push the heap to a lower address. This extra
114 114 // address range allows for other (or larger) libraries to be loaded
115 115 // without them occupying the space required for the shared spaces.
116 116
117 117 if (DumpSharedSpaces) {
118 118 uintx reserved = 0;
119 119 uintx block_size = 64*1024*1024;
120 120 while (reserved < SharedDummyBlockSize) {
121 121 char* dummy = os::reserve_memory(block_size);
122 122 reserved += block_size;
123 123 }
124 124 }
125 125
126 126 // Allocate space for the heap.
127 127
128 128 char* heap_address;
129 129 size_t total_reserved = 0;
130 130 int n_covered_regions = 0;
131 131 ReservedSpace heap_rs(0);
132 132
133 133 heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
134 134 &n_covered_regions, &heap_rs);
135 135
136 136 if (UseSharedSpaces) {
137 137 if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
138 138 if (heap_rs.is_reserved()) {
139 139 heap_rs.release();
140 140 }
141 141 FileMapInfo* mapinfo = FileMapInfo::current_info();
142 142 mapinfo->fail_continue("Unable to reserve shared region.");
143 143 allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
144 144 &heap_rs);
145 145 }
146 146 }
147 147
148 148 if (!heap_rs.is_reserved()) {
149 149 vm_shutdown_during_initialization(
150 150 "Could not reserve enough space for object heap");
151 151 return JNI_ENOMEM;
152 152 }
153 153
154 154 _reserved = MemRegion((HeapWord*)heap_rs.base(),
155 155 (HeapWord*)(heap_rs.base() + heap_rs.size()));
156 156
157 157 // It is important to do this in a way such that concurrent readers can't
158 158 // temporarily think something is in the heap. (Seen this happen in asserts.)
159 159 _reserved.set_word_size(0);
160 160 _reserved.set_start((HeapWord*)heap_rs.base());
161 161 size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
162 162 - perm_gen_spec->misc_code_size();
163 163 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
164 164
165 165 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
166 166 set_barrier_set(rem_set()->bs());
167 167
168 168 _gch = this;
169 169
170 170 for (i = 0; i < _n_gens; i++) {
171 171 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
172 172 UseSharedSpaces, UseSharedSpaces);
173 173 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
174 174 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
175 175 }
176 176 _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
177 177
178 178 clear_incremental_collection_failed();
179 179
180 180 #ifndef SERIALGC
181 181 // If we are running CMS, create the collector responsible
182 182 // for collecting the CMS generations.
183 183 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
184 184 bool success = create_cms_collector();
185 185 if (!success) return JNI_ENOMEM;
186 186 }
187 187 #endif // SERIALGC
188 188
189 189 return JNI_OK;
190 190 }
191 191
192 192
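A note on the overflow guards in allocate() below: each "total_reserved += x" is immediately followed by a "total_reserved < x" test, the standard unsigned wrap-around check. A minimal standalone sketch of the idiom (names are illustrative, not HotSpot's):

#include <cstddef>

// For unsigned arithmetic, a + b wraps around exactly when the sum is
// smaller than either operand; allocate() tests this after each addition.
static bool add_would_overflow(size_t a, size_t b, size_t* sum) {
  *sum = a + b;
  return *sum < b;
}

// Usage: size_t total = 0;
//        if (add_would_overflow(total, gen_max_size, &total)) { /* bail */ }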
193 193 char* GenCollectedHeap::allocate(size_t alignment,
194 194 PermanentGenerationSpec* perm_gen_spec,
195 195 size_t* _total_reserved,
196 196 int* _n_covered_regions,
197 197 ReservedSpace* heap_rs){
198 198 const char overflow_msg[] = "The size of the object heap + VM data exceeds "
199 199 "the maximum representable size";
200 200
201 201 // Now figure out the total size.
202 202 size_t total_reserved = 0;
203 203 int n_covered_regions = 0;
204 204 const size_t pageSize = UseLargePages ?
205 205 os::large_page_size() : os::vm_page_size();
206 206
207 207 for (int i = 0; i < _n_gens; i++) {
208 208 total_reserved += _gen_specs[i]->max_size();
209 209 if (total_reserved < _gen_specs[i]->max_size()) {
210 210 vm_exit_during_initialization(overflow_msg);
211 211 }
212 212 n_covered_regions += _gen_specs[i]->n_covered_regions();
213 213 }
214 214 assert(total_reserved % pageSize == 0,
215 215 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
216 216 SIZE_FORMAT, total_reserved, pageSize));
217 217 total_reserved += perm_gen_spec->max_size();
218 218 assert(total_reserved % pageSize == 0,
219 219 err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
220 220 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
221 221 pageSize, perm_gen_spec->max_size()));
222 222
223 223 if (total_reserved < perm_gen_spec->max_size()) {
224 224 vm_exit_during_initialization(overflow_msg);
225 225 }
226 226 n_covered_regions += perm_gen_spec->n_covered_regions();
227 227
228 228 // Add the size of the data area which shares the same reserved area
229 229 // as the heap, but which is not actually part of the heap.
230 230 size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
231 231
232 232 total_reserved += s;
233 233 if (total_reserved < s) {
234 234 vm_exit_during_initialization(overflow_msg);
235 235 }
236 236
237 237 if (UseLargePages) {
238 238 assert(total_reserved != 0, "total_reserved cannot be 0");
239 239 total_reserved = round_to(total_reserved, os::large_page_size());
240 240 if (total_reserved < os::large_page_size()) {
241 241 vm_exit_during_initialization(overflow_msg);
242 242 }
243 243 }
244 244
245 245 // Calculate the address at which the heap must reside in order for
246 246 // the shared data to be at the required address.
247 247
248 248 char* heap_address;
249 249 if (UseSharedSpaces) {
250 250
251 251 // Calculate the address of the first word beyond the heap.
252 252 FileMapInfo* mapinfo = FileMapInfo::current_info();
253 253 int lr = CompactingPermGenGen::n_regions - 1;
254 254 size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
255 255 heap_address = mapinfo->region_base(lr) + capacity;
256 256
257 257 // Calculate the address of the first word of the heap.
258 258 heap_address -= total_reserved;
259 259 } else {
260 260 heap_address = NULL; // any address will do.
261 261 if (UseCompressedOops) {
262 262 heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
263 263 *_total_reserved = total_reserved;
264 264 *_n_covered_regions = n_covered_regions;
265 265 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
266 266 UseLargePages, heap_address);
267 267
268 268 if (heap_address != NULL && !heap_rs->is_reserved()) {
269 269 // Failed to reserve at specified address - the requested memory
270 270 // region is taken already, for example, by the 'java' launcher.
271 271 // Try again to reserve the heap higher.
272 272 heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
273 273 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
274 274 UseLargePages, heap_address);
275 275
276 276 if (heap_address != NULL && !heap_rs->is_reserved()) {
277 277 // Failed to reserve at specified address again - give up.
278 278 heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
279 279 assert(heap_address == NULL, "");
280 280 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
281 281 UseLargePages, heap_address);
282 282 }
283 283 }
284 284 return heap_address;
285 285 }
286 286 }
287 287
288 288 *_total_reserved = total_reserved;
289 289 *_n_covered_regions = n_covered_regions;
290 290 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
291 291 UseLargePages, heap_address);
292 292
293 293 return heap_address;
294 294 }
295 295
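The UseCompressedOops branch in allocate() above tries three heap placements in a fixed order: an unscaled base (narrow oops are plain 32-bit addresses), a zero base (oops need only the shift), and finally an arbitrary base with full encode/decode. A standalone sketch of that fallback order, assuming the usual 8-byte-alignment shift for the 4 GB/32 GB limits; reserve_at() is a stand-in for ReservedHeapSpace, not HotSpot's API:

#include <cstdint>
#include <cstdio>

// Stand-in for ReservedHeapSpace: pretend every nonzero base below
// 'busy_below' is already taken, e.g. by the launcher's own mappings.
static bool reserve_at(uint64_t base, uint64_t busy_below) {
  return base == 0 || base >= busy_below;
}

int main() {
  const uint64_t GB = 1ull << 30;
  const uint64_t size = 2 * GB;        // requested heap size
  const uint64_t busy_below = 8 * GB;  // low addresses unavailable

  // Same order as allocate(): unscaled, zero-based, heap-based.
  struct { const char* name; uint64_t base; } modes[] = {
    { "unscaled (heap ends below 4 GB)",    4 * GB - size },
    { "zero-based (heap ends below 32 GB)", 32 * GB - size },
    { "heap-based (any address)",           0 },
  };
  for (const auto& m : modes) {
    if (reserve_at(m.base, busy_below)) {
      printf("reserved %s at base 0x%llx\n", m.name,
             (unsigned long long)m.base);
      return 0;
    }
    printf("%s base busy, falling back\n", m.name);
  }
  return 1;
}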
296 296
297 297 void GenCollectedHeap::post_initialize() {
298 298 SharedHeap::post_initialize();
299 299 TwoGenerationCollectorPolicy *policy =
300 300 (TwoGenerationCollectorPolicy *)collector_policy();
301 301 guarantee(policy->is_two_generation_policy(), "Illegal policy type");
302 302 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
303 303 assert(def_new_gen->kind() == Generation::DefNew ||
304 304 def_new_gen->kind() == Generation::ParNew ||
305 305 def_new_gen->kind() == Generation::ASParNew,
306 306 "Wrong generation kind");
307 307
308 308 Generation* old_gen = get_gen(1);
309 309 assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
310 310 old_gen->kind() == Generation::ASConcurrentMarkSweep ||
311 311 old_gen->kind() == Generation::MarkSweepCompact,
312 312 "Wrong generation kind");
313 313
314 314 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
315 315 old_gen->capacity(),
316 316 def_new_gen->from()->capacity());
317 317 policy->initialize_gc_policy_counters();
318 318 }
319 319
320 320 void GenCollectedHeap::ref_processing_init() {
321 321 SharedHeap::ref_processing_init();
322 322 for (int i = 0; i < _n_gens; i++) {
323 323 _gens[i]->ref_processor_init();
324 324 }
325 325 }
326 326
327 327 size_t GenCollectedHeap::capacity() const {
328 328 size_t res = 0;
329 329 for (int i = 0; i < _n_gens; i++) {
330 330 res += _gens[i]->capacity();
331 331 }
332 332 return res;
333 333 }
334 334
335 335 size_t GenCollectedHeap::used() const {
336 336 size_t res = 0;
337 337 for (int i = 0; i < _n_gens; i++) {
338 338 res += _gens[i]->used();
339 339 }
340 340 return res;
341 341 }
342 342
343 343 // Save the "used_region" for generations at level and lower,
344 344 // and, if perm is true, for perm gen.
345 345 void GenCollectedHeap::save_used_regions(int level, bool perm) {
346 346 assert(level < _n_gens, "Illegal level parameter");
347 347 for (int i = level; i >= 0; i--) {
348 348 _gens[i]->save_used_region();
349 349 }
350 350 if (perm) {
351 351 perm_gen()->save_used_region();
352 352 }
353 353 }
354 354
355 355 size_t GenCollectedHeap::max_capacity() const {
356 356 size_t res = 0;
357 357 for (int i = 0; i < _n_gens; i++) {
358 358 res += _gens[i]->max_capacity();
359 359 }
360 360 return res;
361 361 }
362 362
363 363 // Update the _full_collections_completed counter
364 364 // at the end of a stop-world full GC.
365 365 unsigned int GenCollectedHeap::update_full_collections_completed() {
366 366 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
367 367 assert(_full_collections_completed <= _total_full_collections,
368 368 "Can't complete more collections than were started");
369 369 _full_collections_completed = _total_full_collections;
370 370 ml.notify_all();
371 371 return _full_collections_completed;
372 372 }
373 373
374 374 // Update the _full_collections_completed counter, as appropriate,
375 375 // at the end of a concurrent GC cycle. Note the conditional update
376 376 // below to allow this method to be called by a concurrent collector
377 377 // without synchronizing in any manner with the VM thread (which
378 378 // may already have initiated a STW full collection "concurrently").
379 379 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
380 380 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
381 381 assert((_full_collections_completed <= _total_full_collections) &&
382 382 (count <= _total_full_collections),
383 383 "Can't complete more collections than were started");
384 384 if (count > _full_collections_completed) {
385 385 _full_collections_completed = count;
386 386 ml.notify_all();
387 387 }
388 388 return _full_collections_completed;
389 389 }
390 390
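The two update_full_collections_completed() overloads above implement a monitor-guarded counter that only moves forward, which is what lets a concurrent collector publish a possibly stale count without synchronizing with the VM thread. A portable sketch of the same pattern, using std::condition_variable rather than HotSpot's MonitorLockerEx:

#include <condition_variable>
#include <mutex>

class CompletedCounter {
  std::mutex _lock;
  std::condition_variable _cv;
  unsigned _completed = 0;
public:
  // Like update_full_collections_completed(count): the counter never
  // goes backwards, so a late caller holding a stale count is a no-op.
  unsigned publish(unsigned count) {
    std::lock_guard<std::mutex> g(_lock);
    if (count > _completed) {
      _completed = count;
      _cv.notify_all();   // wake anyone waiting on the full-GC count
    }
    return _completed;
  }
  // Block until at least 'target' full collections have completed.
  void wait_for(unsigned target) {
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [&] { return _completed >= target; });
  }
};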
391 391
392 392 #ifndef PRODUCT
393 393 // Override of memory state checking method in CollectedHeap:
394 394 // Some collectors (CMS for example) can't have badHeapWordVal written
395 395 // in the first two words of an object. (For instance, in the case of
396 396 // CMS these words hold state used to synchronize between certain
397 397 // (concurrent) GC steps and direct allocating mutators.)
398 398 // The skip_header_HeapWords() method below allows us to skip
399 399 // over the requisite number of HeapWords. Note that (for
400 400 // generational collectors) this means that those many words are
401 401 // skipped in each object, irrespective of the generation in which
402 402 // that object lives. The resultant loss of precision seems to be
403 403 // harmless and the pain of avoiding that imprecision appears somewhat
404 404 // higher than we are prepared to pay for such rudimentary debugging
405 405 // support.
406 406 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
407 407 size_t size) {
408 408 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
409 409 // We are asked to check a size in HeapWords,
410 410 // but the memory is mangled in juint words.
411 411 juint* start = (juint*) (addr + skip_header_HeapWords());
412 412 juint* end = (juint*) (addr + size);
413 413 for (juint* slot = start; slot < end; slot += 1) {
414 414 assert(*slot == badHeapWordVal,
415 415 "Found non-badHeapWordVal in pre-allocation check");
416 416 }
417 417 }
418 418 }
419 419 #endif
420 420
421 421 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
422 422 bool is_tlab,
423 423 bool first_only) {
424 424 HeapWord* res;
425 425 for (int i = 0; i < _n_gens; i++) {
426 426 if (_gens[i]->should_allocate(size, is_tlab)) {
427 427 res = _gens[i]->allocate(size, is_tlab);
428 428 if (res != NULL) return res;
429 429 else if (first_only) break;
430 430 }
431 431 }
432 432 // Otherwise...
433 433 return NULL;
434 434 }
435 435
436 436 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
437 437 bool* gc_overhead_limit_was_exceeded) {
438 438 return collector_policy()->mem_allocate_work(size,
439 439 false /* is_tlab */,
440 440 gc_overhead_limit_was_exceeded);
441 441 }
442 442
443 443 bool GenCollectedHeap::must_clear_all_soft_refs() {
444 444 return _gc_cause == GCCause::_last_ditch_collection;
445 445 }
446 446
447 447 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
448 448 return UseConcMarkSweepGC &&
449 449 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
450 450 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
451 451 }
452 452
453 453 void GenCollectedHeap::do_collection(bool full,
454 454 bool clear_all_soft_refs,
455 455 size_t size,
456 456 bool is_tlab,
457 457 int max_level) {
458 458 bool prepared_for_verification = false;
459 459 ResourceMark rm;
460 460 DEBUG_ONLY(Thread* my_thread = Thread::current();)
461 461
462 462 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
463 463 assert(my_thread->is_VM_thread() ||
464 464 my_thread->is_ConcurrentGC_thread(),
465 465 "incorrect thread type capability");
466 466 assert(Heap_lock->is_locked(),
467 467 "the requesting thread should have the Heap_lock");
468 468 guarantee(!is_gc_active(), "collection is not reentrant");
469 469 assert(max_level < n_gens(), "sanity check");
470 470
471 471 if (GC_locker::check_active_before_gc()) {
472 472 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
473 473 }
474 474
475 475 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
476 476 collector_policy()->should_clear_all_soft_refs();
477 477
478 478 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
479 479
480 480 const size_t perm_prev_used = perm_gen()->used();
481 481
482 482 if (PrintHeapAtGC) {
483 483 Universe::print_heap_before_gc();
484 484 if (Verbose) {
485 485 gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
486 486 }
487 487 }
488 488
489 489 {
490 490 FlagSetting fl(_is_gc_active, true);
491 491
492 492 bool complete = full && (max_level == (n_gens()-1));
493 493 const char* gc_cause_str = "GC ";
494 494 if (complete) {
495 495 GCCause::Cause cause = gc_cause();
496 496 if (cause == GCCause::_java_lang_system_gc) {
497 497 gc_cause_str = "Full GC (System) ";
498 498 } else {
499 499 gc_cause_str = "Full GC ";
500 500 }
501 501 }
502 502 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
503 503 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
504 504 TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);
505 505
506 506 gc_prologue(complete);
507 507 increment_total_collections(complete);
508 508
509 509 size_t gch_prev_used = used();
510 510
511 511 int starting_level = 0;
512 512 if (full) {
513 513 // Search for the oldest generation which will collect all younger
514 514 // generations, and start collection loop there.
515 515 for (int i = max_level; i >= 0; i--) {
516 516 if (_gens[i]->full_collects_younger_generations()) {
517 517 starting_level = i;
518 518 break;
519 519 }
520 520 }
521 521 }
522 522
523 523 bool must_restore_marks_for_biased_locking = false;
524 524
525 525 int max_level_collected = starting_level;
526 526 for (int i = starting_level; i <= max_level; i++) {
527 527 if (_gens[i]->should_collect(full, size, is_tlab)) {
528 528 if (i == n_gens() - 1) { // a major collection is to happen
529 529 if (!complete) {
530 530 // The full_collections increment was missed above.
531 531 increment_total_full_collections();
532 532 }
533 533 pre_full_gc_dump(); // do any pre full gc dumps
534 534 }
535 535 // Timer for individual generations. Last argument is false: no CR
536 536 TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
537 537 TraceCollectorStats tcs(_gens[i]->counters());
538 538 TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
539 539
540 540 size_t prev_used = _gens[i]->used();
541 541 _gens[i]->stat_record()->invocations++;
542 542 _gens[i]->stat_record()->accumulated_time.start();
543 543
544 544 // Must be done anew before each collection because
545 545 // a previous collection will do mangling and will
546 546 // change top of some spaces.
547 547 record_gen_tops_before_GC();
548 548
549 549 if (PrintGC && Verbose) {
550 550 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
551 551 i,
552 552 _gens[i]->stat_record()->invocations,
553 553 size*HeapWordSize);
554 554 }
555 555
556 556 if (VerifyBeforeGC && i >= VerifyGCLevel &&
557 557 total_collections() >= VerifyGCStartAt) {
558 558 HandleMark hm; // Discard invalid handles created during verification
559 559 if (!prepared_for_verification) {
560 560 prepare_for_verify();
561 561 prepared_for_verification = true;
562 562 }
563 563 gclog_or_tty->print(" VerifyBeforeGC:");
564 564 Universe::verify(true);
565 565 }
566 566 COMPILER2_PRESENT(DerivedPointerTable::clear());
567 567
568 568 if (!must_restore_marks_for_biased_locking &&
569 569 _gens[i]->performs_in_place_marking()) {
570 570 // We perform this mark word preservation work lazily
571 571 // because it's only at this point that we know whether we
572 572 // absolutely have to do it; we want to avoid doing it for
573 573 // scavenge-only collections where it's unnecessary
574 574 must_restore_marks_for_biased_locking = true;
575 575 BiasedLocking::preserve_marks();
576 576 }
577 577
578 578 // Do collection work
579 579 {
580 580 // Note on ref discovery: For what appear to be historical reasons,
581 581 // GCH enables and disables (by enqueueing) refs discovery.
582 582 // In the future this should be moved into the generation's
583 583 // collect method so that ref discovery and enqueueing concerns
584 584 // are local to a generation. The collect method could return
585 585 // an appropriate indication in the case that notification on
586 586 // the ref lock was needed. This will make the treatment of
587 587 // weak refs more uniform (and indeed remove such concerns
588 588 // from GCH). XXX
589 589
590 590 HandleMark hm; // Discard invalid handles created during gc
591 591 save_marks(); // save marks for all gens
592 592 // We want to discover references, but not process them yet.
593 593 // This mode is disabled in process_discovered_references if the
594 594 // generation does some collection work, or in
595 595 // enqueue_discovered_references if the generation returns
596 596 // without doing any work.
597 597 ReferenceProcessor* rp = _gens[i]->ref_processor();
598 598 // If the discovery of ("weak") refs in this generation is
599 599 // atomic wrt other collectors in this configuration, we
600 600 // are guaranteed to have empty discovered ref lists.
601 601 if (rp->discovery_is_atomic()) {
602 - rp->verify_no_references_recorded();
603 - rp->enable_discovery();
602 + rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
604 603 rp->setup_policy(do_clear_all_soft_refs);
605 604 } else {
606 605 // collect() below will enable discovery as appropriate
607 606 }
608 607 _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
609 608 if (!rp->enqueuing_is_done()) {
610 609 rp->enqueue_discovered_references();
611 610 } else {
612 611 rp->set_enqueuing_is_done(false);
613 612 }
614 613 rp->verify_no_references_recorded();
615 614 }
616 615 max_level_collected = i;
617 616
618 617 // Determine if allocation request was met.
619 618 if (size > 0) {
620 619 if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
621 620 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
622 621 size = 0;
623 622 }
624 623 }
625 624 }
626 625
627 626 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
628 627
629 628 _gens[i]->stat_record()->accumulated_time.stop();
630 629
631 630 update_gc_stats(i, full);
632 631
633 632 if (VerifyAfterGC && i >= VerifyGCLevel &&
634 633 total_collections() >= VerifyGCStartAt) {
635 634 HandleMark hm; // Discard invalid handles created during verification
636 635 gclog_or_tty->print(" VerifyAfterGC:");
637 636 Universe::verify(false);
638 637 }
639 638
640 639 if (PrintGCDetails) {
641 640 gclog_or_tty->print(":");
642 641 _gens[i]->print_heap_change(prev_used);
643 642 }
644 643 }
645 644 }
646 645
647 646 // Update "complete" boolean wrt what actually transpired --
648 647 // for instance, a promotion failure could have led to
649 648 // a whole heap collection.
650 649 complete = complete || (max_level_collected == n_gens() - 1);
651 650
652 651 if (complete) { // We did a "major" collection
653 652 post_full_gc_dump(); // do any post full gc dumps
654 653 }
655 654
656 655 if (PrintGCDetails) {
657 656 print_heap_change(gch_prev_used);
658 657
659 658 // Print perm gen info for full GC with PrintGCDetails flag.
660 659 if (complete) {
661 660 print_perm_heap_change(perm_prev_used);
662 661 }
663 662 }
664 663
665 664 for (int j = max_level_collected; j >= 0; j -= 1) {
666 665 // Adjust generation sizes.
667 666 _gens[j]->compute_new_size();
668 667 }
669 668
670 669 if (complete) {
671 670 // Ask the permanent generation to adjust size for full collections
672 671 perm()->compute_new_size();
673 672 update_full_collections_completed();
674 673 }
675 674
676 675 // Track memory usage and detect low memory after GC finishes
677 676 MemoryService::track_memory_usage();
678 677
679 678 gc_epilogue(complete);
680 679
681 680 if (must_restore_marks_for_biased_locking) {
682 681 BiasedLocking::restore_marks();
683 682 }
684 683 }
685 684
686 685 AdaptiveSizePolicy* sp = gen_policy()->size_policy();
687 686 AdaptiveSizePolicyOutput(sp, total_collections());
688 687
689 688 if (PrintHeapAtGC) {
690 689 Universe::print_heap_after_gc();
691 690 }
692 691
693 692 #ifdef TRACESPINNING
694 693 ParallelTaskTerminator::print_termination_counts();
695 694 #endif
696 695
697 696 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
698 697 tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
699 698 vm_exit(-1);
700 699 }
701 700 }
702 701
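The change this webrev makes to do_collection() is the hunk above that folds rp->verify_no_references_recorded() into rp->enable_discovery(), which now takes two verification flags. A sketch of what such a combined entry point could look like; only the signature and flag names come from the call site, the body is an assumption rather than the actual ReferenceProcessor change:

#include <cassert>

// Hypothetical stand-in for ReferenceProcessor.
class RefProcSketch {
  bool _discovering_refs = false;
  int  _num_discovered   = 0;   // stand-in for the discovered ref lists

  void verify_no_references_recorded() const {
    assert(_num_discovered == 0 && "discovered lists should be empty");
  }
public:
  void enable_discovery(bool verify_disabled, bool verify_no_refs) {
    if (verify_disabled) {
      assert(!_discovering_refs && "discovery should currently be off");
    }
    if (verify_no_refs) {
      verify_no_references_recorded();
    }
    _discovering_refs = true;
  }
};

Making the checks optional would let call sites where discovery is not atomic (and the discovered lists may legitimately be non-empty) enable discovery without tripping the verification.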
703 702 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
704 703 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
705 704 }
706 705
707 706 void GenCollectedHeap::set_par_threads(int t) {
708 707 SharedHeap::set_par_threads(t);
709 708 _gen_process_strong_tasks->set_n_threads(t);
710 709 }
711 710
712 711 void GenCollectedHeap::
713 712 gen_process_strong_roots(int level,
714 713 bool younger_gens_as_roots,
715 714 bool activate_scope,
716 715 bool collecting_perm_gen,
717 716 SharedHeap::ScanningOption so,
718 717 OopsInGenClosure* not_older_gens,
719 718 bool do_code_roots,
720 719 OopsInGenClosure* older_gens) {
721 720 // General strong roots.
722 721
723 722 if (!do_code_roots) {
724 723 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
725 724 not_older_gens, NULL, older_gens);
726 725 } else {
727 726 bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
728 727 CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
729 728 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
730 729 not_older_gens, &code_roots, older_gens);
731 730 }
732 731
733 732 if (younger_gens_as_roots) {
734 733 if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
735 734 for (int i = 0; i < level; i++) {
736 735 not_older_gens->set_generation(_gens[i]);
737 736 _gens[i]->oop_iterate(not_older_gens);
738 737 }
739 738 not_older_gens->reset_generation();
740 739 }
741 740 }
742 741 // When collection is parallel, all threads get to cooperate to do
743 742 // older-gen scanning.
744 743 for (int i = level+1; i < _n_gens; i++) {
745 744 older_gens->set_generation(_gens[i]);
746 745 rem_set()->younger_refs_iterate(_gens[i], older_gens);
747 746 older_gens->reset_generation();
748 747 }
749 748
750 749 _gen_process_strong_tasks->all_tasks_completed();
751 750 }
752 751
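gen_process_strong_roots() above relies on the is_task_claimed()/all_tasks_completed() protocol so that, when collection is parallel, exactly one worker scans the younger-gen roots while every worker helps with the older-gen scanning. A minimal sketch of the assumed claim-once semantics using std::atomic (not the HotSpot SubTasksDone class):

#include <atomic>

class SubTasksSketch {
  enum { NumTasks = 4 };   // e.g. GCH_PS_NumElements
  std::atomic<bool> _claimed[NumTasks];
public:
  SubTasksSketch() {
    for (auto& c : _claimed) c.store(false, std::memory_order_relaxed);
  }
  // exchange() returns the previous value, so exactly one caller per
  // task observes 'false' -- that caller does the work, the rest skip.
  bool is_task_claimed(int t) {
    return _claimed[t].exchange(true, std::memory_order_acq_rel);
  }
};

// Worker usage, mirroring the code above:
//   if (!tasks.is_task_claimed(GCH_PS_younger_gens)) { /* scan young roots */ }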
753 752 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
754 753 CodeBlobClosure* code_roots,
755 754 OopClosure* non_root_closure) {
756 755 SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
757 756 // "Local" "weak" refs
758 757 for (int i = 0; i < _n_gens; i++) {
759 758 _gens[i]->ref_processor()->weak_oops_do(root_closure);
760 759 }
761 760 }
762 761
763 762 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
764 763 void GenCollectedHeap:: \
765 764 oop_since_save_marks_iterate(int level, \
766 765 OopClosureType* cur, \
767 766 OopClosureType* older) { \
768 767 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \
769 768 for (int i = level+1; i < n_gens(); i++) { \
770 769 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \
771 770 } \
772 771 perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \
773 772 }
774 773
775 774 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
776 775
777 776 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
778 777
779 778 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
780 779 for (int i = level; i < _n_gens; i++) {
781 780 if (!_gens[i]->no_allocs_since_save_marks()) return false;
782 781 }
783 782 return perm_gen()->no_allocs_since_save_marks();
784 783 }
785 784
786 785 bool GenCollectedHeap::supports_inline_contig_alloc() const {
787 786 return _gens[0]->supports_inline_contig_alloc();
788 787 }
789 788
790 789 HeapWord** GenCollectedHeap::top_addr() const {
791 790 return _gens[0]->top_addr();
792 791 }
793 792
794 793 HeapWord** GenCollectedHeap::end_addr() const {
795 794 return _gens[0]->end_addr();
796 795 }
797 796
798 797 size_t GenCollectedHeap::unsafe_max_alloc() {
799 798 return _gens[0]->unsafe_max_alloc_nogc();
800 799 }
801 800
802 801 // public collection interfaces
803 802
804 803 void GenCollectedHeap::collect(GCCause::Cause cause) {
805 804 if (should_do_concurrent_full_gc(cause)) {
806 805 #ifndef SERIALGC
807 806 // mostly concurrent full collection
808 807 collect_mostly_concurrent(cause);
809 808 #else // SERIALGC
810 809 ShouldNotReachHere();
811 810 #endif // SERIALGC
812 811 } else {
813 812 #ifdef ASSERT
814 813 if (cause == GCCause::_scavenge_alot) {
815 814 // minor collection only
816 815 collect(cause, 0);
817 816 } else {
818 817 // Stop-the-world full collection
819 818 collect(cause, n_gens() - 1);
820 819 }
821 820 #else
822 821 // Stop-the-world full collection
823 822 collect(cause, n_gens() - 1);
824 823 #endif
825 824 }
826 825 }
827 826
828 827 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
829 828 // The caller doesn't have the Heap_lock
830 829 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
831 830 MutexLocker ml(Heap_lock);
832 831 collect_locked(cause, max_level);
833 832 }
834 833
835 834 // This interface assumes that it's being called by the
836 835 // vm thread. It collects the heap assuming that the
837 836 // heap lock is already held and that we are executing in
838 837 // the context of the vm thread.
839 838 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
840 839 assert(Thread::current()->is_VM_thread(), "Precondition#1");
841 840 assert(Heap_lock->is_locked(), "Precondition#2");
842 841 GCCauseSetter gcs(this, cause);
843 842 switch (cause) {
844 843 case GCCause::_heap_inspection:
845 844 case GCCause::_heap_dump: {
846 845 HandleMark hm;
847 846 do_full_collection(false, // don't clear all soft refs
848 847 n_gens() - 1);
849 848 break;
850 849 }
851 850 default: // XXX FIX ME
852 851 ShouldNotReachHere(); // Unexpected use of this function
853 852 }
854 853 }
855 854
856 855 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
857 856 // The caller has the Heap_lock
858 857 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
859 858 collect_locked(cause, n_gens() - 1);
860 859 }
861 860
862 861 // this is the private collection interface
863 862 // The Heap_lock is expected to be held on entry.
864 863
865 864 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
866 865 if (_preloading_shared_classes) {
867 866 report_out_of_shared_space(SharedPermGen);
868 867 }
869 868 // Read the GC count while holding the Heap_lock
870 869 unsigned int gc_count_before = total_collections();
871 870 unsigned int full_gc_count_before = total_full_collections();
872 871 {
873 872 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
874 873 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
875 874 cause, max_level);
876 875 VMThread::execute(&op);
877 876 }
878 877 }
879 878
880 879 #ifndef SERIALGC
881 880 bool GenCollectedHeap::create_cms_collector() {
882 881
883 882 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
884 883 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
885 884 _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
886 885 "Unexpected generation kinds");
887 886 // Skip two header words in the block content verification
888 887 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
889 888 CMSCollector* collector = new CMSCollector(
890 889 (ConcurrentMarkSweepGeneration*)_gens[1],
891 890 (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
892 891 _rem_set->as_CardTableRS(),
893 892 (ConcurrentMarkSweepPolicy*) collector_policy());
894 893
895 894 if (collector == NULL || !collector->completed_initialization()) {
896 895 if (collector) {
897 896 delete collector; // Be nice in embedded situation
898 897 }
899 898 vm_shutdown_during_initialization("Could not create CMS collector");
900 899 return false;
901 900 }
902 901 return true; // success
903 902 }
904 903
905 904 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
906 905 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
907 906
908 907 MutexLocker ml(Heap_lock);
909 908 // Read the GC counts while holding the Heap_lock
910 909 unsigned int full_gc_count_before = total_full_collections();
911 910 unsigned int gc_count_before = total_collections();
912 911 {
913 912 MutexUnlocker mu(Heap_lock);
914 913 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
915 914 VMThread::execute(&op);
916 915 }
917 916 }
918 917 #endif // SERIALGC
919 918
920 919
921 920 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
922 921 int max_level) {
923 922 int local_max_level;
924 923 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
925 924 gc_cause() == GCCause::_gc_locker) {
926 925 local_max_level = 0;
927 926 } else {
928 927 local_max_level = max_level;
929 928 }
930 929
931 930 do_collection(true /* full */,
932 931 clear_all_soft_refs /* clear_all_soft_refs */,
933 932 0 /* size */,
934 933 false /* is_tlab */,
935 934 local_max_level /* max_level */);
936 935 // Hack XXX FIX ME !!!
937 936 // A scavenge may not have been attempted, or may have
938 937 // been attempted and failed, because the old gen was too full
939 938 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
940 939 incremental_collection_will_fail(false /* don't consult_young */)) {
941 940 if (PrintGCDetails) {
942 941 gclog_or_tty->print_cr("GC locker: Trying a full collection "
943 942 "because scavenge failed");
944 943 }
945 944 // This time allow the old gen to be collected as well
946 945 do_collection(true /* full */,
947 946 clear_all_soft_refs /* clear_all_soft_refs */,
948 947 0 /* size */,
949 948 false /* is_tlab */,
950 949 n_gens() - 1 /* max_level */);
951 950 }
952 951 }
953 952
954 953 bool GenCollectedHeap::is_in_young(oop p) {
955 954 bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
956 955 assert(result == _gens[0]->is_in_reserved(p),
957 956 err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
958 957 return result;
959 958 }
960 959
961 960 // Returns "TRUE" iff "p" points into the allocated area of the heap.
962 961 bool GenCollectedHeap::is_in(const void* p) const {
963 962 #ifndef ASSERT
964 963 guarantee(VerifyBeforeGC ||
965 964 VerifyDuringGC ||
966 965 VerifyBeforeExit ||
967 966 PrintAssembly ||
968 967 tty->count() != 0 || // already printing
969 968 VerifyAfterGC ||
970 969 VMError::fatal_error_in_progress(), "too expensive");
971 970
972 971 #endif
973 972 // This might be sped up with a cache of the last generation that
974 973 // answered yes.
975 974 for (int i = 0; i < _n_gens; i++) {
976 975 if (_gens[i]->is_in(p)) return true;
977 976 }
978 977 if (_perm_gen->as_gen()->is_in(p)) return true;
979 978 // Otherwise...
980 979 return false;
981 980 }
982 981
983 982 #ifdef ASSERT
984 983 // Don't implement this by using is_in_young(). This method is used
985 984 // in some cases to check that is_in_young() is correct.
986 985 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
987 986 assert(is_in_reserved(p) || p == NULL,
988 987 "Does not work if address is non-null and outside of the heap");
989 988 // The order of the generations is young (low addr), old, perm (high addr)
990 989 return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
991 990 }
992 991 #endif
993 992
994 993 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
995 994 for (int i = 0; i < _n_gens; i++) {
996 995 _gens[i]->oop_iterate(cl);
997 996 }
998 997 }
999 998
1000 999 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
1001 1000 for (int i = 0; i < _n_gens; i++) {
1002 1001 _gens[i]->oop_iterate(mr, cl);
1003 1002 }
1004 1003 }
1005 1004
1006 1005 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
1007 1006 for (int i = 0; i < _n_gens; i++) {
1008 1007 _gens[i]->object_iterate(cl);
1009 1008 }
1010 1009 perm_gen()->object_iterate(cl);
1011 1010 }
1012 1011
1013 1012 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
1014 1013 for (int i = 0; i < _n_gens; i++) {
1015 1014 _gens[i]->safe_object_iterate(cl);
1016 1015 }
1017 1016 perm_gen()->safe_object_iterate(cl);
1018 1017 }
1019 1018
1020 1019 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
1021 1020 for (int i = 0; i < _n_gens; i++) {
1022 1021 _gens[i]->object_iterate_since_last_GC(cl);
1023 1022 }
1024 1023 }
1025 1024
1026 1025 Space* GenCollectedHeap::space_containing(const void* addr) const {
1027 1026 for (int i = 0; i < _n_gens; i++) {
1028 1027 Space* res = _gens[i]->space_containing(addr);
1029 1028 if (res != NULL) return res;
1030 1029 }
1031 1030 Space* res = perm_gen()->space_containing(addr);
1032 1031 if (res != NULL) return res;
1033 1032 // Otherwise...
1034 1033 assert(false, "Could not find containing space");
1035 1034 return NULL;
1036 1035 }
1037 1036
1038 1037
1039 1038 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
1040 1039 assert(is_in_reserved(addr), "block_start of address outside of heap");
1041 1040 for (int i = 0; i < _n_gens; i++) {
1042 1041 if (_gens[i]->is_in_reserved(addr)) {
1043 1042 assert(_gens[i]->is_in(addr),
1044 1043 "addr should be in allocated part of generation");
1045 1044 return _gens[i]->block_start(addr);
1046 1045 }
1047 1046 }
1048 1047 if (perm_gen()->is_in_reserved(addr)) {
1049 1048 assert(perm_gen()->is_in(addr),
1050 1049 "addr should be in allocated part of perm gen");
1051 1050 return perm_gen()->block_start(addr);
1052 1051 }
1053 1052 assert(false, "Some generation should contain the address");
1054 1053 return NULL;
1055 1054 }
1056 1055
1057 1056 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
1058 1057 assert(is_in_reserved(addr), "block_size of address outside of heap");
1059 1058 for (int i = 0; i < _n_gens; i++) {
1060 1059 if (_gens[i]->is_in_reserved(addr)) {
1061 1060 assert(_gens[i]->is_in(addr),
1062 1061 "addr should be in allocated part of generation");
1063 1062 return _gens[i]->block_size(addr);
1064 1063 }
1065 1064 }
1066 1065 if (perm_gen()->is_in_reserved(addr)) {
1067 1066 assert(perm_gen()->is_in(addr),
1068 1067 "addr should be in allocated part of perm gen");
1069 1068 return perm_gen()->block_size(addr);
1070 1069 }
1071 1070 assert(false, "Some generation should contain the address");
1072 1071 return 0;
1073 1072 }
1074 1073
1075 1074 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
1076 1075 assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
1077 1076 assert(block_start(addr) == addr, "addr must be a block start");
1078 1077 for (int i = 0; i < _n_gens; i++) {
1079 1078 if (_gens[i]->is_in_reserved(addr)) {
1080 1079 return _gens[i]->block_is_obj(addr);
1081 1080 }
1082 1081 }
1083 1082 if (perm_gen()->is_in_reserved(addr)) {
1084 1083 return perm_gen()->block_is_obj(addr);
1085 1084 }
1086 1085 assert(false, "Some generation should contain the address");
1087 1086 return false;
1088 1087 }
1089 1088
1090 1089 bool GenCollectedHeap::supports_tlab_allocation() const {
1091 1090 for (int i = 0; i < _n_gens; i += 1) {
1092 1091 if (_gens[i]->supports_tlab_allocation()) {
1093 1092 return true;
1094 1093 }
1095 1094 }
1096 1095 return false;
1097 1096 }
1098 1097
1099 1098 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
1100 1099 size_t result = 0;
1101 1100 for (int i = 0; i < _n_gens; i += 1) {
1102 1101 if (_gens[i]->supports_tlab_allocation()) {
1103 1102 result += _gens[i]->tlab_capacity();
1104 1103 }
1105 1104 }
1106 1105 return result;
1107 1106 }
1108 1107
1109 1108 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
1110 1109 size_t result = 0;
1111 1110 for (int i = 0; i < _n_gens; i += 1) {
1112 1111 if (_gens[i]->supports_tlab_allocation()) {
1113 1112 result += _gens[i]->unsafe_max_tlab_alloc();
1114 1113 }
1115 1114 }
1116 1115 return result;
1117 1116 }
1118 1117
1119 1118 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
1120 1119 bool gc_overhead_limit_was_exceeded;
1121 1120 return collector_policy()->mem_allocate_work(size /* size */,
1122 1121 true /* is_tlab */,
1123 1122 &gc_overhead_limit_was_exceeded);
1124 1123 }
1125 1124
1126 1125 // Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of minimal size
1127 1126 // from the list headed by "*prev_ptr".
1128 1127 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
1129 1128 bool first = true;
1130 1129 size_t min_size = 0; // "first" makes this conceptually infinite.
1131 1130 ScratchBlock **smallest_ptr, *smallest;
1132 1131 ScratchBlock *cur = *prev_ptr;
1133 1132 while (cur) {
1134 1133 assert(*prev_ptr == cur, "just checking");
1135 1134 if (first || cur->num_words < min_size) {
1136 1135 smallest_ptr = prev_ptr;
1137 1136 smallest = cur;
1138 1137 min_size = smallest->num_words;
1139 1138 first = false;
1140 1139 }
1141 1140 prev_ptr = &cur->next;
1142 1141 cur = cur->next;
1143 1142 }
1144 1143 smallest = *smallest_ptr;
1145 1144 *smallest_ptr = smallest->next;
1146 1145 return smallest;
1147 1146 }
1148 1147
1149 1148 // Sort the scratch block list headed by "list" into decreasing size order,
1150 1149 // and set "list" to the result.
1151 1150 static void sort_scratch_list(ScratchBlock*& list) {
1152 1151 ScratchBlock* sorted = NULL;
1153 1152 ScratchBlock* unsorted = list;
1154 1153 while (unsorted) {
1155 1154 ScratchBlock *smallest = removeSmallestScratch(&unsorted);
1156 1155 smallest->next = sorted;
1157 1156 sorted = smallest;
1158 1157 }
1159 1158 list = sorted;
1160 1159 }
1161 1160
1162 1161 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
1163 1162 size_t max_alloc_words) {
1164 1163 ScratchBlock* res = NULL;
1165 1164 for (int i = 0; i < _n_gens; i++) {
1166 1165 _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
1167 1166 }
1168 1167 sort_scratch_list(res);
1169 1168 return res;
1170 1169 }
1171 1170
1172 1171 void GenCollectedHeap::release_scratch() {
1173 1172 for (int i = 0; i < _n_gens; i++) {
1174 1173 _gens[i]->reset_scratch();
1175 1174 }
1176 1175 }
1177 1176
1178 1177 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1179 1178 void do_generation(Generation* gen) {
1180 1179 gen->prepare_for_verify();
1181 1180 }
1182 1181 };
1183 1182
1184 1183 void GenCollectedHeap::prepare_for_verify() {
1185 1184 ensure_parsability(false); // no need to retire TLABs
1186 1185 GenPrepareForVerifyClosure blk;
1187 1186 generation_iterate(&blk, false);
1188 1187 perm_gen()->prepare_for_verify();
1189 1188 }
1190 1189
1191 1190
1192 1191 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1193 1192 bool old_to_young) {
1194 1193 if (old_to_young) {
1195 1194 for (int i = _n_gens-1; i >= 0; i--) {
1196 1195 cl->do_generation(_gens[i]);
1197 1196 }
1198 1197 } else {
1199 1198 for (int i = 0; i < _n_gens; i++) {
1200 1199 cl->do_generation(_gens[i]);
1201 1200 }
1202 1201 }
1203 1202 }
1204 1203
1205 1204 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1206 1205 for (int i = 0; i < _n_gens; i++) {
1207 1206 _gens[i]->space_iterate(cl, true);
1208 1207 }
1209 1208 perm_gen()->space_iterate(cl, true);
1210 1209 }
1211 1210
1212 1211 bool GenCollectedHeap::is_maximal_no_gc() const {
1213 1212 for (int i = 0; i < _n_gens; i++) { // skip perm gen
1214 1213 if (!_gens[i]->is_maximal_no_gc()) {
1215 1214 return false;
1216 1215 }
1217 1216 }
1218 1217 return true;
1219 1218 }
1220 1219
1221 1220 void GenCollectedHeap::save_marks() {
1222 1221 for (int i = 0; i < _n_gens; i++) {
1223 1222 _gens[i]->save_marks();
1224 1223 }
1225 1224 perm_gen()->save_marks();
1226 1225 }
1227 1226
1228 1227 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
1229 1228 for (int i = 0; i <= collectedGen; i++) {
1230 1229 _gens[i]->compute_new_size();
1231 1230 }
1232 1231 }
1233 1232
1234 1233 GenCollectedHeap* GenCollectedHeap::heap() {
1235 1234 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1236 1235 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1237 1236 return _gch;
1238 1237 }
1239 1238
1240 1239
1241 1240 void GenCollectedHeap::prepare_for_compaction() {
1242 1241 Generation* scanning_gen = _gens[_n_gens-1];
1243 1242 // Start by compacting into same gen.
1244 1243 CompactPoint cp(scanning_gen, NULL, NULL);
1245 1244 while (scanning_gen != NULL) {
1246 1245 scanning_gen->prepare_for_compaction(&cp);
1247 1246 scanning_gen = prev_gen(scanning_gen);
1248 1247 }
1249 1248 }
1250 1249
1251 1250 GCStats* GenCollectedHeap::gc_stats(int level) const {
1252 1251 return _gens[level]->gc_stats();
1253 1252 }
1254 1253
1255 1254 void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
1256 1255 if (!silent) {
1257 1256 gclog_or_tty->print("permgen ");
1258 1257 }
1259 1258 perm_gen()->verify(allow_dirty);
1260 1259 for (int i = _n_gens-1; i >= 0; i--) {
1261 1260 Generation* g = _gens[i];
1262 1261 if (!silent) {
1263 1262 gclog_or_tty->print(g->name());
1264 1263 gclog_or_tty->print(" ");
1265 1264 }
1266 1265 g->verify(allow_dirty);
1267 1266 }
1268 1267 if (!silent) {
1269 1268 gclog_or_tty->print("remset ");
1270 1269 }
1271 1270 rem_set()->verify();
1272 1271 }
1273 1272
1274 1273 void GenCollectedHeap::print() const { print_on(tty); }
1275 1274 void GenCollectedHeap::print_on(outputStream* st) const {
1276 1275 for (int i = 0; i < _n_gens; i++) {
1277 1276 _gens[i]->print_on(st);
1278 1277 }
1279 1278 perm_gen()->print_on(st);
1280 1279 }
1281 1280
1282 1281 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1283 1282 if (workers() != NULL) {
1284 1283 workers()->threads_do(tc);
1285 1284 }
1286 1285 #ifndef SERIALGC
1287 1286 if (UseConcMarkSweepGC) {
1288 1287 ConcurrentMarkSweepThread::threads_do(tc);
1289 1288 }
1290 1289 #endif // SERIALGC
1291 1290 }
1292 1291
1293 1292 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1294 1293 #ifndef SERIALGC
1295 1294 if (UseParNewGC) {
1296 1295 workers()->print_worker_threads_on(st);
1297 1296 }
1298 1297 if (UseConcMarkSweepGC) {
1299 1298 ConcurrentMarkSweepThread::print_all_on(st);
1300 1299 }
1301 1300 #endif // SERIALGC
1302 1301 }
1303 1302
1304 1303 void GenCollectedHeap::print_tracing_info() const {
1305 1304 if (TraceGen0Time) {
1306 1305 get_gen(0)->print_summary_info();
1307 1306 }
1308 1307 if (TraceGen1Time) {
1309 1308 get_gen(1)->print_summary_info();
1310 1309 }
1311 1310 }
1312 1311
1313 1312 void GenCollectedHeap::print_heap_change(size_t prev_used) const {
1314 1313 if (PrintGCDetails && Verbose) {
1315 1314 gclog_or_tty->print(" " SIZE_FORMAT
1316 1315 "->" SIZE_FORMAT
1317 1316 "(" SIZE_FORMAT ")",
1318 1317 prev_used, used(), capacity());
1319 1318 } else {
1320 1319 gclog_or_tty->print(" " SIZE_FORMAT "K"
1321 1320 "->" SIZE_FORMAT "K"
1322 1321 "(" SIZE_FORMAT "K)",
1323 1322 prev_used / K, used() / K, capacity() / K);
1324 1323 }
1325 1324 }
1326 1325
1327 1326 // Print perm gen info with the PrintGCDetails flag.
1328 1327 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
1329 1328 gclog_or_tty->print(", [%s :", perm_gen()->short_name());
1330 1329 perm_gen()->print_heap_change(perm_prev_used);
1331 1330 gclog_or_tty->print("]");
1332 1331 }
1333 1332
1334 1333 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1335 1334 private:
1336 1335 bool _full;
1337 1336 public:
1338 1337 void do_generation(Generation* gen) {
1339 1338 gen->gc_prologue(_full);
1340 1339 }
1341 1340 GenGCPrologueClosure(bool full) : _full(full) {};
1342 1341 };
1343 1342
1344 1343 void GenCollectedHeap::gc_prologue(bool full) {
1345 1344 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1346 1345
1347 1346 always_do_update_barrier = false;
1348 1347 // Fill TLAB's and such
1349 1348 CollectedHeap::accumulate_statistics_all_tlabs();
1350 1349 ensure_parsability(true); // retire TLABs
1351 1350
1352 1351 // Call allocation profiler
1353 1352 AllocationProfiler::iterate_since_last_gc();
1354 1353 // Walk generations
1355 1354 GenGCPrologueClosure blk(full);
1356 1355 generation_iterate(&blk, false); // not old-to-young.
1357 1356 perm_gen()->gc_prologue(full);
1358 1357 };
1359 1358
1360 1359 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1361 1360 private:
1362 1361 bool _full;
1363 1362 public:
1364 1363 void do_generation(Generation* gen) {
1365 1364 gen->gc_epilogue(_full);
1366 1365 }
1367 1366 GenGCEpilogueClosure(bool full) : _full(full) {};
1368 1367 };
1369 1368
1370 1369 void GenCollectedHeap::gc_epilogue(bool full) {
1371 1370 #ifdef COMPILER2
1372 1371 assert(DerivedPointerTable::is_empty(), "derived pointer present");
1373 1372 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1374 1373 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1375 1374 #endif /* COMPILER2 */
1376 1375
1377 1376 resize_all_tlabs();
1378 1377
1379 1378 GenGCEpilogueClosure blk(full);
1380 1379 generation_iterate(&blk, false); // not old-to-young.
1381 1380 perm_gen()->gc_epilogue(full);
1382 1381
1383 1382 if (!CleanChunkPoolAsync) {
1384 1383 Chunk::clean_chunk_pool();
1385 1384 }
1386 1385
1387 1386 always_do_update_barrier = UseConcMarkSweepGC;
1388 1387 };
1389 1388
1390 1389 #ifndef PRODUCT
1391 1390 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1392 1391 private:
1393 1392 public:
1394 1393 void do_generation(Generation* gen) {
1395 1394 gen->record_spaces_top();
1396 1395 }
1397 1396 };
1398 1397
1399 1398 void GenCollectedHeap::record_gen_tops_before_GC() {
1400 1399 if (ZapUnusedHeapArea) {
1401 1400 GenGCSaveTopsBeforeGCClosure blk;
1402 1401 generation_iterate(&blk, false); // not old-to-young.
1403 1402 perm_gen()->record_spaces_top();
1404 1403 }
1405 1404 }
1406 1405 #endif // not PRODUCT
1407 1406
1408 1407 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1409 1408 public:
1410 1409 void do_generation(Generation* gen) {
1411 1410 gen->ensure_parsability();
1412 1411 }
1413 1412 };
1414 1413
1415 1414 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1416 1415 CollectedHeap::ensure_parsability(retire_tlabs);
1417 1416 GenEnsureParsabilityClosure ep_cl;
1418 1417 generation_iterate(&ep_cl, false);
1419 1418 perm_gen()->ensure_parsability();
1420 1419 }
1421 1420
1422 1421 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
1423 1422 oop obj,
1424 1423 size_t obj_size) {
1425 1424 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1426 1425 HeapWord* result = NULL;
1427 1426
1428 1427 // First give each higher generation a chance to allocate the promoted object.
1429 1428 Generation* allocator = next_gen(gen);
1430 1429 if (allocator != NULL) {
1431 1430 do {
1432 1431 result = allocator->allocate(obj_size, false);
1433 1432 } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
1434 1433 }
1435 1434
1436 1435 if (result == NULL) {
1437 1436 // Then give gen and higher generations a chance to expand and allocate the
1438 1437 // object.
1439 1438 do {
1440 1439 result = gen->expand_and_allocate(obj_size, false);
1441 1440 } while (result == NULL && (gen = next_gen(gen)) != NULL);
1442 1441 }
1443 1442
1444 1443 if (result != NULL) {
1445 1444 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1446 1445 }
1447 1446 return oop(result);
1448 1447 }
1449 1448
1450 1449 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1451 1450 jlong _time; // in ms
1452 1451 jlong _now; // in ms
1453 1452
1454 1453 public:
1455 1454 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1456 1455
1457 1456 jlong time() { return _time; }
1458 1457
1459 1458 void do_generation(Generation* gen) {
1460 1459 _time = MIN2(_time, gen->time_of_last_gc(_now));
1461 1460 }
1462 1461 };
1463 1462
1464 1463 jlong GenCollectedHeap::millis_since_last_gc() {
1465 1464 jlong now = os::javaTimeMillis();
1466 1465 GenTimeOfLastGCClosure tolgc_cl(now);
1467 1466 // iterate over generations getting the oldest
1468 1467 // time that a generation was collected
1469 1468 generation_iterate(&tolgc_cl, false);
1470 1469 tolgc_cl.do_generation(perm_gen());
1471 1470 // XXX Despite the assert above, since javaTimeMillis()
1472 1471 // does not guarantee monotonically increasing return
1473 1472 // values (note, I didn't say "strictly monotonic"),
1474 1473 // we need to guard against getting back a time
1475 1474 // later than now. This should be fixed by basing
1476 1475 // on something like gethrtime() which guarantees
1477 1476 // monotonicity. Note that cond_wait() is susceptible
1478 1477 // to a similar problem, because its interface is
1479 1478 // based on absolute time in the form of the
1480 1479 // system time's notion of UTC. See also 4506635
1481 1480 // for yet another problem of similar nature. XXX
1482 1481 jlong retVal = now - tolgc_cl.time();
1483 1482 if (retVal < 0) {
1484 1483 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
1485 1484 return 0;
1486 1485 }
1487 1486 return retVal;
1488 1487 }
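The XXX comment in millis_since_last_gc() asks for a monotonic time source in place of javaTimeMillis(). std::chrono::steady_clock gives the gethrtime()-style guarantee the comment wants, which would make the time-warp guard unnecessary; a sketch, not the eventual HotSpot fix:

#include <chrono>

// steady_clock never goes backwards, so 'now - last_gc' cannot come out
// negative the way javaTimeMillis() differences can.
static long long millis_since(std::chrono::steady_clock::time_point last_gc) {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now() - last_gc).count();
}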