rev 4515 : 7176220: 'Full GC' events miss date stamp information occasionally
Summary: Move date stamp logic into GCTraceTime
Reviewed-by: johnc, brutisso, jmasa
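Before this change each GC call site had to print the date stamp itself (see the line deleted from do_collection() below); a site that missed the call produced a "Full GC" event without a date stamp. Moving the stamp into the GCTraceTime scope object makes it impossible to forget. The following standalone sketch illustrates that RAII pattern only; TraceScope, its parameters, and its output format are hypothetical stand-ins, not the actual GCTraceTime implementation:

    #include <cstdio>
    #include <ctime>

    // Hypothetical stand-in for GCTraceTime: an RAII scope that emits the
    // date stamp itself, so individual GC call sites cannot forget it.
    class TraceScope {
     public:
      TraceScope(const char* title, bool doit, bool print_date_stamps)
          : _doit(doit), _start(std::clock()) {
        if (_doit) {
          if (print_date_stamps) {
            char buf[32];
            std::time_t now = std::time(nullptr);
            std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S: ",
                          std::localtime(&now));
            std::fputs(buf, stdout);   // date stamp owned by the scope itself
          }
          std::printf("[%s", title);
        }
      }
      ~TraceScope() {
        if (_doit) {
          double secs = double(std::clock() - _start) / CLOCKS_PER_SEC;
          std::printf(", %.7f secs]\n", secs); // duration printed on scope exit
        }
      }
     private:
      bool _doit;
      std::clock_t _start;
    };

    int main() {
      TraceScope t("Full GC", /*doit=*/true, /*print_date_stamps=*/true);
      return 0;  // destructor prints the elapsed time
    }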
--- old/src/share/vm/memory/genCollectedHeap.cpp
+++ new/src/share/vm/memory/genCollectedHeap.cpp
1 1 /*
2 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/symbolTable.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "gc_implementation/shared/collectorCounters.hpp"
31 31 #include "gc_implementation/shared/gcTraceTime.hpp"
32 32 #include "gc_implementation/shared/vmGCOperations.hpp"
33 33 #include "gc_interface/collectedHeap.inline.hpp"
34 34 #include "memory/compactPermGen.hpp"
35 35 #include "memory/filemap.hpp"
36 36 #include "memory/gcLocker.inline.hpp"
37 37 #include "memory/genCollectedHeap.hpp"
38 38 #include "memory/genOopClosures.inline.hpp"
39 39 #include "memory/generation.inline.hpp"
40 40 #include "memory/generationSpec.hpp"
41 41 #include "memory/permGen.hpp"
42 42 #include "memory/resourceArea.hpp"
43 43 #include "memory/sharedHeap.hpp"
44 44 #include "memory/space.hpp"
45 45 #include "oops/oop.inline.hpp"
46 46 #include "oops/oop.inline2.hpp"
47 47 #include "runtime/aprofiler.hpp"
48 48 #include "runtime/biasedLocking.hpp"
49 49 #include "runtime/fprofiler.hpp"
50 50 #include "runtime/handles.hpp"
51 51 #include "runtime/handles.inline.hpp"
52 52 #include "runtime/java.hpp"
53 53 #include "runtime/vmThread.hpp"
54 54 #include "services/memoryService.hpp"
55 55 #include "services/memTracker.hpp"
56 56 #include "utilities/vmError.hpp"
57 57 #include "utilities/workgroup.hpp"
58 58 #ifndef SERIALGC
59 59 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
60 60 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
61 61 #endif
62 62
63 63 GenCollectedHeap* GenCollectedHeap::_gch;
64 64 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
65 65
66 66 // The set of potentially parallel tasks in strong root scanning.
67 67 enum GCH_process_strong_roots_tasks {
68 68 // We probably want to parallelize both of these internally, but for now...
69 69 GCH_PS_younger_gens,
70 70 // Leave this one last.
71 71 GCH_PS_NumElements
72 72 };
73 73
74 74 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
75 75 SharedHeap(policy),
76 76 _gen_policy(policy),
77 77 _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
78 78 _full_collections_completed(0)
79 79 {
80 80 if (_gen_process_strong_tasks == NULL ||
81 81 !_gen_process_strong_tasks->valid()) {
82 82 vm_exit_during_initialization("Failed necessary allocation.");
83 83 }
84 84 assert(policy != NULL, "Sanity check");
85 85 _preloading_shared_classes = false;
86 86 }
87 87
88 88 jint GenCollectedHeap::initialize() {
89 89 CollectedHeap::pre_initialize();
90 90
91 91 int i;
92 92 _n_gens = gen_policy()->number_of_generations();
93 93
94 94 // While there are no constraints in the GC code that HeapWordSize
95 95 // be any particular value, there are multiple other areas in the
96 96 // system which believe this to be true (e.g. oop->object_size in some
97 97 // cases incorrectly returns the size in wordSize units rather than
98 98 // HeapWordSize).
99 99 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
100 100
101 101 // The heap must be at least as aligned as generations.
102 102 size_t alignment = Generation::GenGrain;
103 103
104 104 _gen_specs = gen_policy()->generations();
105 105 PermanentGenerationSpec *perm_gen_spec =
106 106 collector_policy()->permanent_generation();
107 107
108 108 // Make sure the sizes are all aligned.
109 109 for (i = 0; i < _n_gens; i++) {
110 110 _gen_specs[i]->align(alignment);
111 111 }
112 112 perm_gen_spec->align(alignment);
113 113
114 114 // If we are dumping the heap, then allocate a wasted block of address
115 115 // space in order to push the heap to a lower address. This extra
116 116 // address range allows for other (or larger) libraries to be loaded
117 117 // without them occupying the space required for the shared spaces.
118 118
119 119 if (DumpSharedSpaces) {
120 120 uintx reserved = 0;
121 121 uintx block_size = 64*1024*1024;
122 122 while (reserved < SharedDummyBlockSize) {
123 123 char* dummy = os::reserve_memory(block_size);
124 124 reserved += block_size;
125 125 }
126 126 }
127 127
128 128 // Allocate space for the heap.
129 129
130 130 char* heap_address;
131 131 size_t total_reserved = 0;
132 132 int n_covered_regions = 0;
133 133 ReservedSpace heap_rs(0);
134 134
135 135 heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
136 136 &n_covered_regions, &heap_rs);
137 137
138 138 if (UseSharedSpaces) {
139 139 if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
140 140 if (heap_rs.is_reserved()) {
141 141 heap_rs.release();
142 142 }
143 143 FileMapInfo* mapinfo = FileMapInfo::current_info();
144 144 mapinfo->fail_continue("Unable to reserve shared region.");
145 145 allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
146 146 &heap_rs);
147 147 }
148 148 }
149 149
150 150 if (!heap_rs.is_reserved()) {
151 151 vm_shutdown_during_initialization(
152 152 "Could not reserve enough space for object heap");
153 153 return JNI_ENOMEM;
154 154 }
155 155
156 156 _reserved = MemRegion((HeapWord*)heap_rs.base(),
157 157 (HeapWord*)(heap_rs.base() + heap_rs.size()));
158 158
159 159 // It is important to do this in a way such that concurrent readers can't
160 160 // temporarily think something is in the heap. (Seen this happen in asserts.)
161 161 _reserved.set_word_size(0);
162 162 _reserved.set_start((HeapWord*)heap_rs.base());
163 163 size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
164 164 - perm_gen_spec->misc_code_size();
165 165 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
166 166
167 167 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
168 168 set_barrier_set(rem_set()->bs());
169 169
170 170 _gch = this;
171 171
172 172 for (i = 0; i < _n_gens; i++) {
173 173 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
174 174 UseSharedSpaces, UseSharedSpaces);
175 175 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
176 176 // tag generations in JavaHeap
177 177 MemTracker::record_virtual_memory_type((address)this_rs.base(), mtJavaHeap);
178 178 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
179 179 }
180 180 _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
181 181 // tag PermGen
182 182 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
183 183
184 184 clear_incremental_collection_failed();
185 185
186 186 #ifndef SERIALGC
187 187 // If we are running CMS, create the collector responsible
188 188 // for collecting the CMS generations.
189 189 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
190 190 bool success = create_cms_collector();
191 191 if (!success) return JNI_ENOMEM;
192 192 }
193 193 #endif // SERIALGC
194 194
195 195 return JNI_OK;
196 196 }
197 197
198 198
199 199 char* GenCollectedHeap::allocate(size_t alignment,
200 200 PermanentGenerationSpec* perm_gen_spec,
201 201 size_t* _total_reserved,
202 202 int* _n_covered_regions,
203 203 ReservedSpace* heap_rs) {
204 204 // Now figure out the total size.
205 205 size_t total_reserved = 0;
206 206 int n_covered_regions = 0;
207 207 const size_t pageSize = UseLargePages ?
208 208 os::large_page_size() : os::vm_page_size();
209 209
210 210 for (int i = 0; i < _n_gens; i++) {
211 211 total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
212 212 n_covered_regions += _gen_specs[i]->n_covered_regions();
213 213 }
214 214
215 215 assert(total_reserved % pageSize == 0,
216 216 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
217 217 SIZE_FORMAT, total_reserved, pageSize));
218 218 total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
219 219 assert(total_reserved % pageSize == 0,
220 220 err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
221 221 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
222 222 pageSize, perm_gen_spec->max_size()));
223 223
224 224 n_covered_regions += perm_gen_spec->n_covered_regions();
225 225
226 226 // Add the size of the data area which shares the same reserved area
227 227 // as the heap, but which is not actually part of the heap.
228 228 size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
229 229 total_reserved = add_and_check_overflow(total_reserved, misc);
230 230
231 231 if (UseLargePages) {
232 232 assert(total_reserved != 0, "total_reserved cannot be 0");
233 233 total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
234 234 }
235 235
236 236 // Calculate the address at which the heap must reside in order for
237 237 // the shared data to be at the required address.
238 238
239 239 char* heap_address;
240 240 if (UseSharedSpaces) {
241 241
242 242 // Calculate the address of the first word beyond the heap.
243 243 FileMapInfo* mapinfo = FileMapInfo::current_info();
244 244 int lr = CompactingPermGenGen::n_regions - 1;
245 245 size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
246 246 heap_address = mapinfo->region_base(lr) + capacity;
247 247
248 248 // Calculate the address of the first word of the heap.
249 249 heap_address -= total_reserved;
250 250 } else {
251 251 heap_address = NULL; // any address will do.
252 252 if (UseCompressedOops) {
253 253 heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
254 254 *_total_reserved = total_reserved;
255 255 *_n_covered_regions = n_covered_regions;
256 256 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
257 257 UseLargePages, heap_address);
258 258
259 259 if (heap_address != NULL && !heap_rs->is_reserved()) {
260 260 // Failed to reserve at specified address - the requested memory
261 261 // region is taken already, for example, by 'java' launcher.
262 262 // Try again to reserve the heap higher.
263 263 heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
264 264 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
265 265 UseLargePages, heap_address);
266 266
267 267 if (heap_address != NULL && !heap_rs->is_reserved()) {
268 268 // Failed to reserve at specified address again - give up.
269 269 heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
270 270 assert(heap_address == NULL, "");
271 271 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
272 272 UseLargePages, heap_address);
273 273 }
274 274 }
275 275 return heap_address;
276 276 }
277 277 }
278 278
279 279 *_total_reserved = total_reserved;
280 280 *_n_covered_regions = n_covered_regions;
281 281 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
282 282 UseLargePages, heap_address);
283 283
284 284 return heap_address;
285 285 }
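The size computation in allocate() above relies on add_and_check_overflow() and round_up_and_check_overflow(), which are defined elsewhere in HotSpot and not shown in this file. A minimal self-contained sketch of what such unsigned overflow checks typically look like follows; the error handling is a stand-in for vm_exit_during_initialization() and the message text is an assumption:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Hedged sketch of overflow-checked size arithmetic; not the HotSpot source.
    static size_t add_and_check_overflow(size_t total, size_t size) {
      size_t result = total + size;
      if (result < total) {            // unsigned wrap-around means overflow
        std::fprintf(stderr, "Overflow in heap size calculation\n");
        std::exit(2);                  // stand-in for vm_exit_during_initialization()
      }
      return result;
    }

    static size_t round_up_and_check_overflow(size_t total, size_t alignment) {
      // alignment is assumed to be a non-zero power of two (e.g. a page size)
      size_t rounded = (total + alignment - 1) & ~(alignment - 1);
      if (rounded < total) {           // wrapped past SIZE_MAX while rounding
        std::fprintf(stderr, "Overflow in heap size calculation\n");
        std::exit(2);
      }
      return rounded;
    }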
286 286
287 287
288 288 void GenCollectedHeap::post_initialize() {
289 289 SharedHeap::post_initialize();
290 290 TwoGenerationCollectorPolicy *policy =
291 291 (TwoGenerationCollectorPolicy *)collector_policy();
292 292 guarantee(policy->is_two_generation_policy(), "Illegal policy type");
293 293 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
294 294 assert(def_new_gen->kind() == Generation::DefNew ||
295 295 def_new_gen->kind() == Generation::ParNew ||
296 296 def_new_gen->kind() == Generation::ASParNew,
297 297 "Wrong generation kind");
298 298
299 299 Generation* old_gen = get_gen(1);
300 300 assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
301 301 old_gen->kind() == Generation::ASConcurrentMarkSweep ||
302 302 old_gen->kind() == Generation::MarkSweepCompact,
303 303 "Wrong generation kind");
304 304
305 305 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
306 306 old_gen->capacity(),
307 307 def_new_gen->from()->capacity());
308 308 policy->initialize_gc_policy_counters();
309 309 }
310 310
311 311 void GenCollectedHeap::ref_processing_init() {
312 312 SharedHeap::ref_processing_init();
313 313 for (int i = 0; i < _n_gens; i++) {
314 314 _gens[i]->ref_processor_init();
315 315 }
316 316 }
317 317
318 318 size_t GenCollectedHeap::capacity() const {
319 319 size_t res = 0;
320 320 for (int i = 0; i < _n_gens; i++) {
321 321 res += _gens[i]->capacity();
322 322 }
323 323 return res;
324 324 }
325 325
326 326 size_t GenCollectedHeap::used() const {
327 327 size_t res = 0;
328 328 for (int i = 0; i < _n_gens; i++) {
329 329 res += _gens[i]->used();
330 330 }
331 331 return res;
332 332 }
333 333
334 334 // Save the "used_region" for generations at level and lower,
335 335 // and, if perm is true, for perm gen.
336 336 void GenCollectedHeap::save_used_regions(int level, bool perm) {
337 337 assert(level < _n_gens, "Illegal level parameter");
338 338 for (int i = level; i >= 0; i--) {
339 339 _gens[i]->save_used_region();
340 340 }
341 341 if (perm) {
342 342 perm_gen()->save_used_region();
343 343 }
344 344 }
345 345
346 346 size_t GenCollectedHeap::max_capacity() const {
347 347 size_t res = 0;
348 348 for (int i = 0; i < _n_gens; i++) {
349 349 res += _gens[i]->max_capacity();
350 350 }
351 351 return res;
352 352 }
353 353
354 354 // Update the _full_collections_completed counter
355 355 // at the end of a stop-world full GC.
356 356 unsigned int GenCollectedHeap::update_full_collections_completed() {
357 357 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
358 358 assert(_full_collections_completed <= _total_full_collections,
359 359 "Can't complete more collections than were started");
360 360 _full_collections_completed = _total_full_collections;
361 361 ml.notify_all();
362 362 return _full_collections_completed;
363 363 }
364 364
365 365 // Update the _full_collections_completed counter, as appropriate,
366 366 // at the end of a concurrent GC cycle. Note the conditional update
367 367 // below to allow this method to be called by a concurrent collector
368 368 // without synchronizing in any manner with the VM thread (which
369 369 // may already have initiated a STW full collection "concurrently").
370 370 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
371 371 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
372 372 assert((_full_collections_completed <= _total_full_collections) &&
373 373 (count <= _total_full_collections),
374 374 "Can't complete more collections than were started");
375 375 if (count > _full_collections_completed) {
376 376 _full_collections_completed = count;
377 377 ml.notify_all();
378 378 }
379 379 return _full_collections_completed;
380 380 }
381 381
382 382
383 383 #ifndef PRODUCT
384 384 // Override of memory state checking method in CollectedHeap:
385 385 // Some collectors (CMS for example) can't have badHeapWordVal written
386 386 // in the first two words of an object. (For instance, in the case of
387 387 // CMS these words hold state used to synchronize between certain
388 388 // (concurrent) GC steps and direct allocating mutators.)
389 389 // The skip_header_HeapWords() method below allows us to skip
390 390 // over the requisite number of HeapWords. Note that (for
391 391 // generational collectors) this means that those many words are
392 392 // skipped in each object, irrespective of the generation in which
393 393 // that object lives. The resultant loss of precision seems to be
394 394 // harmless and the pain of avoiding that imprecision appears somewhat
395 395 // higher than we are prepared to pay for such rudimentary debugging
396 396 // support.
397 397 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
398 398 size_t size) {
399 399 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
400 400 // We are asked to check a size in HeapWords,
401 401 // but the memory is mangled in juint words.
402 402 juint* start = (juint*) (addr + skip_header_HeapWords());
403 403 juint* end = (juint*) (addr + size);
404 404 for (juint* slot = start; slot < end; slot += 1) {
405 405 assert(*slot == badHeapWordVal,
406 406 "Found non badHeapWordValue in pre-allocation check");
407 407 }
408 408 }
409 409 }
410 410 #endif
411 411
412 412 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
413 413 bool is_tlab,
414 414 bool first_only) {
415 415 HeapWord* res;
416 416 for (int i = 0; i < _n_gens; i++) {
417 417 if (_gens[i]->should_allocate(size, is_tlab)) {
418 418 res = _gens[i]->allocate(size, is_tlab);
419 419 if (res != NULL) return res;
420 420 else if (first_only) break;
421 421 }
422 422 }
423 423 // Otherwise...
424 424 return NULL;
425 425 }
426 426
427 427 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
428 428 bool* gc_overhead_limit_was_exceeded) {
429 429 return collector_policy()->mem_allocate_work(size,
430 430 false /* is_tlab */,
431 431 gc_overhead_limit_was_exceeded);
432 432 }
433 433
434 434 bool GenCollectedHeap::must_clear_all_soft_refs() {
435 435 return _gc_cause == GCCause::_last_ditch_collection;
436 436 }
437 437
438 438 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
439 439 return UseConcMarkSweepGC &&
440 440 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
441 441 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
442 442 }
443 443
444 444 void GenCollectedHeap::do_collection(bool full,
445 445 bool clear_all_soft_refs,
446 446 size_t size,
447 447 bool is_tlab,
448 448 int max_level) {
449 449 bool prepared_for_verification = false;
450 450 ResourceMark rm;
451 451 DEBUG_ONLY(Thread* my_thread = Thread::current();)
452 452
453 453 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
454 454 assert(my_thread->is_VM_thread() ||
455 455 my_thread->is_ConcurrentGC_thread(),
456 456 "incorrect thread type capability");
457 457 assert(Heap_lock->is_locked(),
458 458 "the requesting thread should have the Heap_lock");
459 459 guarantee(!is_gc_active(), "collection is not reentrant");
460 460 assert(max_level < n_gens(), "sanity check");
461 461
462 462 if (GC_locker::check_active_before_gc()) {
463 463 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
464 464 }
465 465
466 466 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
467 467 collector_policy()->should_clear_all_soft_refs();
468 468
469 469 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
470 470
471 471 const size_t perm_prev_used = perm_gen()->used();
472 472
473 473 print_heap_before_gc();
474 474
475 475 {
476 476 FlagSetting fl(_is_gc_active, true);
477 477
478 478 bool complete = full && (max_level == (n_gens()-1));
479 479 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
480 - gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
481 480 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
482 481 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
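    // Not part of this change's diff: with the call-site date_stamp() deleted
    // above, GCTraceTime is now responsible for emitting the date stamp itself
    // (per the summary, "Move date stamp logic into GCTraceTime"), presumably
    // next to its time stamp in gcTraceTime.hpp/.cpp. That is what keeps
    // "Full GC" events from occasionally missing date stamp information.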
483 482
484 483 gc_prologue(complete);
485 484 increment_total_collections(complete);
486 485
487 486 size_t gch_prev_used = used();
488 487
489 488 int starting_level = 0;
490 489 if (full) {
491 490 // Search for the oldest generation which will collect all younger
492 491 // generations, and start collection loop there.
493 492 for (int i = max_level; i >= 0; i--) {
494 493 if (_gens[i]->full_collects_younger_generations()) {
495 494 starting_level = i;
496 495 break;
497 496 }
498 497 }
499 498 }
500 499
501 500 bool must_restore_marks_for_biased_locking = false;
502 501
503 502 int max_level_collected = starting_level;
504 503 for (int i = starting_level; i <= max_level; i++) {
505 504 if (_gens[i]->should_collect(full, size, is_tlab)) {
506 505 if (i == n_gens() - 1) { // a major collection is to happen
507 506 if (!complete) {
508 507 // The full_collections increment was missed above.
509 508 increment_total_full_collections();
510 509 }
511 510 pre_full_gc_dump(NULL); // do any pre full gc dumps
512 511 }
513 512 // Timer for individual generations. Last argument is false: no CR
514 513 // FIXME: We should try to start the timing earlier to cover more of the GC pause
515 514 GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
516 515 TraceCollectorStats tcs(_gens[i]->counters());
517 516 TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());
518 517
519 518 size_t prev_used = _gens[i]->used();
520 519 _gens[i]->stat_record()->invocations++;
521 520 _gens[i]->stat_record()->accumulated_time.start();
522 521
523 522 // Must be done anew before each collection because
524 523 // a previous collection will do mangling and will
525 524 // change top of some spaces.
526 525 record_gen_tops_before_GC();
527 526
528 527 if (PrintGC && Verbose) {
529 528 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
530 529 i,
531 530 _gens[i]->stat_record()->invocations,
532 531 size*HeapWordSize);
533 532 }
534 533
535 534 if (VerifyBeforeGC && i >= VerifyGCLevel &&
536 535 total_collections() >= VerifyGCStartAt) {
537 536 HandleMark hm; // Discard invalid handles created during verification
538 537 if (!prepared_for_verification) {
539 538 prepare_for_verify();
540 539 prepared_for_verification = true;
541 540 }
542 541 gclog_or_tty->print(" VerifyBeforeGC:");
543 542 Universe::verify();
544 543 }
545 544 COMPILER2_PRESENT(DerivedPointerTable::clear());
546 545
547 546 if (!must_restore_marks_for_biased_locking &&
548 547 _gens[i]->performs_in_place_marking()) {
549 548 // We perform this mark word preservation work lazily
550 549 // because it's only at this point that we know whether we
551 550 // absolutely have to do it; we want to avoid doing it for
552 551 // scavenge-only collections where it's unnecessary
553 552 must_restore_marks_for_biased_locking = true;
554 553 BiasedLocking::preserve_marks();
555 554 }
556 555
557 556 // Do collection work
558 557 {
559 558 // Note on ref discovery: For what appear to be historical reasons,
560 559 // GCH enables and disables (by enqueueing) refs discovery.
561 560 // In the future this should be moved into the generation's
562 561 // collect method so that ref discovery and enqueueing concerns
563 562 // are local to a generation. The collect method could return
564 563 // an appropriate indication in the case that notification on
565 564 // the ref lock was needed. This will make the treatment of
566 565 // weak refs more uniform (and indeed remove such concerns
567 566 // from GCH). XXX
568 567
569 568 HandleMark hm; // Discard invalid handles created during gc
570 569 save_marks(); // save marks for all gens
571 570 // We want to discover references, but not process them yet.
572 571 // This mode is disabled in process_discovered_references if the
573 572 // generation does some collection work, or in
574 573 // enqueue_discovered_references if the generation returns
575 574 // without doing any work.
576 575 ReferenceProcessor* rp = _gens[i]->ref_processor();
577 576 // If the discovery of ("weak") refs in this generation is
578 577 // atomic wrt other collectors in this configuration, we
579 578 // are guaranteed to have empty discovered ref lists.
580 579 if (rp->discovery_is_atomic()) {
581 580 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
582 581 rp->setup_policy(do_clear_all_soft_refs);
583 582 } else {
584 583 // collect() below will enable discovery as appropriate
585 584 }
586 585 _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
587 586 if (!rp->enqueuing_is_done()) {
588 587 rp->enqueue_discovered_references();
589 588 } else {
590 589 rp->set_enqueuing_is_done(false);
591 590 }
592 591 rp->verify_no_references_recorded();
593 592 }
594 593 max_level_collected = i;
595 594
596 595 // Determine if allocation request was met.
597 596 if (size > 0) {
598 597 if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
599 598 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
600 599 size = 0;
601 600 }
602 601 }
603 602 }
604 603
605 604 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
606 605
607 606 _gens[i]->stat_record()->accumulated_time.stop();
608 607
609 608 update_gc_stats(i, full);
610 609
611 610 if (VerifyAfterGC && i >= VerifyGCLevel &&
612 611 total_collections() >= VerifyGCStartAt) {
613 612 HandleMark hm; // Discard invalid handles created during verification
614 613 gclog_or_tty->print(" VerifyAfterGC:");
615 614 Universe::verify();
616 615 }
617 616
618 617 if (PrintGCDetails) {
619 618 gclog_or_tty->print(":");
620 619 _gens[i]->print_heap_change(prev_used);
621 620 }
622 621 }
623 622 }
624 623
625 624 // Update "complete" boolean wrt what actually transpired --
626 625 // for instance, a promotion failure could have led to
627 626 // a whole heap collection.
628 627 complete = complete || (max_level_collected == n_gens() - 1);
629 628
630 629 if (complete) { // We did a "major" collection
631 630 // FIXME: See comment at pre_full_gc_dump call
632 631 post_full_gc_dump(NULL); // do any post full gc dumps
633 632 }
634 633
635 634 if (PrintGCDetails) {
636 635 print_heap_change(gch_prev_used);
637 636
638 637 // Print perm gen info for full GC with PrintGCDetails flag.
639 638 if (complete) {
640 639 print_perm_heap_change(perm_prev_used);
641 640 }
642 641 }
643 642
644 643 for (int j = max_level_collected; j >= 0; j -= 1) {
645 644 // Adjust generation sizes.
646 645 _gens[j]->compute_new_size();
647 646 }
648 647
649 648 if (complete) {
650 649 // Ask the permanent generation to adjust size for full collections
651 650 perm()->compute_new_size();
652 651 update_full_collections_completed();
653 652 }
654 653
655 654 // Track memory usage and detect low memory after GC finishes
656 655 MemoryService::track_memory_usage();
657 656
658 657 gc_epilogue(complete);
659 658
660 659 if (must_restore_marks_for_biased_locking) {
661 660 BiasedLocking::restore_marks();
662 661 }
663 662 }
664 663
665 664 AdaptiveSizePolicy* sp = gen_policy()->size_policy();
666 665 AdaptiveSizePolicyOutput(sp, total_collections());
667 666
668 667 print_heap_after_gc();
669 668
670 669 #ifdef TRACESPINNING
671 670 ParallelTaskTerminator::print_termination_counts();
672 671 #endif
673 672 }
674 673
675 674 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
676 675 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
677 676 }
678 677
679 678 void GenCollectedHeap::set_par_threads(uint t) {
680 679 SharedHeap::set_par_threads(t);
681 680 _gen_process_strong_tasks->set_n_threads(t);
682 681 }
683 682
684 683 void GenCollectedHeap::
685 684 gen_process_strong_roots(int level,
686 685 bool younger_gens_as_roots,
687 686 bool activate_scope,
688 687 bool collecting_perm_gen,
689 688 SharedHeap::ScanningOption so,
690 689 OopsInGenClosure* not_older_gens,
691 690 bool do_code_roots,
692 691 OopsInGenClosure* older_gens) {
693 692 // General strong roots.
694 693
695 694 if (!do_code_roots) {
696 695 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
697 696 not_older_gens, NULL, older_gens);
698 697 } else {
699 698 bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
700 699 CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
701 700 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
702 701 not_older_gens, &code_roots, older_gens);
703 702 }
704 703
705 704 if (younger_gens_as_roots) {
706 705 if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
707 706 for (int i = 0; i < level; i++) {
708 707 not_older_gens->set_generation(_gens[i]);
709 708 _gens[i]->oop_iterate(not_older_gens);
710 709 }
711 710 not_older_gens->reset_generation();
712 711 }
713 712 }
714 713 // When collection is parallel, all threads get to cooperate to do
715 714 // older-gen scanning.
716 715 for (int i = level+1; i < _n_gens; i++) {
717 716 older_gens->set_generation(_gens[i]);
718 717 rem_set()->younger_refs_iterate(_gens[i], older_gens);
719 718 older_gens->reset_generation();
720 719 }
721 720
722 721 _gen_process_strong_tasks->all_tasks_completed();
723 722 }
724 723
725 724 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
726 725 CodeBlobClosure* code_roots,
727 726 OopClosure* non_root_closure) {
728 727 SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
729 728 // "Local" "weak" refs
730 729 for (int i = 0; i < _n_gens; i++) {
731 730 _gens[i]->ref_processor()->weak_oops_do(root_closure);
732 731 }
733 732 }
734 733
735 734 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
736 735 void GenCollectedHeap:: \
737 736 oop_since_save_marks_iterate(int level, \
738 737 OopClosureType* cur, \
739 738 OopClosureType* older) { \
740 739 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \
741 740 for (int i = level+1; i < n_gens(); i++) { \
742 741 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \
743 742 } \
744 743 perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \
745 744 }
746 745
747 746 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
748 747
749 748 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
750 749
751 750 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
752 751 for (int i = level; i < _n_gens; i++) {
753 752 if (!_gens[i]->no_allocs_since_save_marks()) return false;
754 753 }
755 754 return perm_gen()->no_allocs_since_save_marks();
756 755 }
757 756
758 757 bool GenCollectedHeap::supports_inline_contig_alloc() const {
759 758 return _gens[0]->supports_inline_contig_alloc();
760 759 }
761 760
762 761 HeapWord** GenCollectedHeap::top_addr() const {
763 762 return _gens[0]->top_addr();
764 763 }
765 764
766 765 HeapWord** GenCollectedHeap::end_addr() const {
767 766 return _gens[0]->end_addr();
768 767 }
769 768
770 769 size_t GenCollectedHeap::unsafe_max_alloc() {
771 770 return _gens[0]->unsafe_max_alloc_nogc();
772 771 }
773 772
774 773 // public collection interfaces
775 774
776 775 void GenCollectedHeap::collect(GCCause::Cause cause) {
777 776 if (should_do_concurrent_full_gc(cause)) {
778 777 #ifndef SERIALGC
779 778 // mostly concurrent full collection
780 779 collect_mostly_concurrent(cause);
781 780 #else // SERIALGC
782 781 ShouldNotReachHere();
783 782 #endif // SERIALGC
784 783 } else {
785 784 #ifdef ASSERT
786 785 if (cause == GCCause::_scavenge_alot) {
787 786 // minor collection only
788 787 collect(cause, 0);
789 788 } else {
790 789 // Stop-the-world full collection
791 790 collect(cause, n_gens() - 1);
792 791 }
793 792 #else
794 793 // Stop-the-world full collection
795 794 collect(cause, n_gens() - 1);
796 795 #endif
797 796 }
798 797 }
799 798
800 799 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
801 800 // The caller doesn't have the Heap_lock
802 801 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
803 802 MutexLocker ml(Heap_lock);
804 803 collect_locked(cause, max_level);
805 804 }
806 805
807 806 // This interface assumes that it's being called by the
808 807 // vm thread. It collects the heap assuming that the
809 808 // heap lock is already held and that we are executing in
810 809 // the context of the vm thread.
811 810 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
812 811 assert(Thread::current()->is_VM_thread(), "Precondition#1");
813 812 assert(Heap_lock->is_locked(), "Precondition#2");
814 813 GCCauseSetter gcs(this, cause);
815 814 switch (cause) {
816 815 case GCCause::_heap_inspection:
817 816 case GCCause::_heap_dump: {
818 817 HandleMark hm;
819 818 do_full_collection(false, // don't clear all soft refs
820 819 n_gens() - 1);
821 820 break;
822 821 }
823 822 default: // XXX FIX ME
824 823 ShouldNotReachHere(); // Unexpected use of this function
825 824 }
826 825 }
827 826
828 827 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
829 828 // The caller has the Heap_lock
830 829 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
831 830 collect_locked(cause, n_gens() - 1);
832 831 }
833 832
834 833 // this is the private collection interface
835 834 // The Heap_lock is expected to be held on entry.
836 835
837 836 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
838 837 if (_preloading_shared_classes) {
839 838 report_out_of_shared_space(SharedPermGen);
840 839 }
841 840 // Read the GC count while holding the Heap_lock
842 841 unsigned int gc_count_before = total_collections();
843 842 unsigned int full_gc_count_before = total_full_collections();
844 843 {
845 844 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
846 845 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
847 846 cause, max_level);
848 847 VMThread::execute(&op);
849 848 }
850 849 }
851 850
852 851 #ifndef SERIALGC
853 852 bool GenCollectedHeap::create_cms_collector() {
854 853
855 854 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
856 855 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
857 856 _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
858 857 "Unexpected generation kinds");
859 858 // Skip two header words in the block content verification
860 859 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
861 860 CMSCollector* collector = new CMSCollector(
862 861 (ConcurrentMarkSweepGeneration*)_gens[1],
863 862 (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
864 863 _rem_set->as_CardTableRS(),
865 864 (ConcurrentMarkSweepPolicy*) collector_policy());
866 865
867 866 if (collector == NULL || !collector->completed_initialization()) {
868 867 if (collector) {
869 868 delete collector; // Be nice in embedded situation
870 869 }
871 870 vm_shutdown_during_initialization("Could not create CMS collector");
872 871 return false;
873 872 }
874 873 return true; // success
875 874 }
876 875
877 876 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
878 877 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
879 878
880 879 MutexLocker ml(Heap_lock);
881 880 // Read the GC counts while holding the Heap_lock
882 881 unsigned int full_gc_count_before = total_full_collections();
883 882 unsigned int gc_count_before = total_collections();
884 883 {
885 884 MutexUnlocker mu(Heap_lock);
886 885 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
887 886 VMThread::execute(&op);
888 887 }
889 888 }
890 889 #endif // SERIALGC
891 890
892 891
893 892 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
894 893 int max_level) {
895 894 int local_max_level;
896 895 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
897 896 gc_cause() == GCCause::_gc_locker) {
898 897 local_max_level = 0;
899 898 } else {
900 899 local_max_level = max_level;
901 900 }
902 901
903 902 do_collection(true /* full */,
904 903 clear_all_soft_refs /* clear_all_soft_refs */,
905 904 0 /* size */,
906 905 false /* is_tlab */,
907 906 local_max_level /* max_level */);
908 907 // Hack XXX FIX ME !!!
909 908 // A scavenge may not have been attempted, or may have
910 909 // been attempted and failed, because the old gen was too full
911 910 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
912 911 incremental_collection_will_fail(false /* don't consult_young */)) {
913 912 if (PrintGCDetails) {
914 913 gclog_or_tty->print_cr("GC locker: Trying a full collection "
915 914 "because scavenge failed");
916 915 }
917 916 // This time allow the old gen to be collected as well
918 917 do_collection(true /* full */,
919 918 clear_all_soft_refs /* clear_all_soft_refs */,
920 919 0 /* size */,
921 920 false /* is_tlab */,
922 921 n_gens() - 1 /* max_level */);
923 922 }
924 923 }
925 924
926 925 bool GenCollectedHeap::is_in_young(oop p) {
927 926 bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
928 927 assert(result == _gens[0]->is_in_reserved(p),
929 928 err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
930 929 return result;
931 930 }
932 931
933 932 // Returns "TRUE" iff "p" points into the committed areas of the heap.
934 933 bool GenCollectedHeap::is_in(const void* p) const {
935 934 #ifndef ASSERT
936 935 guarantee(VerifyBeforeGC ||
937 936 VerifyDuringGC ||
938 937 VerifyBeforeExit ||
939 938 PrintAssembly ||
940 939 tty->count() != 0 || // already printing
941 940 VerifyAfterGC ||
942 941 VMError::fatal_error_in_progress(), "too expensive");
943 942
944 943 #endif
945 944 // This might be sped up with a cache of the last generation that
946 945 // answered yes.
947 946 for (int i = 0; i < _n_gens; i++) {
948 947 if (_gens[i]->is_in(p)) return true;
949 948 }
950 949 if (_perm_gen->as_gen()->is_in(p)) return true;
951 950 // Otherwise...
952 951 return false;
953 952 }
954 953
955 954 #ifdef ASSERT
956 955 // Don't implement this by using is_in_young(). This method is used
957 956 // in some cases to check that is_in_young() is correct.
958 957 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
959 958 assert(is_in_reserved(p) || p == NULL,
960 959 "Does not work if address is non-null and outside of the heap");
961 960 // The order of the generations is young (low addr), old, perm (high addr)
962 961 return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
963 962 }
964 963 #endif
965 964
966 965 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
967 966 for (int i = 0; i < _n_gens; i++) {
968 967 _gens[i]->oop_iterate(cl);
969 968 }
970 969 }
971 970
972 971 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
973 972 for (int i = 0; i < _n_gens; i++) {
974 973 _gens[i]->oop_iterate(mr, cl);
975 974 }
976 975 }
977 976
978 977 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
979 978 for (int i = 0; i < _n_gens; i++) {
980 979 _gens[i]->object_iterate(cl);
981 980 }
982 981 perm_gen()->object_iterate(cl);
983 982 }
984 983
985 984 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
986 985 for (int i = 0; i < _n_gens; i++) {
987 986 _gens[i]->safe_object_iterate(cl);
988 987 }
989 988 perm_gen()->safe_object_iterate(cl);
990 989 }
991 990
992 991 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
993 992 for (int i = 0; i < _n_gens; i++) {
994 993 _gens[i]->object_iterate_since_last_GC(cl);
995 994 }
996 995 }
997 996
998 997 Space* GenCollectedHeap::space_containing(const void* addr) const {
999 998 for (int i = 0; i < _n_gens; i++) {
1000 999 Space* res = _gens[i]->space_containing(addr);
1001 1000 if (res != NULL) return res;
1002 1001 }
1003 1002 Space* res = perm_gen()->space_containing(addr);
1004 1003 if (res != NULL) return res;
1005 1004 // Otherwise...
1006 1005 assert(false, "Could not find containing space");
1007 1006 return NULL;
1008 1007 }
1009 1008
1010 1009
1011 1010 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
1012 1011 assert(is_in_reserved(addr), "block_start of address outside of heap");
1013 1012 for (int i = 0; i < _n_gens; i++) {
1014 1013 if (_gens[i]->is_in_reserved(addr)) {
1015 1014 assert(_gens[i]->is_in(addr),
1016 1015 "addr should be in allocated part of generation");
1017 1016 return _gens[i]->block_start(addr);
1018 1017 }
1019 1018 }
1020 1019 if (perm_gen()->is_in_reserved(addr)) {
1021 1020 assert(perm_gen()->is_in(addr),
1022 1021 "addr should be in allocated part of perm gen");
1023 1022 return perm_gen()->block_start(addr);
1024 1023 }
1025 1024 assert(false, "Some generation should contain the address");
1026 1025 return NULL;
1027 1026 }
1028 1027
1029 1028 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
1030 1029 assert(is_in_reserved(addr), "block_size of address outside of heap");
1031 1030 for (int i = 0; i < _n_gens; i++) {
1032 1031 if (_gens[i]->is_in_reserved(addr)) {
1033 1032 assert(_gens[i]->is_in(addr),
1034 1033 "addr should be in allocated part of generation");
1035 1034 return _gens[i]->block_size(addr);
1036 1035 }
1037 1036 }
1038 1037 if (perm_gen()->is_in_reserved(addr)) {
1039 1038 assert(perm_gen()->is_in(addr),
1040 1039 "addr should be in allocated part of perm gen");
1041 1040 return perm_gen()->block_size(addr);
1042 1041 }
1043 1042 assert(false, "Some generation should contain the address");
1044 1043 return 0;
1045 1044 }
1046 1045
1047 1046 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
1048 1047 assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
1049 1048 assert(block_start(addr) == addr, "addr must be a block start");
1050 1049 for (int i = 0; i < _n_gens; i++) {
1051 1050 if (_gens[i]->is_in_reserved(addr)) {
1052 1051 return _gens[i]->block_is_obj(addr);
1053 1052 }
1054 1053 }
1055 1054 if (perm_gen()->is_in_reserved(addr)) {
1056 1055 return perm_gen()->block_is_obj(addr);
1057 1056 }
1058 1057 assert(false, "Some generation should contain the address");
1059 1058 return false;
1060 1059 }
1061 1060
1062 1061 bool GenCollectedHeap::supports_tlab_allocation() const {
1063 1062 for (int i = 0; i < _n_gens; i += 1) {
1064 1063 if (_gens[i]->supports_tlab_allocation()) {
1065 1064 return true;
1066 1065 }
1067 1066 }
1068 1067 return false;
1069 1068 }
1070 1069
1071 1070 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
1072 1071 size_t result = 0;
1073 1072 for (int i = 0; i < _n_gens; i += 1) {
1074 1073 if (_gens[i]->supports_tlab_allocation()) {
1075 1074 result += _gens[i]->tlab_capacity();
1076 1075 }
1077 1076 }
1078 1077 return result;
1079 1078 }
1080 1079
1081 1080 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
1082 1081 size_t result = 0;
1083 1082 for (int i = 0; i < _n_gens; i += 1) {
1084 1083 if (_gens[i]->supports_tlab_allocation()) {
1085 1084 result += _gens[i]->unsafe_max_tlab_alloc();
1086 1085 }
1087 1086 }
1088 1087 return result;
1089 1088 }
1090 1089
1091 1090 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
1092 1091 bool gc_overhead_limit_was_exceeded;
1093 1092 return collector_policy()->mem_allocate_work(size /* size */,
1094 1093 true /* is_tlab */,
1095 1094 &gc_overhead_limit_was_exceeded);
1096 1095 }
1097 1096
1098 1097 // Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of minimal size
1099 1098 // from the list headed by "*prev_ptr".
1100 1099 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
1101 1100 bool first = true;
1102 1101 size_t min_size = 0; // "first" makes this conceptually infinite.
1103 1102 ScratchBlock **smallest_ptr, *smallest;
1104 1103 ScratchBlock *cur = *prev_ptr;
1105 1104 while (cur) {
1106 1105 assert(*prev_ptr == cur, "just checking");
1107 1106 if (first || cur->num_words < min_size) {
1108 1107 smallest_ptr = prev_ptr;
1109 1108 smallest = cur;
1110 1109 min_size = smallest->num_words;
1111 1110 first = false;
1112 1111 }
1113 1112 prev_ptr = &cur->next;
1114 1113 cur = cur->next;
1115 1114 }
1116 1115 smallest = *smallest_ptr;
1117 1116 *smallest_ptr = smallest->next;
1118 1117 return smallest;
1119 1118 }
1120 1119
1121 1120 // Sort the scratch block list headed by "list" into decreasing size order,
1122 1121 // and set "list" to the result.
1123 1122 static void sort_scratch_list(ScratchBlock*& list) {
1124 1123 ScratchBlock* sorted = NULL;
1125 1124 ScratchBlock* unsorted = list;
1126 1125 while (unsorted) {
1127 1126 ScratchBlock *smallest = removeSmallestScratch(&unsorted);
1128 1127 smallest->next = sorted;
1129 1128 sorted = smallest;
1130 1129 }
1131 1130 list = sorted;
1132 1131 }
1133 1132
1134 1133 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
1135 1134 size_t max_alloc_words) {
1136 1135 ScratchBlock* res = NULL;
1137 1136 for (int i = 0; i < _n_gens; i++) {
1138 1137 _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
1139 1138 }
1140 1139 sort_scratch_list(res);
1141 1140 return res;
1142 1141 }
1143 1142
1144 1143 void GenCollectedHeap::release_scratch() {
1145 1144 for (int i = 0; i < _n_gens; i++) {
1146 1145 _gens[i]->reset_scratch();
1147 1146 }
1148 1147 }
1149 1148
1150 1149 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1151 1150 void do_generation(Generation* gen) {
1152 1151 gen->prepare_for_verify();
1153 1152 }
1154 1153 };
1155 1154
1156 1155 void GenCollectedHeap::prepare_for_verify() {
1157 1156 ensure_parsability(false); // no need to retire TLABs
1158 1157 GenPrepareForVerifyClosure blk;
1159 1158 generation_iterate(&blk, false);
1160 1159 perm_gen()->prepare_for_verify();
1161 1160 }
1162 1161
1163 1162
1164 1163 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1165 1164 bool old_to_young) {
1166 1165 if (old_to_young) {
1167 1166 for (int i = _n_gens-1; i >= 0; i--) {
1168 1167 cl->do_generation(_gens[i]);
1169 1168 }
1170 1169 } else {
1171 1170 for (int i = 0; i < _n_gens; i++) {
1172 1171 cl->do_generation(_gens[i]);
1173 1172 }
1174 1173 }
1175 1174 }
1176 1175
1177 1176 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1178 1177 for (int i = 0; i < _n_gens; i++) {
1179 1178 _gens[i]->space_iterate(cl, true);
1180 1179 }
1181 1180 perm_gen()->space_iterate(cl, true);
1182 1181 }
1183 1182
1184 1183 bool GenCollectedHeap::is_maximal_no_gc() const {
1185 1184 for (int i = 0; i < _n_gens; i++) { // skip perm gen
1186 1185 if (!_gens[i]->is_maximal_no_gc()) {
1187 1186 return false;
1188 1187 }
1189 1188 }
1190 1189 return true;
1191 1190 }
1192 1191
1193 1192 void GenCollectedHeap::save_marks() {
1194 1193 for (int i = 0; i < _n_gens; i++) {
1195 1194 _gens[i]->save_marks();
1196 1195 }
1197 1196 perm_gen()->save_marks();
1198 1197 }
1199 1198
1200 1199 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
1201 1200 for (int i = 0; i <= collectedGen; i++) {
1202 1201 _gens[i]->compute_new_size();
1203 1202 }
1204 1203 }
1205 1204
1206 1205 GenCollectedHeap* GenCollectedHeap::heap() {
1207 1206 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1208 1207 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1209 1208 return _gch;
1210 1209 }
1211 1210
1212 1211
1213 1212 void GenCollectedHeap::prepare_for_compaction() {
1214 1213 Generation* scanning_gen = _gens[_n_gens-1];
1215 1214 // Start by compacting into same gen.
1216 1215 CompactPoint cp(scanning_gen, NULL, NULL);
1217 1216 while (scanning_gen != NULL) {
1218 1217 scanning_gen->prepare_for_compaction(&cp);
1219 1218 scanning_gen = prev_gen(scanning_gen);
1220 1219 }
1221 1220 }
1222 1221
1223 1222 GCStats* GenCollectedHeap::gc_stats(int level) const {
1224 1223 return _gens[level]->gc_stats();
1225 1224 }
1226 1225
1227 1226 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1228 1227 if (!silent) {
1229 1228 gclog_or_tty->print("permgen ");
1230 1229 }
1231 1230 perm_gen()->verify();
1232 1231 for (int i = _n_gens-1; i >= 0; i--) {
1233 1232 Generation* g = _gens[i];
1234 1233 if (!silent) {
1235 1234 gclog_or_tty->print(g->name());
1236 1235 gclog_or_tty->print(" ");
1237 1236 }
1238 1237 g->verify();
1239 1238 }
1240 1239 if (!silent) {
1241 1240 gclog_or_tty->print("remset ");
1242 1241 }
1243 1242 rem_set()->verify();
1244 1243 }
1245 1244
1246 1245 void GenCollectedHeap::print_on(outputStream* st) const {
1247 1246 for (int i = 0; i < _n_gens; i++) {
1248 1247 _gens[i]->print_on(st);
1249 1248 }
1250 1249 perm_gen()->print_on(st);
1251 1250 }
1252 1251
1253 1252 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1254 1253 if (workers() != NULL) {
1255 1254 workers()->threads_do(tc);
1256 1255 }
1257 1256 #ifndef SERIALGC
1258 1257 if (UseConcMarkSweepGC) {
1259 1258 ConcurrentMarkSweepThread::threads_do(tc);
1260 1259 }
1261 1260 #endif // SERIALGC
1262 1261 }
1263 1262
1264 1263 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1265 1264 #ifndef SERIALGC
1266 1265 if (UseParNewGC) {
1267 1266 workers()->print_worker_threads_on(st);
1268 1267 }
1269 1268 if (UseConcMarkSweepGC) {
1270 1269 ConcurrentMarkSweepThread::print_all_on(st);
1271 1270 }
1272 1271 #endif // SERIALGC
1273 1272 }
1274 1273
1275 1274 void GenCollectedHeap::print_tracing_info() const {
1276 1275 if (TraceGen0Time) {
1277 1276 get_gen(0)->print_summary_info();
1278 1277 }
1279 1278 if (TraceGen1Time) {
1280 1279 get_gen(1)->print_summary_info();
1281 1280 }
1282 1281 }
1283 1282
1284 1283 void GenCollectedHeap::print_heap_change(size_t prev_used) const {
1285 1284 if (PrintGCDetails && Verbose) {
1286 1285 gclog_or_tty->print(" " SIZE_FORMAT
1287 1286 "->" SIZE_FORMAT
1288 1287 "(" SIZE_FORMAT ")",
1289 1288 prev_used, used(), capacity());
1290 1289 } else {
1291 1290 gclog_or_tty->print(" " SIZE_FORMAT "K"
1292 1291 "->" SIZE_FORMAT "K"
1293 1292 "(" SIZE_FORMAT "K)",
1294 1293 prev_used / K, used() / K, capacity() / K);
1295 1294 }
1296 1295 }
1297 1296
1298 1297 // New method to print perm gen info with PrintGCDetails flag
1299 1298 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
1300 1299 gclog_or_tty->print(", [%s :", perm_gen()->short_name());
1301 1300 perm_gen()->print_heap_change(perm_prev_used);
1302 1301 gclog_or_tty->print("]");
1303 1302 }
1304 1303
1305 1304 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1306 1305 private:
1307 1306 bool _full;
1308 1307 public:
1309 1308 void do_generation(Generation* gen) {
1310 1309 gen->gc_prologue(_full);
1311 1310 }
1312 1311 GenGCPrologueClosure(bool full) : _full(full) {};
1313 1312 };
1314 1313
1315 1314 void GenCollectedHeap::gc_prologue(bool full) {
1316 1315 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1317 1316
1318 1317 always_do_update_barrier = false;
1319 1318 // Fill TLAB's and such
1320 1319 CollectedHeap::accumulate_statistics_all_tlabs();
1321 1320 ensure_parsability(true); // retire TLABs
1322 1321
1323 1322 // Call allocation profiler
1324 1323 AllocationProfiler::iterate_since_last_gc();
1325 1324 // Walk generations
1326 1325 GenGCPrologueClosure blk(full);
1327 1326 generation_iterate(&blk, false); // not old-to-young.
1328 1327 perm_gen()->gc_prologue(full);
1329 1328 };
1330 1329
1331 1330 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1332 1331 private:
1333 1332 bool _full;
1334 1333 public:
1335 1334 void do_generation(Generation* gen) {
1336 1335 gen->gc_epilogue(_full);
1337 1336 }
1338 1337 GenGCEpilogueClosure(bool full) : _full(full) {};
1339 1338 };
1340 1339
1341 1340 void GenCollectedHeap::gc_epilogue(bool full) {
1342 1341 #ifdef COMPILER2
1343 1342 assert(DerivedPointerTable::is_empty(), "derived pointer present");
1344 1343 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1345 1344 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1346 1345 #endif /* COMPILER2 */
1347 1346
1348 1347 resize_all_tlabs();
1349 1348
1350 1349 GenGCEpilogueClosure blk(full);
1351 1350 generation_iterate(&blk, false); // not old-to-young.
1352 1351 perm_gen()->gc_epilogue(full);
1353 1352
1354 1353 if (!CleanChunkPoolAsync) {
1355 1354 Chunk::clean_chunk_pool();
1356 1355 }
1357 1356
1358 1357 always_do_update_barrier = UseConcMarkSweepGC;
1359 1358 };
1360 1359
1361 1360 #ifndef PRODUCT
1362 1361 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1363 1362 private:
1364 1363 public:
1365 1364 void do_generation(Generation* gen) {
1366 1365 gen->record_spaces_top();
1367 1366 }
1368 1367 };
1369 1368
1370 1369 void GenCollectedHeap::record_gen_tops_before_GC() {
1371 1370 if (ZapUnusedHeapArea) {
1372 1371 GenGCSaveTopsBeforeGCClosure blk;
1373 1372 generation_iterate(&blk, false); // not old-to-young.
1374 1373 perm_gen()->record_spaces_top();
1375 1374 }
1376 1375 }
1377 1376 #endif // not PRODUCT
1378 1377
1379 1378 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1380 1379 public:
1381 1380 void do_generation(Generation* gen) {
1382 1381 gen->ensure_parsability();
1383 1382 }
1384 1383 };
1385 1384
1386 1385 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1387 1386 CollectedHeap::ensure_parsability(retire_tlabs);
1388 1387 GenEnsureParsabilityClosure ep_cl;
1389 1388 generation_iterate(&ep_cl, false);
1390 1389 perm_gen()->ensure_parsability();
1391 1390 }
1392 1391
1393 1392 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
1394 1393 oop obj,
1395 1394 size_t obj_size) {
1396 1395 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1397 1396 HeapWord* result = NULL;
1398 1397
1399 1398 // First give each higher generation a chance to allocate the promoted object.
1400 1399 Generation* allocator = next_gen(gen);
1401 1400 if (allocator != NULL) {
1402 1401 do {
1403 1402 result = allocator->allocate(obj_size, false);
1404 1403 } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
1405 1404 }
1406 1405
1407 1406 if (result == NULL) {
1408 1407 // Then give gen and higher generations a chance to expand and allocate the
1409 1408 // object.
1410 1409 do {
1411 1410 result = gen->expand_and_allocate(obj_size, false);
1412 1411 } while (result == NULL && (gen = next_gen(gen)) != NULL);
1413 1412 }
1414 1413
1415 1414 if (result != NULL) {
1416 1415 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1417 1416 }
1418 1417 return oop(result);
1419 1418 }
1420 1419
1421 1420 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1422 1421 jlong _time; // in ms
1423 1422 jlong _now; // in ms
1424 1423
1425 1424 public:
1426 1425 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1427 1426
1428 1427 jlong time() { return _time; }
1429 1428
1430 1429 void do_generation(Generation* gen) {
1431 1430 _time = MIN2(_time, gen->time_of_last_gc(_now));
1432 1431 }
1433 1432 };
1434 1433
1435 1434 jlong GenCollectedHeap::millis_since_last_gc() {
1436 1435 // We need a monotonically non-decreasing time in ms but
1437 1436 // os::javaTimeMillis() does not guarantee monotonicity.
1438 1437 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1439 1438 GenTimeOfLastGCClosure tolgc_cl(now);
1440 1439 // iterate over generations getting the oldest
1441 1440 // time that a generation was collected
1442 1441 generation_iterate(&tolgc_cl, false);
1443 1442 tolgc_cl.do_generation(perm_gen());
1444 1443
1445 1444 // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1446 1445 // provided the underlying platform provides such a time source
1447 1446 // (and it is bug free). So we still have to guard against getting
1448 1447 // back a time later than 'now'.
1449 1448 jlong retVal = now - tolgc_cl.time();
1450 1449 if (retVal < 0) {
1451 1450 NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
1452 1451 return 0;
1453 1452 }
1454 1453 return retVal;
1455 1454 }
(965 lines elided)