rev 2870 : 7117303: VM uses non-monotonic time source and complains that it is non-monotonic
Summary: Replaces calls to os::javaTimeMillis(), which does not guarantee monotonicity, in GC code with calls to os::javaTimeNanos() and a suitable conversion factor. os::javaTimeNanos() is guaranteed monotonic if the underlying platform provides a monotonic time source. The changes in the OS files make use of the newly defined constants in globalDefinitions.hpp.
Reviewed-by: dholmes
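
The conversion pattern applied throughout the changeset is sketched below. The constant declarations mirror what the summary says was added to globalDefinitions.hpp (treat the exact declarations as assumed from that description); the helper function and its name are illustrative only and are not part of this webrev.

    // Time-unit constants as described in the summary (assumed
    // declarations; see globalDefinitions.hpp in the changeset).
    const jlong NANOSECS_PER_SEC      = CONST64(1000000000);
    const jint  NANOSECS_PER_MILLISEC = 1000000;

    // Hypothetical helper: derive a monotonic millisecond reading.
    // os::javaTimeNanos() is monotonic whenever the platform supplies
    // a monotonic time source, which os::javaTimeMillis() is not.
    static jlong monotonic_millis() {
      return os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
    }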
--- old/src/share/vm/memory/genCollectedHeap.cpp
+++ new/src/share/vm/memory/genCollectedHeap.cpp
1 1 /*
2 2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/symbolTable.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "gc_implementation/shared/collectorCounters.hpp"
31 31 #include "gc_implementation/shared/vmGCOperations.hpp"
32 32 #include "gc_interface/collectedHeap.inline.hpp"
33 33 #include "memory/compactPermGen.hpp"
34 34 #include "memory/filemap.hpp"
35 35 #include "memory/gcLocker.inline.hpp"
36 36 #include "memory/genCollectedHeap.hpp"
37 37 #include "memory/genOopClosures.inline.hpp"
38 38 #include "memory/generation.inline.hpp"
39 39 #include "memory/generationSpec.hpp"
40 40 #include "memory/permGen.hpp"
41 41 #include "memory/resourceArea.hpp"
42 42 #include "memory/sharedHeap.hpp"
43 43 #include "memory/space.hpp"
44 44 #include "oops/oop.inline.hpp"
45 45 #include "oops/oop.inline2.hpp"
46 46 #include "runtime/aprofiler.hpp"
47 47 #include "runtime/biasedLocking.hpp"
48 48 #include "runtime/fprofiler.hpp"
49 49 #include "runtime/handles.hpp"
50 50 #include "runtime/handles.inline.hpp"
51 51 #include "runtime/java.hpp"
52 52 #include "runtime/vmThread.hpp"
53 53 #include "services/memoryService.hpp"
54 54 #include "utilities/vmError.hpp"
55 55 #include "utilities/workgroup.hpp"
56 56 #ifndef SERIALGC
57 57 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
58 58 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
59 59 #endif
60 60
61 61 GenCollectedHeap* GenCollectedHeap::_gch;
62 62 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
63 63
64 64 // The set of potentially parallel tasks in strong root scanning.
65 65 enum GCH_process_strong_roots_tasks {
66 66 // We probably want to parallelize both of these internally, but for now...
67 67 GCH_PS_younger_gens,
68 68 // Leave this one last.
69 69 GCH_PS_NumElements
70 70 };
71 71
72 72 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
73 73 SharedHeap(policy),
74 74 _gen_policy(policy),
75 75 _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
76 76 _full_collections_completed(0)
77 77 {
78 78 if (_gen_process_strong_tasks == NULL ||
79 79 !_gen_process_strong_tasks->valid()) {
80 80 vm_exit_during_initialization("Failed necessary allocation.");
81 81 }
82 82 assert(policy != NULL, "Sanity check");
83 83 _preloading_shared_classes = false;
84 84 }
85 85
86 86 jint GenCollectedHeap::initialize() {
87 87 CollectedHeap::pre_initialize();
88 88
89 89 int i;
90 90 _n_gens = gen_policy()->number_of_generations();
91 91
92 92 // While there are no constraints in the GC code that HeapWordSize
93 93 // be any particular value, there are multiple other areas in the
94 94 // system which believe this to be true (e.g. oop->object_size in some
95 95 // cases incorrectly returns the size in wordSize units rather than
96 96 // HeapWordSize).
97 97 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
98 98
99 99 // The heap must be at least as aligned as generations.
100 100 size_t alignment = Generation::GenGrain;
101 101
102 102 _gen_specs = gen_policy()->generations();
103 103 PermanentGenerationSpec *perm_gen_spec =
104 104 collector_policy()->permanent_generation();
105 105
106 106 // Make sure the sizes are all aligned.
107 107 for (i = 0; i < _n_gens; i++) {
108 108 _gen_specs[i]->align(alignment);
109 109 }
110 110 perm_gen_spec->align(alignment);
111 111
112 112 // If we are dumping the heap, then allocate a wasted block of address
113 113 // space in order to push the heap to a lower address. This extra
114 114 // address range allows for other (or larger) libraries to be loaded
115 115 // without them occupying the space required for the shared spaces.
116 116
117 117 if (DumpSharedSpaces) {
118 118 uintx reserved = 0;
119 119 uintx block_size = 64*1024*1024;
120 120 while (reserved < SharedDummyBlockSize) {
121 121 char* dummy = os::reserve_memory(block_size);
122 122 reserved += block_size;
123 123 }
124 124 }
125 125
126 126 // Allocate space for the heap.
127 127
128 128 char* heap_address;
129 129 size_t total_reserved = 0;
130 130 int n_covered_regions = 0;
131 131 ReservedSpace heap_rs(0);
132 132
133 133 heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
134 134 &n_covered_regions, &heap_rs);
135 135
136 136 if (UseSharedSpaces) {
137 137 if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
138 138 if (heap_rs.is_reserved()) {
139 139 heap_rs.release();
140 140 }
141 141 FileMapInfo* mapinfo = FileMapInfo::current_info();
142 142 mapinfo->fail_continue("Unable to reserve shared region.");
143 143 allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
144 144 &heap_rs);
145 145 }
146 146 }
147 147
148 148 if (!heap_rs.is_reserved()) {
149 149 vm_shutdown_during_initialization(
150 150 "Could not reserve enough space for object heap");
151 151 return JNI_ENOMEM;
152 152 }
153 153
154 154 _reserved = MemRegion((HeapWord*)heap_rs.base(),
155 155 (HeapWord*)(heap_rs.base() + heap_rs.size()));
156 156
157 157 // It is important to do this in a way such that concurrent readers can't
 158 158    // temporarily think something's in the heap.  (Seen this happen in asserts.)
159 159 _reserved.set_word_size(0);
160 160 _reserved.set_start((HeapWord*)heap_rs.base());
161 161 size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
162 162 - perm_gen_spec->misc_code_size();
163 163 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
164 164
165 165 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
166 166 set_barrier_set(rem_set()->bs());
167 167
168 168 _gch = this;
169 169
170 170 for (i = 0; i < _n_gens; i++) {
171 171 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
172 172 UseSharedSpaces, UseSharedSpaces);
173 173 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
174 174 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
175 175 }
176 176 _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
177 177
178 178 clear_incremental_collection_failed();
179 179
180 180 #ifndef SERIALGC
181 181 // If we are running CMS, create the collector responsible
182 182 // for collecting the CMS generations.
183 183 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
184 184 bool success = create_cms_collector();
185 185 if (!success) return JNI_ENOMEM;
186 186 }
187 187 #endif // SERIALGC
188 188
189 189 return JNI_OK;
190 190 }
191 191
192 192
193 193 char* GenCollectedHeap::allocate(size_t alignment,
194 194 PermanentGenerationSpec* perm_gen_spec,
195 195 size_t* _total_reserved,
196 196 int* _n_covered_regions,
197 197 ReservedSpace* heap_rs){
198 198 const char overflow_msg[] = "The size of the object heap + VM data exceeds "
199 199 "the maximum representable size";
200 200
201 201 // Now figure out the total size.
202 202 size_t total_reserved = 0;
203 203 int n_covered_regions = 0;
204 204 const size_t pageSize = UseLargePages ?
205 205 os::large_page_size() : os::vm_page_size();
206 206
207 207 for (int i = 0; i < _n_gens; i++) {
208 208 total_reserved += _gen_specs[i]->max_size();
209 209 if (total_reserved < _gen_specs[i]->max_size()) {
210 210 vm_exit_during_initialization(overflow_msg);
211 211 }
212 212 n_covered_regions += _gen_specs[i]->n_covered_regions();
213 213 }
214 214 assert(total_reserved % pageSize == 0,
215 215 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
216 216 SIZE_FORMAT, total_reserved, pageSize));
217 217 total_reserved += perm_gen_spec->max_size();
218 218 assert(total_reserved % pageSize == 0,
219 219 err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
220 220 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
221 221 pageSize, perm_gen_spec->max_size()));
222 222
223 223 if (total_reserved < perm_gen_spec->max_size()) {
224 224 vm_exit_during_initialization(overflow_msg);
225 225 }
226 226 n_covered_regions += perm_gen_spec->n_covered_regions();
227 227
228 228 // Add the size of the data area which shares the same reserved area
229 229 // as the heap, but which is not actually part of the heap.
230 230 size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
231 231
232 232 total_reserved += s;
233 233 if (total_reserved < s) {
234 234 vm_exit_during_initialization(overflow_msg);
235 235 }
236 236
237 237 if (UseLargePages) {
238 238 assert(total_reserved != 0, "total_reserved cannot be 0");
239 239 total_reserved = round_to(total_reserved, os::large_page_size());
240 240 if (total_reserved < os::large_page_size()) {
241 241 vm_exit_during_initialization(overflow_msg);
242 242 }
243 243 }
244 244
245 245 // Calculate the address at which the heap must reside in order for
246 246 // the shared data to be at the required address.
247 247
248 248 char* heap_address;
249 249 if (UseSharedSpaces) {
250 250
251 251 // Calculate the address of the first word beyond the heap.
252 252 FileMapInfo* mapinfo = FileMapInfo::current_info();
253 253 int lr = CompactingPermGenGen::n_regions - 1;
254 254 size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
255 255 heap_address = mapinfo->region_base(lr) + capacity;
256 256
257 257 // Calculate the address of the first word of the heap.
258 258 heap_address -= total_reserved;
259 259 } else {
260 260 heap_address = NULL; // any address will do.
261 261 if (UseCompressedOops) {
262 262 heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
263 263 *_total_reserved = total_reserved;
264 264 *_n_covered_regions = n_covered_regions;
265 265 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
266 266 UseLargePages, heap_address);
267 267
268 268 if (heap_address != NULL && !heap_rs->is_reserved()) {
269 269 // Failed to reserve at specified address - the requested memory
270 270 // region is taken already, for example, by 'java' launcher.
 271 271        // Try again to reserve the heap higher.
272 272 heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
273 273 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
274 274 UseLargePages, heap_address);
275 275
276 276 if (heap_address != NULL && !heap_rs->is_reserved()) {
277 277 // Failed to reserve at specified address again - give up.
278 278 heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
279 279 assert(heap_address == NULL, "");
280 280 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
281 281 UseLargePages, heap_address);
282 282 }
283 283 }
284 284 return heap_address;
285 285 }
286 286 }
287 287
288 288 *_total_reserved = total_reserved;
289 289 *_n_covered_regions = n_covered_regions;
290 290 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
291 291 UseLargePages, heap_address);
292 292
293 293 return heap_address;
294 294 }
295 295
296 296
297 297 void GenCollectedHeap::post_initialize() {
298 298 SharedHeap::post_initialize();
299 299 TwoGenerationCollectorPolicy *policy =
300 300 (TwoGenerationCollectorPolicy *)collector_policy();
301 301 guarantee(policy->is_two_generation_policy(), "Illegal policy type");
302 302 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
303 303 assert(def_new_gen->kind() == Generation::DefNew ||
304 304 def_new_gen->kind() == Generation::ParNew ||
305 305 def_new_gen->kind() == Generation::ASParNew,
306 306 "Wrong generation kind");
307 307
308 308 Generation* old_gen = get_gen(1);
309 309 assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
310 310 old_gen->kind() == Generation::ASConcurrentMarkSweep ||
311 311 old_gen->kind() == Generation::MarkSweepCompact,
312 312 "Wrong generation kind");
313 313
314 314 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
315 315 old_gen->capacity(),
316 316 def_new_gen->from()->capacity());
317 317 policy->initialize_gc_policy_counters();
318 318 }
319 319
320 320 void GenCollectedHeap::ref_processing_init() {
321 321 SharedHeap::ref_processing_init();
322 322 for (int i = 0; i < _n_gens; i++) {
323 323 _gens[i]->ref_processor_init();
324 324 }
325 325 }
326 326
327 327 size_t GenCollectedHeap::capacity() const {
328 328 size_t res = 0;
329 329 for (int i = 0; i < _n_gens; i++) {
330 330 res += _gens[i]->capacity();
331 331 }
332 332 return res;
333 333 }
334 334
335 335 size_t GenCollectedHeap::used() const {
336 336 size_t res = 0;
337 337 for (int i = 0; i < _n_gens; i++) {
338 338 res += _gens[i]->used();
339 339 }
340 340 return res;
341 341 }
342 342
 343 343  // Save the "used_region" for generations at level and lower,
344 344 // and, if perm is true, for perm gen.
345 345 void GenCollectedHeap::save_used_regions(int level, bool perm) {
346 346 assert(level < _n_gens, "Illegal level parameter");
347 347 for (int i = level; i >= 0; i--) {
348 348 _gens[i]->save_used_region();
349 349 }
350 350 if (perm) {
351 351 perm_gen()->save_used_region();
352 352 }
353 353 }
354 354
355 355 size_t GenCollectedHeap::max_capacity() const {
356 356 size_t res = 0;
357 357 for (int i = 0; i < _n_gens; i++) {
358 358 res += _gens[i]->max_capacity();
359 359 }
360 360 return res;
361 361 }
362 362
363 363 // Update the _full_collections_completed counter
364 364 // at the end of a stop-world full GC.
365 365 unsigned int GenCollectedHeap::update_full_collections_completed() {
366 366 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
367 367 assert(_full_collections_completed <= _total_full_collections,
368 368 "Can't complete more collections than were started");
369 369 _full_collections_completed = _total_full_collections;
370 370 ml.notify_all();
371 371 return _full_collections_completed;
372 372 }
373 373
374 374 // Update the _full_collections_completed counter, as appropriate,
375 375 // at the end of a concurrent GC cycle. Note the conditional update
376 376 // below to allow this method to be called by a concurrent collector
377 377 // without synchronizing in any manner with the VM thread (which
378 378 // may already have initiated a STW full collection "concurrently").
379 379 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
380 380 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
381 381 assert((_full_collections_completed <= _total_full_collections) &&
382 382 (count <= _total_full_collections),
383 383 "Can't complete more collections than were started");
384 384 if (count > _full_collections_completed) {
385 385 _full_collections_completed = count;
386 386 ml.notify_all();
387 387 }
388 388 return _full_collections_completed;
389 389 }
390 390
391 391
392 392 #ifndef PRODUCT
393 393 // Override of memory state checking method in CollectedHeap:
394 394 // Some collectors (CMS for example) can't have badHeapWordVal written
 395 395  // in the first two words of an object. (For instance, in the case of
396 396 // CMS these words hold state used to synchronize between certain
397 397 // (concurrent) GC steps and direct allocating mutators.)
 398 398  // The skip_header_HeapWords() method below allows us to skip
 399 399  // over the requisite number of HeapWords. Note that (for
400 400 // generational collectors) this means that those many words are
401 401 // skipped in each object, irrespective of the generation in which
402 402 // that object lives. The resultant loss of precision seems to be
403 403 // harmless and the pain of avoiding that imprecision appears somewhat
404 404 // higher than we are prepared to pay for such rudimentary debugging
405 405 // support.
406 406 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
407 407 size_t size) {
408 408 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
409 409 // We are asked to check a size in HeapWords,
410 410 // but the memory is mangled in juint words.
411 411 juint* start = (juint*) (addr + skip_header_HeapWords());
412 412 juint* end = (juint*) (addr + size);
413 413 for (juint* slot = start; slot < end; slot += 1) {
414 414 assert(*slot == badHeapWordVal,
415 415 "Found non badHeapWordValue in pre-allocation check");
416 416 }
417 417 }
418 418 }
419 419 #endif
420 420
421 421 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
422 422 bool is_tlab,
423 423 bool first_only) {
424 424 HeapWord* res;
425 425 for (int i = 0; i < _n_gens; i++) {
426 426 if (_gens[i]->should_allocate(size, is_tlab)) {
427 427 res = _gens[i]->allocate(size, is_tlab);
428 428 if (res != NULL) return res;
429 429 else if (first_only) break;
430 430 }
431 431 }
432 432 // Otherwise...
433 433 return NULL;
434 434 }
435 435
436 436 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
437 437 bool* gc_overhead_limit_was_exceeded) {
438 438 return collector_policy()->mem_allocate_work(size,
439 439 false /* is_tlab */,
440 440 gc_overhead_limit_was_exceeded);
441 441 }
442 442
443 443 bool GenCollectedHeap::must_clear_all_soft_refs() {
444 444 return _gc_cause == GCCause::_last_ditch_collection;
445 445 }
446 446
447 447 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
448 448 return UseConcMarkSweepGC &&
449 449 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
450 450 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
451 451 }
452 452
453 453 void GenCollectedHeap::do_collection(bool full,
454 454 bool clear_all_soft_refs,
455 455 size_t size,
456 456 bool is_tlab,
457 457 int max_level) {
458 458 bool prepared_for_verification = false;
459 459 ResourceMark rm;
460 460 DEBUG_ONLY(Thread* my_thread = Thread::current();)
461 461
462 462 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
463 463 assert(my_thread->is_VM_thread() ||
464 464 my_thread->is_ConcurrentGC_thread(),
465 465 "incorrect thread type capability");
466 466 assert(Heap_lock->is_locked(),
467 467 "the requesting thread should have the Heap_lock");
468 468 guarantee(!is_gc_active(), "collection is not reentrant");
469 469 assert(max_level < n_gens(), "sanity check");
470 470
471 471 if (GC_locker::check_active_before_gc()) {
472 472 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
473 473 }
474 474
475 475 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
476 476 collector_policy()->should_clear_all_soft_refs();
477 477
478 478 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
479 479
480 480 const size_t perm_prev_used = perm_gen()->used();
481 481
482 482 if (PrintHeapAtGC) {
483 483 Universe::print_heap_before_gc();
484 484 if (Verbose) {
485 485 gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
486 486 }
487 487 }
488 488
489 489 {
490 490 FlagSetting fl(_is_gc_active, true);
491 491
492 492 bool complete = full && (max_level == (n_gens()-1));
493 493 const char* gc_cause_str = "GC ";
494 494 if (complete) {
495 495 GCCause::Cause cause = gc_cause();
496 496 if (cause == GCCause::_java_lang_system_gc) {
497 497 gc_cause_str = "Full GC (System) ";
498 498 } else {
499 499 gc_cause_str = "Full GC ";
500 500 }
501 501 }
502 502 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
503 503 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
504 504 TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);
505 505
506 506 gc_prologue(complete);
507 507 increment_total_collections(complete);
508 508
509 509 size_t gch_prev_used = used();
510 510
511 511 int starting_level = 0;
512 512 if (full) {
513 513 // Search for the oldest generation which will collect all younger
514 514 // generations, and start collection loop there.
515 515 for (int i = max_level; i >= 0; i--) {
516 516 if (_gens[i]->full_collects_younger_generations()) {
517 517 starting_level = i;
518 518 break;
519 519 }
520 520 }
521 521 }
522 522
523 523 bool must_restore_marks_for_biased_locking = false;
524 524
525 525 int max_level_collected = starting_level;
526 526 for (int i = starting_level; i <= max_level; i++) {
527 527 if (_gens[i]->should_collect(full, size, is_tlab)) {
528 528 if (i == n_gens() - 1) { // a major collection is to happen
529 529 if (!complete) {
530 530 // The full_collections increment was missed above.
531 531 increment_total_full_collections();
532 532 }
533 533 pre_full_gc_dump(); // do any pre full gc dumps
534 534 }
535 535 // Timer for individual generations. Last argument is false: no CR
536 536 TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
537 537 TraceCollectorStats tcs(_gens[i]->counters());
538 538 TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
539 539
540 540 size_t prev_used = _gens[i]->used();
541 541 _gens[i]->stat_record()->invocations++;
542 542 _gens[i]->stat_record()->accumulated_time.start();
543 543
544 544 // Must be done anew before each collection because
545 545 // a previous collection will do mangling and will
546 546 // change top of some spaces.
547 547 record_gen_tops_before_GC();
548 548
549 549 if (PrintGC && Verbose) {
550 550 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
551 551 i,
552 552 _gens[i]->stat_record()->invocations,
553 553 size*HeapWordSize);
554 554 }
555 555
556 556 if (VerifyBeforeGC && i >= VerifyGCLevel &&
557 557 total_collections() >= VerifyGCStartAt) {
558 558 HandleMark hm; // Discard invalid handles created during verification
559 559 if (!prepared_for_verification) {
560 560 prepare_for_verify();
561 561 prepared_for_verification = true;
562 562 }
563 563 gclog_or_tty->print(" VerifyBeforeGC:");
564 564 Universe::verify(true);
565 565 }
566 566 COMPILER2_PRESENT(DerivedPointerTable::clear());
567 567
568 568 if (!must_restore_marks_for_biased_locking &&
569 569 _gens[i]->performs_in_place_marking()) {
570 570 // We perform this mark word preservation work lazily
571 571 // because it's only at this point that we know whether we
572 572 // absolutely have to do it; we want to avoid doing it for
573 573 // scavenge-only collections where it's unnecessary
574 574 must_restore_marks_for_biased_locking = true;
575 575 BiasedLocking::preserve_marks();
576 576 }
577 577
578 578 // Do collection work
579 579 {
580 580 // Note on ref discovery: For what appear to be historical reasons,
 581 581        // GCH enables and disables (by enqueueing) refs discovery.
582 582 // In the future this should be moved into the generation's
583 583 // collect method so that ref discovery and enqueueing concerns
584 584 // are local to a generation. The collect method could return
585 585 // an appropriate indication in the case that notification on
586 586 // the ref lock was needed. This will make the treatment of
587 587 // weak refs more uniform (and indeed remove such concerns
588 588 // from GCH). XXX
589 589
590 590 HandleMark hm; // Discard invalid handles created during gc
591 591 save_marks(); // save marks for all gens
592 592 // We want to discover references, but not process them yet.
593 593 // This mode is disabled in process_discovered_references if the
594 594 // generation does some collection work, or in
595 595 // enqueue_discovered_references if the generation returns
596 596 // without doing any work.
597 597 ReferenceProcessor* rp = _gens[i]->ref_processor();
598 598 // If the discovery of ("weak") refs in this generation is
599 599 // atomic wrt other collectors in this configuration, we
600 600 // are guaranteed to have empty discovered ref lists.
601 601 if (rp->discovery_is_atomic()) {
602 602 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
603 603 rp->setup_policy(do_clear_all_soft_refs);
604 604 } else {
605 605 // collect() below will enable discovery as appropriate
606 606 }
607 607 _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
608 608 if (!rp->enqueuing_is_done()) {
609 609 rp->enqueue_discovered_references();
610 610 } else {
611 611 rp->set_enqueuing_is_done(false);
612 612 }
613 613 rp->verify_no_references_recorded();
614 614 }
615 615 max_level_collected = i;
616 616
617 617 // Determine if allocation request was met.
618 618 if (size > 0) {
619 619 if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
620 620 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
621 621 size = 0;
622 622 }
623 623 }
624 624 }
625 625
626 626 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
627 627
628 628 _gens[i]->stat_record()->accumulated_time.stop();
629 629
630 630 update_gc_stats(i, full);
631 631
632 632 if (VerifyAfterGC && i >= VerifyGCLevel &&
633 633 total_collections() >= VerifyGCStartAt) {
634 634 HandleMark hm; // Discard invalid handles created during verification
635 635 gclog_or_tty->print(" VerifyAfterGC:");
636 636 Universe::verify(false);
637 637 }
638 638
639 639 if (PrintGCDetails) {
640 640 gclog_or_tty->print(":");
641 641 _gens[i]->print_heap_change(prev_used);
642 642 }
643 643 }
644 644 }
645 645
646 646 // Update "complete" boolean wrt what actually transpired --
647 647 // for instance, a promotion failure could have led to
648 648 // a whole heap collection.
649 649 complete = complete || (max_level_collected == n_gens() - 1);
650 650
651 651 if (complete) { // We did a "major" collection
652 652 post_full_gc_dump(); // do any post full gc dumps
653 653 }
654 654
655 655 if (PrintGCDetails) {
656 656 print_heap_change(gch_prev_used);
657 657
658 658 // Print perm gen info for full GC with PrintGCDetails flag.
659 659 if (complete) {
660 660 print_perm_heap_change(perm_prev_used);
661 661 }
662 662 }
663 663
664 664 for (int j = max_level_collected; j >= 0; j -= 1) {
665 665 // Adjust generation sizes.
666 666 _gens[j]->compute_new_size();
667 667 }
668 668
669 669 if (complete) {
670 670 // Ask the permanent generation to adjust size for full collections
671 671 perm()->compute_new_size();
672 672 update_full_collections_completed();
673 673 }
674 674
675 675 // Track memory usage and detect low memory after GC finishes
676 676 MemoryService::track_memory_usage();
677 677
678 678 gc_epilogue(complete);
679 679
680 680 if (must_restore_marks_for_biased_locking) {
681 681 BiasedLocking::restore_marks();
682 682 }
683 683 }
684 684
685 685 AdaptiveSizePolicy* sp = gen_policy()->size_policy();
686 686 AdaptiveSizePolicyOutput(sp, total_collections());
687 687
688 688 if (PrintHeapAtGC) {
689 689 Universe::print_heap_after_gc();
690 690 }
691 691
692 692 #ifdef TRACESPINNING
693 693 ParallelTaskTerminator::print_termination_counts();
694 694 #endif
695 695
696 696 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
697 697 tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
698 698 vm_exit(-1);
699 699 }
700 700 }
701 701
702 702 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
703 703 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
704 704 }
705 705
706 706 void GenCollectedHeap::set_par_threads(int t) {
707 707 SharedHeap::set_par_threads(t);
708 708 _gen_process_strong_tasks->set_n_threads(t);
709 709 }
710 710
711 711 void GenCollectedHeap::
712 712 gen_process_strong_roots(int level,
713 713 bool younger_gens_as_roots,
714 714 bool activate_scope,
715 715 bool collecting_perm_gen,
716 716 SharedHeap::ScanningOption so,
717 717 OopsInGenClosure* not_older_gens,
718 718 bool do_code_roots,
719 719 OopsInGenClosure* older_gens) {
720 720 // General strong roots.
721 721
722 722 if (!do_code_roots) {
723 723 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
724 724 not_older_gens, NULL, older_gens);
725 725 } else {
726 726 bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
727 727 CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
728 728 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
729 729 not_older_gens, &code_roots, older_gens);
730 730 }
731 731
732 732 if (younger_gens_as_roots) {
733 733 if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
734 734 for (int i = 0; i < level; i++) {
735 735 not_older_gens->set_generation(_gens[i]);
736 736 _gens[i]->oop_iterate(not_older_gens);
737 737 }
738 738 not_older_gens->reset_generation();
739 739 }
740 740 }
741 741 // When collection is parallel, all threads get to cooperate to do
742 742 // older-gen scanning.
743 743 for (int i = level+1; i < _n_gens; i++) {
744 744 older_gens->set_generation(_gens[i]);
745 745 rem_set()->younger_refs_iterate(_gens[i], older_gens);
746 746 older_gens->reset_generation();
747 747 }
748 748
749 749 _gen_process_strong_tasks->all_tasks_completed();
750 750 }
751 751
752 752 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
753 753 CodeBlobClosure* code_roots,
754 754 OopClosure* non_root_closure) {
755 755 SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
756 756 // "Local" "weak" refs
757 757 for (int i = 0; i < _n_gens; i++) {
758 758 _gens[i]->ref_processor()->weak_oops_do(root_closure);
759 759 }
760 760 }
761 761
762 762 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
763 763 void GenCollectedHeap:: \
764 764 oop_since_save_marks_iterate(int level, \
765 765 OopClosureType* cur, \
766 766 OopClosureType* older) { \
767 767 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \
768 768 for (int i = level+1; i < n_gens(); i++) { \
769 769 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \
770 770 } \
771 771 perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \
772 772 }
773 773
774 774 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
775 775
776 776 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
777 777
778 778 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
779 779 for (int i = level; i < _n_gens; i++) {
780 780 if (!_gens[i]->no_allocs_since_save_marks()) return false;
781 781 }
782 782 return perm_gen()->no_allocs_since_save_marks();
783 783 }
784 784
785 785 bool GenCollectedHeap::supports_inline_contig_alloc() const {
786 786 return _gens[0]->supports_inline_contig_alloc();
787 787 }
788 788
789 789 HeapWord** GenCollectedHeap::top_addr() const {
790 790 return _gens[0]->top_addr();
791 791 }
792 792
793 793 HeapWord** GenCollectedHeap::end_addr() const {
794 794 return _gens[0]->end_addr();
795 795 }
796 796
797 797 size_t GenCollectedHeap::unsafe_max_alloc() {
798 798 return _gens[0]->unsafe_max_alloc_nogc();
799 799 }
800 800
801 801 // public collection interfaces
802 802
803 803 void GenCollectedHeap::collect(GCCause::Cause cause) {
804 804 if (should_do_concurrent_full_gc(cause)) {
805 805 #ifndef SERIALGC
806 806 // mostly concurrent full collection
807 807 collect_mostly_concurrent(cause);
808 808 #else // SERIALGC
809 809 ShouldNotReachHere();
810 810 #endif // SERIALGC
811 811 } else {
812 812 #ifdef ASSERT
813 813 if (cause == GCCause::_scavenge_alot) {
814 814 // minor collection only
815 815 collect(cause, 0);
816 816 } else {
817 817 // Stop-the-world full collection
818 818 collect(cause, n_gens() - 1);
819 819 }
820 820 #else
821 821 // Stop-the-world full collection
822 822 collect(cause, n_gens() - 1);
823 823 #endif
824 824 }
825 825 }
826 826
827 827 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
828 828 // The caller doesn't have the Heap_lock
829 829 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
830 830 MutexLocker ml(Heap_lock);
831 831 collect_locked(cause, max_level);
832 832 }
833 833
834 834 // This interface assumes that it's being called by the
835 835 // vm thread. It collects the heap assuming that the
836 836 // heap lock is already held and that we are executing in
837 837 // the context of the vm thread.
838 838 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
839 839 assert(Thread::current()->is_VM_thread(), "Precondition#1");
840 840 assert(Heap_lock->is_locked(), "Precondition#2");
841 841 GCCauseSetter gcs(this, cause);
842 842 switch (cause) {
843 843 case GCCause::_heap_inspection:
844 844 case GCCause::_heap_dump: {
845 845 HandleMark hm;
846 846 do_full_collection(false, // don't clear all soft refs
847 847 n_gens() - 1);
848 848 break;
849 849 }
850 850 default: // XXX FIX ME
851 851 ShouldNotReachHere(); // Unexpected use of this function
852 852 }
853 853 }
854 854
855 855 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
856 856 // The caller has the Heap_lock
857 857 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
858 858 collect_locked(cause, n_gens() - 1);
859 859 }
860 860
861 861 // this is the private collection interface
862 862 // The Heap_lock is expected to be held on entry.
863 863
864 864 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
865 865 if (_preloading_shared_classes) {
866 866 report_out_of_shared_space(SharedPermGen);
867 867 }
868 868 // Read the GC count while holding the Heap_lock
869 869 unsigned int gc_count_before = total_collections();
870 870 unsigned int full_gc_count_before = total_full_collections();
871 871 {
872 872 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
873 873 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
874 874 cause, max_level);
875 875 VMThread::execute(&op);
876 876 }
877 877 }
878 878
879 879 #ifndef SERIALGC
880 880 bool GenCollectedHeap::create_cms_collector() {
881 881
882 882 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
883 883 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
884 884 _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
885 885 "Unexpected generation kinds");
886 886 // Skip two header words in the block content verification
887 887 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
888 888 CMSCollector* collector = new CMSCollector(
889 889 (ConcurrentMarkSweepGeneration*)_gens[1],
890 890 (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
891 891 _rem_set->as_CardTableRS(),
892 892 (ConcurrentMarkSweepPolicy*) collector_policy());
893 893
894 894 if (collector == NULL || !collector->completed_initialization()) {
895 895 if (collector) {
896 896 delete collector; // Be nice in embedded situation
897 897 }
898 898 vm_shutdown_during_initialization("Could not create CMS collector");
899 899 return false;
900 900 }
901 901 return true; // success
902 902 }
903 903
904 904 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
905 905 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
906 906
907 907 MutexLocker ml(Heap_lock);
908 908 // Read the GC counts while holding the Heap_lock
909 909 unsigned int full_gc_count_before = total_full_collections();
910 910 unsigned int gc_count_before = total_collections();
911 911 {
912 912 MutexUnlocker mu(Heap_lock);
913 913 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
914 914 VMThread::execute(&op);
915 915 }
916 916 }
917 917 #endif // SERIALGC
918 918
919 919
920 920 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
921 921 int max_level) {
922 922 int local_max_level;
923 923 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
924 924 gc_cause() == GCCause::_gc_locker) {
925 925 local_max_level = 0;
926 926 } else {
927 927 local_max_level = max_level;
928 928 }
929 929
930 930 do_collection(true /* full */,
931 931 clear_all_soft_refs /* clear_all_soft_refs */,
932 932 0 /* size */,
933 933 false /* is_tlab */,
934 934 local_max_level /* max_level */);
935 935 // Hack XXX FIX ME !!!
936 936 // A scavenge may not have been attempted, or may have
937 937 // been attempted and failed, because the old gen was too full
938 938 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
939 939 incremental_collection_will_fail(false /* don't consult_young */)) {
940 940 if (PrintGCDetails) {
941 941 gclog_or_tty->print_cr("GC locker: Trying a full collection "
942 942 "because scavenge failed");
943 943 }
944 944 // This time allow the old gen to be collected as well
945 945 do_collection(true /* full */,
946 946 clear_all_soft_refs /* clear_all_soft_refs */,
947 947 0 /* size */,
948 948 false /* is_tlab */,
949 949 n_gens() - 1 /* max_level */);
950 950 }
951 951 }
952 952
953 953 bool GenCollectedHeap::is_in_young(oop p) {
954 954 bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
955 955 assert(result == _gens[0]->is_in_reserved(p),
956 956 err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
957 957 return result;
958 958 }
959 959
960 960 // Returns "TRUE" iff "p" points into the allocated area of the heap.
961 961 bool GenCollectedHeap::is_in(const void* p) const {
962 962 #ifndef ASSERT
963 963 guarantee(VerifyBeforeGC ||
964 964 VerifyDuringGC ||
965 965 VerifyBeforeExit ||
966 966 PrintAssembly ||
967 967 tty->count() != 0 || // already printing
968 968 VerifyAfterGC ||
969 969 VMError::fatal_error_in_progress(), "too expensive");
970 970
971 971 #endif
972 972 // This might be sped up with a cache of the last generation that
973 973 // answered yes.
974 974 for (int i = 0; i < _n_gens; i++) {
975 975 if (_gens[i]->is_in(p)) return true;
976 976 }
977 977 if (_perm_gen->as_gen()->is_in(p)) return true;
978 978 // Otherwise...
979 979 return false;
980 980 }
981 981
982 982 #ifdef ASSERT
983 983 // Don't implement this by using is_in_young(). This method is used
984 984 // in some cases to check that is_in_young() is correct.
985 985 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
986 986 assert(is_in_reserved(p) || p == NULL,
987 987 "Does not work if address is non-null and outside of the heap");
988 988 // The order of the generations is young (low addr), old, perm (high addr)
989 989 return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
990 990 }
991 991 #endif
992 992
993 993 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
994 994 for (int i = 0; i < _n_gens; i++) {
995 995 _gens[i]->oop_iterate(cl);
996 996 }
997 997 }
998 998
999 999 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
1000 1000 for (int i = 0; i < _n_gens; i++) {
1001 1001 _gens[i]->oop_iterate(mr, cl);
1002 1002 }
1003 1003 }
1004 1004
1005 1005 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
1006 1006 for (int i = 0; i < _n_gens; i++) {
1007 1007 _gens[i]->object_iterate(cl);
1008 1008 }
1009 1009 perm_gen()->object_iterate(cl);
1010 1010 }
1011 1011
1012 1012 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
1013 1013 for (int i = 0; i < _n_gens; i++) {
1014 1014 _gens[i]->safe_object_iterate(cl);
1015 1015 }
1016 1016 perm_gen()->safe_object_iterate(cl);
1017 1017 }
1018 1018
1019 1019 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
1020 1020 for (int i = 0; i < _n_gens; i++) {
1021 1021 _gens[i]->object_iterate_since_last_GC(cl);
1022 1022 }
1023 1023 }
1024 1024
1025 1025 Space* GenCollectedHeap::space_containing(const void* addr) const {
1026 1026 for (int i = 0; i < _n_gens; i++) {
1027 1027 Space* res = _gens[i]->space_containing(addr);
1028 1028 if (res != NULL) return res;
1029 1029 }
1030 1030 Space* res = perm_gen()->space_containing(addr);
1031 1031 if (res != NULL) return res;
1032 1032 // Otherwise...
1033 1033 assert(false, "Could not find containing space");
1034 1034 return NULL;
1035 1035 }
1036 1036
1037 1037
1038 1038 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
1039 1039 assert(is_in_reserved(addr), "block_start of address outside of heap");
1040 1040 for (int i = 0; i < _n_gens; i++) {
1041 1041 if (_gens[i]->is_in_reserved(addr)) {
1042 1042 assert(_gens[i]->is_in(addr),
1043 1043 "addr should be in allocated part of generation");
1044 1044 return _gens[i]->block_start(addr);
1045 1045 }
1046 1046 }
1047 1047 if (perm_gen()->is_in_reserved(addr)) {
1048 1048 assert(perm_gen()->is_in(addr),
1049 1049 "addr should be in allocated part of perm gen");
1050 1050 return perm_gen()->block_start(addr);
1051 1051 }
1052 1052 assert(false, "Some generation should contain the address");
1053 1053 return NULL;
1054 1054 }
1055 1055
1056 1056 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
1057 1057 assert(is_in_reserved(addr), "block_size of address outside of heap");
1058 1058 for (int i = 0; i < _n_gens; i++) {
1059 1059 if (_gens[i]->is_in_reserved(addr)) {
1060 1060 assert(_gens[i]->is_in(addr),
1061 1061 "addr should be in allocated part of generation");
1062 1062 return _gens[i]->block_size(addr);
1063 1063 }
1064 1064 }
1065 1065 if (perm_gen()->is_in_reserved(addr)) {
1066 1066 assert(perm_gen()->is_in(addr),
1067 1067 "addr should be in allocated part of perm gen");
1068 1068 return perm_gen()->block_size(addr);
1069 1069 }
1070 1070 assert(false, "Some generation should contain the address");
1071 1071 return 0;
1072 1072 }
1073 1073
1074 1074 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
1075 1075 assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
1076 1076 assert(block_start(addr) == addr, "addr must be a block start");
1077 1077 for (int i = 0; i < _n_gens; i++) {
1078 1078 if (_gens[i]->is_in_reserved(addr)) {
1079 1079 return _gens[i]->block_is_obj(addr);
1080 1080 }
1081 1081 }
1082 1082 if (perm_gen()->is_in_reserved(addr)) {
1083 1083 return perm_gen()->block_is_obj(addr);
1084 1084 }
1085 1085 assert(false, "Some generation should contain the address");
1086 1086 return false;
1087 1087 }
1088 1088
1089 1089 bool GenCollectedHeap::supports_tlab_allocation() const {
1090 1090 for (int i = 0; i < _n_gens; i += 1) {
1091 1091 if (_gens[i]->supports_tlab_allocation()) {
1092 1092 return true;
1093 1093 }
1094 1094 }
1095 1095 return false;
1096 1096 }
1097 1097
1098 1098 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
1099 1099 size_t result = 0;
1100 1100 for (int i = 0; i < _n_gens; i += 1) {
1101 1101 if (_gens[i]->supports_tlab_allocation()) {
1102 1102 result += _gens[i]->tlab_capacity();
1103 1103 }
1104 1104 }
1105 1105 return result;
1106 1106 }
1107 1107
1108 1108 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
1109 1109 size_t result = 0;
1110 1110 for (int i = 0; i < _n_gens; i += 1) {
1111 1111 if (_gens[i]->supports_tlab_allocation()) {
1112 1112 result += _gens[i]->unsafe_max_tlab_alloc();
1113 1113 }
1114 1114 }
1115 1115 return result;
1116 1116 }
1117 1117
1118 1118 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
1119 1119 bool gc_overhead_limit_was_exceeded;
1120 1120 return collector_policy()->mem_allocate_work(size /* size */,
1121 1121 true /* is_tlab */,
1122 1122 &gc_overhead_limit_was_exceeded);
1123 1123 }
1124 1124
 1125 1125  // Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of minimal size
1126 1126 // from the list headed by "*prev_ptr".
1127 1127 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
1128 1128 bool first = true;
1129 1129 size_t min_size = 0; // "first" makes this conceptually infinite.
1130 1130 ScratchBlock **smallest_ptr, *smallest;
1131 1131 ScratchBlock *cur = *prev_ptr;
1132 1132 while (cur) {
1133 1133 assert(*prev_ptr == cur, "just checking");
1134 1134 if (first || cur->num_words < min_size) {
1135 1135 smallest_ptr = prev_ptr;
1136 1136 smallest = cur;
1137 1137 min_size = smallest->num_words;
1138 1138 first = false;
1139 1139 }
1140 1140 prev_ptr = &cur->next;
1141 1141 cur = cur->next;
1142 1142 }
1143 1143 smallest = *smallest_ptr;
1144 1144 *smallest_ptr = smallest->next;
1145 1145 return smallest;
1146 1146 }
1147 1147
 1148 1148  // Sort the scratch block list headed by "list" into decreasing size order,
 1149 1149  // and set "list" to the result.
1150 1150 static void sort_scratch_list(ScratchBlock*& list) {
1151 1151 ScratchBlock* sorted = NULL;
1152 1152 ScratchBlock* unsorted = list;
1153 1153 while (unsorted) {
1154 1154 ScratchBlock *smallest = removeSmallestScratch(&unsorted);
1155 1155 smallest->next = sorted;
1156 1156 sorted = smallest;
1157 1157 }
1158 1158 list = sorted;
1159 1159 }
1160 1160
1161 1161 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
1162 1162 size_t max_alloc_words) {
1163 1163 ScratchBlock* res = NULL;
1164 1164 for (int i = 0; i < _n_gens; i++) {
1165 1165 _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
1166 1166 }
1167 1167 sort_scratch_list(res);
1168 1168 return res;
1169 1169 }
1170 1170
1171 1171 void GenCollectedHeap::release_scratch() {
1172 1172 for (int i = 0; i < _n_gens; i++) {
1173 1173 _gens[i]->reset_scratch();
1174 1174 }
1175 1175 }
1176 1176
1177 1177 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1178 1178 void do_generation(Generation* gen) {
1179 1179 gen->prepare_for_verify();
1180 1180 }
1181 1181 };
1182 1182
1183 1183 void GenCollectedHeap::prepare_for_verify() {
1184 1184 ensure_parsability(false); // no need to retire TLABs
1185 1185 GenPrepareForVerifyClosure blk;
1186 1186 generation_iterate(&blk, false);
1187 1187 perm_gen()->prepare_for_verify();
1188 1188 }
1189 1189
1190 1190
1191 1191 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1192 1192 bool old_to_young) {
1193 1193 if (old_to_young) {
1194 1194 for (int i = _n_gens-1; i >= 0; i--) {
1195 1195 cl->do_generation(_gens[i]);
1196 1196 }
1197 1197 } else {
1198 1198 for (int i = 0; i < _n_gens; i++) {
1199 1199 cl->do_generation(_gens[i]);
1200 1200 }
1201 1201 }
1202 1202 }
1203 1203
1204 1204 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1205 1205 for (int i = 0; i < _n_gens; i++) {
1206 1206 _gens[i]->space_iterate(cl, true);
1207 1207 }
1208 1208 perm_gen()->space_iterate(cl, true);
1209 1209 }
1210 1210
1211 1211 bool GenCollectedHeap::is_maximal_no_gc() const {
1212 1212 for (int i = 0; i < _n_gens; i++) { // skip perm gen
1213 1213 if (!_gens[i]->is_maximal_no_gc()) {
1214 1214 return false;
1215 1215 }
1216 1216 }
1217 1217 return true;
1218 1218 }
1219 1219
1220 1220 void GenCollectedHeap::save_marks() {
1221 1221 for (int i = 0; i < _n_gens; i++) {
1222 1222 _gens[i]->save_marks();
1223 1223 }
1224 1224 perm_gen()->save_marks();
1225 1225 }
1226 1226
1227 1227 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
1228 1228 for (int i = 0; i <= collectedGen; i++) {
1229 1229 _gens[i]->compute_new_size();
1230 1230 }
1231 1231 }
1232 1232
1233 1233 GenCollectedHeap* GenCollectedHeap::heap() {
1234 1234 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1235 1235 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1236 1236 return _gch;
1237 1237 }
1238 1238
1239 1239
1240 1240 void GenCollectedHeap::prepare_for_compaction() {
1241 1241 Generation* scanning_gen = _gens[_n_gens-1];
1242 1242 // Start by compacting into same gen.
1243 1243 CompactPoint cp(scanning_gen, NULL, NULL);
1244 1244 while (scanning_gen != NULL) {
1245 1245 scanning_gen->prepare_for_compaction(&cp);
1246 1246 scanning_gen = prev_gen(scanning_gen);
1247 1247 }
1248 1248 }
1249 1249
1250 1250 GCStats* GenCollectedHeap::gc_stats(int level) const {
1251 1251 return _gens[level]->gc_stats();
1252 1252 }
1253 1253
1254 1254 void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
1255 1255 if (!silent) {
1256 1256 gclog_or_tty->print("permgen ");
1257 1257 }
1258 1258 perm_gen()->verify(allow_dirty);
1259 1259 for (int i = _n_gens-1; i >= 0; i--) {
1260 1260 Generation* g = _gens[i];
1261 1261 if (!silent) {
1262 1262 gclog_or_tty->print(g->name());
1263 1263 gclog_or_tty->print(" ");
1264 1264 }
1265 1265 g->verify(allow_dirty);
1266 1266 }
1267 1267 if (!silent) {
1268 1268 gclog_or_tty->print("remset ");
1269 1269 }
1270 1270 rem_set()->verify();
1271 1271 }
1272 1272
1273 1273 void GenCollectedHeap::print_on(outputStream* st) const {
1274 1274 for (int i = 0; i < _n_gens; i++) {
1275 1275 _gens[i]->print_on(st);
1276 1276 }
1277 1277 perm_gen()->print_on(st);
1278 1278 }
1279 1279
1280 1280 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1281 1281 if (workers() != NULL) {
1282 1282 workers()->threads_do(tc);
1283 1283 }
1284 1284 #ifndef SERIALGC
1285 1285 if (UseConcMarkSweepGC) {
1286 1286 ConcurrentMarkSweepThread::threads_do(tc);
1287 1287 }
1288 1288 #endif // SERIALGC
1289 1289 }
1290 1290
1291 1291 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1292 1292 #ifndef SERIALGC
1293 1293 if (UseParNewGC) {
1294 1294 workers()->print_worker_threads_on(st);
1295 1295 }
1296 1296 if (UseConcMarkSweepGC) {
1297 1297 ConcurrentMarkSweepThread::print_all_on(st);
1298 1298 }
1299 1299 #endif // SERIALGC
1300 1300 }
1301 1301
1302 1302 void GenCollectedHeap::print_tracing_info() const {
1303 1303 if (TraceGen0Time) {
1304 1304 get_gen(0)->print_summary_info();
1305 1305 }
1306 1306 if (TraceGen1Time) {
1307 1307 get_gen(1)->print_summary_info();
1308 1308 }
1309 1309 }
1310 1310
1311 1311 void GenCollectedHeap::print_heap_change(size_t prev_used) const {
1312 1312 if (PrintGCDetails && Verbose) {
1313 1313 gclog_or_tty->print(" " SIZE_FORMAT
1314 1314 "->" SIZE_FORMAT
1315 1315 "(" SIZE_FORMAT ")",
1316 1316 prev_used, used(), capacity());
1317 1317 } else {
1318 1318 gclog_or_tty->print(" " SIZE_FORMAT "K"
1319 1319 "->" SIZE_FORMAT "K"
1320 1320 "(" SIZE_FORMAT "K)",
1321 1321 prev_used / K, used() / K, capacity() / K);
1322 1322 }
1323 1323 }
1324 1324
 1325 1325  // New method to print perm gen info with PrintGCDetails flag
1326 1326 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
1327 1327 gclog_or_tty->print(", [%s :", perm_gen()->short_name());
1328 1328 perm_gen()->print_heap_change(perm_prev_used);
1329 1329 gclog_or_tty->print("]");
1330 1330 }
1331 1331
1332 1332 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1333 1333 private:
1334 1334 bool _full;
1335 1335 public:
1336 1336 void do_generation(Generation* gen) {
1337 1337 gen->gc_prologue(_full);
1338 1338 }
1339 1339 GenGCPrologueClosure(bool full) : _full(full) {};
1340 1340 };
1341 1341
1342 1342 void GenCollectedHeap::gc_prologue(bool full) {
1343 1343 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1344 1344
1345 1345 always_do_update_barrier = false;
1346 1346 // Fill TLAB's and such
1347 1347 CollectedHeap::accumulate_statistics_all_tlabs();
1348 1348 ensure_parsability(true); // retire TLABs
1349 1349
1350 1350 // Call allocation profiler
1351 1351 AllocationProfiler::iterate_since_last_gc();
1352 1352 // Walk generations
1353 1353 GenGCPrologueClosure blk(full);
1354 1354 generation_iterate(&blk, false); // not old-to-young.
1355 1355 perm_gen()->gc_prologue(full);
1356 1356 };
1357 1357
1358 1358 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1359 1359 private:
1360 1360 bool _full;
1361 1361 public:
1362 1362 void do_generation(Generation* gen) {
1363 1363 gen->gc_epilogue(_full);
1364 1364 }
1365 1365 GenGCEpilogueClosure(bool full) : _full(full) {};
1366 1366 };
1367 1367
1368 1368 void GenCollectedHeap::gc_epilogue(bool full) {
1369 1369 #ifdef COMPILER2
1370 1370 assert(DerivedPointerTable::is_empty(), "derived pointer present");
1371 1371 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1372 1372 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1373 1373 #endif /* COMPILER2 */
1374 1374
1375 1375 resize_all_tlabs();
1376 1376
1377 1377 GenGCEpilogueClosure blk(full);
1378 1378 generation_iterate(&blk, false); // not old-to-young.
1379 1379 perm_gen()->gc_epilogue(full);
1380 1380
1381 1381 if (!CleanChunkPoolAsync) {
1382 1382 Chunk::clean_chunk_pool();
1383 1383 }
1384 1384
1385 1385 always_do_update_barrier = UseConcMarkSweepGC;
1386 1386 };
1387 1387
1388 1388 #ifndef PRODUCT
1389 1389 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1390 1390 private:
1391 1391 public:
1392 1392 void do_generation(Generation* gen) {
1393 1393 gen->record_spaces_top();
1394 1394 }
1395 1395 };
1396 1396
1397 1397 void GenCollectedHeap::record_gen_tops_before_GC() {
1398 1398 if (ZapUnusedHeapArea) {
1399 1399 GenGCSaveTopsBeforeGCClosure blk;
1400 1400 generation_iterate(&blk, false); // not old-to-young.
1401 1401 perm_gen()->record_spaces_top();
1402 1402 }
1403 1403 }
1404 1404 #endif // not PRODUCT
1405 1405
1406 1406 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1407 1407 public:
1408 1408 void do_generation(Generation* gen) {
1409 1409 gen->ensure_parsability();
1410 1410 }
1411 1411 };
1412 1412
1413 1413 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1414 1414 CollectedHeap::ensure_parsability(retire_tlabs);
1415 1415 GenEnsureParsabilityClosure ep_cl;
1416 1416 generation_iterate(&ep_cl, false);
1417 1417 perm_gen()->ensure_parsability();
1418 1418 }
1419 1419
1420 1420 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
1421 1421 oop obj,
1422 1422 size_t obj_size) {
1423 1423 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1424 1424 HeapWord* result = NULL;
1425 1425
1426 1426 // First give each higher generation a chance to allocate the promoted object.
1427 1427 Generation* allocator = next_gen(gen);
1428 1428 if (allocator != NULL) {
1429 1429 do {
1430 1430 result = allocator->allocate(obj_size, false);
1431 1431 } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
1432 1432 }
1433 1433
1434 1434 if (result == NULL) {
1435 1435 // Then give gen and higher generations a chance to expand and allocate the
1436 1436 // object.
1437 1437 do {
1438 1438 result = gen->expand_and_allocate(obj_size, false);
1439 1439 } while (result == NULL && (gen = next_gen(gen)) != NULL);
1440 1440 }
1441 1441
1442 1442 if (result != NULL) {
1443 1443 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1444 1444 }
1445 1445 return oop(result);
1446 1446 }
1447 1447
1448 1448 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1449 1449 jlong _time; // in ms
1450 1450 jlong _now; // in ms
1451 1451
1452 1452 public:
1453 1453 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1454 1454
1455 1455 jlong time() { return _time; }
1456 1456
1457 1457 void do_generation(Generation* gen) {
1458 1458 _time = MIN2(_time, gen->time_of_last_gc(_now));
1459 1459 }
1460 1460 };
1461 1461
1462 1462 jlong GenCollectedHeap::millis_since_last_gc() {
1463 - jlong now = os::javaTimeMillis();
 1463 +    // We need a monotonically increasing time in ms but os::javaTimeMillis()
 1464 +    // does not guarantee monotonicity.
1465 + jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1464 1466 GenTimeOfLastGCClosure tolgc_cl(now);
1465 1467 // iterate over generations getting the oldest
1466 1468 // time that a generation was collected
1467 1469 generation_iterate(&tolgc_cl, false);
1468 1470 tolgc_cl.do_generation(perm_gen());
1469 - // XXX Despite the assert above, since javaTimeMillis()
1470 - // doesnot guarantee monotonically increasing return
1471 - // values (note, i didn't say "strictly monotonic"),
1472 - // we need to guard against getting back a time
1473 - // later than now. This should be fixed by basing
1474 - // on someting like gethrtime() which guarantees
1475 - // monotonicity. Note that cond_wait() is susceptible
1476 - // to a similar problem, because its interface is
1477 - // based on absolute time in the form of the
1478 - // system time's notion of UCT. See also 4506635
1479 - // for yet another problem of similar nature. XXX
1471 +
1472 + // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1473 + // provided the underlying platform provides such a time source
1474 + // (and it is bug free). So we still have to guard against getting
1475 + // back a time later than 'now'.
1480 1476 jlong retVal = now - tolgc_cl.time();
1481 1477 if (retVal < 0) {
1482 - NOT_PRODUCT(warning("time warp: %d", retVal);)
1478 + NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
1483 1479 return 0;
1484 1480 }
1485 1481 return retVal;
1486 1482 }
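
One more detail worth calling out in the hunk above: retVal is a jlong, so the old "%d" conversion specifier was mismatched for a 64-bit value; the new format string uses HotSpot's portable INT64_FORMAT macro instead. A minimal usage sketch follows, with a hypothetical value:

    // Hypothetical illustration of the format-macro fix; INT64_FORMAT
    // expands to the platform's 64-bit conversion specifier (for
    // example "%ld" on LP64 Linux/Solaris, "%I64d" on Windows), and the
    // adjacent string literals concatenate into one valid format string.
    jlong retVal = -5;   // hypothetical negative delta
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)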