--- old/src/share/vm/memory/generation.hpp
+++ new/src/share/vm/memory/generation.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_MEMORY_GENERATION_HPP
26 26 #define SHARE_VM_MEMORY_GENERATION_HPP
27 27
28 28 #include "gc_implementation/shared/collectorCounters.hpp"
29 29 #include "memory/allocation.hpp"
30 30 #include "memory/memRegion.hpp"
31 31 #include "memory/referenceProcessor.hpp"
32 32 #include "memory/universe.hpp"
33 33 #include "memory/watermark.hpp"
34 34 #include "runtime/mutex.hpp"
35 35 #include "runtime/perfData.hpp"
36 36 #include "runtime/virtualspace.hpp"
37 37
38 38 // A Generation models a heap area for similarly-aged objects.
39 39 // It will contain one or more spaces holding the actual objects.
40 40 //
41 41 // The Generation class hierarchy:
42 42 //
43 43 // Generation - abstract base class
44 44 // - DefNewGeneration - allocation area (copy collected)
45 45 // - ParNewGeneration - a DefNewGeneration that is collected by
46 46 // several threads
47 47 // - CardGeneration - abstract class adding offset array behavior
48 48 // - OneContigSpaceCardGeneration - abstract class holding a single
49 49 // contiguous space with card marking
50 50 // - TenuredGeneration - tenured (old object) space (markSweepCompact)
51 51 // - CompactingPermGenGen - reflective object area (klasses, methods, symbols, ...)
52 52 // - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
53 53 // (Detlefs-Printezis refinement of
54 54 // Boehm-Demers-Shenker)
55 55 //
56 56 // The system configurations currently allowed are:
57 57 //
58 58 // DefNewGeneration + TenuredGeneration + PermGeneration
59 59 // DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
60 60 //
61 61 // ParNewGeneration + TenuredGeneration + PermGeneration
62 62 // ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
63 63 //
64 64
65 65 class DefNewGeneration;
66 66 class GenerationSpec;
67 67 class CompactibleSpace;
68 68 class ContiguousSpace;
69 69 class CompactPoint;
70 70 class OopsInGenClosure;
71 71 class OopClosure;
72 72 class ScanClosure;
73 73 class FastScanClosure;
74 74 class GenCollectedHeap;
75 75 class GenRemSet;
76 76 class GCStats;
77 77
78 78 // A "ScratchBlock" represents a block of memory in one generation usable by
79 79 // another. It represents "num_words" free words, starting at and including
80 80 // the address of "this".
81 81 struct ScratchBlock {
82 82 ScratchBlock* next;
83 83 size_t num_words;
84 84 HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming
85 85 // the first two fields are word-sized.)
86 86 };
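// A minimal sketch of how a consumer could total the payload of a scratch
// list (a hypothetical helper, not an existing HotSpot function). The "next"
// and "num_words" header fields occupy the first two words of each block,
// hence the "- 2":
//
//   static size_t total_scratch_words(const ScratchBlock* list) {
//     size_t total = 0;
//     for (const ScratchBlock* b = list; b != NULL; b = b->next) {
//       total += b->num_words - 2;  // payload words only
//     }
//     return total;
//   }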
87 87
88 88
89 89 class Generation: public CHeapObj<mtGC> {
90 90 friend class VMStructs;
91 91 private:
92 92 jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
93 93 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
94 94 // used region at some specific point during collection.
95 95
96 96 protected:
97 97 // Minimum and maximum addresses for memory reserved (not necessarily
98 98 // committed) for generation.
99 99 // Used by card marking code. Must not overlap with address ranges of
100 100 // other generations.
101 101 MemRegion _reserved;
102 102
103 103 // Memory area reserved for generation
104 104 VirtualSpace _virtual_space;
105 105
106 106 // Level in the generation hierarchy.
107 107 int _level;
108 108
109 109 // ("Weak") Reference processing support
110 110 ReferenceProcessor* _ref_processor;
111 111
112 112 // Performance Counters
113 113 CollectorCounters* _gc_counters;
114 114
115 115 // Statistics for garbage collection
116 116 GCStats* _gc_stats;
117 117
118 118 // Returns the next generation in the configuration, or else NULL if this
119 119 // is the highest generation.
120 120 Generation* next_gen() const;
121 121
122 122 // Initialize the generation.
123 123 Generation(ReservedSpace rs, size_t initial_byte_size, int level);
124 124
125 125 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
126 126 // "sp" that point into younger generations.
127 127 // The iteration is only over objects allocated at the start of the
128 128 // iteration; objects allocated as a result of applying the closure are
129 129 // not included.
130 130 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
131 131
132 132 public:
133 133 // The set of possible generation kinds.
134 134 enum Name {
135 135 ASParNew,
136 136 ASConcurrentMarkSweep,
137 137 DefNew,
138 138 ParNew,
139 139 MarkSweepCompact,
140 140 ConcurrentMarkSweep,
141 141 Other
142 142 };
143 143
144 144 enum SomePublicConstants {
145 145 // Generations are GenGrain-aligned and have size that are multiples of
146 146 // GenGrain.
147 147 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
148 148 // (we expect its low byte to be zero - see implementation of post_barrier)
149 149 LogOfGenGrain = 16 ARM_ONLY(+1),
150 150 GenGrain = 1 << LogOfGenGrain
151 151 };
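  // A minimal sketch of the alignment arithmetic GenGrain implies: rounding a
  // requested byte size up to a GenGrain boundary (illustrative only). With
  // the non-ARM value LogOfGenGrain == 16, GenGrain is 64K, so a request of
  // 100000 bytes rounds up to 131072.
  //
  //   size_t grain   = (size_t)Generation::GenGrain;
  //   size_t aligned = (requested_bytes + grain - 1) & ~(grain - 1);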
152 152
153 153 // allocate and initialize ("weak") refs processing support
154 154 virtual void ref_processor_init();
155 155 void set_ref_processor(ReferenceProcessor* rp) {
156 156 assert(_ref_processor == NULL, "clobbering existing _ref_processor");
157 157 _ref_processor = rp;
158 158 }
159 159
160 160 virtual Generation::Name kind() { return Generation::Other; }
161 161 GenerationSpec* spec();
162 162
163 163 // This properly belongs in the collector, but for now this
164 164 // will do.
165 165 virtual bool refs_discovery_is_atomic() const { return true; }
166 166 virtual bool refs_discovery_is_mt() const { return false; }
167 167
168 168 // Space enquiries (results in bytes)
169 169 virtual size_t capacity() const = 0; // The maximum number of object bytes the
170 170 // generation can currently hold.
171 171 virtual size_t used() const = 0; // The number of used bytes in the gen.
172 172 virtual size_t free() const = 0; // The number of free bytes in the gen.
173 173
174 174 // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
175 175 // Returns the total number of bytes available in a generation
176 176 // for the allocation of objects.
177 177 virtual size_t max_capacity() const;
178 178
179 179 // If this is a young generation, the maximum number of bytes that can be
180 180 // allocated in this generation before a GC is triggered.
181 181 virtual size_t capacity_before_gc() const { return 0; }
182 182
183 183 // The largest number of contiguous free bytes in the generation,
184 184 // including expansion. (Assumes it is called at a safepoint.)
185 185 virtual size_t contiguous_available() const = 0;
186 186 // The largest number of contiguous free bytes in this or any higher generation.
187 187 virtual size_t max_contiguous_available() const;
188 188
189 189 // Returns true if promotions of the specified amount are
190 190 // likely to succeed without a promotion failure.
191 191 // Promotion of the full amount is not guaranteed but
192 192 // might be attempted in the worst case.
193 193 virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
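  // A minimal sketch of a call site (hypothetical; the actual policy lives in
  // the concrete collectors): a young generation asks the next generation
  // whether a scavenge could safely promote everything it currently holds.
  //
  //   Generation* old_gen = next_gen();
  //   if (old_gen != NULL && !old_gen->promotion_attempt_is_safe(used())) {
  //     // a scavenge might suffer a promotion failure; prefer a full collection
  //   }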
194 194
195 195 // For a non-young generation, this interface can be used to inform a
196 196 // generation that a promotion attempt into that generation failed.
197 197 // Typically used to enable diagnostic output for post-mortem analysis,
198 198 // but other uses of the interface are not ruled out.
199 199 virtual void promotion_failure_occurred() { /* does nothing */ }
200 200
201 201 // Return an estimate of the maximum allocation that could be performed
202 202 // in the generation without triggering any collection or expansion
203 203 // activity. It is "unsafe" because no locks are taken; the result
204 204 // should be treated as an approximation, not a guarantee, for use in
205 205 // heuristic resizing decisions.
206 206 virtual size_t unsafe_max_alloc_nogc() const = 0;
207 207
208 208 // Returns true if this generation cannot be expanded further
209 209 // without a GC. Override as appropriate.
210 210 virtual bool is_maximal_no_gc() const {
211 211 return _virtual_space.uncommitted_size() == 0;
212 212 }
213 213
214 214 MemRegion reserved() const { return _reserved; }
215 215
216 216 // Returns a region guaranteed to contain all the objects in the
217 217 // generation.
218 218 virtual MemRegion used_region() const { return _reserved; }
219 219
220 220 MemRegion prev_used_region() const { return _prev_used_region; }
221 221 virtual void save_used_region() { _prev_used_region = used_region(); }
222 222
223 223 // Returns "TRUE" iff "p" points into the committed areas in the generation.
224 224 // For some kinds of generations, this may be an expensive operation.
225 225 // To avoid performance problems stemming from its inadvertent use in
226 226 // product jvm's, we restrict its use to assertion checking or
227 227 // verification only.
228 228 virtual bool is_in(const void* p) const;
229 229
230 230 /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
231 231 bool is_in_reserved(const void* p) const {
232 232 return _reserved.contains(p);
233 233 }
234 234
235 235 // Check that the generation kind is DefNewGeneration or a
236 236 // subclass of DefNewGeneration and return a DefNewGeneration*.
237 237 DefNewGeneration* as_DefNewGeneration();
238 238
239 239 // If some space in the generation contains the given "addr", return a
240 240 // pointer to that space, else return "NULL".
241 241 virtual Space* space_containing(const void* addr) const;
242 242
243 243 // Iteration - do not use for time critical operations
244 244 virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;
245 245
246 246 // Returns the first space, if any, in the generation that can participate
247 247 // in compaction, or else "NULL".
248 248 virtual CompactibleSpace* first_compaction_space() const = 0;
249 249
250 250 // Returns "true" iff this generation should be used to allocate an
251 251 // object of the given size. Young generations might
252 252 // wish to exclude very large objects, for example, since, if allocated
253 253 // often, they would greatly increase the frequency of young-gen
254 254 // collection.
255 255 virtual bool should_allocate(size_t word_size, bool is_tlab) {
256 256 bool result = false;
257 257 size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
258 258 if (!is_tlab || supports_tlab_allocation()) {
259 259 result = (word_size > 0) && (word_size < overflow_limit);
260 260 }
261 261 return result;
262 262 }
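  // The overflow_limit above rejects word counts whose byte size would not
  // fit in a size_t: on LP64, BitsPerSize_t == 64 and LogHeapWordSize == 3,
  // so overflow_limit is 2^61 words, exactly the smallest count for which
  // (word_size << LogHeapWordSize) would overflow.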
263 263
264 264 // Allocate and returns a block of the requested size, or returns "NULL".
265 265 // Assumes the caller has done any necessary locking.
266 266 virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;
267 267
268 268 // Like "allocate", but performs any necessary locking internally.
269 269 virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
270 270
271 271 // A 'younger' gen has reached an allocation limit, and uses this to notify
272 272 // the next older gen. The return value is a new limit, or NULL if none. The
273 273 // caller must do the necessary locking.
274 274 virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
275 275 size_t word_size) {
276 276 return NULL;
277 277 }
278 278
279 279 // Some generations may offer a region for shared, contiguous allocation,
280 280 // via inlined code (by exporting the address of the top and end fields
281 281 // defining the extent of the contiguous allocation region.)
282 282
283 283 // This function returns "true" iff the heap supports this kind of
284 284 // allocation. (More precisely, this means the style of allocation that
285 285 // increments "*top_addr()" with a CAS.) (Default is "no".)
286 286 // A generation that supports this allocation style must use lock-free
287 287 // allocation for *all* allocation, since there are times when lock free
288 288 // allocation will be concurrent with plain "allocate" calls.
289 289 virtual bool supports_inline_contig_alloc() const { return false; }
290 290
291 291 // These functions return the addresses of the fields that define the
292 292 // boundaries of the contiguous allocation area. (These fields should be
293 293 // physically near to one another.)
294 294 virtual HeapWord** top_addr() const { return NULL; }
295 295 virtual HeapWord** end_addr() const { return NULL; }
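  // A minimal sketch of the lock-free fast path these accessors enable,
  // assuming HotSpot's Atomic::cmpxchg_ptr (in practice the fast path is
  // emitted inline by the compilers rather than written like this):
  //
  //   HeapWord* top     = *gen->top_addr();
  //   HeapWord* new_top = top + word_size;
  //   if (new_top <= *gen->end_addr() &&
  //       (HeapWord*)Atomic::cmpxchg_ptr(new_top, gen->top_addr(), top) == top) {
  //     return top;  // won the race; [top, new_top) belongs to this thread
  //   }
  //   // otherwise retry, or fall back to the locking slow path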
296 296
297 297 // Thread-local allocation buffers
298 298 virtual bool supports_tlab_allocation() const { return false; }
299 299 virtual size_t tlab_capacity() const {
300 300 guarantee(false, "Generation doesn't support thread local allocation buffers");
301 301 return 0;
302 302 }
303 303 virtual size_t unsafe_max_tlab_alloc() const {
304 304 guarantee(false, "Generation doesn't support thread local allocation buffers");
305 305 return 0;
306 306 }
307 307
308 308 // "obj" is the address of an object in a younger generation. Allocate space
309 309 // for "obj" in the current (or some higher) generation, and copy "obj" into
310 310 // the newly allocated space, if possible, returning the result (or NULL if
311 311 // the allocation failed).
312 312 //
313 313 // The "obj_size" argument is just obj->size(), passed along so the caller can
314 314 // avoid repeating the virtual call to retrieve it.
315 315 virtual oop promote(oop obj, size_t obj_size);
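  // A minimal sketch of a call site in a copying young-generation collector
  // (hypothetical; tenuring policy and failure handling vary by collector):
  //
  //   if (obj->mark()->age() >= tenuring_threshold) {
  //     oop new_obj = next_gen()->promote(obj, obj->size());
  //     if (new_obj == NULL) {
  //       // promotion failed; the collector must recover, e.g. via its
  //       // promotion-failure handling path
  //     }
  //   }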
316 316
317 317 // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
318 318 // object "obj", whose original mark word was "m", and whose size is
319 319 // "word_sz". If possible, allocate space for "obj", copy obj into it
320 320 // (taking care to copy "m" into the mark word when done, since the mark
321 321 // word of "obj" may have been overwritten with a forwarding pointer, and
322 322 // also taking care to copy the klass pointer *last*). Returns the new
323 323 // object if successful, or else NULL.
324 324 virtual oop par_promote(int thread_num,
325 325 oop obj, markOop m, size_t word_sz);
326 326
327 327 // Undo, if possible, the most recent par_promote_alloc allocation by
328 328 // "thread_num" ("obj", of "word_sz").
329 329 virtual void par_promote_alloc_undo(int thread_num,
330 330 HeapWord* obj, size_t word_sz);
331 331
332 332 // Informs the current generation that all par_promote_alloc's in the
333 333 // collection have been completed; any supporting data structures can be
334 334 // reset. Default is to do nothing.
335 335 virtual void par_promote_alloc_done(int thread_num) {}
336 336
337 337 // Informs the current generation that all oop_since_save_marks_iterates
338 338 // performed by "thread_num" in the current collection, if any, have been
339 339 // completed; any supporting data structures can be reset. Default is to
340 340 // do nothing.
341 341 virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
342 342
343 343 // This generation will collect all younger generations
344 344 // during a full collection.
345 345 virtual bool full_collects_younger_generations() const { return false; }
346 346
347 347 // This generation does in-place marking, meaning that mark words
348 348 // are mutated during the marking phase and presumably reinitialized
349 349 // to a canonical value after the GC. This is currently used by the
350 350 // biased locking implementation to determine whether additional
351 351 // work is required during the GC prologue and epilogue.
352 352 virtual bool performs_in_place_marking() const { return true; }
353 353
354 354 // Returns "true" iff collect() should subsequently be called on this
355 355 // generation. See comment below.
356 356 // This is a generic implementation which can be overridden.
357 357 //
358 358 // Note: in the current (1.4) implementation, when genCollectedHeap's
359 359 // incremental_collection_will_fail flag is set, all allocations are
360 360 // slow path (the only fast-path place to allocate is DefNew, which
361 361 // will be full if the flag is set).
362 362 // Thus, older generations which collect younger generations should
363 363 // test this flag and collect if it is set.
364 364 virtual bool should_collect(bool full,
365 365 size_t word_size,
366 366 bool is_tlab) {
367 367 return (full || should_allocate(word_size, is_tlab));
368 368 }
369 369
370 370 // Returns true if the collection is likely to be safely
371 371 // completed. Even if this method returns true, a collection
372 372 // is not guaranteed to succeed; the system should be
373 373 // able to safely unwind and recover from that failure, albeit
374 374 // at some additional cost.
375 375 virtual bool collection_attempt_is_safe() {
376 376 guarantee(false, "Are you sure you want to call this method?");
377 377 return true;
378 378 }
379 379
380 380 // Perform a garbage collection.
381 381 // If full is true, attempt a full garbage collection of this generation.
382 382 // Otherwise, attempt to (at least) free enough space to support an
383 383 // allocation of the given "word_size".
384 384 virtual void collect(bool full,
385 385 bool clear_all_soft_refs,
386 386 size_t word_size,
387 387 bool is_tlab) = 0;
388 388
389 389 // Perform a heap collection, attempting to create (at least) enough
390 390 // space to support an allocation of the given "word_size". If
391 391 // successful, perform the allocation and return the resulting
392 392 // "oop" (initializing the allocated block). If the allocation is
393 393 // still unsuccessful, return "NULL".
394 394 virtual HeapWord* expand_and_allocate(size_t word_size,
395 395 bool is_tlab,
396 396 bool parallel = false) = 0;
397 397
398 398 // Some generations may require some cleanup or preparation actions before
399 399 // allowing a collection. The default is to do nothing.
400 400 virtual void gc_prologue(bool full) {};
401 401
402 402 // Some generations may require some cleanup actions after a collection.
403 403 // The default is to do nothing.
404 404 virtual void gc_epilogue(bool full) {};
405 405
406 406 // Save the high water marks for the used space in a generation.
407 407 virtual void record_spaces_top() {};
408 408
409 409 // Some generations may need to be "fixed-up" after some allocation
410 410 // activity to make them parsable again. The default is to do nothing.
411 411 virtual void ensure_parsability() {};
412 412
413 413 // Time (in ms) when we were last collected or now if a collection is
414 414 // in progress.
415 415 virtual jlong time_of_last_gc(jlong now) {
416 416 // Both _time_of_last_gc and now are set using a time source
417 417 // that guarantees monotonically non-decreasing values provided
418 418 // the underlying platform provides such a source. So we still
419 419 // have to guard against non-monotonicity.
420 420 NOT_PRODUCT(
421 421 if (now < _time_of_last_gc) {
422 422 warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, _time_of_last_gc, now);
423 423 }
424 424 )
425 425 return _time_of_last_gc;
426 426 }
427 427
428 428 virtual void update_time_of_last_gc(jlong now) {
429 429 _time_of_last_gc = now;
430 430 }
431 431
432 432 // Generations may keep statistics about collection. This
433 433 // method updates those statistics. current_level is
434 434 // the level of the collection that has most recently
435 435 // occurred. This allows the generation to decide what
436 436 // statistics are valid to collect. For example, the
437 437 // generation can decide to gather the amount of promoted data
438 438 // if the collection of the younger generations has completed.
439 439 GCStats* gc_stats() const { return _gc_stats; }
440 440 virtual void update_gc_stats(int current_level, bool full) {}
441 441
442 442 // Mark sweep support phase2
443 443 virtual void prepare_for_compaction(CompactPoint* cp);
444 444 // Mark sweep support phase3
445 445 virtual void pre_adjust_pointers() {ShouldNotReachHere();}
446 446 virtual void adjust_pointers();
447 447 // Mark sweep support phase4
448 448 virtual void compact();
449 449 virtual void post_compact() {ShouldNotReachHere();}
450 450
451 451 // Support for CMS's rescan. In this general form we return a pointer
452 452 // to an abstract object that can be used, based on specific previously
453 453 // decided protocols, to exchange information between generations,
454 454 // information that may be useful for speeding up certain types of
455 455 // garbage collectors. A NULL value indicates to the client that
456 456 // no data recording is expected by the provider. The data-recorder is
457 457 // expected to be GC worker thread-local, with the worker index
458 458 // indicated by "thr_num".
459 459 virtual void* get_data_recorder(int thr_num) { return NULL; }
460 + virtual void sample_eden_chunk() {}
460 461
461 462 // Some generations may require some cleanup actions before allowing
462 463 // a verification.
463 464 virtual void prepare_for_verify() {};
464 465
465 466 // Accessing "marks".
466 467
467 468 // This function gives a generation a chance to note a point between
468 469 // collections. For example, a contiguous generation might note the
469 470 // beginning allocation point post-collection, which might allow some later
470 471 // operations to be optimized.
471 472 virtual void save_marks() {}
472 473
473 474 // This function allows generations to initialize any "saved marks". That
474 475 // is, it should only be called when the generation is empty.
475 476 virtual void reset_saved_marks() {}
476 477
477 478 // This function is "true" iff no allocations have occurred in the
478 479 // generation since the last call to "save_marks".
479 480 virtual bool no_allocs_since_save_marks() = 0;
480 481
481 482 // Apply "cl->apply" to (the addresses of) all reference fields in objects
482 483 // allocated in the current generation since the last call to "save_marks".
483 484 // If more objects are allocated in this generation as a result of applying
484 485 // the closure, iterates over reference fields in those objects as well.
485 486 // Calls "save_marks" at the end of the iteration.
486 487 // General signature...
487 488 virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
488 489 // ...and specializations for de-virtualization. (The general
489 490 // implementation of the _nv versions calls the virtual version.
490 491 // Note that the _nv suffix is not really semantically necessary,
491 492 // but it avoids some not-so-useful warnings on Solaris.)
492 493 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
493 494 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
494 495 oop_since_save_marks_iterate_v((OopsInGenClosure*)cl); \
495 496 }
496 497 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
497 498
498 499 #undef Generation_SINCE_SAVE_MARKS_DECL
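  // For one closure type from SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES, e.g.
  // ScanClosure with nv_suffix "_nv", the declaration above expands to:
  //
  //   virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  //     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
  //   }
  //
  // Subclasses override the _nv form with a specialized body, while this base
  // class funnels everything through the general virtual version.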
499 500
500 501 // The "requestor" generation is performing some garbage collection
501 502 // action for which it would be useful to have scratch space. If
502 503 // the target is not the requestor, no gc actions will be required
503 504 // of the target. The requestor promises to allocate no more than
504 505 // "max_alloc_words" in the target generation (via promotion say,
505 506 // if the requestor is a young generation and the target is older).
506 507 // If the target generation can provide any scratch space, it adds
507 508 // it to "list", leaving "list" pointing to the head of the
508 509 // augmented list. The default is to offer no space.
509 510 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
510 511 size_t max_alloc_words) {}
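  // A minimal sketch of the requestor side (roughly what the collected heap
  // does when gathering scratch for a collection; the names below are
  // illustrative):
  //
  //   ScratchBlock* list = NULL;
  //   for (int i = 0; i < n_gens; i++) {
  //     gens[i]->contribute_scratch(list, requestor, max_alloc_words);
  //   }
  //   // "list" now heads the augmented chain of usable scratch blocks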
511 512
512 513 // Give each generation an opportunity to do clean up for any
513 514 // contributed scratch.
514 515 virtual void reset_scratch() {};
515 516
516 517 // When an older generation has been collected, and perhaps resized,
517 518 // this method will be invoked on all younger generations (from older to
518 519 // younger), allowing them to resize themselves as appropriate.
519 520 virtual void compute_new_size() = 0;
520 521
521 522 // Printing
522 523 virtual const char* name() const = 0;
523 524 virtual const char* short_name() const = 0;
524 525
525 526 int level() const { return _level; }
526 527
527 528 // Attributes
528 529
529 530 // True iff the given generation may only be the youngest generation.
530 531 virtual bool must_be_youngest() const = 0;
531 532 // True iff the given generation may only be the oldest generation.
532 533 virtual bool must_be_oldest() const = 0;
533 534
534 535 // Reference Processing accessor
535 536 ReferenceProcessor* const ref_processor() { return _ref_processor; }
536 537
537 538 // Iteration.
538 539
539 540 // Iterate over all the ref-containing fields of all objects in the
540 541 // generation, calling "cl.do_oop" on each.
541 542 virtual void oop_iterate(OopClosure* cl);
542 543
543 544 // Same as above, restricted to the intersection of a memory region and
544 545 // the generation.
545 546 virtual void oop_iterate(MemRegion mr, OopClosure* cl);
546 547
547 548 // Iterate over all objects in the generation, calling "cl.do_object" on
548 549 // each.
549 550 virtual void object_iterate(ObjectClosure* cl);
550 551
551 552 // Iterate over all safe objects in the generation, calling "cl.do_object" on
552 553 // each. An object is safe if its references point to other objects in
553 554 // the heap. This defaults to object_iterate() unless overridden.
554 555 virtual void safe_object_iterate(ObjectClosure* cl);
555 556
556 557 // Iterate over all objects allocated in the generation since the last
557 558 // collection, calling "cl.do_object" on each. The generation must have
558 559 // been initialized properly to support this function, or else this call
559 560 // will fail.
560 561 virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
561 562
562 563 // Apply "cl->do_oop" to (the address of) all and only all the ref fields
563 564 // in the current generation that contain pointers to objects in younger
564 565 // generations. Objects allocated since the last "save_marks" call are
565 566 // excluded.
566 567 virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
567 568
568 569 // Inform a generation that it no longer contains references to objects
569 570 // in any younger generation. [e.g. Because younger gens are empty,
570 571 // clear the card table.]
571 572 virtual void clear_remembered_set() { }
572 573
573 574 // Inform a generation that some of its objects have moved. [e.g. The
574 575 // generation's spaces were compacted, invalidating the card table.]
575 576 virtual void invalidate_remembered_set() { }
576 577
577 578 // Block abstraction.
578 579
579 580 // Returns the address of the start of the "block" that contains the
580 581 // address "addr". We say "blocks" instead of "objects" since some heaps
581 582 // may not pack objects densely; a chunk may either be an object or a
582 583 // non-object.
583 584 virtual HeapWord* block_start(const void* addr) const;
584 585
585 586 // Requires "addr" to be the start of a chunk, and returns its size.
586 587 // "addr + size" is required to be the start of a new chunk, or the end
587 588 // of the active area of the heap.
588 589 virtual size_t block_size(const HeapWord* addr) const;
589 590
590 591 // Requires "addr" to be the start of a block, and returns "TRUE" iff
591 592 // the block is an object.
592 593 virtual bool block_is_obj(const HeapWord* addr) const;
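  // A minimal sketch of walking the blocks of a memory region "mr" with this
  // abstraction (assumes the generation is parsable, e.g. after
  // ensure_parsability(), and that "mr" lies within the generation):
  //
  //   HeapWord* cur = block_start(mr.start());
  //   while (cur < mr.end()) {
  //     size_t sz = block_size(cur);
  //     if (block_is_obj(cur)) {
  //       // oop(cur) is a valid object occupying sz words
  //     }
  //     cur += sz;
  //   }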
593 594
594 595
595 596 // PrintGC, PrintGCDetails support
596 597 void print_heap_change(size_t prev_used) const;
597 598
598 599 // PrintHeapAtGC support
599 600 virtual void print() const;
600 601 virtual void print_on(outputStream* st) const;
601 602
602 603 virtual void verify() = 0;
603 604
604 605 struct StatRecord {
605 606 int invocations;
606 607 elapsedTimer accumulated_time;
607 608 StatRecord() :
608 609 invocations(0),
609 610 accumulated_time(elapsedTimer()) {}
610 611 };
611 612 private:
612 613 StatRecord _stat_record;
613 614 public:
614 615 StatRecord* stat_record() { return &_stat_record; }
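  // A minimal sketch of how a caller might maintain the record around a
  // collection (hypothetical; assumes elapsedTimer's start()/stop()):
  //
  //   StatRecord* sr = gen->stat_record();
  //   sr->invocations++;
  //   sr->accumulated_time.start();
  //   // ... perform the collection ...
  //   sr->accumulated_time.stop();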
615 616
616 617 virtual void print_summary_info();
617 618 virtual void print_summary_info_on(outputStream* st);
618 619
619 620 // Performance Counter support
620 621 virtual void update_counters() = 0;
621 622 virtual CollectorCounters* counters() { return _gc_counters; }
622 623 };
623 624
624 625 // Class CardGeneration is a generation that is covered by a card table,
625 626 // and uses a card-size block-offset array to implement block_start.
626 627
627 628 // class BlockOffsetArray;
628 629 // class BlockOffsetArrayContigSpace;
629 630 class BlockOffsetSharedArray;
630 631
631 632 class CardGeneration: public Generation {
632 633 friend class VMStructs;
633 634 protected:
634 635 // This is shared with other generations.
635 636 GenRemSet* _rs;
636 637 // This is local to this generation.
637 638 BlockOffsetSharedArray* _bts;
638 639
639 640 CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
640 641 GenRemSet* remset);
641 642
642 643 public:
643 644
644 645 // Attempt to expand the generation by "bytes". Expand by at
645 646 // least "expand_bytes". Return true if some amount (not
646 647 // necessarily the full "bytes") was expanded.
647 648 virtual bool expand(size_t bytes, size_t expand_bytes);
648 649
649 650 virtual void clear_remembered_set();
650 651
651 652 virtual void invalidate_remembered_set();
652 653
653 654 virtual void prepare_for_verify();
654 655
655 656 // Grow generation with specified size (returns false if unable to grow)
656 657 virtual bool grow_by(size_t bytes) = 0;
657 658 // Grow generation to reserved size.
658 659 virtual bool grow_to_reserved() = 0;
659 660 };
660 661
661 662 // OneContigSpaceCardGeneration models a heap of old objects contained in a single
662 663 // contiguous space.
663 664 //
664 665 // Garbage collection is performed using mark-compact.
665 666
666 667 class OneContigSpaceCardGeneration: public CardGeneration {
667 668 friend class VMStructs;
668 669 // Abstractly, this is a subtype that gets access to protected fields.
669 670 friend class CompactingPermGen;
670 671 friend class VM_PopulateDumpSharedSpace;
671 672
672 673 protected:
673 674 size_t _min_heap_delta_bytes; // Minimum amount to expand.
674 675 ContiguousSpace* _the_space; // actual space holding objects
675 676 WaterMark _last_gc; // watermark between objects allocated before
676 677 // and after last GC.
677 678
678 679 // Grow generation with specified size (returns false if unable to grow)
679 680 virtual bool grow_by(size_t bytes);
680 681 // Grow generation to reserved size.
681 682 virtual bool grow_to_reserved();
682 683 // Shrink generation with specified size (returns false if unable to shrink)
683 684 void shrink_by(size_t bytes);
684 685
685 686 // Allocation failure
686 687 virtual bool expand(size_t bytes, size_t expand_bytes);
687 688 void shrink(size_t bytes);
688 689
689 690 // Accessing spaces
690 691 ContiguousSpace* the_space() const { return _the_space; }
691 692
692 693 public:
693 694 OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
694 695 size_t min_heap_delta_bytes,
695 696 int level, GenRemSet* remset,
696 697 ContiguousSpace* space) :
697 698 CardGeneration(rs, initial_byte_size, level, remset),
698 699 _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
699 700 {}
700 701
701 702 inline bool is_in(const void* p) const;
702 703
703 704 // Space enquiries
704 705 size_t capacity() const;
705 706 size_t used() const;
706 707 size_t free() const;
707 708
708 709 MemRegion used_region() const;
709 710
710 711 size_t unsafe_max_alloc_nogc() const;
711 712 size_t contiguous_available() const;
712 713
713 714 // Iteration
714 715 void object_iterate(ObjectClosure* blk);
715 716 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
716 717 void object_iterate_since_last_GC(ObjectClosure* cl);
717 718
718 719 void younger_refs_iterate(OopsInGenClosure* blk);
719 720
720 721 inline CompactibleSpace* first_compaction_space() const;
721 722
722 723 virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
723 724 virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
724 725
725 726 // Accessing marks
726 727 inline WaterMark top_mark();
727 728 inline WaterMark bottom_mark();
728 729
729 730 #define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
730 731 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
731 732 OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
732 733 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)
733 734
734 735 void save_marks();
735 736 void reset_saved_marks();
736 737 bool no_allocs_since_save_marks();
737 738
738 739 inline size_t block_size(const HeapWord* addr) const;
739 740
740 741 inline bool block_is_obj(const HeapWord* addr) const;
741 742
742 743 virtual void collect(bool full,
743 744 bool clear_all_soft_refs,
744 745 size_t size,
745 746 bool is_tlab);
746 747 HeapWord* expand_and_allocate(size_t size,
747 748 bool is_tlab,
748 749 bool parallel = false);
749 750
750 751 virtual void prepare_for_verify();
751 752
752 753 virtual void gc_epilogue(bool full);
753 754
754 755 virtual void record_spaces_top();
755 756
756 757 virtual void verify();
757 758 virtual void print_on(outputStream* st) const;
758 759 };
759 760
760 761 #endif // SHARE_VM_MEMORY_GENERATION_HPP