513 bool expect_null_mutator_alloc_region,
514 bool* gc_succeeded);
515
516 // Attempt to expand the heap sufficiently
517 // to support an allocation of the given "word_size". If
518 // successful, perform the allocation and return the address of the
519 // allocated block, or else "NULL".
520 HeapWord* expand_and_allocate(size_t word_size);
521
522 // Process any reference objects discovered.
523 void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
524
525 // If during an initial mark pause we may install a pending list head which is not
526 // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
527 // to discover.
528 void make_pending_list_reachable();
529
530 // Merges the information gathered on a per-thread basis for all worker threads
531 // during GC into global variables.
532 void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
533 public:
534 G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; } // plain accessor; no NULL check -- presumably set during heap init, TODO confirm
535
536 WorkGang* workers() const { return _workers; } // gang of worker threads, used e.g. by run_task() below
537
538 // Runs the given AbstractGangTask with the current active workers, returning the
539 // total time taken.
540 Tickspan run_task(AbstractGangTask* task);
541
542 G1Allocator* allocator() {
543 return _allocator;   // plain accessor; unlike g1mm() below, no NULL assert here
544 }
545
546 G1HeapVerifier* verifier() {
547 return _verifier;    // plain accessor; unlike g1mm() below, no NULL assert here
548 }
549
550 G1MonitoringSupport* g1mm() {
551 assert(_g1mm != NULL, "should have been initialized");
552 return _g1mm;
1267
1268 // Print the maximum heap capacity.
1269 virtual size_t max_capacity() const;
1270
1271 // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1272 virtual size_t max_reserved_capacity() const;
1273
1274 virtual jlong millis_since_last_gc();
1275
1276
1277 // Convenience function to be used in situations where the heap type can be
1278 // asserted to be this type.
1279 static G1CollectedHeap* heap();
1280
1281 void set_region_short_lived_locked(HeapRegion* hr);
1282 // add appropriate methods for any other surv rate groups
1283
1284 const G1SurvivorRegions* survivor() const { return &_survivor; } // read-only view of the survivor region set
1285
1286 uint eden_regions_count() const { return _eden.length(); }                       // current number of eden regions
1287 uint survivor_regions_count() const { return _survivor.length(); }               // current number of survivor regions
1288 size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }            // bytes in use across eden regions
1289 size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }    // bytes in use across survivor regions
1290 uint young_regions_count() const { return _eden.length() + _survivor.length(); } // young gen = eden + survivor
1291 uint old_regions_count() const { return _old_set.length(); }                     // size of the old region set
1292 uint archive_regions_count() const { return _archive_set.length(); }             // size of the archive region set
1293 uint humongous_regions_count() const { return _humongous_set.length(); }         // size of the humongous region set
1294
1295 #ifdef ASSERT
1296 bool check_young_list_empty();
1297 #endif
1298
1299 // *** Stuff related to concurrent marking. It's not clear to me that so
1300 // many of these need to be public.
1301
1302 // The functions below are helper functions that a subclass of
1303 // "CollectedHeap" can use in the implementation of its virtual
1304 // functions.
1305 // This performs a concurrent marking of the live objects in a
1306 // bitmap off to the side.
1307 void do_concurrent_mark();
|
513 bool expect_null_mutator_alloc_region,
514 bool* gc_succeeded);
515
516 // Attempt to expand the heap sufficiently
517 // to support an allocation of the given "word_size". If
518 // successful, perform the allocation and return the address of the
519 // allocated block, or else "NULL".
520 HeapWord* expand_and_allocate(size_t word_size);
521
522 // Process any reference objects discovered.
523 void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
524
525 // If during an initial mark pause we may install a pending list head which is not
526 // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
527 // to discover.
528 void make_pending_list_reachable();
529
530 // Merges the information gathered on a per-thread basis for all worker threads
531 // during GC into global variables.
532 void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
533
534 void verify_numa_regions(const char* desc);
535
536 public:
537 G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; } // plain accessor; no NULL check -- presumably set during heap init, TODO confirm
538
539 WorkGang* workers() const { return _workers; } // gang of worker threads, used e.g. by run_task() below
540
541 // Runs the given AbstractGangTask with the current active workers, returning the
542 // total time taken.
543 Tickspan run_task(AbstractGangTask* task);
544
545 G1Allocator* allocator() {
546 return _allocator;   // plain accessor; unlike g1mm() below, no NULL assert here
547 }
548
549 G1HeapVerifier* verifier() {
550 return _verifier;    // plain accessor; unlike g1mm() below, no NULL assert here
551 }
552
553 G1MonitoringSupport* g1mm() {
554 assert(_g1mm != NULL, "should have been initialized");
555 return _g1mm;
1270
1271 // Print the maximum heap capacity.
1272 virtual size_t max_capacity() const;
1273
1274 // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1275 virtual size_t max_reserved_capacity() const;
1276
1277 virtual jlong millis_since_last_gc();
1278
1279
1280 // Convenience function to be used in situations where the heap type can be
1281 // asserted to be this type.
1282 static G1CollectedHeap* heap();
1283
1284 void set_region_short_lived_locked(HeapRegion* hr);
1285 // add appropriate methods for any other surv rate groups
1286
1287 const G1SurvivorRegions* survivor() const { return &_survivor; } // read-only view of the survivor region set
1288
1289 uint eden_regions_count() const { return _eden.length(); }                       // total eden regions across all nodes
1290 uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }         // per-node overload -- node_index presumably a NUMA node id (cf. verify_numa_regions above)
1291 uint survivor_regions_count() const { return _survivor.length(); }               // total survivor regions across all nodes
1292 uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); } // per-node overload -- node_index presumably a NUMA node id
1293 size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }            // bytes in use across eden regions
1294 size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }    // bytes in use across survivor regions
1295 uint young_regions_count() const { return _eden.length() + _survivor.length(); } // young gen = eden + survivor
1296 uint old_regions_count() const { return _old_set.length(); }                     // size of the old region set
1297 uint archive_regions_count() const { return _archive_set.length(); }             // size of the archive region set
1298 uint humongous_regions_count() const { return _humongous_set.length(); }         // size of the humongous region set
1299
1300 #ifdef ASSERT
1301 bool check_young_list_empty();
1302 #endif
1303
1304 // *** Stuff related to concurrent marking. It's not clear to me that so
1305 // many of these need to be public.
1306
1307 // The functions below are helper functions that a subclass of
1308 // "CollectedHeap" can use in the implementation of its virtual
1309 // functions.
1310 // This performs a concurrent marking of the live objects in a
1311 // bitmap off to the side.
1312 void do_concurrent_mark();
|