# HG changeset patch
# User ehelin
# Date 1415816223 -3600
#      Wed Nov 12 19:17:03 2014 +0100
# Node ID f90c954d0ea1808d308c59bea676ca7ef4c439c5
# Parent  bce440df920e17203ae299922d86b132ea36f6d6
8064721: The card tables only ever need two covering regions

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1883,17 +1883,17 @@ jint G1CollectedHeap::initialize() {
   // compressed oops mode.
 
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                  heap_alignment);
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region());
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
     return JNI_ENOMEM;
   }
 
   // Also create a G1 rem set.
   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
diff --git a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
@@ -27,19 +27,18 @@
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
-G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
-                                                 int max_covered_regions) :
-  CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
+G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
+  CardTableModRefBSForCTRS(whole_heap)
 {
   _kind = G1SATBCT;
 }
 
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(pre_val->is_oop(true), "Error");
@@ -127,19 +126,18 @@ void G1SATBCardTableModRefBS::verify_g1_
 
 void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions,
                                                               bool zero_filled) {
   // Default value for a clean card on the card table is -1. So we cannot
   // take advantage of the zero_filled parameter.
   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
   _card_table->clear(mr);
 }
 
 G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
-                               int max_covered_regions) :
-  G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
+G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
+  G1SATBCardTableModRefBS(whole_heap),
   _dcqs(JavaThread::dirty_card_queue_set()),
   _listener()
 {
   _kind = G1SATBCTLogging;
   _listener.set_card_table(this);
 }
 
 void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
diff --git a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
@@ -45,18 +45,17 @@ protected:
 public:
   static int g1_young_card_val() { return g1_young_gen; }
 
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  G1SATBCardTableModRefBS(MemRegion whole_heap,
-                          int max_covered_regions);
+  G1SATBCardTableModRefBS(MemRegion whole_heap);
 
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn);
   }
 
   virtual bool has_write_ref_pre_barrier() { return true; }
 
   // This notes that we don't need to access any BarrierSet data
@@ -147,18 +146,17 @@ class G1SATBCardTableLoggingModRefBS: pu
   G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
 
 public:
   static size_t compute_size(size_t mem_region_size_in_words) {
     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
     return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
-  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
-                                 int max_covered_regions);
+  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
 
   virtual void initialize() { }
   virtual void initialize(G1RegionToSpaceMapper* mapper);
 
   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
 
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCTLogging ||
diff --git a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
@@ -48,18 +48,18 @@ class CardTableExtension : public CardTa
   static void verify_all_young_refs_precise_helper(MemRegion mr);
 
 public:
   enum ExtendedCardValue {
     youngergen_card = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
     verify_card     = CardTableModRefBS::CT_MR_BS_last_reserved + 5
   };
 
-  CardTableExtension(MemRegion whole_heap, int max_covered_regions) :
-    CardTableModRefBS(whole_heap, max_covered_regions) { }
+  CardTableExtension(MemRegion whole_heap) :
+    CardTableModRefBS(whole_heap) { }
 
   // Too risky for the 4/10/02 putback
   // BarrierSet::Name kind() { return BarrierSet::CardTableExtension; }
 
   // Scavenge support
   void scavenge_contents_parallel(ObjectStartArray* start_array,
                                   MutableSpace* sp,
                                   HeapWord* space_top,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -71,17 +71,17 @@ jint ParallelScavengeHeap::initialize()
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
       "Could not reserve enough space for object heap");
     return JNI_ENOMEM;
   }
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
   if (_barrier_set == NULL) {
     vm_shutdown_during_initialization(
       "Could not reserve enough space for barrier set");
     return JNI_ENOMEM;
   }
diff --git a/src/share/vm/memory/barrierSet.hpp b/src/share/vm/memory/barrierSet.hpp
--- a/src/share/vm/memory/barrierSet.hpp
+++ b/src/share/vm/memory/barrierSet.hpp
@@ -44,17 +44,22 @@ public:
     Uninit
   };
 
   enum Flags {
     None                = 0,
     TargetUninitialized = 1
   };
 
 protected:
-  int _max_covered_regions;
+  // Some barrier sets create tables whose elements correspond to parts of
+  // the heap; the CardTableModRefBS is an example. Such barrier sets will
+  // normally reserve space for such tables, and commit parts of the table
+  // "covering" parts of the heap that are committed. At most one covered
+  // region per generation is needed.
+  static const int _max_covered_regions = 2;
   Name _kind;
 
 public:
 
   BarrierSet() { _kind = Uninit; }
 
   // To get around prohibition on RTTI.
   BarrierSet::Name kind() { return _kind; }
   virtual bool is_a(BarrierSet::Name bsn) = 0;
@@ -154,28 +159,16 @@ public:
   // (For efficiency reasons, this operation is specialized for certain
   // barrier types. Semantically, it should be thought of as a call to the
   // virtual "_work" function below, which must implement the barrier.)
   inline void write_region(MemRegion mr);
 protected:
   virtual void write_region_work(MemRegion mr) = 0;
 public:
-
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example. Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed. The constructor is
-  // passed the maximum number of independently committable subregions to
-  // be covered, and the "resize_covered_region" function allows the
-  // sub-parts of the heap to inform the barrier set of changes of their
-  // sizes.
-  BarrierSet(int max_covered_regions) :
-    _max_covered_regions(max_covered_regions) {}
-
   // Inform the BarrierSet that the the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,
   // for initialization.)
   virtual void resize_covered_region(MemRegion new_region) = 0;
 
   // If the barrier set imposes any alignment restrictions on boundaries
   // within the heap, this function tells whether they are met.
   virtual bool is_aligned(HeapWord* addr) = 0;
diff --git a/src/share/vm/memory/cardTableModRefBS.cpp b/src/share/vm/memory/cardTableModRefBS.cpp
--- a/src/share/vm/memory/cardTableModRefBS.cpp
+++ b/src/share/vm/memory/cardTableModRefBS.cpp
@@ -48,19 +48,18 @@ size_t CardTableModRefBS::compute_byte_m
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
          "uninitialized, check declaration order");
   assert(_page_size != 0, "uninitialized, check declaration order");
   const size_t granularity = os::vm_allocation_granularity();
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
 
-CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
-                                     int max_covered_regions):
-  ModRefBarrierSet(max_covered_regions),
+CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap) :
+  ModRefBarrierSet(),
   _whole_heap(whole_heap),
   _guard_index(0),
   _guard_region(),
   _last_valid_index(0),
   _page_size(os::vm_page_size()),
   _byte_map_size(0),
   _covered(NULL),
   _committed(NULL),
diff --git a/src/share/vm/memory/cardTableModRefBS.hpp b/src/share/vm/memory/cardTableModRefBS.hpp
--- a/src/share/vm/memory/cardTableModRefBS.hpp
+++ b/src/share/vm/memory/cardTableModRefBS.hpp
@@ -279,17 +279,17 @@ public:
   static int precleaned_card_val() { return precleaned_card; }
   static int deferred_card_val() { return deferred_card; }
 
   // For RTTI simulation.
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
   }
 
-  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
+  CardTableModRefBS(MemRegion whole_heap);
   ~CardTableModRefBS();
 
   virtual void initialize();
 
   // *** Barrier set functions.
 
   bool has_write_ref_pre_barrier() { return false; }
@@ -477,17 +477,16 @@ class CardTableRS;
 
 // A specialization for the CardTableRS gen rem set.
 class CardTableModRefBSForCTRS: public CardTableModRefBS {
   CardTableRS* _rs;
 protected:
   bool card_will_be_scanned(jbyte cv);
   bool card_may_have_been_dirty(jbyte cv);
 public:
-  CardTableModRefBSForCTRS(MemRegion whole_heap,
-                           int max_covered_regions) :
-    CardTableModRefBS(whole_heap, max_covered_regions) {}
+  CardTableModRefBSForCTRS(MemRegion whole_heap) :
+    CardTableModRefBS(whole_heap) {}
 
   void set_CTRS(CardTableRS* rs) { _rs = rs; }
 };
 
 #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
diff --git a/src/share/vm/memory/cardTableRS.cpp b/src/share/vm/memory/cardTableRS.cpp
--- a/src/share/vm/memory/cardTableRS.cpp
+++ b/src/share/vm/memory/cardTableRS.cpp
@@ -33,31 +33,28 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif // INCLUDE_ALL_GCS
 
-CardTableRS::CardTableRS(MemRegion whole_heap,
-                         int max_covered_regions) :
+CardTableRS::CardTableRS(MemRegion whole_heap) :
   GenRemSet(),
-  _cur_youngergen_card_val(youngergenP1_card),
-  _regions_to_iterate(max_covered_regions - 1)
+  _cur_youngergen_card_val(youngergenP1_card)
 {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
-    _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
-                                                max_covered_regions);
+    _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
   } else {
-    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
+    _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
   }
 #else
-  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
+  _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
 #endif
   _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_last_cur_val_in_gen == NULL) {
     vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
   }
diff --git a/src/share/vm/memory/cardTableRS.hpp b/src/share/vm/memory/cardTableRS.hpp
--- a/src/share/vm/memory/cardTableRS.hpp
+++ b/src/share/vm/memory/cardTableRS.hpp
@@ -78,17 +78,18 @@ class CardTableRS: public GenRemSet {
   // used as the current value for a younger_refs_do iteration of that
   // portion of the table. (The perm gen is index 0; other gens are at
   // their level plus 1. They youngest gen is in the table, but will
   // always have the value "clean_card".)
   jbyte* _last_cur_val_in_gen;
 
   jbyte _cur_youngergen_card_val;
 
-  int _regions_to_iterate;
+  // Number of generations (including permgen).
+  static const int _regions_to_iterate = 3;
 
   jbyte cur_youngergen_card_val() {
     return _cur_youngergen_card_val;
   }
   void set_cur_youngergen_card_val(jbyte v) {
     _cur_youngergen_card_val = v;
   }
   bool is_prev_youngergen_card_val(jbyte v) {
@@ -96,17 +97,17 @@ class CardTableRS: public GenRemSet {
       youngergen_card <= v &&
       v < cur_youngergen_and_prev_nonclean_card &&
       v != _cur_youngergen_card_val;
   }
 
   // Return a youngergen_card_value that is not currently in use.
   jbyte find_unused_youngergenP_card_value();
 
 public:
-  CardTableRS(MemRegion whole_heap, int max_covered_regions);
+  CardTableRS(MemRegion whole_heap);
   ~CardTableRS();
 
   // *** GenRemSet functions.
   CardTableRS* as_CardTableRS() { return this; }
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
 
   // Override.
diff --git a/src/share/vm/memory/collectorPolicy.cpp b/src/share/vm/memory/collectorPolicy.cpp
--- a/src/share/vm/memory/collectorPolicy.cpp
+++ b/src/share/vm/memory/collectorPolicy.cpp
@@ -147,19 +147,18 @@ void CollectorPolicy::initialize_size_in
 }
 
 bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
   bool result = _should_clear_all_soft_refs;
   set_should_clear_all_soft_refs(false);
   return result;
 }
 
-GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
-                                           int max_covered_regions) {
-  return new CardTableRS(whole_heap, max_covered_regions);
+GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
+  return new CardTableRS(whole_heap);
 }
 
 void CollectorPolicy::cleared_all_soft_refs() {
   // If near gc overhear limit, continue to clear SoftRefs. SoftRefs may
   // have been cleared in the last collection but if the gc overhear
   // limit continues to be near, SoftRefs should still be cleared.
   if (size_policy() != NULL) {
     _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
diff --git a/src/share/vm/memory/collectorPolicy.hpp b/src/share/vm/memory/collectorPolicy.hpp
--- a/src/share/vm/memory/collectorPolicy.hpp
+++ b/src/share/vm/memory/collectorPolicy.hpp
@@ -147,20 +147,17 @@ class CollectorPolicy : public CHeapObj<
 #else // INCLUDE_ALL_GCS
   bool is_concurrent_mark_sweep_policy() { return false; }
   bool is_g1_policy() { return false; }
 #endif // INCLUDE_ALL_GCS
 
 
   virtual BarrierSet::Name barrier_set_name() = 0;
 
-  // Create the remembered set (to cover the given reserved region,
-  // allowing breaking up into at most "max_covered_regions").
-  virtual GenRemSet* create_rem_set(MemRegion reserved,
-                                    int max_covered_regions);
+  virtual GenRemSet* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
   // for a block of memory. "gc_time_limit_was_exceeded" will
   // be set to true if the adaptive size policy determine that
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned. If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
   virtual HeapWord* mem_allocate_work(size_t size,
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -104,33 +104,31 @@ jint GenCollectedHeap::initialize() {
   for (i = 0; i < _n_gens; i++) {
     _gen_specs[i]->align(gen_alignment);
   }
 
   // Allocate space for the heap.
 
   char* heap_address;
   size_t total_reserved = 0;
-  int n_covered_regions = 0;
   ReservedSpace heap_rs;
 
   size_t heap_alignment = collector_policy()->heap_alignment();
 
-  heap_address = allocate(heap_alignment, &total_reserved,
-                          &n_covered_regions, &heap_rs);
+  heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
       "Could not reserve enough space for object heap");
     return JNI_ENOMEM;
   }
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
+  _rem_set = collector_policy()->create_rem_set(reserved_region());
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
 
   for (i = 0; i < _n_gens; i++) {
     ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
@@ -147,46 +145,38 @@ jint GenCollectedHeap::initialize() {
 #endif // INCLUDE_ALL_GCS
 
   return JNI_OK;
 }
 
 char* GenCollectedHeap::allocate(size_t alignment,
                                  size_t* _total_reserved,
-                                 int* _n_covered_regions,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
     "the maximum representable size";
 
   // Now figure out the total size.
   size_t total_reserved = 0;
-  int n_covered_regions = 0;
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
   assert(alignment % pageSize == 0, "Must be");
 
   for (int i = 0; i < _n_gens; i++) {
     total_reserved += _gen_specs[i]->max_size();
     if (total_reserved < _gen_specs[i]->max_size()) {
       vm_exit_during_initialization(overflow_msg);
     }
-    n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
 
   assert(total_reserved % alignment == 0,
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT, total_reserved, alignment));
 
-  // Needed until the cardtable is fixed to have the right number
-  // of covered regions.
-  n_covered_regions += 2;
-
   *_total_reserved = total_reserved;
-  *_n_covered_regions = n_covered_regions;
 
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 
   return heap_rs->base();
 }
 
 void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -116,19 +116,17 @@ protected:
 public:
   GenCollectedHeap(GenCollectorPolicy *policy);
 
   GCStats* gc_stats(int level) const;
 
   // Returns JNI_OK on success
   virtual jint initialize();
-  char* allocate(size_t alignment,
-                 size_t* _total_reserved, int* _n_covered_regions,
-                 ReservedSpace* heap_rs);
+  char* allocate(size_t alignment, size_t* _total_reserved, ReservedSpace* heap_rs);
 
   // Does operations required after initialization has been done.
   void post_initialize();
 
   // Initialize ("weak") refs processing support
   virtual void ref_processing_init();
 
   virtual CollectedHeap::Name kind() const {
diff --git a/src/share/vm/memory/generationSpec.hpp b/src/share/vm/memory/generationSpec.hpp
--- a/src/share/vm/memory/generationSpec.hpp
+++ b/src/share/vm/memory/generationSpec.hpp
@@ -54,17 +54,13 @@ public:
   size_t max_size() const { return _max_size; }
   void set_max_size(size_t size) { _max_size = size; }
 
   // Alignment
   void align(size_t alignment) {
     set_init_size(align_size_up(init_size(), alignment));
     set_max_size(align_size_up(max_size(), alignment));
   }
-
-  // Return the number of regions contained in the generation which
-  // might need to be independently covered by a remembered set.
-  virtual int n_covered_regions() const { return 1; }
 };
 
 typedef GenerationSpec* GenerationSpecPtr;
 
 #endif // SHARE_VM_MEMORY_GENERATIONSPEC_HPP
diff --git a/src/share/vm/memory/modRefBarrierSet.hpp b/src/share/vm/memory/modRefBarrierSet.hpp
--- a/src/share/vm/memory/modRefBarrierSet.hpp
+++ b/src/share/vm/memory/modRefBarrierSet.hpp
@@ -32,17 +32,17 @@
 // enumeration), using a card table.
 
 class OopClosure;
 class Generation;
 
 class ModRefBarrierSet: public BarrierSet {
 public:
-  ModRefBarrierSet() { _kind = BarrierSet::ModRef; }
+  ModRefBarrierSet() : BarrierSet() { _kind = BarrierSet::ModRef; }
 
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::ModRef;
   }
 
   // Barriers only on ref writes.
   bool has_read_ref_barrier() { return false; }
   bool has_read_prim_barrier() { return false; }
@@ -90,15 +90,11 @@ public:
   // Causes all refs in "mr" to be assumed to be modified. If "whole_heap"
   // is true, the caller asserts that the entire heap is being invalidated,
   // which may admit an optimized implementation for some barriers.
   virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0;
 
   // The caller guarantees that "mr" contains no references. (Perhaps it's
   // objects have been moved elsewhere.)
   virtual void clear(MemRegion mr) = 0;
-
-  // Pass along the argument to the superclass.
-  ModRefBarrierSet(int max_covered_regions) :
-    BarrierSet(max_covered_regions) {}
 };
 
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -468,17 +468,16 @@ typedef TwoOopHashtable
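
[Editorial aside -- not part of the changeset above.] A "covering region" is a contiguous committed stretch of heap whose cards (one byte per 512-byte block) must be backed by committed card-table memory. Since the PermGen removal the generational heaps have at most two generations, and G1 commits its card table per-region through G1RegionToSpaceMapper (its resize_covered_region is ShouldNotReachHere() above), so no collector ever registers more than two such regions; the patch can therefore replace the max_covered_regions constructor parameter with the constant _max_covered_regions = 2. The standalone C++ sketch below illustrates the mechanism the constant bounds. It is a simplified toy, not HotSpot code, and every name in it is hypothetical.

    // toy_card_table.cpp -- illustrative sketch only; all names are made up.
    // Each generation registers at most one contiguous committed range, so a
    // two-generation heap never needs more than two covering regions.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct MemRegion { std::uintptr_t start, end; };   // [start, end)

    class ToyCardTable {
      static const int    kMaxCoveredRegions = 2;      // one per generation
      static const size_t kCardShift         = 9;      // 512-byte cards

      std::uintptr_t _heap_start;
      std::vector<std::uint8_t> _byte_map;             // one byte per card
      MemRegion _covered[kMaxCoveredRegions];          // committed heap ranges
      int _num_covered;

    public:
      explicit ToyCardTable(MemRegion whole_heap)
        : _heap_start(whole_heap.start),
          _byte_map((whole_heap.end - whole_heap.start) >> kCardShift, 0xFF),
          _num_covered(0) {}

      // Analogue of resize_covered_region(): generation 'gen' reports that
      // its committed extent changed (possibly from empty, at initialization).
      void resize_covered_region(int gen, MemRegion committed) {
        assert(gen >= 0 && gen < kMaxCoveredRegions &&
               "two generations -> two covering regions suffice");
        _covered[gen] = committed;
        if (gen >= _num_covered) _num_covered = gen + 1;
      }

      // Post-write barrier: mark the card for a mutated address dirty.
      void dirty_card_for(std::uintptr_t addr) {
        _byte_map[(addr - _heap_start) >> kCardShift] = 0;
      }
    };

    int main() {
      MemRegion heap{0x100000, 0x500000};                   // 4 MB reserved
      ToyCardTable ct(heap);
      ct.resize_covered_region(0, {0x100000, 0x200000});    // young gen
      ct.resize_covered_region(1, {0x200000, 0x280000});    // old gen
      ct.dirty_card_for(0x123456);                          // barrier hit
      return 0;
    }

In HotSpot the analogous bookkeeping lives in CardTableModRefBS::resize_covered_region(), which the patch leaves intact; only the upper bound on how many regions can ever be registered becomes a compile-time constant instead of a value threaded through every constructor.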