Print this page


Split Split Close
Expand all
Collapse all
          --- old/src/share/vm/gc_interface/collectedHeap.hpp
          +++ new/src/share/vm/gc_interface/collectedHeap.hpp
↓ open down ↓ 84 lines elided ↑ open up ↑
  85   85    friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  86   86    friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe
  87   87  
  88   88  #ifdef ASSERT
  89   89    static int       _fire_out_of_memory_count;
  90   90  #endif
  91   91  
  92   92    // Used for filler objects (static, but initialized in ctor).
  93   93    static size_t _filler_array_max_size;
  94   94  
       95 +  const static char* OverflowMessage;
       96 +
  95   97    GCHeapLog* _gc_heap_log;
  96   98  
  97   99    // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
  98  100    bool _defer_initial_card_mark;
  99  101  
 100  102   protected:
 101  103    MemRegion _reserved;
 102  104    BarrierSet* _barrier_set;
 103  105    bool _is_gc_active;
 104  106    uint _n_par_threads;
↓ open down ↓ 22 lines elided ↑ open up ↑
 127  129  
 128  130    // Create a new tlab. All TLAB allocations must go through this.
 129  131    virtual HeapWord* allocate_new_tlab(size_t size);
 130  132  
 131  133    // Accumulate statistics on all tlabs.
 132  134    virtual void accumulate_statistics_all_tlabs();
 133  135  
 134  136    // Reinitialize tlabs before resuming mutators.
 135  137    virtual void resize_all_tlabs();
 136  138  
       139 +  // Returns the sum of total and size if the sum does not overflow;
       140 +  // otherwise, calls vm_exit_during_initialization().
       141 +  // The overflow check compares the sum against size, which is assumed to be non-zero.
      142 +  size_t add_and_check_overflow(size_t total, size_t size);
      143 +
       144 +  // Rounds total up to a multiple of size and returns the result if it does not overflow;
       145 +  // otherwise, calls vm_exit_during_initialization().
       146 +  // The overflow check compares the rounded-up result against size, which is assumed to be non-zero.
      147 +  size_t round_up_and_check_overflow(size_t total, size_t size);
      148 +
 137  149    // Allocate from the current thread's TLAB, with broken-out slow path.
 138  150    inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size);
 139  151    static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size);
 140  152  
 141  153    // Allocate an uninitialized block of the given size, or returns NULL if
 142  154    // this is impossible.
 143  155    inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS);
 144  156  
 145  157    // Like allocate_init, but the block returned by a successful allocation
 146  158    // is guaranteed initialized to zeros.
↓ open down ↓ 572 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX