src/share/vm/runtime/handles.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File webrev Sdiff src/share/vm/runtime

src/share/vm/runtime/handles.cpp

Print this page




 112 
 113     f->do_oop(bottom++);
 114   }
 115   return handles_visited;
 116 }
 117 
// Used for debugging handle allocation.
// Count of live HandleMark objects: incremented in HandleMark::initialize()
// and decremented in ~HandleMark(). Compiled into non-product builds only.
NOT_PRODUCT(jint _nof_handlemarks  = 0;)
 120 
 121 void HandleArea::oops_do(OopClosure* f) {
 122   uintx handles_visited = 0;
 123   // First handle the current chunk. It is filled to the high water mark.
 124   handles_visited += chunk_oops_do(f, _chunk, _hwm);
 125   // Then handle all previous chunks. They are completely filled.
 126   Chunk* k = _first;
 127   while(k != _chunk) {
 128     handles_visited += chunk_oops_do(f, k, k->top());
 129     k = k->next();
 130   }
 131 
 132   // The thread local handle areas should not get very large
 133   if (TraceHandleAllocation && (size_t)handles_visited > TotalHandleAllocationLimit) {
 134 #ifdef ASSERT
 135     warning("%d: Visited in HandleMark : " SIZE_FORMAT, _nof_handlemarks, handles_visited);
 136 #else
 137     warning("Visited in HandleMark : " SIZE_FORMAT, handles_visited);
 138 #endif
 139   }
 140   if (_prev != NULL) _prev->oops_do(f);
 141 }
 142 
 143 void HandleMark::initialize(Thread* thread) {
 144   _thread = thread;
 145   // Save area
 146   _area  = thread->handle_area();
 147   // Save current top
 148   _chunk = _area->_chunk;
 149   _hwm   = _area->_hwm;
 150   _max   = _area->_max;
 151   _size_in_bytes = _area->_size_in_bytes;
 152   debug_only(_area->_handle_mark_nesting++);
 153   assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
 154   debug_only(Atomic::inc(&_nof_handlemarks);)
 155 
 156   // Link this in the thread
 157   set_previous_handle_mark(thread->last_handle_mark());
 158   thread->set_last_handle_mark(this);
 159 }
 160 
 161 
// Destructor: releases all handles allocated since this mark was set by
// rolling the thread's HandleArea back to the saved top markers and
// freeing any chunks allocated after the saved one.
HandleMark::~HandleMark() {
  HandleArea* area = _area;   // help compilers with poor alias analysis
  assert(area == _thread->handle_area(), "sanity check");
  assert(area->_handle_mark_nesting > 0, "must stack allocate HandleMarks" );
  debug_only(area->_handle_mark_nesting--);

  // Debug code to trace the number of handles allocated per mark.
#ifdef ASSERT
  if (TraceHandleAllocation) {
    size_t handles = 0;   // byte count first; converted to a handle count below
    Chunk *c = _chunk->next();
    if (c == NULL) {
      handles = area->_hwm - _hwm; // no new chunk allocated
    } else {
      handles = _max - _hwm;      // add rest in first chunk
      // Sum the capacities of all chunks allocated after the saved one.
      while(c != NULL) {
        handles += c->length();
        c = c->next();
      }
      handles -= area->_max - area->_hwm; // adjust for last chunk not full
    }
    handles /= sizeof(void *); // Adjust for size of a handle
    if (handles > HandleAllocationLimit) {
      // Note: _nof_handlemarks is only set in debug mode
      warning("%d: Allocated in HandleMark : " SIZE_FORMAT, _nof_handlemarks, handles);
    }

    tty->print_cr("Handles " SIZE_FORMAT, handles);
  }
#endif

  // Delete chunks allocated after the saved _chunk.
  if( _chunk->next() ) {
    // reset arena size before delete chunks. Otherwise, the total
    // arena size could exceed total chunk size
    assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
    area->set_size_in_bytes(size_in_bytes());
    _chunk->next_chop();
  } else {
    assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
  }
  // Roll back arena to saved top markers
  area->_chunk = _chunk;
  area->_hwm = _hwm;
  area->_max = _max;
#ifdef ASSERT
  // clear out first chunk (to detect allocation bugs)
  if (ZapVMHandleArea) {
    memset(_hwm, badHandleValue, _max - _hwm);
  }
  Atomic::dec(&_nof_handlemarks);
  // NOTE(review): the webrev excerpt is truncated here — the closing
  // #endif and function brace lie outside the visible chunk.




 112 
 113     f->do_oop(bottom++);
 114   }
 115   return handles_visited;
 116 }
 117 
// Used for debugging handle allocation.
// Count of live HandleMark objects: incremented in HandleMark::initialize()
// and decremented in ~HandleMark(). Compiled into non-product builds only.
NOT_PRODUCT(jint _nof_handlemarks  = 0;)
 120 
 121 void HandleArea::oops_do(OopClosure* f) {
 122   uintx handles_visited = 0;
 123   // First handle the current chunk. It is filled to the high water mark.
 124   handles_visited += chunk_oops_do(f, _chunk, _hwm);
 125   // Then handle all previous chunks. They are completely filled.
 126   Chunk* k = _first;
 127   while(k != _chunk) {
 128     handles_visited += chunk_oops_do(f, k, k->top());
 129     k = k->next();
 130   }
 131 








 132   if (_prev != NULL) _prev->oops_do(f);
 133 }
 134 
 135 void HandleMark::initialize(Thread* thread) {
 136   _thread = thread;
 137   // Save area
 138   _area  = thread->handle_area();
 139   // Save current top
 140   _chunk = _area->_chunk;
 141   _hwm   = _area->_hwm;
 142   _max   = _area->_max;
 143   _size_in_bytes = _area->_size_in_bytes;
 144   debug_only(_area->_handle_mark_nesting++);
 145   assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
 146   debug_only(Atomic::inc(&_nof_handlemarks);)
 147 
 148   // Link this in the thread
 149   set_previous_handle_mark(thread->last_handle_mark());
 150   thread->set_last_handle_mark(this);
 151 }
 152 
 153 
// Destructor: releases all handles allocated since this mark was set by
// rolling the thread's HandleArea back to the saved top markers and
// freeing any chunks allocated after the saved one.
HandleMark::~HandleMark() {
  HandleArea* area = _area;   // help compilers with poor alias analysis
  assert(area == _thread->handle_area(), "sanity check");
  assert(area->_handle_mark_nesting > 0, "must stack allocate HandleMarks" );
  debug_only(area->_handle_mark_nesting--);

  // Delete chunks allocated after the saved _chunk.
  if( _chunk->next() ) {
    // reset arena size before delete chunks. Otherwise, the total
    // arena size could exceed total chunk size
    assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
    area->set_size_in_bytes(size_in_bytes());
    _chunk->next_chop();
  } else {
    assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
  }
  // Roll back arena to saved top markers
  area->_chunk = _chunk;
  area->_hwm = _hwm;
  area->_max = _max;
#ifdef ASSERT
  // clear out first chunk (to detect allocation bugs)
  if (ZapVMHandleArea) {
    memset(_hwm, badHandleValue, _max - _hwm);
  }
  Atomic::dec(&_nof_handlemarks);
  // NOTE(review): the webrev excerpt is truncated here — the closing
  // #endif and function brace lie outside the visible chunk.


src/share/vm/runtime/handles.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File