src/hotspot/share/memory/vtBuffer.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/gcLocker.hpp"
  27 #include "memory/vtBuffer.hpp"
  28 #include "oops/oop.inline.hpp"
  29 #include "oops/valueKlass.hpp"
  30 #include "runtime/frame.hpp"
  31 #include "runtime/thread.hpp"
  32 #include "utilities/globalDefinitions.hpp"
  33 #include "utilities/ticks.hpp"
  34 #include "utilities/ticks.inline.hpp"
  35 
  36 VTBufferChunk* VTBuffer::_free_list = NULL;
  37 Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never);
  38 int VTBuffer::_pool_counter = 0;
  39 int VTBuffer::_max_pool_counter = 0;
  40 int VTBuffer::_total_allocated = 0;
  41 int VTBuffer::_total_deallocated = 0;
  42 int VTBuffer::_total_failed = 0;
  43 
  44 oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) {
  45   assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types");
  46   JavaThread* thread = (JavaThread*)THREAD;
  47   if (thread->vt_alloc_ptr() == NULL) {
  48     if (!allocate_vt_chunk(thread)) {
  49       return NULL; // will trigger fallback strategy: allocation in Java heap
  50     }
  51   }
  52   assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful");
  53   int size_in_bytes = k->size_helper() * wordSize;
  54   if ((char*)thread->vt_alloc_ptr() + size_in_bytes  >= thread->vt_alloc_limit()) {
  55     if (size_in_bytes > (int)VTBufferChunk::max_alloc_size()) {
  56       // Too big to be allocated in a buffer
  57       return NULL;
  58     }
  59     if (!allocate_vt_chunk(thread)) {
  60       return NULL; // will trigger fallback strategy: allocation in Java heap
  61     }
  62   }
  63   assert((char*)thread->vt_alloc_ptr() + size_in_bytes < thread->vt_alloc_limit(),"otherwise the logic above is wrong");
  64   oop new_vt = (oop)thread->vt_alloc_ptr();
  65   int size_in_words = k->size_helper();
  66   thread->increment_vtchunk_total_memory_buffered(size_in_words * HeapWordSize);
  67   int increment = align_object_size(size_in_words);
  68   void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize;
  69   new_ptr = MIN2(new_ptr, thread->vt_alloc_limit());
  70   assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()),
  71       "old and new alloc ptr must be in the same chunk");
  72   thread->set_vt_alloc_ptr(new_ptr);
  73   // the value and its header must be initialized before being returned!!!
  74   memset(((char*)(oopDesc*)new_vt), 0, size_in_bytes);
  75   new_vt->set_klass(k);
  76   new_vt->set_mark(markOop(k->java_mirror()));
  77   return new_vt;
  78 }
  79 
  80 bool VTBuffer::allocate_vt_chunk(JavaThread* thread) {
  81   VTBufferChunk* new_chunk = NULL;
  82   // Trying local cache
  83   if (thread->local_free_chunk() != NULL) {
  84     new_chunk = thread->local_free_chunk();
  85     thread->set_local_free_chunk(NULL);
  86   } else {
  87     // Trying global pool
  88     MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
  89     if (_free_list != NULL) {
  90       new_chunk = _free_list;
  91       _free_list = new_chunk->next();
  92       if (_free_list != NULL) {
  93         _free_list->set_prev(NULL);
  94       }
  95       new_chunk->set_next(NULL);
  96       _pool_counter--;


 115   assert(new_chunk->index() == -1, "Sanity check");
 116   new_chunk->set_owner(thread);
 117   if(current != NULL) {
 118     new_chunk->set_prev(current);
 119     new_chunk->set_index(current->index() + 1);
 120     current->set_next(new_chunk);
 121   } else {
 122     new_chunk->set_index(0);
 123   }
 124   thread->increment_vtchunk_in_use();
 125   thread->set_vt_alloc_ptr(new_chunk->first_alloc());
 126   thread->set_vt_alloc_limit(new_chunk->alloc_limit());
 127   return true; // allocation was successful
 128 }
 129 
 130 void VTBuffer::recycle_chunk(JavaThread* thread, VTBufferChunk* chunk) {
 131   if (thread->local_free_chunk() == NULL) {
 132     chunk->set_prev(NULL);
 133     chunk->set_next(NULL);
 134     chunk->set_index(-1);
 135     thread->set_local_free_chunk(chunk);
 136   } else {
 137     return_vt_chunk(thread, chunk);
 138   }
 139   thread->decrement_vtchunk_in_use();
 140 }
 141 
 142 // This is the main way to recycle VTBuffer memory; it is called from
 143 // remove_activation() when an interpreter frame is about to be removed
 144 // from the stack. All memory used in the context of this frame is freed,
 145 // and the vt_alloc_ptr is restored to the value it had when the frame
 146 // was created (modulo a possible adjustment if a value is being returned).
 147 void VTBuffer::recycle_vtbuffer(JavaThread* thread, frame current_frame) {
 148   address current_ptr = (address)thread->vt_alloc_ptr();
 149   assert(current_ptr != NULL, "Should not reach here if NULL");
 150   VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
 151   assert(current_chunk->owner() == thread, "Sanity check");
 152   address previous_ptr = (address)current_frame.interpreter_frame_vt_alloc_ptr();
 153   if (previous_ptr == NULL) {
 154     // vt_alloc_ptr has not been initialized in this frame
 155     // let's initialize it to the first_alloc() value of the first chunk
 156     VTBufferChunk* first_chunk = current_chunk;
 157     while (first_chunk->prev() != NULL) {
 158       first_chunk = first_chunk->prev();
 159     }
 160     previous_ptr = (address)first_chunk->first_alloc();
 161   }
 162   assert(previous_ptr != NULL, "Should not reach here if NULL");
 163   VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
 164   assert(previous_chunk->owner() == thread, "Sanity check");
 165   if (current_ptr == previous_ptr) return;
 166   assert(current_chunk != previous_chunk || current_ptr >= previous_ptr, "Sanity check");
 167   VTBufferChunk* del = previous_chunk->next();
 168   previous_chunk->set_next(NULL);
 169   thread->set_vt_alloc_ptr(previous_ptr);
 170   thread->set_vt_alloc_limit(previous_chunk->alloc_limit());
 171   while (del != NULL) {
 172     VTBufferChunk* temp = del->next();
 173     VTBuffer::recycle_chunk(thread, del);
 174     del = temp;
 175   }
 176 }
 177 
 178 void VTBuffer::return_vt_chunk(JavaThread* thread, VTBufferChunk* chunk) {
 179   chunk->set_prev(NULL);
 180   chunk->set_owner(NULL);
 181   chunk->set_index(-1);
 182   MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
 183   if (_pool_counter < _max_free_list) {
 184     if (_free_list != NULL) {
 185       chunk->set_next(_free_list);
 186       _free_list->set_prev(chunk);
 187       _free_list = chunk;
 188     } else {
 189       chunk->set_next(NULL);
 190       _free_list = chunk;
 191     }
 192     _pool_counter++;
 193     if (_pool_counter > _max_pool_counter) {
 194       _max_pool_counter = _pool_counter;
 195     }
 196   } else {
 197     delete chunk;
 198     _total_deallocated++;
 199   }
 200   thread->increment_vtchunk_returned();
 201 }
 202 
 203 bool VTBuffer::value_belongs_to_frame(oop p, frame* f) {
 204   // the code below assumes that frame f is the last interpreted frame
 205   // on the execution stack
 206   int p_chunk_idx = VTBufferChunk::chunk(p)->index();
 207   int frame_first_chunk_idx;
 208   if (f->interpreter_frame_vt_alloc_ptr() != NULL) {
 209     frame_first_chunk_idx = VTBufferChunk::chunk(f->interpreter_frame_vt_alloc_ptr())->index();
 210   } else {
 211     frame_first_chunk_idx = 0;
 212   }
 213   if (p_chunk_idx == frame_first_chunk_idx) {
 214     return (intptr_t*)p >= f->interpreter_frame_vt_alloc_ptr();
 215   } else {
 216     return p_chunk_idx > frame_first_chunk_idx;
 217   }
 218 
 219 }
 220 
 221 void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) {
 222   assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames");
 223   assert(chunk != NULL, "Should not be called if null");
 224   while (chunk->prev() != NULL) {
 225     chunk = chunk->prev();
 226   }
 227   f.interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc());
 228 }
 229 
 230 extern "C" {
 231   static int compare_reloc_entries(const void* void_a, const void* void_b) {
 232     struct VT_relocation_entry* entry_a = (struct VT_relocation_entry*)void_a;
 233     struct VT_relocation_entry* entry_b = (struct VT_relocation_entry*)void_b;
 234     if (entry_a->chunk_index == entry_b->chunk_index) {
 235       if (entry_a->old_ptr < entry_b->old_ptr) {
 236         return -1;
 237       } else {
 238         return 1;


 249 
 250 void dump_reloc_table(struct VT_relocation_entry* table, int nelem, bool print_new_ptr) {
 251   ResourceMark rm;
 252   for (int i = 0; i < nelem; i++) {
 253     InstanceKlass* ik = InstanceKlass::cast(((oop)table[i].old_ptr)->klass());
 254     tty->print("%d:\t%p\t%d\t%s\t%x", i, table[i].old_ptr, table[i].chunk_index,
 255                 ik->name()->as_C_string(), ik->size_helper() * HeapWordSize);
 256     if (print_new_ptr) {
 257         tty->print_cr("\t%p\t%d\n", table[i].new_ptr, VTBufferChunk::chunk(table[i].new_ptr)->index());
 258     } else {
 259         tty->print_cr("");
 260     }
 261   }
 262 }
 263 
 264 // Relocate value 'old' after value 'previous'
 265 address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) {
 266   InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass());
 267   assert(ik_old->is_value(), "Sanity check");
 268   VTBufferChunk* chunk = VTBufferChunk::chunk(previous);
 269   address next_alloc = previous + align_object_size(ik_old->size_helper());
 270   if(next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) {
 271     // relocation can be performed in the same chunk
 272     return previous + align_object_size(previous_size_in_words) * HeapWordSize;
 273   } else {
 274     // relocation must be performed in the next chunk
 275     VTBufferChunk* next_chunk = chunk->next();
 276     assert(next_chunk != NULL, "Because we are compacting, there should be enough in-use chunks");
 277     return (address)next_chunk->first_alloc();
 278   }
 279 }
 280 
 281 oop VTBuffer::relocate_return_value(JavaThread* thread, frame current_frame, oop obj) {
 282   assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values");
 283   assert(obj->klass()->is_value(), "Sanity check");
 284   if (!VTBuffer::value_belongs_to_frame(obj, &current_frame)) return obj;
 285   ValueKlass* vk = ValueKlass::cast(obj->klass());
 286   address current_ptr = (address)thread->vt_alloc_ptr();
 287   VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
 288   address previous_ptr = (address)current_frame.interpreter_frame_vt_alloc_ptr();
 289   if (previous_ptr == NULL) {
 290     fix_frame_vt_alloc_ptr(current_frame, current_chunk);
 291     previous_ptr = (address)current_frame.interpreter_frame_vt_alloc_ptr();
 292   }
 293   VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
 294   address dest;
 295   if ((address)obj != previous_ptr) {
 296     if (previous_chunk == current_chunk
 297         || (previous_ptr + vk->size_helper() * wordSize) < previous_chunk->alloc_limit()) {
 298       dest = previous_ptr;
 299     } else {
 300       assert(previous_chunk->next() != NULL, "Should not happen");
 301       dest = (address)previous_chunk->next()->first_alloc();
 302     }
 303     // Copying header
 304     memcpy(dest, obj, vk->first_field_offset());
 305     // Copying value content
 306     vk->value_store(((char*)(address)obj) + vk->first_field_offset(),
 307                     dest + vk->first_field_offset(), false, true);
 308   } else {
 309     dest = (address)obj;
 310   }
 311   address new_alloc_ptr = dest + vk->size_helper() * wordSize;
 312   current_frame.interpreter_frame_set_vt_alloc_ptr((intptr_t*)new_alloc_ptr);
 313   VTBufferChunk* last = VTBufferChunk::chunk(dest);
 314   VTBufferChunk* del = last->next();
 315   thread->set_vt_alloc_ptr(new_alloc_ptr);
 316   thread->set_vt_alloc_limit(last->alloc_limit());
 317   last->set_next(NULL);
 318   while (del != NULL) {
 319     VTBufferChunk* tmp = del->next();
 320     VTBuffer::recycle_chunk(thread, del);
 321     del = tmp;
 322   }
 323   return (oop)dest;
 324 }
 325 
 326 // This method is called to recycle VTBuffer memory when the VM has detected
 327 // that too much memory is being consumed in the current frame context. This
 328 // can only happen when the method contains at least one loop in which new
 329 // values are created.
 330 void VTBuffer::recycle_vt_in_frame(JavaThread* thread, frame* f) {
 331   Ticks begin, end;
 332   Ticks step1, step2, step3, step4, step5, step6, step7;
 333   int returned_chunks = 0;
 334 
 335   if (ReportVTBufferRecyclingTimes) {
 336     begin = Ticks::now();


 374       ((oop)reloc_table[0].old_ptr)->set_mark((markOop)reloc_table[0].new_ptr);
 375       for (int i = 1; i < n_entries; i++) {
 376         reloc_table[i].new_ptr = relocate_value(reloc_table[i].old_ptr, reloc_table[i-1].new_ptr,
 377             InstanceKlass::cast(((oop)reloc_table[i-1].old_ptr)->klass())->size_helper());
 378         ((oop)reloc_table[i].old_ptr)->set_mark((markOop)reloc_table[i].new_ptr);
 379       }
 380       if (ReportVTBufferRecyclingTimes) {
 381         step3 = Ticks::now();
 382       }
 383 
 384       // 4 - update pointers
 385       BufferedValuesPointersUpdate update_closure = BufferedValuesPointersUpdate(f);
 386       f->buffered_values_interpreted_do(&update_closure);
 387       if (ReportVTBufferRecyclingTimes) {
 388         step4 = Ticks::now();
 389       }
 390 
 391       // 5 - relocate values
 392       for (int i = 0; i < n_entries; i++) {
 393         if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) {
 394           InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass());
 395           // instead of memcpy, a value_store() might be required here
 396           memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize);
 397         }
 398         // Resetting the mark word
 399         ((oop)reloc_table[i].new_ptr)->set_mark(markOop(((oop)reloc_table[i].new_ptr)->klass()->java_mirror()));
 400       }
 401       if (ReportVTBufferRecyclingTimes) {
 402         step5 = Ticks::now();
 403       }
 404 
 405       // 6 - update thread allocation pointer
 406       oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr;
 407       InstanceKlass* ik = InstanceKlass::cast(last_oop->klass());
 408       thread->set_vt_alloc_ptr((address)last_oop + ik->size_helper() * HeapWordSize);
 409       thread->set_vt_alloc_limit(VTBufferChunk::chunk(thread->vt_alloc_ptr())->alloc_limit());
 410       if (ReportVTBufferRecyclingTimes) {
 411         step6 = Ticks::now();
 412       }
 413 
 414       // 7 - free/return unused chunks
 415       VTBufferChunk* chunk = VTBufferChunk::chunk(reloc_table[n_entries - 1].new_ptr);
 416       VTBufferChunk* temp = chunk;
 417       chunk = chunk->next();
 418       temp->set_next(NULL);
 419       while (chunk != NULL) {
 420         returned_chunks++;
 421         temp = chunk->next();
 422         VTBuffer::recycle_chunk(thread, chunk);
 423         chunk = temp;
 424       }
 425       if (ReportVTBufferRecyclingTimes) {
 426         step7 = Ticks::now();
 427       }
 428     } else {
 429       f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)thread->vt_alloc_ptr());
 430     }
 431   }
 432 
 433   // 8 - free relocation table
 434   FREE_RESOURCE_ARRAY(struct VT_relocation_entry, reloc_table, max_entries);
 435   if (ReportVTBufferRecyclingTimes) {
 436     end = Ticks::now();
 437     ResourceMark rm(thread);
 438     tty->print_cr("VTBufferRecycling: %s : %s.%s %s : " JLONG_FORMAT "us",
 439         thread->name(),
 440         f->interpreter_frame_method()->klass_name()->as_C_string(),
 441         f->interpreter_frame_method()->name()->as_C_string(),
 442         f->interpreter_frame_method()->signature()->as_C_string(),
 443         (end.value() - begin.value()) / 1000);
 444     tty->print("Step1 : " JLONG_FORMAT "ns ", step1.value() - begin.value());
 445     tty->print("Step2 : " JLONG_FORMAT "ns ", step2.value() - step1.value());
 446     tty->print("Step3 : " JLONG_FORMAT "ns ", step3.value() - step2.value());
 447     tty->print("Step4 : " JLONG_FORMAT "ns ", step4.value() - step3.value());
 448     tty->print("Step5 : " JLONG_FORMAT "ns ", step5.value() - step4.value());
 449     tty->print("Step6 : " JLONG_FORMAT "ns ", step6.value() - step5.value());
 450     tty->print("Step7 : " JLONG_FORMAT "ns ", step7.value() - step6.value());
 451     tty->print("Step8 : " JLONG_FORMAT "ns ", end.value() - step7.value());
 452     tty->print_cr("Returned chunks: %d", returned_chunks);
 453   }
 454 }
 455 
 456 void BufferedValuesMarking::do_buffered_value(oop* p) {
 457   assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
 458   if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
 459     if (!(*p)->mark()->is_marked()) {
 460       assert(*_index < _size, "index outside of relocation table range");
 461       _reloc_table[*_index].old_ptr = (address)*p;
 462       _reloc_table[*_index].chunk_index = VTBufferChunk::chunk(*p)->index();
 463       *_index = (*_index) + 1;
 464       (*p)->set_mark((*p)->mark()->set_marked());
 465     }
 466   }
 467 }
 468 
 469 void BufferedValuesPointersUpdate::do_buffered_value(oop* p) {
 470   assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
 471   // might be coded more efficiently just by checking that the mark word is not NULL
 472   if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
 473     *p = (oop)(*p)->mark();
 474   }
 475 }

src/hotspot/share/memory/vtBuffer.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/gcLocker.hpp"
  27 #include "memory/vtBuffer.hpp"
  28 #include "oops/oop.inline.hpp"
  29 #include "oops/valueKlass.hpp"
  30 #include "runtime/frame.hpp"
  31 #include "runtime/thread.hpp"
  32 #include "utilities/globalDefinitions.hpp"
  33 #include "utilities/ticks.hpp"
  34 #include "utilities/ticks.inline.hpp"
  35 #if INCLUDE_ALL_GCS
  36 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  37 #endif // INCLUDE_ALL_GCS
  38 
  39 
  40 VTBufferChunk* VTBuffer::_free_list = NULL;
  41 Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never);
  42 int VTBuffer::_pool_counter = 0;
  43 int VTBuffer::_max_pool_counter = 0;
  44 int VTBuffer::_total_allocated = 0;
  45 int VTBuffer::_total_deallocated = 0;
  46 int VTBuffer::_total_failed = 0;
  47 
  48 void VTBufferChunk::zap(void* start) {
  49   assert(this == (VTBufferChunk*)((intptr_t)start & chunk_mask()), "start must be in current chunk");
  50   if (ZapVTBufferChunks) {
  51     size_t size = chunk_size() - ((char*)start - (char*)this);
  52     memset((char*)start, 0, size);
  53   }
  54 }
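
The assert above encodes the buffer's core addressing trick: chunks are self-aligned, power-of-two sized blocks, so the chunk header owning any buffered address can be recovered by masking off its low-order bits. A minimal sketch of that mapping, assuming a power-of-two chunk size (the names chunk_of and CHUNK_SIZE are illustrative, not from this change):

    // Sketch only: mirrors what VTBufferChunk::chunk(ptr) is assumed to do.
    // A chunk is CHUNK_SIZE bytes, aligned on CHUNK_SIZE, header at its base.
    static const intptr_t CHUNK_SIZE = 4 * 1024;         // illustrative size
    static VTBufferChunk* chunk_of(void* p) {
      intptr_t mask = ~(CHUNK_SIZE - 1);                 // clear the offset-in-chunk bits
      return (VTBufferChunk*)((intptr_t)p & mask);       // interior pointer -> chunk header
    }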
  55 
  56 oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) {
  57   assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types");
  58   JavaThread* thread = (JavaThread*)THREAD;
  59   if (thread->vt_alloc_ptr() == NULL) {
  60     if (!allocate_vt_chunk(thread)) {
  61       return NULL; // will trigger fallback strategy: allocation in Java heap
  62     }
  63   }
  64   assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful");
  65   int allocation_size_in_bytes = k->size_helper() * HeapWordSize;
  66   if ((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes  >= thread->vt_alloc_limit()) {
  67     if (allocation_size_in_bytes > (int)VTBufferChunk::max_alloc_size()) {
  68       // Too big to be allocated in a buffer
  69       return NULL;
  70     }
  71     VTBufferChunk* next = VTBufferChunk::chunk(thread->vt_alloc_ptr())->next();
  72     if (next != NULL) {
  73       thread->set_vt_alloc_ptr(next->first_alloc());
  74       thread->set_vt_alloc_limit(next->alloc_limit());
  75     } else {
  76       if (!allocate_vt_chunk(thread)) {
  77         return NULL; // will trigger fallback strategy: allocation in Java heap
  78       }
  79     }
  80   }
  81   assert((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes < thread->vt_alloc_limit(),"otherwise the logic above is wrong");
  82   oop new_vt = (oop)thread->vt_alloc_ptr();
  83   int allocation_size_in_words = k->size_helper();
  84   thread->increment_vtchunk_total_memory_buffered(allocation_size_in_words * HeapWordSize);
  85   int increment = align_object_size(allocation_size_in_words);
  86   void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize;
  87   new_ptr = MIN2(new_ptr, thread->vt_alloc_limit());
  88   assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()),
  89       "old and new alloc ptr must be in the same chunk");
  90   thread->set_vt_alloc_ptr(new_ptr);
  91   // the value and its header must be initialized before being returned!!!
  92   memset(((char*)(oopDesc*)new_vt), 0, allocation_size_in_bytes);
  93   new_vt->set_klass(k);
  94   assert(((intptr_t)(oopDesc*)k->java_mirror() & (intptr_t)VTBuffer::mark_mask) == 0, "Checking least significant bits are available");
  95   new_vt->set_mark(markOop(k->java_mirror()));
  96   if (UseG1GC) {
  97     G1SATBCardTableModRefBS::enqueue(k->java_mirror());
  98   }
  99   return new_vt;
 100 }
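
allocate_value deliberately returns NULL rather than raising an error when the value cannot be buffered; the interpreter then falls back to an ordinary Java heap allocation, as the comments above note. A hedged sketch of that caller-side pattern (the helper name and the use of allocate_instance as the heap path are assumptions, not part of this change):

    // Sketch of the fallback strategy mentioned above.
    oop allocate_value_or_heap(ValueKlass* vk, TRAPS) {
      oop v = VTBuffer::allocate_value(vk, CHECK_NULL);  // fast path: thread-local buffer
      if (v == NULL) {
        // Buffer exhausted or value too large for a chunk: use the Java heap.
        v = vk->allocate_instance(CHECK_NULL);           // assumed heap allocation entry point
      }
      return v;
    }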
 101 
 102 bool VTBuffer::allocate_vt_chunk(JavaThread* thread) {
 103   VTBufferChunk* new_chunk = NULL;
 104   // Trying local cache
 105   if (thread->local_free_chunk() != NULL) {
 106     new_chunk = thread->local_free_chunk();
 107     thread->set_local_free_chunk(NULL);
 108   } else {
 109     // Trying global pool
 110     MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
 111     if (_free_list != NULL) {
 112       new_chunk = _free_list;
 113       _free_list = new_chunk->next();
 114       if (_free_list != NULL) {
 115         _free_list->set_prev(NULL);
 116       }
 117       new_chunk->set_next(NULL);
 118       _pool_counter--;


 137   assert(new_chunk->index() == -1, "Sanity check");
 138   new_chunk->set_owner(thread);
 139   if(current != NULL) {
 140     new_chunk->set_prev(current);
 141     new_chunk->set_index(current->index() + 1);
 142     current->set_next(new_chunk);
 143   } else {
 144     new_chunk->set_index(0);
 145   }
 146   thread->increment_vtchunk_in_use();
 147   thread->set_vt_alloc_ptr(new_chunk->first_alloc());
 148   thread->set_vt_alloc_limit(new_chunk->alloc_limit());
 149   return true; // allocation was successful
 150 }
 151 
 152 void VTBuffer::recycle_chunk(JavaThread* thread, VTBufferChunk* chunk) {
 153   if (thread->local_free_chunk() == NULL) {
 154     chunk->set_prev(NULL);
 155     chunk->set_next(NULL);
 156     chunk->set_index(-1);
 157     chunk->zap(chunk->first_alloc());
 158     thread->set_local_free_chunk(chunk);
 159   } else {
 160     return_vt_chunk(thread, chunk);
 161   }
 162   thread->decrement_vtchunk_in_use();
 163 }
 164 
 165 // This is the main way to recycle VTBuffer memory; it is called from
 166 // remove_activation() when an interpreter frame is about to be removed
 167 // from the stack. All memory used in the context of this frame is freed,
 168 // and the vt_alloc_ptr is restored to the value it had when the frame
 169 // was created (modulo a possible adjustment if a value is being returned).
 170 void VTBuffer::recycle_vtbuffer(JavaThread* thread, void* alloc_ptr) {
 171   address current_ptr = (address)thread->vt_alloc_ptr();
 172   assert(current_ptr != NULL, "Should not reach here if NULL");
 173   VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
 174   assert(current_chunk->owner() == thread, "Sanity check");
 175   address previous_ptr = (address)alloc_ptr;
 176   if (previous_ptr == NULL) {
 177     // vt_alloc_ptr has not been initialized in this frame
 178     // let's initialize it to the first_alloc() value of the first chunk
 179     VTBufferChunk* first_chunk = current_chunk;
 180     while (first_chunk->prev() != NULL) {
 181       first_chunk = first_chunk->prev();
 182     }
 183     previous_ptr = (address)first_chunk->first_alloc();
 184   }
 185   assert(previous_ptr != NULL, "Should not reach here if NULL");
 186   VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
 187   assert(previous_chunk->owner() == thread, "Sanity check");
 188   if (current_ptr == previous_ptr) return;
 189   assert(current_chunk != previous_chunk || current_ptr >= previous_ptr, "Sanity check");
 190   VTBufferChunk* del = previous_chunk->next();
 191   previous_chunk->set_next(NULL);
 192   thread->set_vt_alloc_ptr(previous_ptr);
 193   previous_chunk->zap(previous_ptr);
 194   thread->set_vt_alloc_limit(previous_chunk->alloc_limit());
 195   while (del != NULL) {
 196     VTBufferChunk* temp = del->next();
 197     VTBuffer::recycle_chunk(thread, del);
 198     del = temp;
 199   }
 200 }
 201 
 202 void VTBuffer::return_vt_chunk(JavaThread* thread, VTBufferChunk* chunk) {
 203   chunk->set_prev(NULL);
 204   chunk->set_owner(NULL);
 205   chunk->set_index(-1);
 206   chunk->zap(chunk->first_alloc());
 207   MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
 208   if (_pool_counter < _max_free_list) {
 209     if (_free_list != NULL) {
 210       chunk->set_next(_free_list);
 211       _free_list->set_prev(chunk);
 212       _free_list = chunk;
 213     } else {
 214       chunk->set_next(NULL);
 215       _free_list = chunk;
 216     }
 217     _pool_counter++;
 218     if (_pool_counter > _max_pool_counter) {
 219       _max_pool_counter = _pool_counter;
 220     }
 221   } else {
 222     delete chunk;
 223     _total_deallocated++;
 224   }
 225   thread->increment_vtchunk_returned();
 226 }
 227 
 228 bool VTBuffer::value_belongs_to_frame(oop p, frame* f) {
 229   return is_value_allocated_after(p, f->interpreter_frame_vt_alloc_ptr());
 230 }
 231 
 232 bool VTBuffer::is_value_allocated_after(oop p, void* a) {
 233   // Test if value p has been allocated after alloc ptr a
 234   int p_chunk_idx = VTBufferChunk::chunk(p)->index();
 235   int frame_first_chunk_idx;
 236   if (a != NULL) {
 237     frame_first_chunk_idx = VTBufferChunk::chunk(a)->index();
 238   } else {
 239     frame_first_chunk_idx = 0;
 240   }
 241   if (p_chunk_idx == frame_first_chunk_idx) {
 242     return (intptr_t*)p >= a;
 243   } else {
 244     return p_chunk_idx > frame_first_chunk_idx;
 245   }
 246 }
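
The test orders allocations lexicographically by (chunk index, address within chunk). Chunks in a thread's chain need not be contiguous or ascending in memory, so raw addresses alone cannot decide; the worked example below (addresses purely illustrative) shows why:

    // Illustrative only: a later chunk can sit at a lower address.
    //   chunk index 2 mapped at 0x9000, alloc ptr a      = 0x9400
    //   chunk index 3 mapped at 0x7000, buffered value p = 0x7040
    // is_value_allocated_after(p, a) is true: 3 > 2, even though 0x7040 < 0x9400.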
 247 
 248 void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) {
 249   assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames");
 250   assert(chunk != NULL, "Should not be called if null");
 251   while (chunk->prev() != NULL) {
 252     chunk = chunk->prev();
 253   }
 254   f.interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc());
 255 }
 256 
 257 extern "C" {
 258   static int compare_reloc_entries(const void* void_a, const void* void_b) {
 259     struct VT_relocation_entry* entry_a = (struct VT_relocation_entry*)void_a;
 260     struct VT_relocation_entry* entry_b = (struct VT_relocation_entry*)void_b;
 261     if (entry_a->chunk_index == entry_b->chunk_index) {
 262       if (entry_a->old_ptr < entry_b->old_ptr) {
 263         return -1;
 264       } else {
 265         return 1;


 276 
 277 void dump_reloc_table(struct VT_relocation_entry* table, int nelem, bool print_new_ptr) {
 278   ResourceMark rm;
 279   for (int i = 0; i < nelem; i++) {
 280     InstanceKlass* ik = InstanceKlass::cast(((oop)table[i].old_ptr)->klass());
 281     tty->print("%d:\t%p\t%d\t%s\t%x", i, table[i].old_ptr, table[i].chunk_index,
 282                 ik->name()->as_C_string(), ik->size_helper() * HeapWordSize);
 283     if (print_new_ptr) {
 284         tty->print_cr("\t%p\t%d\n", table[i].new_ptr, VTBufferChunk::chunk(table[i].new_ptr)->index());
 285     } else {
 286         tty->print_cr("");
 287     }
 288   }
 289 }
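
compare_reloc_entries is defined extern "C" precisely so it can be handed to the C library's qsort; the call site itself falls in a hunk elided from this page. A sketch of what it presumably looks like, reusing the reloc_table and n_entries names from recycle_vt_in_frame (hypothetical, to be checked against the full source):

    // Order relocation entries by (chunk_index, old_ptr) so values are
    // compacted in their original allocation order.
    qsort(reloc_table, n_entries, sizeof(struct VT_relocation_entry), compare_reloc_entries);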
 290 
 291 // Relocate value 'old' after value 'previous'
 292 address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) {
 293   InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass());
 294   assert(ik_old->is_value(), "Sanity check");
 295   VTBufferChunk* chunk = VTBufferChunk::chunk(previous);
 296   address next_alloc = previous + previous_size_in_words * HeapWordSize;
 297   if(next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) {
 298     // relocation can be performed in the same chunk
 299     return next_alloc;
 300   } else {
 301     // relocation must be performed in the next chunk
 302     VTBufferChunk* next_chunk = chunk->next();
 303     assert(next_chunk != NULL, "Because we are compacting, there should be enough chunks");
 304     return (address)next_chunk->first_alloc();
 305   }
 306 }
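
The placement rule above bumps past the previously relocated value and spills to the next chunk only when the incoming value would cross alloc_limit(). Condensed into a standalone sketch (helper name illustrative; address is HotSpot's byte-pointer typedef):

    // Sketch of the destination computation performed by relocate_value.
    static address next_slot(address previous, int previous_size_in_words,
                             int new_size_in_bytes, address limit,
                             address next_chunk_first_alloc) {
      address next_alloc = previous + previous_size_in_words * HeapWordSize;
      if (next_alloc + new_size_in_bytes < limit) {
        return next_alloc;               // fits in the same chunk
      }
      return next_chunk_first_alloc;     // spill to the next in-use chunk
    }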
 307 
 308 oop VTBuffer::relocate_return_value(JavaThread* thread, void* alloc_ptr, oop obj) {
 309   assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values");
 310   assert(obj->klass()->is_value(), "Sanity check");
 311   if (!VTBuffer::is_value_allocated_after(obj, alloc_ptr)) return obj;
 312   ValueKlass* vk = ValueKlass::cast(obj->klass());
 313   address current_ptr = (address)thread->vt_alloc_ptr();
 314   VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
 315   address previous_ptr = (address)alloc_ptr;
 316   if (previous_ptr == NULL) {
 317     VTBufferChunk* c = VTBufferChunk::chunk(obj);
 318     while (c->prev() != NULL) c = c->prev();
 319     previous_ptr = (address)c->first_alloc();
 320   }
 321   VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
 322   address dest;
 323   if ((address)obj != previous_ptr) {
 324     if (previous_chunk == current_chunk
 325         && (previous_ptr + vk->size_helper() * HeapWordSize) < previous_chunk->alloc_limit()) {
 326       dest = previous_ptr;
 327     } else {
 328       assert(previous_chunk->next() != NULL, "Should not happen");
 329       dest = (address)previous_chunk->next()->first_alloc();
 330     }
 331     // Copying header
 332     memcpy(dest, obj, vk->first_field_offset());
 333     // Copying value content
 334     vk->value_store(((char*)(address)obj) + vk->first_field_offset(),
 335                     dest + vk->first_field_offset(), false, true);
 336   } else {
 337     dest = (address)obj;
 338   }
 339   VTBufferChunk* last = VTBufferChunk::chunk(dest);
 340   thread->set_vt_alloc_limit(last->alloc_limit());
 341   void* new_alloc_ptr = MIN2((void*)(dest + vk->size_helper() * HeapWordSize), last->alloc_limit());
 342   thread->set_vt_alloc_ptr(new_alloc_ptr);
 343   assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
 344   VTBufferChunk* del = last->next();
 345   last->set_next(NULL);
 346   while (del != NULL) {
 347     VTBufferChunk* tmp = del->next();
 348     VTBuffer::recycle_chunk(thread, del);
 349     del = tmp;
 350   }
 351   return (oop)dest;
 352 }
 353 
 354 // This method is called to recycle VTBuffer memory when the VM has detected
 355 // that too much memory is being consumed in the current frame context. This
 356 // can only happen when the method contains at least one loop in which new
 357 // values are created.
 358 void VTBuffer::recycle_vt_in_frame(JavaThread* thread, frame* f) {
 359   Ticks begin, end;
 360   Ticks step1, step2, step3, step4, step5, step6, step7;
 361   int returned_chunks = 0;
 362 
 363   if (ReportVTBufferRecyclingTimes) {
 364     begin = Ticks::now();


 402       ((oop)reloc_table[0].old_ptr)->set_mark((markOop)reloc_table[0].new_ptr);
 403       for (int i = 1; i < n_entries; i++) {
 404         reloc_table[i].new_ptr = relocate_value(reloc_table[i].old_ptr, reloc_table[i-1].new_ptr,
 405             InstanceKlass::cast(((oop)reloc_table[i-1].old_ptr)->klass())->size_helper());
 406         ((oop)reloc_table[i].old_ptr)->set_mark((markOop)reloc_table[i].new_ptr);
 407       }
 408       if (ReportVTBufferRecyclingTimes) {
 409         step3 = Ticks::now();
 410       }
 411 
 412       // 4 - update pointers
 413       BufferedValuesPointersUpdate update_closure = BufferedValuesPointersUpdate(f);
 414       f->buffered_values_interpreted_do(&update_closure);
 415       if (ReportVTBufferRecyclingTimes) {
 416         step4 = Ticks::now();
 417       }
 418 
 419       // 5 - relocate values
 420       for (int i = 0; i < n_entries; i++) {
 421         if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) {
 422           assert(VTBufferChunk::chunk(reloc_table[i].old_ptr)->owner() == Thread::current(), "Sanity check");
 423           assert(VTBufferChunk::chunk(reloc_table[i].new_ptr)->owner() == Thread::current(), "Sanity check");
 424           InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass());
 425           // instead of memcpy, a value_store() might be required here
 426           memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize);
 427         }
 428         // Restoring the mark word
 429         ((oop)reloc_table[i].new_ptr)->set_mark(reloc_table[i].mark_word);
 430       }
 431       if (ReportVTBufferRecyclingTimes) {
 432         step5 = Ticks::now();
 433       }
 434 
 435       oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr;
 436       assert(last_oop->is_value(), "sanity check");
 437       assert(VTBufferChunk::chunk((address)last_oop)->owner() == Thread::current(), "Sanity check");
 438       VTBufferChunk* last_chunk = VTBufferChunk::chunk(last_oop);
 439       InstanceKlass* ik = InstanceKlass::cast(last_oop->klass());
 440       thread->set_vt_alloc_limit(last_chunk->alloc_limit());
 441       void* new_alloc_ptr = MIN2((void*)((address)last_oop + ik->size_helper() * HeapWordSize), thread->vt_alloc_limit());
 442       thread->set_vt_alloc_ptr(new_alloc_ptr);
 443       assert(VTBufferChunk::chunk(thread->vt_alloc_ptr())->owner() == Thread::current(), "Sanity check");
 444       assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
 445       if (ReportVTBufferRecyclingTimes) {
 446         step6 = Ticks::now();
 447       }
 448 
 449       // 7 - free/return unused chunks
 450       VTBufferChunk* last = VTBufferChunk::chunk(thread->vt_alloc_ptr());
 451       VTBufferChunk* del = last->next();
 452       last->set_next(NULL);
 453       while (del != NULL) {
 454         returned_chunks++;
 455         VTBufferChunk* tmp = del->next();
 456         VTBuffer::recycle_chunk(thread, del);
 457         del = tmp;
 458       }
 459       if (ReportVTBufferRecyclingTimes) {
 460         step7 = Ticks::now();
 461       }
 462     } else {
 463       f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)thread->vt_alloc_ptr());
 464     }
 465   }
 466 
 467   // 8 - free relocation table
 468   FREE_RESOURCE_ARRAY(struct VT_relocation_entry, reloc_table, max_entries);
 469 
 470   if (ReportVTBufferRecyclingTimes) {
 471     end = Ticks::now();
 472     ResourceMark rm(thread);
 473     tty->print_cr("VTBufferRecycling: %s : %s.%s %s : " JLONG_FORMAT "us",
 474         thread->name(),
 475         f->interpreter_frame_method()->klass_name()->as_C_string(),
 476         f->interpreter_frame_method()->name()->as_C_string(),
 477         f->interpreter_frame_method()->signature()->as_C_string(),
 478         (end.value() - begin.value()) / 1000);
 479     tty->print("Step1 : " JLONG_FORMAT "ns ", step1.value() - begin.value());
 480     tty->print("Step2 : " JLONG_FORMAT "ns ", step2.value() - step1.value());
 481     tty->print("Step3 : " JLONG_FORMAT "ns ", step3.value() - step2.value());
 482     tty->print("Step4 : " JLONG_FORMAT "ns ", step4.value() - step3.value());
 483     tty->print("Step5 : " JLONG_FORMAT "ns ", step5.value() - step4.value());
 484     tty->print("Step6 : " JLONG_FORMAT "ns ", step6.value() - step5.value());
 485     tty->print("Step7 : " JLONG_FORMAT "ns ", step7.value() - step6.value());
 486     tty->print("Step8 : " JLONG_FORMAT "ns ", end.value() - step7.value());
 487     tty->print_cr("Returned chunks: %d", returned_chunks);
 488   }
 489 }
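
Steps 1 and 2 fall in hunks elided from this page; the outline below reconstructs the whole compaction from the step markers and closures visible above (the details of steps 1-2 are inferred and should be checked against the full source):

    // Outline of recycle_vt_in_frame:
    //   1. mark live buffered values in the frame, fill reloc_table   (BufferedValuesMarking)
    //   2. sort reloc_table by (chunk_index, old_ptr)                 (compare_reloc_entries)
    //   3. compute each new_ptr and stash it in the value's mark word
    //   4. rewrite the frame's oops through the stashed mark words    (BufferedValuesPointersUpdate)
    //   5. memcpy each value to new_ptr, then restore its saved mark word
    //   6. reset the thread's vt_alloc_ptr / vt_alloc_limit past the last value
    //   7. return the now-unused chunks                               (recycle_chunk)
    //   8. free the resource-allocated relocation table               (FREE_RESOURCE_ARRAY)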
 490 
 491 void BufferedValuesMarking::do_buffered_value(oop* p) {
 492   assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
 493   if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
 494     if (!(*p)->mark()->is_marked()) {
 495       assert(*_index < _size, "index outside of relocation table range");
 496       _reloc_table[*_index].old_ptr = (address)*p;
 497       _reloc_table[*_index].chunk_index = VTBufferChunk::chunk(*p)->index();
 498       _reloc_table[*_index].mark_word = (*p)->mark();
 499       *_index = (*_index) + 1;
 500       (*p)->set_mark((*p)->mark()->set_marked());
 501     }
 502   }
 503 }
 504 
 505 void BufferedValuesPointersUpdate::do_buffered_value(oop* p) {
 506   assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
 507   // might be coded more efficiently just by checking that the mark word is not NULL
 508   if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
 509     *p = (oop)(*p)->mark();
 510   }
 511 }
 512 
 513 BufferedValuesDealiaser::BufferedValuesDealiaser(JavaThread* thread) {
 514   Thread* current = Thread::current();
 515   assert(current->buffered_values_dealiaser() == NULL, "Must not be used twice concurrently");
 516   VTBuffer::Mark mark = VTBuffer::switch_mark(thread->current_vtbuffer_mark());
 517   _target = thread;
 518   _current_mark = mark;
 519   thread->set_current_vtbuffer_mark(_current_mark);
 520   current->_buffered_values_dealiaser = this;
 521 }
 522 
 523 void BufferedValuesDealiaser::oops_do(OopClosure* f, oop value) {
 524 
 525   assert(VTBuffer::is_in_vt_buffer((oopDesc*)value), "Should only be called on buffered values");
 526 
 527   intptr_t mark = *(intptr_t*)value->mark_addr();
 528   if ((mark & VTBuffer::mark_mask) == _current_mark) {
 529     return;
 530   }
 531 
 532   ValueKlass* vk = ValueKlass::cast(value->klass());
 533 
 534   oop mirror = (oopDesc*)((intptr_t)value->mark() & (intptr_t)~VTBuffer::mark_mask);
 535   assert(oopDesc::is_oop(mirror), "Sanity check");
 536   value->set_mark((markOop)mirror);
 537 
 538   vk->iterate_over_inside_oops(f, value);
 539 
 540   intptr_t new_mark_word = ((intptr_t) (oopDesc*)(value->mark()))
 541               | (intptr_t)_current_mark;
 542   value->set_mark(markOop((oopDesc*)new_mark_word));
 543 }
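
oops_do relies on a buffered value's mark word holding its klass mirror with the low VTBuffer::mark_mask bits free to carry the current dealiasing epoch; allocate_value asserts exactly that alignment before installing the mirror. A minimal sketch of the tag scheme, with illustrative helper names:

    // Sketch: mark word of a buffered value = mirror pointer | epoch bits.
    static intptr_t tag_mark(oopDesc* mirror, intptr_t epoch) {
      return (intptr_t)mirror | epoch;              // epoch lives in the free low bits
    }
    static oopDesc* untag_mirror(intptr_t mark, intptr_t mark_mask) {
      return (oopDesc*)(mark & ~mark_mask);         // recover the mirror, as done above
    }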
 544 
 545 BufferedValuesDealiaser::~BufferedValuesDealiaser() {
 546   assert(Thread::current()->buffered_values_dealiaser() != NULL, "Should not be NULL");
 547   assert(_target->current_vtbuffer_mark() == _current_mark, "Must be the same");
 548   Thread::current()->_buffered_values_dealiaser = NULL;
 549 }