/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/vtBuffer.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueKlass.hpp"
#include "runtime/frame.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
#include "utilities/ticks.inline.hpp"

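// VTBuffer provides thread-local buffering of value type instances
// ("buffered values") outside the Java heap. A single virtual memory
// range is reserved at startup and committed one page at a time; each
// committed page becomes a VTBufferChunk. Chunks are linked per thread,
// and allocation inside a chunk is a simple bump of vt_alloc_ptr up to
// vt_alloc_limit. A sketch of the typical lifecycle (the caller-side
// names below are illustrative, not taken from this file):
//
//   oop v = VTBuffer::allocate_value(vk, CHECK_NULL); // bump-pointer allocation
//   ...                                               // value used by the interpreter
//   VTBuffer::recycle_vtbuffer(thread, saved_ptr);    // bulk free on frame removal
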
VTBufferChunk* VTBuffer::_free_list = NULL;
Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never);
int VTBuffer::_pool_counter = 0;
int VTBuffer::_max_pool_counter = 0;
int VTBuffer::_total_allocated = 0;
int VTBuffer::_total_failed = 0;
address VTBuffer::_base = NULL;
address VTBuffer::_end = NULL;
address VTBuffer::_commit_ptr;
size_t VTBuffer::_size;

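// Reserves (but does not commit) the whole buffer address range.
// If the reservation fails and the user explicitly requested a buffer
// size, initialization aborts; otherwise buffering is silently disabled
// by setting ValueTypesBufferMaxMemory to 0.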
void VTBuffer::init() {
  if ((!(EnableValhalla || EnableMVT)) || ValueTypesBufferMaxMemory == 0) {
    _base = NULL;
    _end = NULL;
    _commit_ptr = NULL;
    _size = 0;
    return;
  }
  size_t size = ValueTypesBufferMaxMemory * os::vm_page_size();
  _base = (address)os::reserve_memory(size, NULL, (size_t)os::vm_page_size());
  if (_base == NULL) {
    if (!FLAG_IS_DEFAULT(ValueTypesBufferMaxMemory)) {
      vm_exit_during_initialization("Cannot reserve memory requested for Thread-Local Value Buffer");
    }
    // memory allocation failed, disabling buffering
    ValueTypesBufferMaxMemory = 0;
    _size = 0;
    _commit_ptr = NULL;
    _end = NULL;
  } else {
    _commit_ptr = _base;
    _size = size;
    _end = _base + _size;
  }
}

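// Commits one page at the current commit pointer and turns it into a
// fresh VTBufferChunk. Returns NULL when the reserved range is exhausted
// or the commit fails. Callers must hold _pool_lock, since _commit_ptr
// is shared global state.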
VTBufferChunk* VTBuffer::get_new_chunk(JavaThread* thread) {
  if (_commit_ptr >= _base + _size) {
    return NULL;
  }
  if (os::commit_memory((char*)_commit_ptr, (size_t)os::vm_page_size(), false)) {
    VTBufferChunk* chunk = (VTBufferChunk*)_commit_ptr;
    _commit_ptr += os::vm_page_size();
    VTBufferChunk::init(chunk, thread);
    return chunk;
  } else {
    return NULL;
  }
}

void VTBufferChunk::zap(void* start) {
  assert(this == (VTBufferChunk*)((intptr_t)start & chunk_mask()), "start must be in current chunk");
  if (ZapVTBufferChunks) {
    size_t size = chunk_size() - ((char*)start - (char*)this);
    memset((char*)start, 0, size);
  }
}

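// Allocates a buffered value of klass k in the current thread's buffer.
// Returns NULL when the allocation cannot be satisfied (no chunk could
// be committed, or the value is too big for a single chunk); callers
// are then expected to fall back to a Java heap allocation. The new
// value is zero-filled and its mark word is initialized with the klass'
// Java mirror, whose low bits are reserved for VTBuffer marking.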
oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) {
  assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types");
  JavaThread* thread = (JavaThread*)THREAD;
  if (thread->vt_alloc_ptr() == NULL) {
    if (!allocate_vt_chunk(thread)) {
      return NULL; // will trigger fall back strategy: allocation in Java heap
    }
  }
  assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful");
  int allocation_size_in_bytes = k->size_helper() * HeapWordSize;
  if ((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes >= thread->vt_alloc_limit()) {
    if (allocation_size_in_bytes > (int)VTBufferChunk::max_alloc_size()) {
      // Too big to be allocated in a buffer
      return NULL;
    }
    VTBufferChunk* next = VTBufferChunk::chunk(thread->vt_alloc_ptr())->next();
    if (next != NULL) {
      thread->set_vt_alloc_ptr(next->first_alloc());
      thread->set_vt_alloc_limit(next->alloc_limit());
    } else {
      if (!allocate_vt_chunk(thread)) {
        return NULL; // will trigger fall back strategy: allocation in Java heap
      }
    }
  }
  assert((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes < thread->vt_alloc_limit(), "otherwise the logic above is wrong");
  oop new_vt = (oop)thread->vt_alloc_ptr();
  int allocation_size_in_words = k->size_helper();
  thread->increment_vtchunk_total_memory_buffered(allocation_size_in_words * HeapWordSize);
  int increment = align_object_size(allocation_size_in_words);
  void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize;
  new_ptr = MIN2(new_ptr, thread->vt_alloc_limit());
  assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()),
      "old and new alloc ptr must be in the same chunk");
  thread->set_vt_alloc_ptr(new_ptr);
  // The value and its header must be initialized before being returned
  memset(((char*)(oopDesc*)new_vt), 0, allocation_size_in_bytes);
  new_vt->set_klass(k);
  assert(((intptr_t)(oopDesc*)k->java_mirror() & (intptr_t)VTBuffer::mark_mask) == 0, "Checking least significant bits are available");
  new_vt->set_mark(markOop(k->java_mirror()));
  return new_vt;
}

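// Installs a new current chunk for the thread, trying three sources in
// order: the thread's single cached free chunk, the global free list,
// and finally a newly committed chunk. Returns false if all three fail.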
bool VTBuffer::allocate_vt_chunk(JavaThread* thread) {
  VTBufferChunk* new_chunk = NULL;
  // Try the thread-local cached chunk first
  if (thread->local_free_chunk() != NULL) {
    new_chunk = thread->local_free_chunk();
    thread->set_local_free_chunk(NULL);
  } else {
    // Try the global pool
    MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
    if (_free_list != NULL) {
      new_chunk = _free_list;
      _free_list = new_chunk->next();
      if (_free_list != NULL) {
        _free_list->set_prev(NULL);
      }
      new_chunk->set_next(NULL);
      _pool_counter--;
    } else {
      // Try to commit a new chunk
      // _pool_lock is held for thread-safety
      new_chunk = get_new_chunk(thread);
      _total_allocated += new_chunk == NULL ? 0 : 1;
    }
  }
  if (new_chunk == NULL) {
    _total_failed++;
    thread->increment_vtchunk_failed();
    return false; // allocation failed
  }
  VTBufferChunk* current = thread->current_chunk();
  assert(new_chunk->owner() == thread || new_chunk->owner() == NULL, "Sanity check");
  assert(new_chunk->index() == -1, "Sanity check");
  new_chunk->set_owner(thread);
  if (current != NULL) {
    new_chunk->set_prev(current);
    new_chunk->set_index(current->index() + 1);
    current->set_next(new_chunk);
  } else {
    new_chunk->set_index(0);
  }
  thread->increment_vtchunk_in_use();
  thread->set_vt_alloc_ptr(new_chunk->first_alloc());
  thread->set_vt_alloc_limit(new_chunk->alloc_limit());
  return true; // allocation was successful
}

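// Frees a chunk that is no longer used by its owner: the first freed
// chunk is cached locally on the thread, subsequent ones go back to
// the global pool.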
void VTBuffer::recycle_chunk(JavaThread* thread, VTBufferChunk* chunk) {
  if (thread->local_free_chunk() == NULL) {
    chunk->set_prev(NULL);
    chunk->set_next(NULL);
    chunk->set_index(-1);
    chunk->zap(chunk->first_alloc());
    thread->set_local_free_chunk(chunk);
  } else {
    return_vt_chunk(thread, chunk);
  }
  thread->decrement_vtchunk_in_use();
}

// This is the main way to recycle VTBuffer memory; it is called from
// remove_activation() when an interpreter frame is about to be removed
// from the stack. All memory used in the context of this frame is freed,
// and the vt_alloc_ptr is restored to the value it had when the frame
// was created (modulo a possible adjustment if a value is being returned).
void VTBuffer::recycle_vtbuffer(JavaThread* thread, void* alloc_ptr) {
  address current_ptr = (address)thread->vt_alloc_ptr();
  assert(current_ptr != NULL, "Should not reach here if NULL");
  VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
  assert(current_chunk->owner() == thread, "Sanity check");
  address previous_ptr = (address)alloc_ptr;
  if (previous_ptr == NULL) {
    // vt_alloc_ptr has not been initialized in this frame
    // let's initialize it to the first_alloc() value of the first chunk
    VTBufferChunk* first_chunk = current_chunk;
    while (first_chunk->prev() != NULL) {
      first_chunk = first_chunk->prev();
    }
    previous_ptr = (address)first_chunk->first_alloc();
  }
  assert(previous_ptr != NULL, "Should not reach here if NULL");
  VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
  assert(previous_chunk->owner() == thread, "Sanity check");
  if (current_ptr == previous_ptr) return;
  assert(current_chunk != previous_chunk || current_ptr >= previous_ptr, "Sanity check");
  VTBufferChunk* del = previous_chunk->next();
  previous_chunk->set_next(NULL);
  thread->set_vt_alloc_ptr(previous_ptr);
  previous_chunk->zap(previous_ptr);
  thread->set_vt_alloc_limit(previous_chunk->alloc_limit());
  while (del != NULL) {
    VTBufferChunk* temp = del->next();
    VTBuffer::recycle_chunk(thread, del);
    del = temp;
  }
}

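// Pushes a chunk onto the global free list. The chunk is unlinked from
// its owner and zapped (if ZapVTBufferChunks) before being published.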
void VTBuffer::return_vt_chunk(JavaThread* thread, VTBufferChunk* chunk) {
  chunk->set_prev(NULL);
  chunk->set_owner(NULL);
  chunk->set_index(-1);
  chunk->zap(chunk->first_alloc());
  MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
  if (_free_list != NULL) {
    chunk->set_next(_free_list);
    _free_list->set_prev(chunk);
    _free_list = chunk;
  } else {
    chunk->set_next(NULL);
    _free_list = chunk;
  }
  _pool_counter++;
  if (_pool_counter > _max_pool_counter) {
    _max_pool_counter = _pool_counter;
  }
  thread->increment_vtchunk_returned();
}

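// Buffered values are ordered by (chunk index, address within chunk):
// a value was allocated after an allocation pointer if it sits in a
// later chunk, or at a higher address within the same chunk.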
bool VTBuffer::value_belongs_to_frame(oop p, frame* f) {
  return is_value_allocated_after(p, f->interpreter_frame_vt_alloc_ptr());
}

bool VTBuffer::is_value_allocated_after(oop p, void* a) {
  // Test if value p has been allocated after alloc ptr a
  int p_chunk_idx = VTBufferChunk::chunk(p)->index();
  int frame_first_chunk_idx;
  if (a != NULL) {
    frame_first_chunk_idx = VTBufferChunk::chunk(a)->index();
  } else {
    frame_first_chunk_idx = 0;
  }
  if (p_chunk_idx == frame_first_chunk_idx) {
    return (intptr_t*)p >= a;
  } else {
    return p_chunk_idx > frame_first_chunk_idx;
  }
}

void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) {
  assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames");
  assert(chunk != NULL, "Should not be called if null");
  while (chunk->prev() != NULL) {
    chunk = chunk->prev();
  }
  f.interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc());
}

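// qsort comparator ordering relocation entries by (chunk_index, old_ptr),
// i.e. by allocation order, so that compaction can slide values towards
// the beginning of the frame's chunk list.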
extern "C" {
  static int compare_reloc_entries(const void* void_a, const void* void_b) {
    struct VT_relocation_entry* entry_a = (struct VT_relocation_entry*)void_a;
    struct VT_relocation_entry* entry_b = (struct VT_relocation_entry*)void_b;
    if (entry_a->chunk_index == entry_b->chunk_index) {
      if (entry_a->old_ptr < entry_b->old_ptr) {
        return -1;
      } else {
        return 1;
      }
    } else {
      if (entry_a->chunk_index < entry_b->chunk_index) {
        return -1;
      } else {
        return 1;
      }
    }
  }
}

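// Debugging helper: prints one line per relocation entry (old address,
// chunk index, klass name, size, and optionally the new address and its
// chunk index).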
void dump_reloc_table(struct VT_relocation_entry* table, int nelem, bool print_new_ptr) {
  ResourceMark rm;
  for (int i = 0; i < nelem; i++) {
    InstanceKlass* ik = InstanceKlass::cast(((oop)table[i].old_ptr)->klass());
    tty->print("%d:\t%p\t%d\t%s\t%x", i, table[i].old_ptr, table[i].chunk_index,
               ik->name()->as_C_string(), ik->size_helper() * HeapWordSize);
    if (print_new_ptr) {
      tty->print_cr("\t%p\t%d", table[i].new_ptr, VTBufferChunk::chunk(table[i].new_ptr)->index());
    } else {
      tty->cr();
    }
  }
}

// Relocate value 'old' after value 'previous'
address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) {
  InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass());
  assert(ik_old->is_value(), "Sanity check");
  VTBufferChunk* chunk = VTBufferChunk::chunk(previous);
  address next_alloc = previous + previous_size_in_words * HeapWordSize;
  if (next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) {
    // relocation can be performed in the same chunk
    return next_alloc;
  } else {
    // relocation must be performed in the next chunk
    VTBufferChunk* next_chunk = chunk->next();
    assert(next_chunk != NULL, "Because we are compacting, there should be enough chunks");
    return (address)next_chunk->first_alloc();
  }
}

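// Called when a buffered value is returned to a caller frame whose
// allocation pointer is alloc_ptr: the value is copied (header plus
// field payload, via value_store()) down to the first free slot at or
// after alloc_ptr, the thread's allocation pointer is reset just past
// the copy, and now-unused chunks are recycled. Returns the relocated oop.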
oop VTBuffer::relocate_return_value(JavaThread* thread, void* alloc_ptr, oop obj) {
  assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values");
  assert(obj->klass()->is_value(), "Sanity check");
  if (!VTBuffer::is_value_allocated_after(obj, alloc_ptr)) return obj;
  ValueKlass* vk = ValueKlass::cast(obj->klass());
  address current_ptr = (address)thread->vt_alloc_ptr();
  VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
  address previous_ptr = (address)alloc_ptr;
  if (previous_ptr == NULL) {
    VTBufferChunk* c = VTBufferChunk::chunk(obj);
    while (c->prev() != NULL) c = c->prev();
    previous_ptr = (address)c->first_alloc();
  }
  VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
  address dest;
  if ((address)obj != previous_ptr) {
    if (previous_chunk == current_chunk
        && (previous_ptr + vk->size_helper() * HeapWordSize) < previous_chunk->alloc_limit()) {
      dest = previous_ptr;
    } else {
      assert(previous_chunk->next() != NULL, "Should not happen");
      dest = (address)previous_chunk->next()->first_alloc();
    }
    // Copying header
    memcpy(dest, obj, vk->first_field_offset());
    // Copying value content
    vk->value_store(((char*)(address)obj) + vk->first_field_offset(),
                    dest + vk->first_field_offset(), false, true);
  } else {
    dest = (address)obj;
  }
  VTBufferChunk* last = VTBufferChunk::chunk(dest);
  thread->set_vt_alloc_limit(last->alloc_limit());
  void* new_alloc_ptr = MIN2((void*)(dest + vk->size_helper() * HeapWordSize), last->alloc_limit());
  thread->set_vt_alloc_ptr(new_alloc_ptr);
  assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
  VTBufferChunk* del = last->next();
  last->set_next(NULL);
  while (del != NULL) {
    VTBufferChunk* tmp = del->next();
    VTBuffer::recycle_chunk(thread, del);
    del = tmp;
  }
  return (oop)dest;
}

// This method is called to recycle VTBuffer memory when the VM has detected
// that too much memory is being consumed in the current frame context. This
// can only happen when the method contains at least one loop in which new
// values are created.
void VTBuffer::recycle_vt_in_frame(JavaThread* thread, frame* f) {
  Ticks begin, end;
  Ticks step1, step2, step3, step4, step5, step6, step7;
  int returned_chunks = 0;

  if (ReportVTBufferRecyclingTimes) {
    begin = Ticks::now();
  }
  assert(f->is_interpreted_frame(), "only interpreted frames are using VT buffering so far");
  ResourceMark rm(thread);

  // 1 - allocate relocation table
  Method* m = f->interpreter_frame_method();
  int max_entries = m->max_locals() + m->max_stack();
  VT_relocation_entry* reloc_table = NEW_RESOURCE_ARRAY_IN_THREAD(thread, struct VT_relocation_entry, max_entries);
  int n_entries = 0;
  if (ReportVTBufferRecyclingTimes) {
    step1 = Ticks::now();
  }

  {
    // No GC should occur during the phases 2->5
    // either because the mark word (usually containing the pointer
    // to the Java mirror) is used for marking, or because the values are being relocated
    NoSafepointVerifier nsv;

    // 2 - marking phase + populate relocation table
    BufferedValuesMarking marking_closure = BufferedValuesMarking(f, reloc_table, max_entries, &n_entries);
    f->buffered_values_interpreted_do(&marking_closure);
    if (ReportVTBufferRecyclingTimes) {
      step2 = Ticks::now();
    }

    if (n_entries > 0) {
      // 3 - sort relocation table entries and compute compaction
      qsort(reloc_table, n_entries, sizeof(struct VT_relocation_entry), compare_reloc_entries);
      if (f->interpreter_frame_vt_alloc_ptr() == NULL) {
        VTBufferChunk* chunk = VTBufferChunk::chunk(reloc_table[0].old_ptr);
        while (chunk->prev() != NULL) chunk = chunk->prev();
        //f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc());
        reloc_table[0].new_ptr = (address)chunk->first_alloc();
      } else {
        reloc_table[0].new_ptr = (address)f->interpreter_frame_vt_alloc_ptr();
      }
      ((oop)reloc_table[0].old_ptr)->set_mark((markOop)reloc_table[0].new_ptr);
      for (int i = 1; i < n_entries; i++) {
        reloc_table[i].new_ptr = relocate_value(reloc_table[i].old_ptr, reloc_table[i-1].new_ptr,
            InstanceKlass::cast(((oop)reloc_table[i-1].old_ptr)->klass())->size_helper());
        ((oop)reloc_table[i].old_ptr)->set_mark((markOop)reloc_table[i].new_ptr);
      }
      if (ReportVTBufferRecyclingTimes) {
        step3 = Ticks::now();
      }

      // 4 - update pointers
      BufferedValuesPointersUpdate update_closure = BufferedValuesPointersUpdate(f);
      f->buffered_values_interpreted_do(&update_closure);
      if (ReportVTBufferRecyclingTimes) {
        step4 = Ticks::now();
      }

      // 5 - relocate values
      for (int i = 0; i < n_entries; i++) {
        if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) {
          assert(VTBufferChunk::chunk(reloc_table[i].old_ptr)->owner() == Thread::current(), "Sanity check");
          assert(VTBufferChunk::chunk(reloc_table[i].new_ptr)->owner() == Thread::current(), "Sanity check");
          InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass());
          // instead of memcpy, a value_store() might be required here
          memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize);
        }
        // Restoring the mark word
        ((oop)reloc_table[i].new_ptr)->set_mark(reloc_table[i].mark_word);
      }
      if (ReportVTBufferRecyclingTimes) {
        step5 = Ticks::now();
      }

      // 6 - reset the thread's allocation pointer and limit just past the last relocated value
      oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr;
      assert(last_oop->is_value(), "sanity check");
      assert(VTBufferChunk::chunk((address)last_oop)->owner() == Thread::current(), "Sanity check");
      VTBufferChunk* last_chunk = VTBufferChunk::chunk(last_oop);
      InstanceKlass* ik = InstanceKlass::cast(last_oop->klass());
      thread->set_vt_alloc_limit(last_chunk->alloc_limit());
      void* new_alloc_ptr = MIN2((void*)((address)last_oop + ik->size_helper() * HeapWordSize), thread->vt_alloc_limit());
      thread->set_vt_alloc_ptr(new_alloc_ptr);
      assert(VTBufferChunk::chunk(thread->vt_alloc_ptr())->owner() == Thread::current(), "Sanity check");
      assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
      if (ReportVTBufferRecyclingTimes) {
        step6 = Ticks::now();
      }

      // 7 - free/return unused chunks
      VTBufferChunk* last = VTBufferChunk::chunk(thread->vt_alloc_ptr());
      VTBufferChunk* del = last->next();
      last->set_next(NULL);
      while (del != NULL) {
        returned_chunks++;
        VTBufferChunk* tmp = del->next();
        VTBuffer::recycle_chunk(thread, del);
        del = tmp;
      }
      if (ReportVTBufferRecyclingTimes) {
        step7 = Ticks::now();
      }
    } else {
      f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)thread->vt_alloc_ptr());
    }
  }

  // 8 - free relocation table
  FREE_RESOURCE_ARRAY(struct VT_relocation_entry, reloc_table, max_entries);

  if (ReportVTBufferRecyclingTimes) {
    end = Ticks::now();
    ResourceMark rm(thread);
    tty->print_cr("VTBufferRecycling: %s : %s.%s %s : " JLONG_FORMAT "us",
        thread->name(),
        f->interpreter_frame_method()->klass_name()->as_C_string(),
        f->interpreter_frame_method()->name()->as_C_string(),
        f->interpreter_frame_method()->signature()->as_C_string(),
        (end.value() - begin.value()) / 1000);
    tty->print("Step1 : " JLONG_FORMAT "ns ", step1.value() - begin.value());
    tty->print("Step2 : " JLONG_FORMAT "ns ", step2.value() - step1.value());
    tty->print("Step3 : " JLONG_FORMAT "ns ", step3.value() - step2.value());
    tty->print("Step4 : " JLONG_FORMAT "ns ", step4.value() - step3.value());
    tty->print("Step5 : " JLONG_FORMAT "ns ", step5.value() - step4.value());
    tty->print("Step6 : " JLONG_FORMAT "ns ", step6.value() - step5.value());
    tty->print("Step7 : " JLONG_FORMAT "ns ", step7.value() - step6.value());
    tty->print("Step8 : " JLONG_FORMAT "ns ", end.value() - step7.value());
    tty->print_cr("Returned chunks: %d", returned_chunks);
  }
}

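// Marking closure for step 2 of recycle_vt_in_frame(): records each
// frame-local buffered value once in the relocation table, saving its
// mark word and setting the marked bit to avoid duplicate entries.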
void BufferedValuesMarking::do_buffered_value(oop* p) {
  assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
  if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
    if (!(*p)->mark()->is_marked()) {
      assert(*_index < _size, "index outside of relocation table range");
      _reloc_table[*_index].old_ptr = (address)*p;
      _reloc_table[*_index].chunk_index = VTBufferChunk::chunk(*p)->index();
      _reloc_table[*_index].mark_word = (*p)->mark();
      *_index = (*_index) + 1;
      (*p)->set_mark((*p)->mark()->set_marked());
    }
  }
}

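// Update closure for step 4 of recycle_vt_in_frame(): rewrites frame
// slots to point to the new location, which the marking phase stored
// in each value's mark word.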
void BufferedValuesPointersUpdate::do_buffered_value(oop* p) {
  assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
  // might be coded more efficiently just by checking mark word is not NULL
  if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
    *p = (oop)(*p)->mark();
  }
}

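// A BufferedValuesDealiaser tracks which buffered values have already
// been visited during one traversal, using the low bits of each value's
// mark word. Construction flips the per-thread mark
// (VTBuffer::switch_mark), so marks left by a previous traversal
// automatically become stale.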
BufferedValuesDealiaser::BufferedValuesDealiaser(JavaThread* thread) {
  Thread* current = Thread::current();
  assert(current->buffered_values_dealiaser() == NULL, "Must not be used twice concurrently");
  VTBuffer::Mark mark = VTBuffer::switch_mark(thread->current_vtbuffer_mark());
  _target = thread;
  _current_mark = mark;
  thread->set_current_vtbuffer_mark(_current_mark);
  current->_buffered_values_dealiaser = this;
}

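// Visits the oops of a buffered value at most once per dealiasing
// session: if the value's mark bits already match _current_mark it has
// been processed. Otherwise the mirror pointer is temporarily restored
// into the mark word (clearing the mark bits), the value's oop fields
// are iterated, and the mark word is re-tagged with _current_mark.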
void BufferedValuesDealiaser::oops_do(OopClosure* f, oop value) {
  assert(VTBuffer::is_in_vt_buffer((oopDesc*)value), "Should only be called on buffered values");

  intptr_t mark = *(intptr_t*)value->mark_addr();
  if ((mark & VTBuffer::mark_mask) == _current_mark) {
    return;
  }

  ValueKlass* vk = ValueKlass::cast(value->klass());

  oop mirror = (oopDesc*)((intptr_t)value->mark() & (intptr_t)~VTBuffer::mark_mask);
  assert(oopDesc::is_oop(mirror), "Sanity check");
  value->set_mark((markOop)mirror);

  vk->iterate_over_inside_oops(f, value);

  intptr_t new_mark_word = ((intptr_t)(oopDesc*)(value->mark()))
              | (intptr_t)_current_mark;
  value->set_mark(markOop((oopDesc*)new_mark_word));

  assert(((intptr_t)value->mark() & VTBuffer::mark_mask) == _current_mark, "Sanity check");
}

BufferedValuesDealiaser::~BufferedValuesDealiaser() {
  assert(Thread::current()->buffered_values_dealiaser() != NULL, "Should not be NULL");
  assert(_target->current_vtbuffer_mark() == _current_mark, "Must be the same");
  Thread::current()->_buffered_values_dealiaser = NULL;
}