1 /* 2 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shared/gcLocker.hpp" 27 #include "memory/vtBuffer.hpp" 28 #include "oops/oop.inline.hpp" 29 #include "oops/valueKlass.hpp" 30 #include "runtime/frame.hpp" 31 #include "runtime/globals_extension.hpp" 32 #include "runtime/os.hpp" 33 #include "runtime/thread.hpp" 34 #include "utilities/globalDefinitions.hpp" 35 #include "utilities/ticks.hpp" 36 #include "utilities/ticks.inline.hpp" 37 38 VTBufferChunk* VTBuffer::_free_list = NULL; 39 Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never); 40 int VTBuffer::_pool_counter = 0; 41 int VTBuffer::_max_pool_counter = 0; 42 int VTBuffer::_total_allocated = 0; 43 int VTBuffer::_total_failed = 0; 44 address VTBuffer::_base = NULL; 45 address VTBuffer::_commit_ptr; 46 size_t VTBuffer::_size; 47 48 void VTBuffer::init() { 49 if ((!EnableValhalla) || ValueTypesBufferMaxMemory == 0) { 50 _base = NULL; 51 _commit_ptr = NULL; 52 _size = 0; 53 return; 54 } 55 size_t size = ValueTypesBufferMaxMemory * os::vm_page_size(); 56 _base = (address)os::reserve_memory(size, NULL, (size_t)os::vm_page_size()); 57 if (_base == NULL) { 58 if (!FLAG_IS_DEFAULT(ValueTypesBufferMaxMemory)) { 59 vm_exit_during_initialization("Cannot reserved memory requested for Thread-Local Value Buffer"); 60 } 61 // memory allocation failed, disabling buffering 62 ValueTypesBufferMaxMemory = 0; 63 _size = 0; 64 _commit_ptr = NULL; 65 } else { 66 _commit_ptr = _base; 67 _size = size; 68 } 69 } 70 71 VTBufferChunk* VTBuffer::get_new_chunk(JavaThread* thread) { 72 if (_commit_ptr >= _base + _size) { 73 return NULL; 74 } 75 if (os::commit_memory((char*)_commit_ptr, (size_t)os::vm_page_size(), false)) { 76 VTBufferChunk* chunk = (VTBufferChunk*)_commit_ptr; 77 _commit_ptr += os::vm_page_size(); 78 VTBufferChunk::init(chunk, thread); 79 return chunk; 80 } else { 81 return NULL; 82 } 83 } 84 85 void VTBufferChunk::zap(void* start) { 86 assert(this == (VTBufferChunk*)((intptr_t)start & chunk_mask()), "start must be in current chunk"); 87 if (ZapVTBufferChunks) { 88 size_t size = chunk_size() - ((char*)start - (char*)this); 89 memset((char*)start, 0, size); 90 } 91 } 92 93 oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) { 94 assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types"); 95 JavaThread* thread = (JavaThread*)THREAD; 96 if (thread->vt_alloc_ptr() == NULL) { 97 if (!allocate_vt_chunk(thread)) { 98 return NULL; // will trigger fall back strategy: allocation in Java heap 99 } 100 } 101 assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful"); 102 int allocation_size_in_bytes = k->size_helper() * HeapWordSize; 103 if ((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes >= thread->vt_alloc_limit()) { 104 if (allocation_size_in_bytes > (int)VTBufferChunk::max_alloc_size()) { 105 // Too big to be allocated in a buffer 106 return NULL; 107 } 108 VTBufferChunk* next = VTBufferChunk::chunk(thread->vt_alloc_ptr())->next(); 109 if (next != NULL) { 110 thread->set_vt_alloc_ptr(next->first_alloc()); 111 thread->set_vt_alloc_limit(next->alloc_limit()); 112 } else { 113 if (!allocate_vt_chunk(thread)) { 114 return NULL; // will trigger fall back strategy: allocation in Java heap 115 } 116 } 117 } 118 assert((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes < thread->vt_alloc_limit(),"otherwise the logic above is wrong"); 119 oop new_vt = (oop)thread->vt_alloc_ptr(); 120 int allocation_size_in_words = k->size_helper(); 121 thread->increment_vtchunk_total_memory_buffered(allocation_size_in_words * HeapWordSize); 122 int increment = align_object_size(allocation_size_in_words); 123 void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize; 124 new_ptr = MIN2(new_ptr, thread->vt_alloc_limit()); 125 assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), 126 "old and new alloc ptr must be in the same chunk"); 127 thread->set_vt_alloc_ptr(new_ptr); 128 // the value and its header must be initialized before being returned!!! 129 memset(((char*)(oopDesc*)new_vt), 0, allocation_size_in_bytes); 130 new_vt->set_klass(k); 131 assert(((intptr_t)(oopDesc*)k->java_mirror() & (intptr_t)VTBuffer::mark_mask) == 0, "Checking least significant bits are available"); 132 new_vt->set_mark(markOop(k->java_mirror())); 133 return new_vt; 134 } 135 136 bool VTBuffer::allocate_vt_chunk(JavaThread* thread) { 137 VTBufferChunk* new_chunk = NULL; 138 // Trying local cache; 139 if (thread->local_free_chunk() != NULL) { 140 new_chunk = thread->local_free_chunk(); 141 thread->set_local_free_chunk(NULL); 142 } else { 143 // Trying global pool 144 MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag); 145 if (_free_list != NULL) { 146 new_chunk = _free_list; 147 _free_list = new_chunk->next(); 148 if (_free_list != NULL) { 149 _free_list->set_prev(NULL); 150 } 151 new_chunk->set_next(NULL); 152 _pool_counter--; 153 } else { 154 // Trying to commit a new chunk 155 // Hold _pool_lock for thread-safety 156 new_chunk = get_new_chunk(thread); 157 _total_allocated += new_chunk == NULL ? 0 : 1; 158 } 159 } 160 if (new_chunk == NULL) { 161 _total_failed++; 162 thread->increment_vtchunk_failed(); 163 return false; // allocation failed 164 } 165 VTBufferChunk* current = thread->current_chunk(); 166 assert(new_chunk->owner() == thread || new_chunk->owner()== NULL, "Sanity check"); 167 assert(new_chunk->index() == -1, "Sanity check"); 168 new_chunk->set_owner(thread); 169 if(current != NULL) { 170 new_chunk->set_prev(current); 171 new_chunk->set_index(current->index() + 1); 172 current->set_next(new_chunk); 173 } else { 174 new_chunk->set_index(0); 175 } 176 thread->increment_vtchunk_in_use(); 177 thread->set_vt_alloc_ptr(new_chunk->first_alloc()); 178 thread->set_vt_alloc_limit(new_chunk->alloc_limit()); 179 return true; // allocation was successful 180 } 181 182 void VTBuffer::recycle_chunk(JavaThread* thread, VTBufferChunk* chunk) { 183 if (thread->local_free_chunk() == NULL) { 184 chunk->set_prev(NULL); 185 chunk->set_next(NULL); 186 chunk->set_index(-1); 187 chunk->zap(chunk->first_alloc()); 188 thread->set_local_free_chunk(chunk); 189 } else { 190 return_vt_chunk(thread, chunk); 191 } 192 thread->decrement_vtchunk_in_use(); 193 } 194 195 // This is the main way to recycle VTBuffer memory, it is called from 196 // remove_activation() when an interpreter frame is about to be removed 197 // from the stack. All memory used in the context of this frame is freed, 198 // and the vt_alloc_ptr is restored to the value it had when the frame 199 // was created (modulo a possible adjustment if a value is being returned) 200 void VTBuffer::recycle_vtbuffer(JavaThread* thread, void* alloc_ptr) { 201 address current_ptr = (address)thread->vt_alloc_ptr(); 202 assert(current_ptr != NULL, "Should not reach here if NULL"); 203 VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr); 204 assert(current_chunk->owner() == thread, "Sanity check"); 205 address previous_ptr = (address)alloc_ptr; 206 if (previous_ptr == NULL) { 207 // vt_alloc_ptr has not been initialized in this frame 208 // let's initialize it to the first_alloc() value of the first chunk 209 VTBufferChunk* first_chunk = current_chunk; 210 while (first_chunk->prev() != NULL) { 211 first_chunk = first_chunk->prev(); 212 } 213 previous_ptr = (address)first_chunk->first_alloc(); 214 } 215 assert(previous_ptr != NULL, "Should not reach here if NULL"); 216 VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr); 217 assert(previous_chunk->owner() == thread, "Sanity check"); 218 if (current_ptr == previous_ptr) return; 219 assert(current_chunk != previous_chunk || current_ptr >= previous_ptr, "Sanity check"); 220 VTBufferChunk* del = previous_chunk->next(); 221 previous_chunk->set_next(NULL); 222 thread->set_vt_alloc_ptr(previous_ptr); 223 previous_chunk->zap(previous_ptr); 224 thread->set_vt_alloc_limit(previous_chunk->alloc_limit()); 225 while (del != NULL) { 226 VTBufferChunk* temp = del->next(); 227 VTBuffer::recycle_chunk(thread, del); 228 del = temp; 229 } 230 } 231 232 void VTBuffer::return_vt_chunk(JavaThread* thread, VTBufferChunk* chunk) { 233 chunk->set_prev(NULL); 234 chunk->set_owner(NULL); 235 chunk->set_index(-1); 236 chunk->zap(chunk->first_alloc()); 237 MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag); 238 if (_free_list != NULL) { 239 chunk->set_next(_free_list); 240 _free_list->set_prev(chunk); 241 _free_list = chunk; 242 } else { 243 chunk->set_next(NULL); 244 _free_list = chunk; 245 } 246 _pool_counter++; 247 if (_pool_counter > _max_pool_counter) { 248 _max_pool_counter = _pool_counter; 249 } 250 thread->increment_vtchunk_returned(); 251 } 252 253 bool VTBuffer::value_belongs_to_frame(oop p, frame* f) { 254 return is_value_allocated_after(p, f->interpreter_frame_vt_alloc_ptr()); 255 } 256 257 bool VTBuffer::is_value_allocated_after(oop p, void* a) { 258 // Test if value p has been allocated after alloc ptr a 259 int p_chunk_idx = VTBufferChunk::chunk(p)->index(); 260 int frame_first_chunk_idx; 261 if (a != NULL) { 262 frame_first_chunk_idx = VTBufferChunk::chunk(a)->index(); 263 } else { 264 frame_first_chunk_idx = 0; 265 } 266 if (p_chunk_idx == frame_first_chunk_idx) { 267 return (intptr_t*)p >= a; 268 } else { 269 return p_chunk_idx > frame_first_chunk_idx; 270 } 271 } 272 273 void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) { 274 assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames"); 275 assert(chunk != NULL, "Should not be called if null"); 276 while (chunk->prev() != NULL) { 277 chunk = chunk->prev(); 278 } 279 f.interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc()); 280 } 281 282 extern "C" { 283 static int compare_reloc_entries(const void* void_a, const void* void_b) { 284 struct VT_relocation_entry* entry_a = (struct VT_relocation_entry*)void_a; 285 struct VT_relocation_entry* entry_b = (struct VT_relocation_entry*)void_b; 286 if (entry_a->chunk_index == entry_b->chunk_index) { 287 if (entry_a->old_ptr < entry_b->old_ptr) { 288 return -1; 289 } else { 290 return 1; 291 } 292 } else { 293 if (entry_a->chunk_index < entry_b->chunk_index) { 294 return -1; 295 } else { 296 return 1; 297 } 298 } 299 } 300 } 301 302 void dump_reloc_table(struct VT_relocation_entry* table, int nelem, bool print_new_ptr) { 303 ResourceMark rm; 304 for (int i = 0; i < nelem; i++) { 305 InstanceKlass* ik = InstanceKlass::cast(((oop)table[i].old_ptr)->klass()); 306 tty->print("%d:\t%p\t%d\t%s\t%x", i, table[i].old_ptr, table[i].chunk_index, 307 ik->name()->as_C_string(), ik->size_helper() * HeapWordSize); 308 if (print_new_ptr) { 309 tty->print_cr("\t%p\t%d\n", table[i].new_ptr, VTBufferChunk::chunk(table[i].new_ptr)->index()); 310 } else { 311 tty->print_cr(""); 312 } 313 } 314 } 315 316 // Relocate value 'old' after value 'previous' 317 address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) { 318 InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass()); 319 assert(ik_old->is_value(), "Sanity check"); 320 VTBufferChunk* chunk = VTBufferChunk::chunk(previous); 321 address next_alloc = previous + previous_size_in_words * HeapWordSize; 322 if(next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) { 323 // relocation can be performed in the same chunk 324 return next_alloc; 325 } else { 326 // relocation must be performed in the next chunk 327 VTBufferChunk* next_chunk = chunk->next(); 328 assert(next_chunk != NULL, "Because we are compacting, there should be enough chunks"); 329 return (address)next_chunk->first_alloc(); 330 } 331 } 332 333 oop VTBuffer::relocate_return_value(JavaThread* thread, void* alloc_ptr, oop obj) { 334 assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values"); 335 assert(obj->klass()->is_value(), "Sanity check"); 336 if (!VTBuffer::is_value_allocated_after(obj, alloc_ptr)) return obj; 337 ValueKlass* vk = ValueKlass::cast(obj->klass()); 338 address current_ptr = (address)thread->vt_alloc_ptr(); 339 VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr); 340 address previous_ptr = (address)alloc_ptr; 341 if (previous_ptr == NULL) { 342 VTBufferChunk* c = VTBufferChunk::chunk(obj); 343 while (c->prev() != NULL) c = c->prev(); 344 previous_ptr = (address)c->first_alloc(); 345 } 346 VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr); 347 address dest; 348 if ((address)obj != previous_ptr) { 349 if (previous_chunk == current_chunk 350 && (previous_ptr + vk->size_helper() * HeapWordSize) < previous_chunk->alloc_limit()) { 351 dest = previous_ptr; 352 } else { 353 assert(previous_chunk->next() != NULL, "Should not happen"); 354 dest = (address)previous_chunk->next()->first_alloc(); 355 } 356 // Copying header 357 memcpy(dest, obj, vk->first_field_offset()); 358 // Copying value content 359 vk->value_store(((char*)(address)obj) + vk->first_field_offset(), 360 dest + vk->first_field_offset(), false, true); 361 } else { 362 dest = (address)obj; 363 } 364 VTBufferChunk* last = VTBufferChunk::chunk(dest); 365 thread->set_vt_alloc_limit(last->alloc_limit()); 366 void* new_alloc_ptr = MIN2((void*)(dest + vk->size_helper() * HeapWordSize), last->alloc_limit()); 367 thread->set_vt_alloc_ptr(new_alloc_ptr); 368 assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check"); 369 VTBufferChunk* del = last->next(); 370 last->set_next(NULL); 371 while (del != NULL) { 372 VTBufferChunk* tmp = del->next(); 373 VTBuffer::recycle_chunk(thread, del); 374 del = tmp; 375 } 376 return (oop)dest; 377 } 378 379 // This method is called to recycle VTBuffer memory when the VM has detected 380 // that too much memory is being consumed in the current frame context. This 381 // can only happen when the method contains at least one loop in which new 382 // values are created. 383 void VTBuffer::recycle_vt_in_frame(JavaThread* thread, frame* f) { 384 Ticks begin, end; 385 Ticks step1, step2, step3, step4, step5, step6, step7; 386 int returned_chunks = 0; 387 388 if (ReportVTBufferRecyclingTimes) { 389 begin = Ticks::now(); 390 } 391 assert(f->is_interpreted_frame(), "only interpreted frames are using VT buffering so far"); 392 ResourceMark rm(thread); 393 394 // 1 - allocate relocation table 395 Method* m = f->interpreter_frame_method(); 396 int max_entries = m->max_locals() + m->max_stack(); 397 VT_relocation_entry* reloc_table = NEW_RESOURCE_ARRAY_IN_THREAD(thread, struct VT_relocation_entry, max_entries); 398 int n_entries = 0; 399 if (ReportVTBufferRecyclingTimes) { 400 step1 = Ticks::now(); 401 } 402 403 { 404 // No GC should occur during the phases 2->5 405 // either because the mark word (usually containing the pointer 406 // to the Java mirror) is used for marking, or because the values are being relocated 407 NoSafepointVerifier nsv; 408 409 // 2 - marking phase + populate relocation table 410 BufferedValuesMarking marking_closure = BufferedValuesMarking(f, reloc_table, max_entries, &n_entries); 411 f->buffered_values_interpreted_do(&marking_closure); 412 if (ReportVTBufferRecyclingTimes) { 413 step2 = Ticks::now(); 414 } 415 416 if (n_entries > 0) { 417 // 3 - sort relocation table entries and compute compaction 418 qsort(reloc_table, n_entries, sizeof(struct VT_relocation_entry), compare_reloc_entries); 419 if (f->interpreter_frame_vt_alloc_ptr() == NULL) { 420 VTBufferChunk* chunk = VTBufferChunk::chunk(reloc_table[0].old_ptr); 421 while (chunk->prev() != NULL) chunk = chunk->prev(); 422 //f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc()); 423 reloc_table[0].new_ptr = (address)chunk->first_alloc(); 424 } else { 425 reloc_table[0].new_ptr = (address)f->interpreter_frame_vt_alloc_ptr(); 426 } 427 ((oop)reloc_table[0].old_ptr)->set_mark((markOop)reloc_table[0].new_ptr); 428 for (int i = 1; i < n_entries; i++) { 429 reloc_table[i].new_ptr = relocate_value(reloc_table[i].old_ptr, reloc_table[i-1].new_ptr, 430 InstanceKlass::cast(((oop)reloc_table[i-1].old_ptr)->klass())->size_helper()); 431 ((oop)reloc_table[i].old_ptr)->set_mark((markOop)reloc_table[i].new_ptr); 432 } 433 if (ReportVTBufferRecyclingTimes) { 434 step3 = Ticks::now(); 435 } 436 437 // 4 - update pointers 438 BufferedValuesPointersUpdate update_closure = BufferedValuesPointersUpdate(f); 439 f->buffered_values_interpreted_do(&update_closure); 440 if (ReportVTBufferRecyclingTimes) { 441 step4 = Ticks::now(); 442 } 443 444 // 5 - relocate values 445 for (int i = 0; i < n_entries; i++) { 446 if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) { 447 assert(VTBufferChunk::chunk(reloc_table[i].old_ptr)->owner() == Thread::current(), "Sanity check"); 448 assert(VTBufferChunk::chunk(reloc_table[i].new_ptr)->owner() == Thread::current(), "Sanity check"); 449 InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass()); 450 // instead of memcpy, a value_store() might be required here 451 memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize); 452 } 453 // Restoring the mark word 454 ((oop)reloc_table[i].new_ptr)->set_mark(reloc_table[i].mark_word); 455 } 456 if (ReportVTBufferRecyclingTimes) { 457 step5 = Ticks::now(); 458 } 459 460 oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr; 461 assert(last_oop->is_value(), "sanity check"); 462 assert(VTBufferChunk::chunk((address)last_oop)->owner() == Thread::current(), "Sanity check"); 463 VTBufferChunk* last_chunk = VTBufferChunk::chunk(last_oop); 464 InstanceKlass* ik = InstanceKlass::cast(last_oop->klass()); 465 thread->set_vt_alloc_limit(last_chunk->alloc_limit()); 466 void* new_alloc_ptr = MIN2((void*)((address)last_oop + ik->size_helper() * HeapWordSize), thread->vt_alloc_limit()); 467 thread->set_vt_alloc_ptr(new_alloc_ptr); 468 assert(VTBufferChunk::chunk(thread->vt_alloc_ptr())->owner() == Thread::current(), "Sanity check"); 469 assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check"); 470 if (ReportVTBufferRecyclingTimes) { 471 step6 = Ticks::now(); 472 } 473 474 // 7 - free/return unused chunks 475 VTBufferChunk* last = VTBufferChunk::chunk(thread->vt_alloc_ptr()); 476 VTBufferChunk* del = last->next(); 477 last->set_next(NULL); 478 while (del != NULL) { 479 returned_chunks++; 480 VTBufferChunk* tmp = del->next(); 481 VTBuffer::recycle_chunk(thread, del); 482 del = tmp; 483 } 484 if (ReportVTBufferRecyclingTimes) { 485 step7 = Ticks::now(); 486 } 487 } else { 488 f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)thread->vt_alloc_ptr()); 489 } 490 } 491 492 // 8 - free relocation table 493 FREE_RESOURCE_ARRAY(struct VT_relocation_entry, reloc_table, max_entries); 494 495 if (ReportVTBufferRecyclingTimes) { 496 end = Ticks::now(); 497 ResourceMark rm(thread); 498 tty->print_cr("VTBufferRecyling: %s : %s.%s %s : " JLONG_FORMAT "us", 499 thread->name(), 500 f->interpreter_frame_method()->klass_name()->as_C_string(), 501 f->interpreter_frame_method()->name()->as_C_string(), 502 f->interpreter_frame_method()->signature()->as_C_string(), 503 (end.value() - begin.value()) / 1000); 504 tty->print("Step1 : " JLONG_FORMAT "ns ", step1.value() - begin.value()); 505 tty->print("Step2 : " JLONG_FORMAT "ns ", step2.value() - step1.value()); 506 tty->print("Step3 : " JLONG_FORMAT "ns ", step3.value() - step2.value()); 507 tty->print("Step4 : " JLONG_FORMAT "ns ", step4.value() - step3.value()); 508 tty->print("Step5 : " JLONG_FORMAT "ns ", step5.value() - step4.value()); 509 tty->print("Step6 : " JLONG_FORMAT "ns ", step6.value() - step5.value()); 510 tty->print("Step7 : " JLONG_FORMAT "ns ", step7.value() - step6.value()); 511 tty->print("Step8 : " JLONG_FORMAT "ns ", end.value() - step7.value()); 512 tty->print_cr("Returned chunks: %d", returned_chunks); 513 } 514 } 515 516 void BufferedValuesMarking::do_buffered_value(oop* p) { 517 assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check"); 518 if (VTBuffer::value_belongs_to_frame(*p, _frame)) { 519 if (!(*p)->mark()->is_marked()) { 520 assert(*_index < _size, "index outside of relocation table range"); 521 _reloc_table[*_index].old_ptr = (address)*p; 522 _reloc_table[*_index].chunk_index = VTBufferChunk::chunk(*p)->index(); 523 _reloc_table[*_index].mark_word = (*p)->mark(); 524 *_index = (*_index) + 1; 525 (*p)->set_mark((*p)->mark()->set_marked()); 526 } 527 } 528 } 529 530 void BufferedValuesPointersUpdate::do_buffered_value(oop* p) { 531 assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check"); 532 // might be coded more efficiently just by checking mark word is not NULL 533 if (VTBuffer::value_belongs_to_frame(*p, _frame)) { 534 *p = (oop)(*p)->mark(); 535 } 536 } 537 538 BufferedValuesDealiaser::BufferedValuesDealiaser(JavaThread* thread) { 539 Thread* current = Thread::current(); 540 assert(current->buffered_values_dealiaser() == NULL, "Must not be used twice concurrently"); 541 VTBuffer::Mark mark = VTBuffer::switch_mark(thread->current_vtbuffer_mark()); 542 _target = thread; 543 _current_mark = mark; 544 thread->set_current_vtbuffer_mark(_current_mark); 545 current->_buffered_values_dealiaser = this; 546 } 547 548 void BufferedValuesDealiaser::oops_do(OopClosure* f, oop value) { 549 550 assert(VTBuffer::is_in_vt_buffer((oopDesc*)value), "Should only be called on buffered values"); 551 552 intptr_t mark = *(intptr_t*)(value)->mark_addr(); 553 if ((mark & VTBuffer::mark_mask) == _current_mark) { 554 return; 555 } 556 557 ValueKlass* vk = ValueKlass::cast(value->klass()); 558 559 oop mirror = (oopDesc*)((intptr_t)value->mark() & (intptr_t)~VTBuffer::mark_mask); 560 assert(oopDesc::is_oop(mirror), "Sanity check"); 561 value->set_mark((markOop)mirror); 562 563 vk->iterate_over_inside_oops(f, value); 564 565 intptr_t new_mark_word = ((intptr_t) (oopDesc*)(value->mark())) 566 | (intptr_t)_current_mark; 567 value->set_mark(markOop((oopDesc*)new_mark_word)); 568 569 assert(((intptr_t)value->mark() & VTBuffer::mark_mask) == _current_mark, "Sanity check"); 570 } 571 572 BufferedValuesDealiaser::~BufferedValuesDealiaser() { 573 assert(Thread::current()->buffered_values_dealiaser() != NULL, "Should not be NULL"); 574 assert(_target->current_vtbuffer_mark() == _current_mark, "Must be the same"); 575 Thread::current()->_buffered_values_dealiaser = NULL; 576 }