1 /* 2 * Copyright (c) 2016, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/vtBuffer.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueKlass.hpp"
#include "runtime/frame.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
#include "utilities/ticks.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS


// Global pool of free VTBufferChunks shared by all Java threads.
// _free_list and the counters below (except _total_failed, which is bumped
// outside the lock in allocate_vt_chunk() -- benign statistics race) are
// updated under _pool_lock, taken without safepoint checks because the
// allocation path must not safepoint.
VTBufferChunk* VTBuffer::_free_list = NULL;
Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never);
int VTBuffer::_pool_counter = 0;       // chunks currently sitting in _free_list
int VTBuffer::_max_pool_counter = 0;   // high-water mark of _pool_counter
int VTBuffer::_total_allocated = 0;    // chunks ever allocated from the C heap
int VTBuffer::_total_deallocated = 0;  // chunks released back to the C heap
int VTBuffer::_total_failed = 0;       // failed chunk allocation attempts

// Zero the tail of this chunk, from 'start' (which must lie inside this
// chunk) up to the end of the chunk. Only active when ZapVTBufferChunks is
// set; makes stale references into recycled buffer memory fail fast.
void VTBufferChunk::zap(void* start) {
  assert(this == (VTBufferChunk*)((intptr_t)start & chunk_mask()), "start must be in current chunk");
  if (ZapVTBufferChunks) {
    size_t size = chunk_size() - ((char*)start - (char*)this);
    memset((char*)start, 0, size);
  }
}

// Allocate a buffered value of klass 'k' in the current JavaThread's
// thread-local value buffer, bump-allocating from vt_alloc_ptr().
// Returns NULL when buffering is impossible (value larger than a chunk's
// max_alloc_size(), or no chunk could be obtained); callers treat NULL as
// the signal to fall back to a Java heap allocation.
// On success the value is zeroed, its klass pointer is installed, and its
// mark word is set to the klass' Java mirror (whose low bits are later used
// as mark/dealias tags -- see BufferedValuesDealiaser).
oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) {
  assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types");
  JavaThread* thread = (JavaThread*)THREAD;
  if (thread->vt_alloc_ptr() == NULL) {
    // First buffered allocation of this thread: set up an initial chunk.
    if (!allocate_vt_chunk(thread)) {
      return NULL; // will trigger fall back strategy: allocation in Java heap
    }
  }
  assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful");
  int allocation_size_in_bytes = k->size_helper() * HeapWordSize;
  if ((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes >= thread->vt_alloc_limit()) {
    // Not enough room left in the current chunk.
    if (allocation_size_in_bytes > (int)VTBufferChunk::max_alloc_size()) {
      // Too big to be allocated in a buffer
      return NULL;
    }
    // Chunks past the allocation frontier are fully free, so an existing
    // next chunk can be reused as-is before asking for a new one.
    VTBufferChunk* next = VTBufferChunk::chunk(thread->vt_alloc_ptr())->next();
    if (next != NULL) {
      thread->set_vt_alloc_ptr(next->first_alloc());
      thread->set_vt_alloc_limit(next->alloc_limit());
    } else {
      if (!allocate_vt_chunk(thread)) {
        return NULL; // will trigger fall back strategy: allocation in Java heap
      }
    }
  }
  assert((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes < thread->vt_alloc_limit(), "otherwise the logic above is wrong");
  oop new_vt = (oop)thread->vt_alloc_ptr();
  int allocation_size_in_words = k->size_helper();
  thread->increment_vtchunk_total_memory_buffered(allocation_size_in_words * HeapWordSize);
  int increment = align_object_size(allocation_size_in_words);
  void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize;
  // Alignment padding may step past the limit; clamp so the alloc ptr stays
  // inside the current chunk.
  new_ptr = MIN2(new_ptr, thread->vt_alloc_limit());
  assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()),
         "old and new alloc ptr must be in the same chunk");
  thread->set_vt_alloc_ptr(new_ptr);
  // the value and its header must be initialized before being returned!!!
  memset(((char*)(oopDesc*)new_vt), 0, allocation_size_in_bytes);
  new_vt->set_klass(k);
  // The mirror pointer is stored in the mark word; its low bits must be free
  // so they can carry the VTBuffer mark tag.
  assert(((intptr_t)(oopDesc*)k->java_mirror() & (intptr_t)VTBuffer::mark_mask) == 0, "Checking least significant bits are available");
  new_vt->set_mark(markOop(k->java_mirror()));
  if (UseG1GC) {
    // Keep the mirror alive for G1's SATB marking, since the reference is
    // hidden in a mark word the GC does not scan.
    G1SATBCardTableModRefBS::enqueue(k->java_mirror());
  }
  return new_vt;
}

// Install a fresh chunk as 'thread's current allocation chunk, linking it
// after the current one. Chunk sources, in order: the thread-local one-chunk
// cache, the global free list, then a new C-heap allocation -- the latter
// only while (_total_allocated + 1) <= ValueTypesBufferMaxMemory.
// NOTE(review): that comparison is against a chunk *count* although the flag
// name suggests bytes -- confirm the flag's unit against its declaration.
// Returns false if no chunk could be obtained.
bool VTBuffer::allocate_vt_chunk(JavaThread* thread) {
  VTBufferChunk* new_chunk = NULL;
  // Trying local cache;
  if (thread->local_free_chunk() != NULL) {
    new_chunk = thread->local_free_chunk();
    thread->set_local_free_chunk(NULL);
  } else {
    // Trying global pool
    MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
    if (_free_list != NULL) {
      new_chunk = _free_list;
      _free_list = new_chunk->next();
      if (_free_list != NULL) {
        _free_list->set_prev(NULL);
      }
      new_chunk->set_next(NULL);
      _pool_counter--;
    } else {
      // A new chunk has to be allocated
      // Hold _pool_lock to maintain counters
      if ((_total_allocated + 1) <= ValueTypesBufferMaxMemory) {
        // Allocate new chunk only if total size for buffer
        // memory is below its max size
        new_chunk = new VTBufferChunk(thread);
        _total_allocated += new_chunk == NULL ? 0 : 1;
      }
    }
  }
  if (new_chunk == NULL) {
    _total_failed++;
    thread->increment_vtchunk_failed();
    return false; // allocation failed
  }
  VTBufferChunk* current = thread->current_chunk();
  assert(new_chunk->owner() == thread || new_chunk->owner() == NULL, "Sanity check");
  assert(new_chunk->index() == -1, "Sanity check");
  new_chunk->set_owner(thread);
  if (current != NULL) {
    // Append behind the current chunk; indices grow monotonically along the
    // chain so allocation order can be compared by chunk index.
    new_chunk->set_prev(current);
    new_chunk->set_index(current->index() + 1);
    current->set_next(new_chunk);
  } else {
    new_chunk->set_index(0);
  }
  thread->increment_vtchunk_in_use();
  thread->set_vt_alloc_ptr(new_chunk->first_alloc());
  thread->set_vt_alloc_limit(new_chunk->alloc_limit());
  return true; // allocation was successful
}

// Release a chunk that is no longer part of the thread's active chain.
// The thread-local one-chunk cache is filled first (cheap, lock-free reuse);
// any overflow goes to the global pool / C heap via return_vt_chunk().
void VTBuffer::recycle_chunk(JavaThread* thread, VTBufferChunk* chunk) {
  if (thread->local_free_chunk() == NULL) {
    chunk->set_prev(NULL);
    chunk->set_next(NULL);
    chunk->set_index(-1);
    chunk->zap(chunk->first_alloc());
    thread->set_local_free_chunk(chunk);
  } else {
    return_vt_chunk(thread, chunk);
  }
  thread->decrement_vtchunk_in_use();
}

// This is the main way to recycle VTBuffer memory, it is called from
// remove_activation() when an interpreter frame is about to be removed
// from the stack.
All memory used in the context of this frame is freed, 168 // and the vt_alloc_ptr is restored to the value it had when the frame 169 // was created (modulo a possible adjustment if a value is being returned) 170 void VTBuffer::recycle_vtbuffer(JavaThread* thread, void* alloc_ptr) { 171 address current_ptr = (address)thread->vt_alloc_ptr(); 172 assert(current_ptr != NULL, "Should not reach here if NULL"); 173 VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr); 174 assert(current_chunk->owner() == thread, "Sanity check"); 175 address previous_ptr = (address)alloc_ptr; 176 if (previous_ptr == NULL) { 177 // vt_alloc_ptr has not been initialized in this frame 178 // let's initialize it to the first_alloc() value of the first chunk 179 VTBufferChunk* first_chunk = current_chunk; 180 while (first_chunk->prev() != NULL) { 181 first_chunk = first_chunk->prev(); 182 } 183 previous_ptr = (address)first_chunk->first_alloc(); 184 } 185 assert(previous_ptr != NULL, "Should not reach here if NULL"); 186 VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr); 187 assert(previous_chunk->owner() == thread, "Sanity check"); 188 if (current_ptr == previous_ptr) return; 189 assert(current_chunk != previous_chunk || current_ptr >= previous_ptr, "Sanity check"); 190 VTBufferChunk* del = previous_chunk->next(); 191 previous_chunk->set_next(NULL); 192 thread->set_vt_alloc_ptr(previous_ptr); 193 previous_chunk->zap(previous_ptr); 194 thread->set_vt_alloc_limit(previous_chunk->alloc_limit()); 195 while (del != NULL) { 196 VTBufferChunk* temp = del->next(); 197 VTBuffer::recycle_chunk(thread, del); 198 del = temp; 199 } 200 } 201 202 void VTBuffer::return_vt_chunk(JavaThread* thread, VTBufferChunk* chunk) { 203 chunk->set_prev(NULL); 204 chunk->set_owner(NULL); 205 chunk->set_index(-1); 206 chunk->zap(chunk->first_alloc()); 207 MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag); 208 if (_pool_counter < _max_free_list) { 209 if (_free_list != NULL) { 210 
chunk->set_next(_free_list); 211 _free_list->set_prev(chunk); 212 _free_list = chunk; 213 } else { 214 chunk->set_next(NULL); 215 _free_list = chunk; 216 } 217 _pool_counter++; 218 if (_pool_counter > _max_pool_counter) { 219 _max_pool_counter = _pool_counter; 220 } 221 } else { 222 delete chunk; 223 _total_deallocated++; 224 } 225 thread->increment_vtchunk_returned(); 226 } 227 228 bool VTBuffer::value_belongs_to_frame(oop p, frame* f) { 229 return is_value_allocated_after(p, f->interpreter_frame_vt_alloc_ptr()); 230 } 231 232 bool VTBuffer::is_value_allocated_after(oop p, void* a) { 233 // Test if value p has been allocated after alloc ptr a 234 int p_chunk_idx = VTBufferChunk::chunk(p)->index(); 235 int frame_first_chunk_idx; 236 if (a != NULL) { 237 frame_first_chunk_idx = VTBufferChunk::chunk(a)->index(); 238 } else { 239 frame_first_chunk_idx = 0; 240 } 241 if (p_chunk_idx == frame_first_chunk_idx) { 242 return (intptr_t*)p >= a; 243 } else { 244 return p_chunk_idx > frame_first_chunk_idx; 245 } 246 } 247 248 void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) { 249 assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames"); 250 assert(chunk != NULL, "Should not be called if null"); 251 while (chunk->prev() != NULL) { 252 chunk = chunk->prev(); 253 } 254 f.interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc()); 255 } 256 257 extern "C" { 258 static int compare_reloc_entries(const void* void_a, const void* void_b) { 259 struct VT_relocation_entry* entry_a = (struct VT_relocation_entry*)void_a; 260 struct VT_relocation_entry* entry_b = (struct VT_relocation_entry*)void_b; 261 if (entry_a->chunk_index == entry_b->chunk_index) { 262 if (entry_a->old_ptr < entry_b->old_ptr) { 263 return -1; 264 } else { 265 return 1; 266 } 267 } else { 268 if (entry_a->chunk_index < entry_b->chunk_index) { 269 return -1; 270 } else { 271 return 1; 272 } 273 } 274 } 275 } 276 277 void dump_reloc_table(struct 
VT_relocation_entry* table, int nelem, bool print_new_ptr) { 278 ResourceMark rm; 279 for (int i = 0; i < nelem; i++) { 280 InstanceKlass* ik = InstanceKlass::cast(((oop)table[i].old_ptr)->klass()); 281 tty->print("%d:\t%p\t%d\t%s\t%x", i, table[i].old_ptr, table[i].chunk_index, 282 ik->name()->as_C_string(), ik->size_helper() * HeapWordSize); 283 if (print_new_ptr) { 284 tty->print_cr("\t%p\t%d\n", table[i].new_ptr, VTBufferChunk::chunk(table[i].new_ptr)->index()); 285 } else { 286 tty->print_cr(""); 287 } 288 } 289 } 290 291 // Relocate value 'old' after value 'previous' 292 address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) { 293 InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass()); 294 assert(ik_old->is_value(), "Sanity check"); 295 VTBufferChunk* chunk = VTBufferChunk::chunk(previous); 296 address next_alloc = previous + previous_size_in_words * HeapWordSize; 297 if(next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) { 298 // relocation can be performed in the same chunk 299 return next_alloc; 300 } else { 301 // relocation must be performed in the next chunk 302 VTBufferChunk* next_chunk = chunk->next(); 303 assert(next_chunk != NULL, "Because we are compacting, there should be enough chunks"); 304 return (address)next_chunk->first_alloc(); 305 } 306 } 307 308 oop VTBuffer::relocate_return_value(JavaThread* thread, void* alloc_ptr, oop obj) { 309 assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values"); 310 assert(obj->klass()->is_value(), "Sanity check"); 311 if (!VTBuffer::is_value_allocated_after(obj, alloc_ptr)) return obj; 312 ValueKlass* vk = ValueKlass::cast(obj->klass()); 313 address current_ptr = (address)thread->vt_alloc_ptr(); 314 VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr); 315 address previous_ptr = (address)alloc_ptr; 316 if (previous_ptr == NULL) { 317 VTBufferChunk* c = 
VTBufferChunk::chunk(obj); 318 while (c->prev() != NULL) c = c->prev(); 319 previous_ptr = (address)c->first_alloc(); 320 } 321 VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr); 322 address dest; 323 if ((address)obj != previous_ptr) { 324 if (previous_chunk == current_chunk 325 && (previous_ptr + vk->size_helper() * HeapWordSize) < previous_chunk->alloc_limit()) { 326 dest = previous_ptr; 327 } else { 328 assert(previous_chunk->next() != NULL, "Should not happen"); 329 dest = (address)previous_chunk->next()->first_alloc(); 330 } 331 // Copying header 332 memcpy(dest, obj, vk->first_field_offset()); 333 // Copying value content 334 vk->value_store(((char*)(address)obj) + vk->first_field_offset(), 335 dest + vk->first_field_offset(), false, true); 336 } else { 337 dest = (address)obj; 338 } 339 VTBufferChunk* last = VTBufferChunk::chunk(dest); 340 thread->set_vt_alloc_limit(last->alloc_limit()); 341 void* new_alloc_ptr = MIN2((void*)(dest + vk->size_helper() * HeapWordSize), last->alloc_limit()); 342 thread->set_vt_alloc_ptr(new_alloc_ptr); 343 assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check"); 344 VTBufferChunk* del = last->next(); 345 last->set_next(NULL); 346 while (del != NULL) { 347 VTBufferChunk* tmp = del->next(); 348 VTBuffer::recycle_chunk(thread, del); 349 del = tmp; 350 } 351 return (oop)dest; 352 } 353 354 // This method is called to recycle VTBuffer memory when the VM has detected 355 // that too much memory is being consumed in the current frame context. This 356 // can only happen when the method contains at least one loop in which new 357 // values are created. 
// Compact all buffered values still live in interpreter frame 'f' to the
// bottom of the thread's value buffer and recycle the freed chunks.
// Eight phases; phases 2-5 run under a NoSafepointVerifier because the
// values' mark words are temporarily hijacked to hold mark bits (phase 2)
// and forwarding pointers (phase 3), which the GC must never observe.
// NOTE(review): when n_entries == 0, steps 3-7 are never timestamped, yet
// the ReportVTBufferRecyclingTimes block below still prints their deltas
// (from default-constructed Ticks) -- confirm whether that output is wanted.
void VTBuffer::recycle_vt_in_frame(JavaThread* thread, frame* f) {
  Ticks begin, end;
  Ticks step1, step2, step3, step4, step5, step6, step7;
  int returned_chunks = 0;

  if (ReportVTBufferRecyclingTimes) {
    begin = Ticks::now();
  }
  assert(f->is_interpreted_frame(), "only interpreted frames are using VT buffering so far");
  ResourceMark rm(thread);

  // 1 - allocate relocation table
  // Upper bound on live buffered values: every local and stack slot.
  Method* m = f->interpreter_frame_method();
  int max_entries = m->max_locals() + m->max_stack();
  VT_relocation_entry* reloc_table = NEW_RESOURCE_ARRAY_IN_THREAD(thread, struct VT_relocation_entry, max_entries);
  int n_entries = 0;
  if (ReportVTBufferRecyclingTimes) {
    step1 = Ticks::now();
  }

  {
    // No GC should occur during the phases 2->5
    // either because the mark word (usually containing the pointer
    // to the Java mirror) is used for marking, or because the values are being relocated
    NoSafepointVerifier nsv;

    // 2 - marking phase + populate relocation table
    BufferedValuesMarking marking_closure = BufferedValuesMarking(f, reloc_table, max_entries, &n_entries);
    f->buffered_values_interpreted_do(&marking_closure);
    if (ReportVTBufferRecyclingTimes) {
      step2 = Ticks::now();
    }

    if (n_entries > 0) {
      // 3 - sort relocation table entries and compute compaction
      // Sorting by allocation order lets each value slide down right behind
      // the previous one; the forwarding address is stashed in the mark word.
      qsort(reloc_table, n_entries, sizeof(struct VT_relocation_entry), compare_reloc_entries);
      if (f->interpreter_frame_vt_alloc_ptr() == NULL) {
        // Frame never buffered before: compact to the start of the chain.
        VTBufferChunk* chunk = VTBufferChunk::chunk(reloc_table[0].old_ptr);
        while (chunk->prev() != NULL) chunk = chunk->prev();
        //f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)chunk->first_alloc());
        reloc_table[0].new_ptr = (address)chunk->first_alloc();
      } else {
        reloc_table[0].new_ptr = (address)f->interpreter_frame_vt_alloc_ptr();
      }
      ((oop)reloc_table[0].old_ptr)->set_mark((markOop)reloc_table[0].new_ptr);
      for (int i = 1; i < n_entries; i++) {
        reloc_table[i].new_ptr = relocate_value(reloc_table[i].old_ptr, reloc_table[i-1].new_ptr,
                                                InstanceKlass::cast(((oop)reloc_table[i-1].old_ptr)->klass())->size_helper());
        ((oop)reloc_table[i].old_ptr)->set_mark((markOop)reloc_table[i].new_ptr);
      }
      if (ReportVTBufferRecyclingTimes) {
        step3 = Ticks::now();
      }

      // 4 - update pointers
      // Rewrites every frame slot referencing an old location to the
      // forwarding address stored in the value's mark word.
      BufferedValuesPointersUpdate update_closure = BufferedValuesPointersUpdate(f);
      f->buffered_values_interpreted_do(&update_closure);
      if (ReportVTBufferRecyclingTimes) {
        step4 = Ticks::now();
      }

      // 5 - relocate values
      // Safe to copy in table order: destinations never overtake sources.
      for (int i = 0; i < n_entries; i++) {
        if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) {
          assert(VTBufferChunk::chunk(reloc_table[i].old_ptr)->owner() == Thread::current(), "Sanity check");
          assert(VTBufferChunk::chunk(reloc_table[i].new_ptr)->owner() == Thread::current(), "Sanity check");
          InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass());
          // instead of memcpy, a value_store() might be required here
          memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize);
        }
        // Restoring the mark word
        ((oop)reloc_table[i].new_ptr)->set_mark(reloc_table[i].mark_word);
      }
      if (ReportVTBufferRecyclingTimes) {
        step5 = Ticks::now();
      }

      // 6 - reset the thread's allocation pointer/limit to just past the
      // last relocated value.
      oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr;
      assert(last_oop->is_value(), "sanity check");
      assert(VTBufferChunk::chunk((address)last_oop)->owner() == Thread::current(), "Sanity check");
      VTBufferChunk* last_chunk = VTBufferChunk::chunk(last_oop);
      InstanceKlass* ik = InstanceKlass::cast(last_oop->klass());
      thread->set_vt_alloc_limit(last_chunk->alloc_limit());
      void* new_alloc_ptr = MIN2((void*)((address)last_oop + ik->size_helper() * HeapWordSize), thread->vt_alloc_limit());
      thread->set_vt_alloc_ptr(new_alloc_ptr);
      assert(VTBufferChunk::chunk(thread->vt_alloc_ptr())->owner() == Thread::current(), "Sanity check");
      assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
      if (ReportVTBufferRecyclingTimes) {
        step6 = Ticks::now();
      }

      // 7 - free/return unused chunks
      VTBufferChunk* last = VTBufferChunk::chunk(thread->vt_alloc_ptr());
      VTBufferChunk* del = last->next();
      last->set_next(NULL);
      while (del != NULL) {
        returned_chunks++;
        VTBufferChunk* tmp = del->next();
        VTBuffer::recycle_chunk(thread, del);
        del = tmp;
      }
      if (ReportVTBufferRecyclingTimes) {
        step7 = Ticks::now();
      }
    } else {
      // Nothing live: record the current frontier as the frame's alloc ptr.
      f->interpreter_frame_set_vt_alloc_ptr((intptr_t*)thread->vt_alloc_ptr());
    }
  }

  // 8 - free relocation table
  FREE_RESOURCE_ARRAY(struct VT_relocation_entry, reloc_table, max_entries);

  if (ReportVTBufferRecyclingTimes) {
    end = Ticks::now();
    ResourceMark rm(thread);
    tty->print_cr("VTBufferRecyling: %s : %s.%s %s : " JLONG_FORMAT "us",
                  thread->name(),
                  f->interpreter_frame_method()->klass_name()->as_C_string(),
                  f->interpreter_frame_method()->name()->as_C_string(),
                  f->interpreter_frame_method()->signature()->as_C_string(),
                  (end.value() - begin.value()) / 1000);
    tty->print("Step1 : " JLONG_FORMAT "ns ", step1.value() - begin.value());
    tty->print("Step2 : " JLONG_FORMAT "ns ", step2.value() - step1.value());
    tty->print("Step3 : " JLONG_FORMAT "ns ", step3.value() - step2.value());
    tty->print("Step4 : " JLONG_FORMAT "ns ", step4.value() - step3.value());
    tty->print("Step5 : " JLONG_FORMAT "ns ", step5.value() - step4.value());
    tty->print("Step6 : " JLONG_FORMAT "ns ", step6.value() - step5.value());
    tty->print("Step7 : " JLONG_FORMAT "ns ", step7.value() - step6.value());
    tty->print("Step8 : " JLONG_FORMAT "ns ", end.value() - step7.value());
    tty->print_cr("Returned chunks: %d", returned_chunks);
  }
}

// Phase-2 closure: record each buffered value belonging to '_frame' in the
// relocation table (address, chunk index, original mark word), then set the
// mark bit in its mark word so the same value is not recorded twice.
void BufferedValuesMarking::do_buffered_value(oop* p) {
  assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
  if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
    if (!(*p)->mark()->is_marked()) {
      assert(*_index < _size, "index outside of relocation table range");
      _reloc_table[*_index].old_ptr = (address)*p;
      _reloc_table[*_index].chunk_index = VTBufferChunk::chunk(*p)->index();
      _reloc_table[*_index].mark_word = (*p)->mark();
      *_index = (*_index) + 1;
      (*p)->set_mark((*p)->mark()->set_marked());
    }
  }
}

// Phase-4 closure: replace each frame slot pointing at a value that belongs
// to '_frame' with the forwarding address stashed in the value's mark word.
void BufferedValuesPointersUpdate::do_buffered_value(oop* p) {
  assert(!Universe::heap()->is_in_reserved_or_null(*p), "Sanity check");
  // might be coded more efficiently just by checking mark word is not NULL
  if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
    *p = (oop)(*p)->mark();
  }
}

// Install this dealiaser on the current thread and flip the target thread's
// VTBuffer mark, so oops_do() can tell values already visited in this pass
// (carrying the new mark) from values still carrying the previous one.
// Only one dealiaser may be active on a thread at a time.
BufferedValuesDealiaser::BufferedValuesDealiaser(JavaThread* thread) {
  Thread* current = Thread::current();
  assert(current->buffered_values_dealiaser() == NULL, "Must not be used twice concurrently");
  VTBuffer::Mark mark = VTBuffer::switch_mark(thread->current_vtbuffer_mark());
  _target = thread;
  _current_mark = mark;
  thread->set_current_vtbuffer_mark(_current_mark);
  current->_buffered_values_dealiaser = this;
}

// Visit the oops inside buffered 'value' exactly once per pass. A buffered
// value's mark word holds its Java mirror pointer with a VTBuffer mark tag
// in the low bits; if the tag already equals _current_mark the value was
// visited in this pass. Otherwise: temporarily restore the untagged mirror
// (so closure 'f' sees a valid oop), iterate the value's oops, then re-tag
// with _current_mark.
void BufferedValuesDealiaser::oops_do(OopClosure* f, oop value) {

  assert(VTBuffer::is_in_vt_buffer((oopDesc*)value), "Should only be called on buffered values");

  intptr_t mark = *(intptr_t*)(value)->mark_addr();
  if ((mark & VTBuffer::mark_mask) == _current_mark) {
    return;
  }

  ValueKlass* vk = ValueKlass::cast(value->klass());

  // Strip the tag bits to recover the raw mirror pointer.
  oop mirror = (oopDesc*)((intptr_t)value->mark() & (intptr_t)~VTBuffer::mark_mask);
  assert(oopDesc::is_oop(mirror), "Sanity check");
  value->set_mark((markOop)mirror);

  vk->iterate_over_inside_oops(f, value);

  // Re-apply the current mark tag on top of the (possibly updated) mirror.
  intptr_t new_mark_word = ((intptr_t)(oopDesc*)(value->mark()))
                           | (intptr_t)_current_mark;
  value->set_mark(markOop((oopDesc*)new_mark_word));
}

// Uninstall the dealiaser from the current thread.
BufferedValuesDealiaser::~BufferedValuesDealiaser() {
  assert(Thread::current()->buffered_values_dealiaser() != NULL, "Should not be NULL");
  assert(_target->current_vtbuffer_mark() == _current_mark, "Must be the same");
  Thread::current()->_buffered_values_dealiaser = NULL;
}