< prev index next >
src/hotspot/share/memory/vtBuffer.cpp
Print this page
*** 30,81 ****
#include "runtime/frame.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
#include "utilities/ticks.inline.hpp"
VTBufferChunk* VTBuffer::_free_list = NULL;
Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never);
int VTBuffer::_pool_counter = 0;
int VTBuffer::_max_pool_counter = 0;
int VTBuffer::_total_allocated = 0;
int VTBuffer::_total_deallocated = 0;
int VTBuffer::_total_failed = 0;
oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) {
assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types");
JavaThread* thread = (JavaThread*)THREAD;
if (thread->vt_alloc_ptr() == NULL) {
if (!allocate_vt_chunk(thread)) {
return NULL; // will trigger fall back strategy: allocation in Java heap
}
}
assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful");
! int size_in_bytes = k->size_helper() * wordSize;
! if ((char*)thread->vt_alloc_ptr() + size_in_bytes >= thread->vt_alloc_limit()) {
! if (size_in_bytes > (int)VTBufferChunk::max_alloc_size()) {
// Too big to be allocated in a buffer
return NULL;
}
if (!allocate_vt_chunk(thread)) {
return NULL; // will trigger fall back strategy: allocation in Java heap
}
}
! assert((char*)thread->vt_alloc_ptr() + size_in_bytes < thread->vt_alloc_limit(),"otherwise the logic above is wrong");
oop new_vt = (oop)thread->vt_alloc_ptr();
! int size_in_words = k->size_helper();
! thread->increment_vtchunk_total_memory_buffered(size_in_words * HeapWordSize);
! int increment = align_object_size(size_in_words);
void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize;
new_ptr = MIN2(new_ptr, thread->vt_alloc_limit());
assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()),
"old and new alloc ptr must be in the same chunk");
thread->set_vt_alloc_ptr(new_ptr);
// the value and its header must be initialized before being returned!!!
! memset(((char*)(oopDesc*)new_vt), 0, size_in_bytes);
new_vt->set_klass(k);
new_vt->set_mark(markOop(k->java_mirror()));
return new_vt;
}
bool VTBuffer::allocate_vt_chunk(JavaThread* thread) {
VTBufferChunk* new_chunk = NULL;
--- 30,103 ----
#include "runtime/frame.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
#include "utilities/ticks.inline.hpp"
+ #if INCLUDE_ALL_GCS
+ #include "gc/g1/g1SATBCardTableModRefBS.hpp"
+ #endif // INCLUDE_ALL_GCS
+
VTBufferChunk* VTBuffer::_free_list = NULL;
Mutex* VTBuffer::_pool_lock = new Mutex(Mutex::leaf, "VTBuffer::_pool_lock", true, Monitor::_safepoint_check_never);
int VTBuffer::_pool_counter = 0;
int VTBuffer::_max_pool_counter = 0;
int VTBuffer::_total_allocated = 0;
int VTBuffer::_total_deallocated = 0;
int VTBuffer::_total_failed = 0;
+ // Zero-fills this chunk from 'start' (which must lie inside this chunk, see the
+ // assert) up to the end of the chunk, but only when the ZapVTBufferChunks flag
+ // is set; otherwise this is a no-op. Called when a chunk (or the tail of one)
+ // is recycled so stale buffered values are wiped for debugging purposes.
+ void VTBufferChunk::zap(void* start) {
+ assert(this == (VTBufferChunk*)((intptr_t)start & chunk_mask()), "start must be in current chunk");
+ if (ZapVTBufferChunks) {
+ size_t size = chunk_size() - ((char*)start - (char*)this);
+ memset((char*)start, 0, size);
+ }
+ }
+
+ // Allocates a buffered value of klass 'k' in the current JavaThread's
+ // thread-local value-type buffer. Returns NULL when buffering is not possible
+ // (no chunk available, or the value exceeds VTBufferChunk::max_alloc_size()),
+ // which signals the caller to fall back to ordinary Java-heap allocation.
+ // The returned value is zero-initialized with its klass set and its mark word
+ // holding the klass' java_mirror.
oop VTBuffer::allocate_value(ValueKlass* k, TRAPS) {
assert(THREAD->is_Java_thread(), "Only JavaThreads have a buffer for value types");
JavaThread* thread = (JavaThread*)THREAD;
if (thread->vt_alloc_ptr() == NULL) {
if (!allocate_vt_chunk(thread)) {
return NULL; // will trigger fall back strategy: allocation in Java heap
}
}
assert(thread->vt_alloc_ptr() != NULL, "should not be null if chunk allocation was successful");
! int allocation_size_in_bytes = k->size_helper() * HeapWordSize;
! if ((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes >= thread->vt_alloc_limit()) {
! if (allocation_size_in_bytes > (int)VTBufferChunk::max_alloc_size()) {
// Too big to be allocated in a buffer
return NULL;
}
+ // Current chunk is full: re-use an already linked next chunk if one exists,
+ // otherwise try to allocate a fresh chunk.
+ VTBufferChunk* next = VTBufferChunk::chunk(thread->vt_alloc_ptr())->next();
+ if (next != NULL) {
+ thread->set_vt_alloc_ptr(next->first_alloc());
+ thread->set_vt_alloc_limit(next->alloc_limit());
+ } else {
if (!allocate_vt_chunk(thread)) {
return NULL; // will trigger fall back strategy: allocation in Java heap
}
}
! }
! assert((char*)thread->vt_alloc_ptr() + allocation_size_in_bytes < thread->vt_alloc_limit(),"otherwise the logic above is wrong");
oop new_vt = (oop)thread->vt_alloc_ptr();
! int allocation_size_in_words = k->size_helper();
! thread->increment_vtchunk_total_memory_buffered(allocation_size_in_words * HeapWordSize);
! int increment = align_object_size(allocation_size_in_words);
void* new_ptr = (char*)thread->vt_alloc_ptr() + increment * HeapWordSize;
new_ptr = MIN2(new_ptr, thread->vt_alloc_limit());
assert(VTBufferChunk::chunk(new_ptr) == VTBufferChunk::chunk(thread->vt_alloc_ptr()),
"old and new alloc ptr must be in the same chunk");
thread->set_vt_alloc_ptr(new_ptr);
// the value and its header must be initialized before being returned!!!
! memset(((char*)(oopDesc*)new_vt), 0, allocation_size_in_bytes);
new_vt->set_klass(k);
+ // The mark word of a buffered value stores the klass' java_mirror; the low
+ // bits must be clear so VTBuffer::mark_mask bits can later tag the mark word.
+ assert(((intptr_t)(oopDesc*)k->java_mirror() & (intptr_t)VTBuffer::mark_mask) == 0, "Checking least significant bits are available");
new_vt->set_mark(markOop(k->java_mirror()));
+ // NOTE(review): with G1 the mirror is pushed on the SATB queue — presumably to
+ // keep it live for concurrent marking since the buffered value is outside the
+ // Java heap; confirm against the G1 barrier-set contract.
+ if (UseG1GC) {
+ G1SATBCardTableModRefBS::enqueue(k->java_mirror());
+ }
return new_vt;
}
bool VTBuffer::allocate_vt_chunk(JavaThread* thread) {
VTBufferChunk* new_chunk = NULL;
*** 130,139 ****
--- 152,162 ----
void VTBuffer::recycle_chunk(JavaThread* thread, VTBufferChunk* chunk) {
if (thread->local_free_chunk() == NULL) {
chunk->set_prev(NULL);
chunk->set_next(NULL);
chunk->set_index(-1);
+ chunk->zap(chunk->first_alloc());
thread->set_local_free_chunk(chunk);
} else {
return_vt_chunk(thread, chunk);
}
thread->decrement_vtchunk_in_use();
*** 142,157 ****
// This is the main way to recycle VTBuffer memory, it is called from
// remove_activation() when an interpreter frame is about to be removed
// from the stack. All memory used in the context of this frame is freed,
// and the vt_alloc_ptr is restored to the value it had when the frame
// was created (modulo a possible adjustment if a value is being returned)
! void VTBuffer::recycle_vtbuffer(JavaThread* thread, frame current_frame) {
address current_ptr = (address)thread->vt_alloc_ptr();
assert(current_ptr != NULL, "Should not reach here if NULL");
VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
assert(current_chunk->owner() == thread, "Sanity check");
! address previous_ptr = (address)current_frame.interpreter_frame_vt_alloc_ptr();
if (previous_ptr == NULL) {
// vt_alloc_ptr has not been initialized in this frame
// let's initialize it to the first_alloc() value of the first chunk
VTBufferChunk* first_chunk = current_chunk;
while (first_chunk->prev() != NULL) {
--- 165,180 ----
// This is the main way to recycle VTBuffer memory, it is called from
// remove_activation() when an interpreter frame is about to be removed
// from the stack. All memory used in the context of this frame is freed,
// and the vt_alloc_ptr is restored to the value it had when the frame
// was created (modulo a possible adjustment if a value is being returned)
! void VTBuffer::recycle_vtbuffer(JavaThread* thread, void* alloc_ptr) {
address current_ptr = (address)thread->vt_alloc_ptr();
assert(current_ptr != NULL, "Should not reach here if NULL");
VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
assert(current_chunk->owner() == thread, "Sanity check");
! address previous_ptr = (address)alloc_ptr;
if (previous_ptr == NULL) {
// vt_alloc_ptr has not been initialized in this frame
// let's initialize it to the first_alloc() value of the first chunk
VTBufferChunk* first_chunk = current_chunk;
while (first_chunk->prev() != NULL) {
*** 165,174 ****
--- 188,198 ----
if (current_ptr == previous_ptr) return;
assert(current_chunk != previous_chunk || current_ptr >= previous_ptr, "Sanity check");
VTBufferChunk* del = previous_chunk->next();
previous_chunk->set_next(NULL);
thread->set_vt_alloc_ptr(previous_ptr);
+ previous_chunk->zap(previous_ptr);
thread->set_vt_alloc_limit(previous_chunk->alloc_limit());
while (del != NULL) {
VTBufferChunk* temp = del->next();
VTBuffer::recycle_chunk(thread, del);
del = temp;
*** 177,186 ****
--- 201,211 ----
void VTBuffer::return_vt_chunk(JavaThread* thread, VTBufferChunk* chunk) {
chunk->set_prev(NULL);
chunk->set_owner(NULL);
chunk->set_index(-1);
+ chunk->zap(chunk->first_alloc());
MutexLockerEx ml(_pool_lock, Mutex::_no_safepoint_check_flag);
if (_pool_counter < _max_free_list) {
if (_free_list != NULL) {
chunk->set_next(_free_list);
_free_list->set_prev(chunk);
*** 199,223 ****
}
thread->increment_vtchunk_returned();
}
bool VTBuffer::value_belongs_to_frame(oop p, frame* f) {
! // the code below assumes that frame f is the last interpreted frame
! // on the execution stack
int p_chunk_idx = VTBufferChunk::chunk(p)->index();
int frame_first_chunk_idx;
! if (f->interpreter_frame_vt_alloc_ptr() != NULL) {
! frame_first_chunk_idx = VTBufferChunk::chunk(f->interpreter_frame_vt_alloc_ptr())->index();
} else {
frame_first_chunk_idx = 0;
}
if (p_chunk_idx == frame_first_chunk_idx) {
! return (intptr_t*)p >= f->interpreter_frame_vt_alloc_ptr();
} else {
return p_chunk_idx > frame_first_chunk_idx;
}
-
}
void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) {
assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames");
assert(chunk != NULL, "Should not be called if null");
--- 224,250 ----
}
thread->increment_vtchunk_returned();
}
+ // Returns true when buffered value 'p' was allocated in frame 'f' or later,
+ // i.e. at or after the vt_alloc_ptr saved in the interpreter frame.
bool VTBuffer::value_belongs_to_frame(oop p, frame* f) {
! return is_value_allocated_after(p, f->interpreter_frame_vt_alloc_ptr());
! }
!
+ // Returns true when buffered value 'p' was allocated at or after allocation
+ // pointer 'a'. Ordering is decided first by chunk index, then by address
+ // within the same chunk. A NULL 'a' is treated as chunk index 0, i.e. the
+ // very beginning of the buffer.
! bool VTBuffer::is_value_allocated_after(oop p, void* a) {
! // Test if value p has been allocated after alloc ptr a
int p_chunk_idx = VTBufferChunk::chunk(p)->index();
int frame_first_chunk_idx;
! if (a != NULL) {
! frame_first_chunk_idx = VTBufferChunk::chunk(a)->index();
} else {
frame_first_chunk_idx = 0;
}
if (p_chunk_idx == frame_first_chunk_idx) {
! return (intptr_t*)p >= a;
} else {
return p_chunk_idx > frame_first_chunk_idx;
}
}
void VTBuffer::fix_frame_vt_alloc_ptr(frame f, VTBufferChunk* chunk) {
assert(f.is_interpreted_frame(), "recycling can only be triggered from interpreted frames");
assert(chunk != NULL, "Should not be called if null");
*** 264,302 ****
// Relocate value 'old' after value 'previous'
address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) {
InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass());
assert(ik_old->is_value(), "Sanity check");
VTBufferChunk* chunk = VTBufferChunk::chunk(previous);
! address next_alloc = previous + align_object_size(ik_old->size_helper());
if(next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) {
// relocation can be performed in the same chunk
! return previous + align_object_size(previous_size_in_words) * HeapWordSize;
} else {
// relocation must be performed in the next chunk
VTBufferChunk* next_chunk = chunk->next();
! assert(next_chunk != NULL, "Because we are compacting, there should be enough in use chunks");
return (address)next_chunk->first_alloc();
}
}
! oop VTBuffer::relocate_return_value(JavaThread* thread, frame current_frame, oop obj) {
assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values");
assert(obj->klass()->is_value(), "Sanity check");
! if (!VTBuffer::value_belongs_to_frame(obj, ¤t_frame)) return obj;
ValueKlass* vk = ValueKlass::cast(obj->klass());
address current_ptr = (address)thread->vt_alloc_ptr();
VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
! address previous_ptr = (address)current_frame.interpreter_frame_vt_alloc_ptr();
if (previous_ptr == NULL) {
! fix_frame_vt_alloc_ptr(current_frame, current_chunk);
! previous_ptr = (address)current_frame.interpreter_frame_vt_alloc_ptr();
}
VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
address dest;
if ((address)obj != previous_ptr) {
if (previous_chunk == current_chunk
! || (previous_ptr + vk->size_helper() * wordSize) < previous_chunk->alloc_limit()) {
dest = previous_ptr;
} else {
assert(previous_chunk->next() != NULL, "Should not happen");
dest = (address)previous_chunk->next()->first_alloc();
}
--- 291,330 ----
// Relocate value 'old' after value 'previous'
+ // Computes (but does not perform) the destination address for compacting
+ // buffered value 'old' so it sits immediately after 'previous', whose size is
+ // 'previous_size_in_words'. If 'old' does not fit in the remainder of
+ // previous' chunk, the destination is the first allocation slot of the next
+ // chunk, which must exist during compaction. No data is copied here; the
+ // caller moves the bytes.
address VTBuffer::relocate_value(address old, address previous, int previous_size_in_words) {
InstanceKlass* ik_old = InstanceKlass::cast(((oop)old)->klass());
assert(ik_old->is_value(), "Sanity check");
VTBufferChunk* chunk = VTBufferChunk::chunk(previous);
! address next_alloc = previous + previous_size_in_words * HeapWordSize;
if(next_alloc + ik_old->size_helper() * HeapWordSize < chunk->alloc_limit()) {
// relocation can be performed in the same chunk
! return next_alloc;
} else {
// relocation must be performed in the next chunk
VTBufferChunk* next_chunk = chunk->next();
! assert(next_chunk != NULL, "Because we are compacting, there should be enough chunks");
return (address)next_chunk->first_alloc();
}
}
! oop VTBuffer::relocate_return_value(JavaThread* thread, void* alloc_ptr, oop obj) {
assert(!Universe::heap()->is_in_reserved(obj), "This method should never be called on Java heap allocated values");
assert(obj->klass()->is_value(), "Sanity check");
! if (!VTBuffer::is_value_allocated_after(obj, alloc_ptr)) return obj;
ValueKlass* vk = ValueKlass::cast(obj->klass());
address current_ptr = (address)thread->vt_alloc_ptr();
VTBufferChunk* current_chunk = VTBufferChunk::chunk(current_ptr);
! address previous_ptr = (address)alloc_ptr;
if (previous_ptr == NULL) {
! VTBufferChunk* c = VTBufferChunk::chunk(obj);
! while (c->prev() != NULL) c = c->prev();
! previous_ptr = (address)c->first_alloc();
}
VTBufferChunk* previous_chunk = VTBufferChunk::chunk(previous_ptr);
address dest;
if ((address)obj != previous_ptr) {
if (previous_chunk == current_chunk
! && (previous_ptr + vk->size_helper() * HeapWordSize) < previous_chunk->alloc_limit()) {
dest = previous_ptr;
} else {
assert(previous_chunk->next() != NULL, "Should not happen");
dest = (address)previous_chunk->next()->first_alloc();
}
*** 306,321 ****
vk->value_store(((char*)(address)obj) + vk->first_field_offset(),
dest + vk->first_field_offset(), false, true);
} else {
dest = (address)obj;
}
- address new_alloc_ptr = dest + vk->size_helper() * wordSize;
- current_frame.interpreter_frame_set_vt_alloc_ptr((intptr_t*)new_alloc_ptr);
VTBufferChunk* last = VTBufferChunk::chunk(dest);
- VTBufferChunk* del = last->next();
- thread->set_vt_alloc_ptr(new_alloc_ptr);
thread->set_vt_alloc_limit(last->alloc_limit());
last->set_next(NULL);
while (del != NULL) {
VTBufferChunk* tmp = del->next();
VTBuffer::recycle_chunk(thread, del);
del = tmp;
--- 334,349 ----
vk->value_store(((char*)(address)obj) + vk->first_field_offset(),
dest + vk->first_field_offset(), false, true);
} else {
dest = (address)obj;
}
VTBufferChunk* last = VTBufferChunk::chunk(dest);
thread->set_vt_alloc_limit(last->alloc_limit());
+ void* new_alloc_ptr = MIN2((void*)(dest + vk->size_helper() * HeapWordSize), last->alloc_limit());
+ thread->set_vt_alloc_ptr(new_alloc_ptr);
+ assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
+ VTBufferChunk* del = last->next();
last->set_next(NULL);
while (del != NULL) {
VTBufferChunk* tmp = del->next();
VTBuffer::recycle_chunk(thread, del);
del = tmp;
*** 389,428 ****
}
// 5 - relocate values
for (int i = 0; i < n_entries; i++) {
if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) {
InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass());
// instead of memcpy, a value_store() might be required here
memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize);
}
! // Resetting the mark word
! ((oop)reloc_table[i].new_ptr)->set_mark(markOop(((oop)reloc_table[i].new_ptr)->klass()->java_mirror()));
}
if (ReportVTBufferRecyclingTimes) {
step5 = Ticks::now();
}
- // 6 - update thread allocation pointer
oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr;
InstanceKlass* ik = InstanceKlass::cast(last_oop->klass());
! thread->set_vt_alloc_ptr((address)last_oop + ik->size_helper() * HeapWordSize);
! thread->set_vt_alloc_limit(VTBufferChunk::chunk(thread->vt_alloc_ptr())->alloc_limit());
if (ReportVTBufferRecyclingTimes) {
step6 = Ticks::now();
}
// 7 - free/return unused chunks
! VTBufferChunk* chunk = VTBufferChunk::chunk(reloc_table[n_entries - 1].new_ptr);
! VTBufferChunk* temp = chunk;
! chunk = chunk->next();
! temp->set_next(NULL);
! while (chunk != NULL) {
returned_chunks++;
! temp = chunk->next();
! VTBuffer::recycle_chunk(thread, chunk);
! chunk = temp;
}
if (ReportVTBufferRecyclingTimes) {
step7 = Ticks::now();
}
} else {
--- 417,462 ----
}
// 5 - relocate values
for (int i = 0; i < n_entries; i++) {
if (reloc_table[i].old_ptr != reloc_table[i].new_ptr) {
+ assert(VTBufferChunk::chunk(reloc_table[i].old_ptr)->owner() == Thread::current(), "Sanity check");
+ assert(VTBufferChunk::chunk(reloc_table[i].new_ptr)->owner() == Thread::current(), "Sanity check");
InstanceKlass* ik_old = InstanceKlass::cast(((oop)reloc_table[i].old_ptr)->klass());
// instead of memcpy, a value_store() might be required here
memcpy(reloc_table[i].new_ptr, reloc_table[i].old_ptr, ik_old->size_helper() * HeapWordSize);
}
! // Restoring the mark word
! ((oop)reloc_table[i].new_ptr)->set_mark(reloc_table[i].mark_word);
}
if (ReportVTBufferRecyclingTimes) {
step5 = Ticks::now();
}
oop last_oop = (oop)reloc_table[n_entries - 1].new_ptr;
+ assert(last_oop->is_value(), "sanity check");
+ assert(VTBufferChunk::chunk((address)last_oop)->owner() == Thread::current(), "Sanity check");
+ VTBufferChunk* last_chunk = VTBufferChunk::chunk(last_oop);
InstanceKlass* ik = InstanceKlass::cast(last_oop->klass());
! thread->set_vt_alloc_limit(last_chunk->alloc_limit());
! void* new_alloc_ptr = MIN2((void*)((address)last_oop + ik->size_helper() * HeapWordSize), thread->vt_alloc_limit());
! thread->set_vt_alloc_ptr(new_alloc_ptr);
! assert(VTBufferChunk::chunk(thread->vt_alloc_ptr())->owner() == Thread::current(), "Sanity check");
! assert(VTBufferChunk::chunk(thread->vt_alloc_limit()) == VTBufferChunk::chunk(thread->vt_alloc_ptr()), "Sanity check");
if (ReportVTBufferRecyclingTimes) {
step6 = Ticks::now();
}
// 7 - free/return unused chunks
! VTBufferChunk* last = VTBufferChunk::chunk(thread->vt_alloc_ptr());
! VTBufferChunk* del = last->next();
! last->set_next(NULL);
! while (del != NULL) {
returned_chunks++;
! VTBufferChunk* tmp = del->next();
! VTBuffer::recycle_chunk(thread, del);
! del = tmp;
}
if (ReportVTBufferRecyclingTimes) {
step7 = Ticks::now();
}
} else {
*** 430,439 ****
--- 464,474 ----
}
}
// 8 - free relocation table
FREE_RESOURCE_ARRAY(struct VT_relocation_entry, reloc_table, max_entries);
+
if (ReportVTBufferRecyclingTimes) {
end = Ticks::now();
ResourceMark rm(thread);
tty->print_cr("VTBufferRecyling: %s : %s.%s %s : " JLONG_FORMAT "us",
thread->name(),
*** 458,467 ****
--- 493,503 ----
if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
if (!(*p)->mark()->is_marked()) {
assert(*_index < _size, "index outside of relocation table range");
_reloc_table[*_index].old_ptr = (address)*p;
_reloc_table[*_index].chunk_index = VTBufferChunk::chunk(*p)->index();
+ _reloc_table[*_index].mark_word = (*p)->mark();
*_index = (*_index) + 1;
(*p)->set_mark((*p)->mark()->set_marked());
}
}
}
*** 471,475 ****
--- 507,549 ----
// might be coded more efficiently just by checking mark word is not NULL
if (VTBuffer::value_belongs_to_frame(*p, _frame)) {
*p = (oop)(*p)->mark();
}
}
+
+ // Scoped (RAII) helper that registers itself on the current thread and starts
+ // a new dealiasing cycle for 'thread' (the target): it switches the target's
+ // vtbuffer mark so that values tagged with the new mark can be distinguished
+ // from values not yet visited in this cycle (see oops_do()). Only one
+ // dealiaser may be active on the current thread at a time.
+ BufferedValuesDealiaser::BufferedValuesDealiaser(JavaThread* thread) {
+ Thread* current = Thread::current();
+ assert(current->buffered_values_dealiaser() == NULL, "Must not be used twice concurrently");
+ VTBuffer::Mark mark = VTBuffer::switch_mark(thread->current_vtbuffer_mark());
+ _target = thread;
+ _current_mark = mark;
+ thread->set_current_vtbuffer_mark(_current_mark);
+ current->_buffered_values_dealiaser = this;
+ }
+
+ // Applies closure 'f' to the oops inside buffered value 'value' exactly once
+ // per dealiasing cycle. The value's mark word stores its klass' java_mirror
+ // with the current cycle's mark tag OR-ed into the low bits
+ // (VTBuffer::mark_mask); a value already tagged with _current_mark has been
+ // visited in this cycle and is skipped.
+ void BufferedValuesDealiaser::oops_do(OopClosure* f, oop value) {
+
+ assert(VTBuffer::is_in_vt_buffer((oopDesc*)value), "Should only be called on buffered values");
+
+ // Already processed during this cycle? (low mark bits carry the cycle tag)
+ intptr_t mark = *(intptr_t*)(value)->mark_addr();
+ if ((mark & VTBuffer::mark_mask) == _current_mark) {
+ return;
+ }
+
+ ValueKlass* vk = ValueKlass::cast(value->klass());
+
+ // Strip the tag bits to recover the plain java_mirror and temporarily
+ // restore it as the mark word while iterating the value's internal oops.
+ oop mirror = (oopDesc*)((intptr_t)value->mark() & (intptr_t)~VTBuffer::mark_mask);
+ assert(oopDesc::is_oop(mirror), "Sanity check");
+ value->set_mark((markOop)mirror);
+
+ vk->iterate_over_inside_oops(f, value);
+
+ // Re-tag the (possibly updated) mark word with the current cycle's mark so
+ // subsequent calls in this cycle skip the value.
+ intptr_t new_mark_word = ((intptr_t) (oopDesc*)(value->mark()))
+ | (intptr_t)_current_mark;
+ value->set_mark(markOop((oopDesc*)new_mark_word));
+ }
+
+ // Ends the dealiasing cycle: verifies the target's vtbuffer mark was not
+ // changed while this dealiaser was active, then unregisters it from the
+ // current thread.
+ BufferedValuesDealiaser::~BufferedValuesDealiaser() {
+ assert(Thread::current()->buffered_values_dealiaser() != NULL, "Should not be NULL");
+ assert(_target->current_vtbuffer_mark() == _current_mark, "Must be the same");
+ Thread::current()->_buffered_values_dealiaser = NULL;
+ }
< prev index next >