src/hotspot/cpu/x86/templateTable_x86.cpp
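Summary: the hunks below appear to remove the experimental value-type buffering support (the ValueTypesBufferMaxMemory and ValueTypesThreadLocalRecycling code paths) from the x86 template interpreter. The buffered-value checks and the associated calls into InterpreterRuntime (value_heap_copy, write_heap_copy, return_value, fix_frame_vt_alloc_ptr, recycle_buffered_values) are deleted, leaving the plain heap-oop store paths.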

*** 969,979 ****
      __ movl(bc, Bytecodes::_fast_iaccess_0);
      __ jccb(Assembler::equal, rewrite);
  
      // if _agetfield then rewrite to _fast_aaccess_0
      assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
-     assert(ValueTypesBufferMaxMemory == 0, "Such rewritting doesn't support flattened values yet");
      __ cmpl(rbx, Bytecodes::_fast_agetfield);
      __ movl(bc, Bytecodes::_fast_aaccess_0);
      __ jccb(Assembler::equal, rewrite);
  
      // if _fgetfield then rewrite to _fast_faccess_0
--- 969,978 ----
*** 1171,1188 ****
    // Come here on success
    __ bind(ok_is_subtype);
  
    // Get the value we will store
    __ movptr(rax, at_tos());
-   if (ValueTypesBufferMaxMemory > 0) {
-     Label is_on_heap;
-     __ test_value_is_not_buffered(rax, rbx, is_on_heap);
-     __ push(rdx); // save precomputed element address, and convert buffer oop to heap oop
-     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_heap_copy), rax);
-     __ pop(rdx);
-     __ bind(is_on_heap);
-   }
    __ movl(rcx, at_tos_p1()); // index
    // Now store using the appropriate barrier
    do_oop_store(_masm, element_address, rax, IS_ARRAY);
    __ jmp(done);
  
--- 1170,1179 ----
*** 2212,2251 ****
  #endif // _LP64
    }
  }
  
  void TemplateTable::branch(bool is_jsr, bool is_wide) {
-   if (ValueTypesThreadLocalRecycling) {
-     Label no_vt_recycling, no_fixing_required;
-     const Register thread1 = NOT_LP64(rbx) LP64_ONLY(r15_thread);
-     NOT_LP64(__ get_thread(thread1));
-     __ movptr(rbx, Address(thread1, in_bytes(JavaThread::vt_alloc_ptr_offset())));
-     __ testptr(rbx, rbx);
-     __ jcc(Assembler::zero, no_vt_recycling);
-     __ movptr(rcx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
-     __ testptr(rcx, rcx);
-     __ jcc(Assembler::notZero, no_fixing_required);
-     // vt_alloc_ptr in JavaThread is non-null but frame vt_alloc_ptr is null
-     // which means frame vt_alloc_ptr needs to be initialized
-     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::fix_frame_vt_alloc_ptr));
-     __ movptr(rcx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
-     __ bind(no_fixing_required);
-     __ testptr(rcx, rbx);
-     __ jcc(Assembler::equal, no_vt_recycling);
-     __ andptr(rcx, VTBufferChunk::chunk_mask());
-     __ movl(rcx, Address(rcx, VTBufferChunk::index_offset()));
-     __ andptr(rbx, VTBufferChunk::chunk_mask());
-     __ movl(rbx, Address(rbx, VTBufferChunk::index_offset()));
-     __ subl(rbx, rcx);
-     __ get_method(rcx);
-     __ movl(rcx, Address(rcx, Method::max_vt_buffer_offset()));
-     __ cmpl(rbx, rcx);
-     __ jcc(Assembler::lessEqual, no_vt_recycling);
-     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::recycle_buffered_values));
-     __ bind(no_vt_recycling);
-   }
- 
    __ get_method(rcx); // rcx holds method
    __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                       // holds bumped taken count
  
    const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
--- 2203,2212 ----
*** 2816,2836 ****
    __ load_unsigned_short(tmp, Address(rbx, Method::flags_offset()));
    __ testl(tmp, Method::is_returning_vt_mask());
    __ jcc(Assembler::zero, not_returning_null_vt);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::deoptimize_caller_frame_for_vt), method);
    __ bind(not_returning_null_vt);
- 
-   if (ValueTypesBufferMaxMemory > 0) {
-     Label notBuffered;
- 
-     __ test_value_is_not_buffered(rax, rbx, notBuffered);
-     const Register thread1 = NOT_LP64(rcx) LP64_ONLY(r15_thread);
-     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::return_value), rax);
-     NOT_LP64(__ get_thread(thread1));
-     __ get_vm_result(rax, thread1);
-     __ bind(notBuffered);
-   }
  }
  
  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
--- 2777,2786 ----
*** 3436,3467 ****
    if (is_static) {
      Label notFlattenable, notBuffered;
      __ test_field_is_not_flattenable(flags2, rscratch1, notFlattenable);
      __ null_check(rax);
      __ bind(notFlattenable);
-     if (ValueTypesBufferMaxMemory > 0) {
-       __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
-       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
-               rax, off, obj);
-       __ jmp(Done);
-       __ bind(notBuffered);
-     }
      do_oop_store(_masm, field, rax);
      __ jmp(Done);
    } else {
      Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable;
      __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
      // Not flattenable case, covers not flattenable values and objects
      pop_and_check_object(obj);
      // Store into the field
-     if (ValueTypesBufferMaxMemory > 0) {
-       __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
-       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
-               rax, off, obj);
-       __ jmp(rewriteNotFlattenable);
-       __ bind(notBuffered);
-     }
      do_oop_store(_masm, field, rax);
      __ bind(rewriteNotFlattenable);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
      }
--- 3386,3403 ----
*** 3469,3486 ****
      // Implementation of the flattenable semantic
      __ bind(isFlattenable);
      __ null_check(rax);
      __ test_field_is_flattened(flags2, rscratch1, isFlattened);
      // Not flattened case
-     if (ValueTypesBufferMaxMemory > 0) {
-       __ test_value_is_not_buffered(rax, rscratch1, notBuffered2);
-       pop_and_check_object(obj);
-       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
-               rax, off, obj);
-       __ jmp(rewriteFlattenable);
-       __ bind(notBuffered2);
-     }
      pop_and_check_object(obj);
      // Store into the field
      do_oop_store(_masm, field, rax);
      __ jmp(rewriteFlattenable);
      __ bind(isFlattened);
--- 3405,3414 ----
*** 3745,3785 ****
    // access field
    switch (bytecode()) {
    case Bytecodes::_fast_qputfield:
      {
!       Label isFlattened, notBuffered, done;
        __ null_check(rax);
        __ test_field_is_flattened(rscratch2, rscratch1, isFlattened);
        // No Flattened case
-       if (ValueTypesBufferMaxMemory > 0) {
-         __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
-         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
-                 rax, rbx, rcx);
-         __ jmp(done);
-         __ bind(notBuffered);
-       }
        do_oop_store(_masm, field, rax);
        __ jmp(done);
        __ bind(isFlattened);
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
                rax, rbx, rcx);
        __ bind(done);
      }
      break;
    case Bytecodes::_fast_aputfield:
    {
-     Label notBuffered, done;
-     if (ValueTypesBufferMaxMemory > 0) {
-       __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
-       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
-               rax, rbx, rcx);
-       __ jmp(done);
-       __ bind(notBuffered);
-     }
      do_oop_store(_masm, field, rax);
-     __ bind(done);
    }
    break;
  case Bytecodes::_fast_lputfield:
  #ifdef _LP64
    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
--- 3673,3697 ----
    // access field
    switch (bytecode()) {
    case Bytecodes::_fast_qputfield:
      {
!       Label isFlattened, done;
        __ null_check(rax);
        __ test_field_is_flattened(rscratch2, rscratch1, isFlattened);
        // No Flattened case
        do_oop_store(_masm, field, rax);
        __ jmp(done);
        __ bind(isFlattened);
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
                rax, rbx, rcx);
        __ bind(done);
      }
      break;
    case Bytecodes::_fast_aputfield:
    {
      do_oop_store(_masm, field, rax);
    }
    break;
  case Bytecodes::_fast_lputfield:
  #ifdef _LP64
    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);