     slop_bytes += slop_delta;
     assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ bind(L);
   }
 #endif // PRODUCT

   const Register method = rbx;

   // load Method* and target address
   start_pc = __ pc();
   __ lookup_virtual_method(rax, vtable_index, method);
   slop_delta  = 8 - (int)(__ pc() - start_pc);
   slop_bytes += slop_delta;
   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     __ cmpptr(method, (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, L);
-    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
+    __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::notZero, L);
     __ stop("Vtable entry is NULL");
     __ bind(L);
   }
 #endif // PRODUCT

   // rax: receiver klass
   // method (rbx): Method*
   // rcx: receiver
   address ame_addr = __ pc();
-  __ jmp( Address(rbx, Method::from_compiled_offset()));
+  __ jmp( Address(rbx, Method::from_compiled_value_ro_offset()));

   masm->flush();
   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);

   return s;
 }


 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
   const int stub_code_length = code_size_limit(false);
   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
   // Count unused bytes in instruction sequences of variable size.
   // We add them to the computed buffer size in order to avoid
   // overflow in subsequently generated stubs.

@@ ... @@  (unchanged lines omitted)

   // For linux, a very narrow estimate would be 112, but Solaris requires some more space (130).
   const ptrdiff_t estimate = 136;
   const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
   slop_delta  = (int)(estimate - codesize);
   slop_bytes += slop_delta;
   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

   // If we take a trap while this arg is on the stack we will not
   // be able to walk the stack properly. This is not an issue except
   // when there are mistakes in this assembly code that could generate
   // a spurious fault. Ask me how I know...

   // method (rbx): Method*
   // j_rarg0: receiver

 #ifdef ASSERT
   if (DebugVtables) {
     Label L2;
     __ cmpptr(method, (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, L2);
-    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
+    __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::notZero, L2);
     __ stop("compiler entrypoint is null");
     __ bind(L2);
   }
 #endif // ASSERT

   address ame_addr = __ pc();
-  __ jmp(Address(method, Method::from_compiled_offset()));
+  __ jmp(Address(method, Method::from_compiled_value_ro_offset()));

   __ bind(L_no_such_interface);
   // Handle IncompatibleClassChangeError in itable stubs.
   // More detailed error message.
   // We force resolving of the call site by jumping to the "handle
   // wrong method" stub, and so let the interpreter runtime do all the
   // dirty work.
   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

   masm->flush();
   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);

   return s;
 }

 int VtableStub::pd_code_alignment() {
   // x86 cache line size is 64 bytes, but we want to limit alignment loss.
   const unsigned int icache_line_size = wordSize;
   return icache_line_size;
 }
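Side note on the sizing pattern used throughout these stubs: each variable-length instruction sequence is emitted against a fixed byte estimate, the actual length is measured via __ pc(), and the leftover ("slop") must be non-negative and is accumulated into the buffer-size bookkeeping. Below is a minimal, self-contained C++ sketch of that idea; the Assembler type and its emit() call are illustrative stand-ins, not HotSpot APIs.

#include <cassert>
#include <cstdio>

// Illustrative stand-in for a code buffer; pc() reports bytes emitted so far.
struct Assembler {
  int emitted = 0;
  int pc() const { return emitted; }
  void emit(int bytes) { emitted += bytes; }  // pretend to emit an instruction sequence
};

int main() {
  Assembler masm;
  int slop_bytes = 0;

  // Estimate the worst-case size of a variable-length sequence up front...
  const int estimate = 8;        // cf. "slop_delta = 8 - ..." for lookup_virtual_method
  int start_pc = masm.pc();
  masm.emit(6);                  // ...emit it (here it came out 2 bytes short)...
  int slop_delta = estimate - (masm.pc() - start_pc);

  // ...then require that the estimate was large enough and bank the unused bytes,
  // so later stubs with larger encodings (e.g. big itable indices) still fit.
  assert(slop_delta >= 0 && "negative slop encountered, adjust code size estimate!");
  slop_bytes += slop_delta;

  printf("slop_bytes = %d\n", slop_bytes);
  return 0;
}

The same estimate-measure-assert shape appears twice in the diff above: with an 8-byte estimate around lookup_virtual_method, and with the 136-byte estimate around the interface lookup and type check.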