/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;
  // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0
  // will reveal any deviation from this observation.
  const int index_dependent_slop = 0;
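  // Illustrative example of the slop accounting: if an instruction sequence
  // is estimated at 8 bytes but assembles to only 5 for this particular index,
  // then slop_delta == 3 and those 3 bytes are added to slop_bytes, keeping
  // the overall size estimate valid for indices with longer encodings.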

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Free registers (non-args) are rax, rbx

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(rax, j_rarg0);
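  // Note: npe_addr marks the first instruction that dereferences the receiver.
  // If the receiver is NULL, the hardware fault taken at this PC is translated
  // into a NullPointerException (HotSpot's implicit null check mechanism).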

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    // VTABLE TODO: find upper bound for call_VM length.
    start_pc = __ pc();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
    slop_delta  = 550 - (int)(__ pc() - start_pc);  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ bind(L);
  }
#endif // PRODUCT

  const Register method = rbx;

  // load Method* and target address
  start_pc = __ pc();
  __ lookup_virtual_method(rax, vtable_index, method);
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
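  // The 8-byte estimate above covers the single vtable-entry load emitted by
  // lookup_virtual_method; its encoding could in principle vary with the
  // displacement size (disp8 vs. disp32), although, as noted above, no actual
  // size variance has been observed for vtable stubs.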

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
#endif // PRODUCT

  // rax: receiver klass
  // method (rbx): Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(rbx, Method::from_compiled_value_ro_offset()));
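  // Note: ame_addr marks the dispatch jump. If the vtable slot does not hold
  // a valid Method*, the load of the entry point through rbx faults here, and
  // the fault at this PC is reported as an AbstractMethodError.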

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);

  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;
  const int index_dependent_slop = (itable_index == 0) ? 4 :     // index == 0 generates even shorter code.
                                   (itable_index < 16) ? 3 : 0;  // code size changes with the transition from 8-bit to 32-bit constants (@ itable_index == 16).
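  // Background on the size transition: lookup_interface_method addresses the
  // itable entry at a constant offset that scales with itable_index (by
  // wordSize). Offsets in [-128, 127] use a 1-byte imm8/disp8 encoding, larger
  // ones a 4-byte imm32/disp32, so the emitted code grows once the scaled
  // index leaves that range (around itable_index == 16, since 16 * 8 == 128).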

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler *masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // ... receiver klass load (setting npe_addr), receiver subtype check against
  //     the resolved interface klass, and itable lookup of the selected method
  //     via lookup_interface_method (defining method, L_no_such_interface,
  //     typecheckSize, and lookupSize) elided ...

  // For Linux, a very narrow estimate would be 112, but Solaris requires some more space (130).
  const ptrdiff_t estimate = 136;
  const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
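  // Worked example: if the type check assembles to 60 bytes and the method
  // lookup to 62, then for an itable_index of 8 (slop 3) codesize == 125 and
  // slop_delta == 136 - 125 == 11; those 11 spare bytes are carried forward
  // in slop_bytes. (Sizes here are illustrative, not measured.)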

  // If we take a trap while this arg is on the stack we will not
  // be able to walk the stack properly. This is not an issue except
  // when there are mistakes in this assembly code that could generate
  // a spurious fault. Ask me how I know...

  // method (rbx): Method*
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L2);
    __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  address ame_addr = __ pc();
  __ jmp(Address(method, Method::from_compiled_value_ro_offset()));

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs with a more detailed
  // error message: we force re-resolution of the call site by jumping to the
  // "handle wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);

  return s;
}

int VtableStub::pd_code_alignment() {
  // x86 cache line size is 64 bytes, but we want to limit alignment loss.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}

/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;
  // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0
  // will reveal any deviation from this observation.
  const int index_dependent_slop = 0;
  ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_value_offset() : Method::from_compiled_value_ro_offset();
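  // (Assumption, inferred from the two offsets used here: C1-compiled callers
  // and other compiled callers can differ in how inline-type arguments are
  // passed, so the stub must jump to the Method entry point that matches the
  // caller's calling convention; caller_is_c1 selects between the "value"
  // and the "value_ro" variant of the compiled entry.)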

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Free registers (non-args) are rax, rbx

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(rax, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    // VTABLE TODO: find upper bound for call_VM length.
    start_pc = __ pc();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
    slop_delta  = 550 - (int)(__ pc() - start_pc);  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ bind(L);
  }
#endif // PRODUCT

  const Register method = rbx;

  // load Method* and target address
  start_pc = __ pc();
  __ lookup_virtual_method(rax, vtable_index, method);
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, entry_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
#endif // PRODUCT

  // rax: receiver klass
  // method (rbx): Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(rbx, entry_offset));

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);

  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_value_offset() : Method::from_compiled_value_ro_offset();
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;
  const int index_dependent_slop = (itable_index == 0) ? 4 :     // index == 0 generates even shorter code.
                                   (itable_index < 16) ? 3 : 0;  // code size changes with the transition from 8-bit to 32-bit constants (@ itable_index == 16).

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler *masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // ... receiver klass load (setting npe_addr), receiver subtype check against
  //     the resolved interface klass, and itable lookup of the selected method
  //     via lookup_interface_method (defining method, L_no_such_interface,
  //     typecheckSize, and lookupSize) elided ...

  // For Linux, a very narrow estimate would be 112, but Solaris requires some more space (130).
  const ptrdiff_t estimate = 136;
  const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

  // If we take a trap while this arg is on the stack we will not
  // be able to walk the stack properly. This is not an issue except
  // when there are mistakes in this assembly code that could generate
  // a spurious fault. Ask me how I know...

  // method (rbx): Method*
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L2);
    __ cmpptr(Address(method, entry_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  address ame_addr = __ pc();
  __ jmp(Address(method, entry_offset));

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs with a more detailed
  // error message: we force re-resolution of the call site by jumping to the
  // "handle wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
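  // Note: the "handle wrong method" stub forces re-resolution of the call
  // site in the runtime; if re-resolution finds no implementation of the
  // interface method, the runtime raises the IncompatibleClassChangeError
  // mentioned above.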

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);

  return s;
}

int VtableStub::pd_code_alignment() {
  // x86 cache line size is 64 bytes, but we want to limit alignment loss.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}
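// A note on the alignment trade-off above: aligning each stub to a full
// 64-byte cache line would waste up to 63 bytes of padding per stub (about
// 32 on average), whereas wordSize (8-byte) alignment wastes at most 7 bytes,
// at the cost of some stubs spanning a cache-line boundary.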