/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_s390.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

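// Machine-dependent part of VtableStubs: create vtableStub of correct
// size and initialize its code.
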
// Used by compiler only; may use only caller saved, non-argument registers.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = VtableStub::code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
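  // The CodeBuffer wraps the stub's preallocated space in the code cache;
  // everything emitted below must fit into stub_code_length bytes.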

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // worst case actual size
    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    // Use generic emitter for direct memory increment.
    // Abuse Z_method as scratch register for generic emitter.
    // It is loaded further down anyway before it is first used.
    // No dynamic code size variance here, increment is 1, always.
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  const Register rcvr_klass = Z_R1_scratch;
  address        npe_addr   = __ pc(); // npe == NULL ptr exception
  // Check if we must do an explicit check (implicit checks disabled, offset too large).
  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
  // Get receiver klass.
  __ load_klass(rcvr_klass, Z_ARG1);
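  // Note: load_klass emits a longer sequence when UseCompressedClassPointers
  // is on, since the compressed klass pointer must be decoded after the load.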

#ifndef PRODUCT
  if (DebugVtables) {
    NearLabel L;
    // Check offset vs vtable length.
    const Register vtable_idx = Z_R0_scratch;

    // worst case actual size
    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

    assert(Displacement::is_shortDisp(in_bytes(Klass::vtable_length_offset())), "disp too large");
    __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ z_brl(L);
    __ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
    // Count unused bytes (assume worst case here).
    slop_bytes += 12;
    __ bind(L);
  }
#endif

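  // A vtable entry holds a single Method* slot; entry_offset addresses the
  // slot for vtable_index relative to the receiver's Klass.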
  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index * vtableEntry::size_in_bytes();
  int v_off        = entry_offset + vtableEntry::method_offset_in_bytes();

  // Set method (in case of interpreted method), and destination address.
  // Duplicate safety code from enc_class Java_Dynamic_Call_dynTOC.
  if (Displacement::is_validDisp(v_off)) {
    __ z_lg(Z_method/*method oop*/, v_off, rcvr_klass/*class oop*/);
    // Account for the load_const in the else path.
    slop_delta = __ load_const_size();
  } else {
    // Worst case: offset does not fit in the displacement field.
    // worst case actual size
    slop_delta = __ load_const_size() - __ load_const_optimized_rtn_len(Z_method, v_off, true);
    __ z_lg(Z_method/*method oop*/, 0, Z_method/*method offset*/, rcvr_klass/*class oop*/);
  }
  slop_bytes += slop_delta;

#ifndef PRODUCT
  if (DebugVtables) {
    NearLabel L;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(L);
    __ stop("Vtable entry is ZERO", 102);
    __ bind(L);
  }
#endif

  // Must do an explicit check if offset is too large or implicit checks are disabled.
  address ame_addr = __ pc(); // ame == abstract method error
  __ null_check(Z_method, Z_R1_scratch, in_bytes(Method::from_compiled_offset()));
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  masm->flush();
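  // bookkeeping() is shared infrastructure (see share/code/vtableStubs.hpp):
  // it records the npe/ame exception points in the stub and checks the
  // actually emitted size, including slop, against the computed limit.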
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = VtableStub::code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // worst case actual size
    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    // Use generic emitter for direct memory increment.
    // Z_method must not be clobbered here: it still holds the CompiledICHolder
    // that is read below. Use Z_tmp_1 as scratch register instead.
    // No dynamic code size variance here, increment is 1, always.
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_tmp_1);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  // Entry arguments:
  //  Z_method: CompiledICHolder (interface klass and method metadata)
  //  Z_ARG1:   Receiver
  NearLabel      no_such_interface;
  const Register rcvr_klass = Z_tmp_1,
                 interface  = Z_tmp_2;

  // Get receiver klass.
  // Must do an explicit check if offset is too large or implicit checks are disabled.
  address npe_addr = __ pc(); // npe == NULL ptr exception
  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
  __ load_klass(rcvr_klass, Z_ARG1);

  // Receiver subtype check against REFC.
  __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_klass_offset()));
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             noreg, Z_R1, no_such_interface, /*return_method=*/ false);

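  // The first lookup pass above only verifies that the receiver's class
  // implements the reference class (REFC), branching to no_such_interface
  // otherwise. The second pass below fetches the target Method*.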
  // Get Method* and entrypoint for compiler.
  __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_metadata_offset()));
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             Z_method, Z_R1, no_such_interface, /*return_method=*/ true);

#ifndef PRODUCT
  if (DebugVtables) {
    NearLabel ok1;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(ok1);
    __ stop("method is null", 103);
    __ bind(ok1);
  }
#endif

  address ame_addr = __ pc();
  // Must do an explicit check if implicit checks are disabled.
  if (!ImplicitNullChecks) {
    __ compare64_and_branch(Z_method, (intptr_t) 0, Assembler::bcondEqual, no_such_interface);
  }
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  // Handle IncompatibleClassChangeError in itable stubs.
  __ bind(no_such_interface);
  // To get a more detailed IncompatibleClassChangeError, we force re-resolving
  // of the call site by jumping to the "handle wrong method" stub, thus
  // letting the interpreter runtime do all the dirty work.
  // worst case actual size
  slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::get_handle_wrong_method_stub(), true);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
  __ z_br(Z_R1_scratch);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // System z cache line size is 256 bytes, but octoword (32-byte) alignment is sufficient.
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}