#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Machine-dependent part of VtableStubs: create a VtableStub of the correct
// size and initialize its code.

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif
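// NOTE: bad_compiled_vtable_index is the debug helper other platforms call
// from their DebugVtables paths; the corresponding ARM blocks below are still
// unimplemented, so it is only declared here.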

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = VtableStub::code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 0;
  int slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Implementation required?
  }
#endif

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  const Register tmp = Rtemp; // Rtemp OK, should be free at call sites

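  // The klass load below doubles as the implicit null check: npe_addr records
  // its PC so that a SIGSEGV on a null receiver is mapped to a
  // NullPointerException by the signal handler.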
  address npe_addr = __ pc();
  __ load_klass(tmp, R0);

#ifndef PRODUCT
  if (DebugVtables) {
    // Implementation required?
  }
#endif

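  // ARM32 ldr encodes at most a 12-bit unsigned immediate offset; on AArch64
  // the 12-bit immediate is scaled by the word size. For vtable offsets beyond
  // that range, the high part is added explicitly before the load.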
  start_pc = __ pc();
  { // lookup virtual method
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
    int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;

    assert((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
    int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
    if (method_offset & ~offset_mask) {
      __ add(tmp, tmp, method_offset & ~offset_mask);
    }
    __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
  }
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
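  // The 8-byte estimate above is the worst case for this sequence: two 4-byte
  // instructions (add + ldr). When the offset fits the ldr immediate, only the
  // load is emitted and the spare bytes are credited to slop_bytes.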

#ifndef PRODUCT
  if (DebugVtables) {
    // Implementation required?
  }
#endif

  address ame_addr = __ pc();
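  // Jump to the target method's compiled entry point. On ARM32, loading into
  // PC performs the branch directly; AArch64 has no architectural PC register,
  // so the address is loaded into a temp and branched to with br.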
#ifdef AARCH64
  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(tmp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
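  // bookkeeping() (shared code, see share/code/vtableStubs.cpp) guarantees the
  // generated code fits the stub buffer, optionally prints sizing statistics,
  // and records npe_addr/ame_addr as the stub's exception points.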

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = VtableStub::code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 0;
  int slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Implementation required?
  }
#endif

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  // R0-R3 / R0-R7 registers hold the incoming arguments and must not be clobbered
  const Register Rclass = AARCH64_ONLY(R9)  NOT_AARCH64(R4);
  const Register Rintf  = AARCH64_ONLY(R10) NOT_AARCH64(R5);
  const Register Rscan  = AARCH64_ONLY(R11) NOT_AARCH64(R6);
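  // Ricklass is preset by the inline-cache call site: it holds the
  // CompiledICHolder carrying both the resolved reference class (REFC) and the
  // interface that declares the selected method.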

  Label L_no_such_interface;

  assert_different_registers(Ricklass, Rclass, Rintf, Rscan, Rtemp);

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(Rclass, R0);

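  // Two passes over the itable follow: the first only verifies that the
  // receiver's class implements REFC, the second locates the Method* for the
  // declaring interface.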
  // Receiver subtype check against REFC.
  __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             Rclass, Rintf, noreg,
                             // outputs: temp reg1, temp reg2
                             noreg, Rscan, Rtemp,
                             L_no_such_interface);

  const ptrdiff_t typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get Method* and entry point for compiler
  __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             Rclass, Rintf, itable_index,
                             // outputs: temp reg1, temp reg2, temp reg3
                             Rmethod, Rscan, Rtemp,
                             L_no_such_interface);

  const ptrdiff_t lookupSize = __ pc() - start_pc;

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 140;
  const ptrdiff_t codesize = typecheckSize + lookupSize;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d",
         itable_index, (int)estimate, (int)codesize);
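  // lookup_interface_method emits a scan loop of variable length, so the two
  // pass sizes are measured rather than computed; any unused part of the
  // 140-byte estimate is credited to slop_bytes for bookkeeping().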

#ifndef PRODUCT
  if (DebugVtables) {
    // Implementation required?
  }
#endif

  address ame_addr = __ pc();

#ifdef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(Rtemp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs with a more detailed
  // error message: we force re-resolution of the call site by jumping to the
  // "handle wrong method" stub and let the interpreter runtime do all the
  // dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
  __ jump(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type, Rtemp);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // ARM32 cache line size is not an architected constant. We just align on word size.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}