160 JavaThread* thread = JavaThread::current();
161 RegisterMap reg_map(thread, false);
162 frame runtime_frame = thread->last_frame();
163 frame caller_frame = runtime_frame.sender(®_map);
164 assert(caller_frame.is_compiled_frame(), "must be compiled");
165 return caller_frame.is_deoptimized_frame();
166 }
167
168 // Stress deoptimization
169 static void deopt_caller() {
170 if ( !caller_is_deopted()) {
171 JavaThread* thread = JavaThread::current();
172 RegisterMap reg_map(thread, false);
173 frame runtime_frame = thread->last_frame();
174 frame caller_frame = runtime_frame.sender(®_map);
175 Deoptimization::deoptimize_frame(thread, caller_frame.id());
176 assert(caller_is_deopted(), "Must be deoptimized");
177 }
178 }
179
180
181 void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
182 assert(0 <= id && id < number_of_ids, "illegal stub id");
183 ResourceMark rm;
184 // create code buffer for code storage
185 CodeBuffer code(buffer_blob);
186
187 OopMapSet* oop_maps;
188 int frame_size;
189 bool must_gc_arguments;
190
191 Compilation::setup_code_buffer(&code, 0);
192
193 // create assembler for code generation
194 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
195 // generate code for runtime stub
196 oop_maps = generate_code_for(id, sasm);
197 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
198 "if stub has an oop map it must have a valid frame size");
199
200 #ifdef ASSERT
201 // Make sure that stubs that need oopmaps have them
202 switch (id) {
203 // These stubs don't need to have an oopmap
204 case dtrace_object_alloc_id:
205 case g1_pre_barrier_slow_id:
206 case g1_post_barrier_slow_id:
207 case slow_subtype_check_id:
208 case fpu2long_stub_id:
209 case unwind_exception_id:
210 case counter_overflow_id:
211 #if defined(SPARC) || defined(PPC32)
212 case handle_exception_nofpu_id: // Unused on sparc
213 #endif
214 break;
215
216 // All other stubs should have oopmaps
217 default:
218 assert(oop_maps != NULL, "must have an oopmap");
219 }
220 #endif
221
222 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
223 sasm->align(BytesPerWord);
224 // make sure all code is in code buffer
225 sasm->flush();
226
227 frame_size = sasm->frame_size();
228 must_gc_arguments = sasm->must_gc_arguments();
229 // create blob - distinguish a few special cases
230 CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
231 &code,
232 CodeOffsets::frame_never_safe,
233 frame_size,
234 oop_maps,
235 must_gc_arguments);
236 // install blob
237 assert(blob != NULL, "blob must exist");
238 _blobs[id] = blob;
239 }
240
241
242 void Runtime1::initialize(BufferBlob* blob) {
243 // platform-dependent initialization
244 initialize_pd();
245 // generate stubs
246 for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
247 // printing
248 #ifndef PRODUCT
249 if (PrintSimpleStubs) {
250 ResourceMark rm;
251 for (int id = 0; id < number_of_ids; id++) {
252 _blobs[id]->print();
253 if (_blobs[id]->oop_maps() != NULL) {
254 _blobs[id]->oop_maps()->print();
255 }
256 }
257 }
258 #endif
259 }
260
261
262 CodeBlob* Runtime1::blob_for(StubID id) {
263 assert(0 <= id && id < number_of_ids, "illegal stub id");
264 return _blobs[id];
265 }
266
267
268 const char* Runtime1::name_for(StubID id) {
269 assert(0 <= id && id < number_of_ids, "illegal stub id");
270 return _blob_names[id];
271 }
272
273 const char* Runtime1::name_for_address(address entry) {
274 for (int id = 0; id < number_of_ids; id++) {
275 if (entry == entry_for((StubID)id)) return name_for((StubID)id);
276 }
277
278 #define FUNCTION_CASE(a, f) \
279 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f
|
161 JavaThread* thread = JavaThread::current();
162 RegisterMap reg_map(thread, false);
163 frame runtime_frame = thread->last_frame();
164 frame caller_frame = runtime_frame.sender(®_map);
165 assert(caller_frame.is_compiled_frame(), "must be compiled");
166 return caller_frame.is_deoptimized_frame();
167 }
168
169 // Stress deoptimization
170 static void deopt_caller() {
171 if ( !caller_is_deopted()) {
172 JavaThread* thread = JavaThread::current();
173 RegisterMap reg_map(thread, false);
174 frame runtime_frame = thread->last_frame();
175 frame caller_frame = runtime_frame.sender(®_map);
176 Deoptimization::deoptimize_frame(thread, caller_frame.id());
177 assert(caller_is_deopted(), "Must be deoptimized");
178 }
179 }
180
// Adapter closure: lets the generic generate_blob() driver invoke the
// StubID-specific generator Runtime1::generate_code_for() through the
// StubAssemblerCodeGenClosure interface.
class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 private:
  Runtime1::StubID _id;  // which stub this closure generates code for
 public:
  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
  // Emit the stub's code via sasm; returns its oop maps (may be NULL).
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    return Runtime1::generate_code_for(_id, sasm);
  }
};
190
191 CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenCl
192 ResourceMark rm;
193 // create code buffer for code storage
194 CodeBuffer code(buffer_blob);
195
196 OopMapSet* oop_maps;
197 int frame_size;
198 bool must_gc_arguments;
199
200 Compilation::setup_code_buffer(&code, 0);
201
202 // create assembler for code generation
203 StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
204 // generate code for runtime stub
205 oop_maps = cl->generate_code(sasm);
206 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
207 "if stub has an oop map it must have a valid frame size");
208 assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");
209
210 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
211 sasm->align(BytesPerWord);
212 // make sure all code is in code buffer
213 sasm->flush();
214
215 frame_size = sasm->frame_size();
216 must_gc_arguments = sasm->must_gc_arguments();
217 // create blob - distinguish a few special cases
218 CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
219 &code,
220 CodeOffsets::frame_never_safe,
221 frame_size,
222 oop_maps,
223 must_gc_arguments);
224 assert(blob != NULL, "blob must exist");
225 return blob;
226 }
227
// Generate the runtime stub identified by 'id' via the shared generate_blob()
// driver and install the result in the _blobs table.
void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  bool expect_oop_map = true;
#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
  // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
#if defined(SPARC) || defined(PPC32)
  case handle_exception_nofpu_id: // Unused on sparc
#endif
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  // Closure dispatches to generate_code_for(id, sasm) inside generate_blob().
  StubIDStubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  _blobs[id] = blob;
}
254
255 void Runtime1::initialize(BufferBlob* blob) {
256 // platform-dependent initialization
257 initialize_pd();
258 // generate stubs
259 for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
260 // printing
261 #ifndef PRODUCT
262 if (PrintSimpleStubs) {
263 ResourceMark rm;
264 for (int id = 0; id < number_of_ids; id++) {
265 _blobs[id]->print();
266 if (_blobs[id]->oop_maps() != NULL) {
267 _blobs[id]->oop_maps()->print();
268 }
269 }
270 }
271 #endif
272 BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
273 bs->generate_c1_runtime_stubs(blob);
274 }
275
276 CodeBlob* Runtime1::blob_for(StubID id) {
277 assert(0 <= id && id < number_of_ids, "illegal stub id");
278 return _blobs[id];
279 }
280
281
282 const char* Runtime1::name_for(StubID id) {
283 assert(0 <= id && id < number_of_ids, "illegal stub id");
284 return _blob_names[id];
285 }
286
287 const char* Runtime1::name_for_address(address entry) {
288 for (int id = 0; id < number_of_ids; id++) {
289 if (entry == entry_for((StubID)id)) return name_for((StubID)id);
290 }
291
292 #define FUNCTION_CASE(a, f) \
293 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f
|