22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/codeBuffer.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Defs.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "classfile/systemDictionary.hpp"
34 #include "classfile/vmSymbols.hpp"
35 #include "code/codeBlob.hpp"
36 #include "code/compiledIC.hpp"
37 #include "code/pcDesc.hpp"
38 #include "code/scopeDesc.hpp"
39 #include "code/vtableStubs.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/bytecode.hpp"
44 #include "interpreter/interpreter.hpp"
45 #include "logging/log.hpp"
46 #include "memory/allocation.inline.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/atomic.hpp"
52 #include "runtime/biasedLocking.hpp"
53 #include "runtime/compilationPolicy.hpp"
54 #include "runtime/interfaceSupport.hpp"
55 #include "runtime/javaCalls.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/threadCritical.hpp"
58 #include "runtime/vframe.hpp"
59 #include "runtime/vframeArray.hpp"
60 #include "runtime/vm_version.hpp"
61 #include "utilities/copy.hpp"
62 #include "utilities/events.hpp"
63
64
65 // Implementation of StubAssembler
66
67 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
68 _name = name;
69 _must_gc_arguments = false;
70 _frame_size = no_frame_size;
88
89
90 void StubAssembler::set_num_rt_args(int args) {
91 if (_num_rt_args == 0) {
92 _num_rt_args = args;
93 }
94 assert(_num_rt_args == args, "can't change the number of args");
95 }
96
97 // Implementation of Runtime1
98
// One generated CodeBlob per StubID; populated by generate_blob_for()
// during Runtime1::initialize() and looked up via blob_for().
99 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
// Human-readable stub names, index-parallel with _blobs (expanded from
// the RUNTIME1_STUBS x-macro list).
100 const char *Runtime1::_blob_names[] = {
101 RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
102 };
103
104 #ifndef PRODUCT
105 // statistics
// Debug-build counters for slow-path entries and thrown exceptions;
// reported by Runtime1::print_statistics().
106 int Runtime1::_generic_arraycopy_cnt = 0;
107 int Runtime1::_primitive_arraycopy_cnt = 0;
108 int Runtime1::_oop_arraycopy_cnt = 0;
109 int Runtime1::_generic_arraycopystub_cnt = 0;
110 int Runtime1::_arraycopy_slowcase_cnt = 0;
111 int Runtime1::_arraycopy_checkcast_cnt = 0;
112 int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
113 int Runtime1::_new_type_array_slowcase_cnt = 0;
114 int Runtime1::_new_object_array_slowcase_cnt = 0;
115 int Runtime1::_new_instance_slowcase_cnt = 0;
116 int Runtime1::_new_multi_array_slowcase_cnt = 0;
117 int Runtime1::_monitorenter_slowcase_cnt = 0;
118 int Runtime1::_monitorexit_slowcase_cnt = 0;
119 int Runtime1::_patch_code_slowcase_cnt = 0;
120 int Runtime1::_throw_range_check_exception_count = 0;
121 int Runtime1::_throw_index_exception_count = 0;
122 int Runtime1::_throw_div0_exception_count = 0;
123 int Runtime1::_throw_null_pointer_exception_count = 0;
124 int Runtime1::_throw_class_cast_exception_count = 0;
125 int Runtime1::_throw_incompatible_class_change_error_count = 0;
126 int Runtime1::_throw_array_store_exception_count = 0;
127 int Runtime1::_throw_count = 0;
128
160 JavaThread* thread = JavaThread::current();
161 RegisterMap reg_map(thread, false);
162 frame runtime_frame = thread->last_frame();
163 frame caller_frame = runtime_frame.sender(&reg_map);
164 assert(caller_frame.is_compiled_frame(), "must be compiled");
165 return caller_frame.is_deoptimized_frame();
166 }
167
168 // Stress deoptimization
169 static void deopt_caller() {
170 if ( !caller_is_deopted()) {
171 JavaThread* thread = JavaThread::current();
172 RegisterMap reg_map(thread, false);
173 frame runtime_frame = thread->last_frame();
174 frame caller_frame = runtime_frame.sender(®_map);
175 Deoptimization::deoptimize_frame(thread, caller_frame.id());
176 assert(caller_is_deopted(), "Must be deoptimized");
177 }
178 }
179
180
// Generate the runtime stub identified by 'id' into 'buffer_blob',
// wrap it in a RuntimeStub, and install it in _blobs[id].
181 void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
182 assert(0 <= id && id < number_of_ids, "illegal stub id");
183 ResourceMark rm;
184 // create code buffer for code storage
185 CodeBuffer code(buffer_blob);
186
187 OopMapSet* oop_maps;
188 int frame_size;
189 bool must_gc_arguments;
190
191 Compilation::setup_code_buffer(&code, 0);
192
193 // create assembler for code generation
194 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
195 // generate code for runtime stub
196 oop_maps = generate_code_for(id, sasm);
197 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
198 "if stub has an oop map it must have a valid frame size");
199
200 #ifdef ASSERT
201 // Make sure that stubs that need oopmaps have them
202 switch (id) {
203 // These stubs don't need to have an oopmap
204 case dtrace_object_alloc_id:
205 case g1_pre_barrier_slow_id:
206 case g1_post_barrier_slow_id:
207 case slow_subtype_check_id:
208 case fpu2long_stub_id:
209 case unwind_exception_id:
210 case counter_overflow_id:
211 #if defined(SPARC) || defined(PPC32)
212 case handle_exception_nofpu_id: // Unused on sparc
213 #endif
214 break;
215
216 // All other stubs should have oopmaps
217 default:
218 assert(oop_maps != NULL, "must have an oopmap");
219 }
220 #endif
221
222 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
223 sasm->align(BytesPerWord);
224 // make sure all code is in code buffer
225 sasm->flush();
226
// Frame size and GC-argument flag were recorded on the assembler while
// the stub's code was generated; copy them into the blob.
227 frame_size = sasm->frame_size();
228 must_gc_arguments = sasm->must_gc_arguments();
229 // create blob - distinguish a few special cases
230 CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
231 &code,
232 CodeOffsets::frame_never_safe,
233 frame_size,
234 oop_maps,
235 must_gc_arguments);
236 // install blob
237 assert(blob != NULL, "blob must exist");
238 _blobs[id] = blob;
239 }
240
241
// One-time setup: run platform-dependent initialization, then generate
// and install every Runtime1 stub into the supplied buffer blob.
242 void Runtime1::initialize(BufferBlob* blob) {
243 // platform-dependent initialization
244 initialize_pd();
245 // generate stubs
246 for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
247 // printing
248 #ifndef PRODUCT
// Debug aid: with -XX:+PrintSimpleStubs, dump each generated blob and
// its oop maps (when present).
249 if (PrintSimpleStubs) {
250 ResourceMark rm;
251 for (int id = 0; id < number_of_ids; id++) {
252 _blobs[id]->print();
253 if (_blobs[id]->oop_maps() != NULL) {
254 _blobs[id]->oop_maps()->print();
255 }
256 }
257 }
258 #endif
259 }
260
261
262 CodeBlob* Runtime1::blob_for(StubID id) {
263 assert(0 <= id && id < number_of_ids, "illegal stub id");
264 return _blobs[id];
265 }
266
267
268 const char* Runtime1::name_for(StubID id) {
269 assert(0 <= id && id < number_of_ids, "illegal stub id");
270 return _blob_names[id];
271 }
272
273 const char* Runtime1::name_for_address(address entry) {
274 for (int id = 0; id < number_of_ids; id++) {
275 if (entry == entry_for((StubID)id)) return name_for((StubID)id);
276 }
277
278 #define FUNCTION_CASE(a, f) \
279 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f
280
281 FUNCTION_CASE(entry, os::javaTimeMillis);
1208 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1209 relocInfo::none, rtype);
1210 }
1211 #endif
1212 }
1213
1214 } else {
1215 ICache::invalidate_range(copy_buff, *byte_count);
1216 NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1217 }
1218 }
1219 }
1220 }
1221
1222 // If we are patching in a non-perm oop, make sure the nmethod
1223 // is on the right list.
1224 if (ScavengeRootsInCode) {
1225 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
1226 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1227 guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1228 if (!nm->on_scavenge_root_list() &&
1229 ((mirror.not_null() && mirror()->is_scavengable()) ||
1230 (appendix.not_null() && appendix->is_scavengable()))) {
1231 CodeCache::add_scavenge_root_nmethod(nm);
1232 }
1233
1234 // Since we've patched some oops in the nmethod,
1235 // (re)register it with the heap.
1236 Universe::heap()->register_nmethod(nm);
1237 }
1238 JRT_END
1239
1240 #else // DEOPTIMIZE_WHEN_PATCHING
1241
1242 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
1243 RegisterMap reg_map(thread, false);
1244
1245 NOT_PRODUCT(_patch_code_slowcase_cnt++;)
1246 if (TracePatching) {
1247 tty->print_cr("Deoptimizing because patch is needed");
1248 }
1249
1250 frame runtime_frame = thread->last_frame();
1251 frame caller_frame = runtime_frame.sender(&reg_map);
1252
1359 JRT_END
1360
1361
// Tracing hook invoked on basic-block entry: prints the block id to tty.
1362 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1363 // for now we just print out the block id
1364 tty->print("%d ", block_id);
1365 JRT_END
1366
1367
1368 // Array copy return codes.
// Shared by Runtime1::arraycopy and obj_arraycopy_work below; ac_failed
// tells the caller to fall back to the checked/slow copy path.
1369 enum {
1370 ac_failed = -1, // arraycopy failed
1371 ac_ok = 0 // arraycopy succeeded
1372 };
1373
1374
1375 // Below length is the # elements copied.
// Copies 'length' oops from src_addr to dst_addr with the required GC
// write barriers: pre-barrier before the copy, post-barrier after.
// Returns ac_ok on success, ac_failed when the source element type is
// not a subtype of the destination's (caller falls back to checked copy).
1376 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1377 oopDesc* dst, T* dst_addr,
1378 int length) {
1379
1380 // For performance reasons, we assume we are using a card marking write
1381 // barrier. The assert will fail if this is not the case.
1382 // Note that we use the non-virtual inlineable variant of write_ref_array.
1383 BarrierSet* bs = Universe::heap()->barrier_set();
1384 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
1385 assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
1386 if (src == dst) {
1387 // same object, no check
1388 bs->write_ref_array_pre(dst_addr, length);
1389 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1390 bs->write_ref_array((HeapWord*)dst_addr, length);
1391 return ac_ok;
1392 } else {
// Different arrays: only safe without per-element checks when every
// source element is statically a subtype of the destination element type.
1393 Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
1394 Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
1395 if (stype == bound || stype->is_subtype_of(bound)) {
1396 // Elements are guaranteed to be subtypes, so no check necessary
1397 bs->write_ref_array_pre(dst_addr, length);
1398 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1399 bs->write_ref_array((HeapWord*)dst_addr, length);
1400 return ac_ok;
1401 }
1402 }
1403 return ac_failed;
1404 }
1405
1406 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
1407 // and we did not copy anything
1408 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
1409 #ifndef PRODUCT
1410 _generic_arraycopy_cnt++; // Slow-path oop array copy
1411 #endif
1412
1413 if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
1414 if (!dst->is_array() || !src->is_array()) return ac_failed;
1415 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1416 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1417
1418 if (length == 0) return ac_ok;
1419 if (src->is_typeArray()) {
1437 oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
1438 oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
1439 return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
1440 }
1441 }
1442 return ac_failed;
1443 JRT_END
1444
1445
// Raw byte copy of primitive array data; 'length' is in bytes (see the
// conjoint_jbytes call).  Bounds checking is the caller's responsibility.
1446 JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
1447 #ifndef PRODUCT
1448 _primitive_arraycopy_cnt++;
1449 #endif
1450
1451 if (length == 0) return;
1452 // Not guaranteed to be word atomic, but that doesn't matter
1453 // for anything but an oop array, which is covered by oop_arraycopy.
1454 Copy::conjoint_jbytes(src, dst, length);
1455 JRT_END
1456
// Copies 'num' object references with GC write barriers: pre-barrier
// before the copy, post-barrier after.  The element width depends on
// UseCompressedOops (narrowOop vs. oop).
1457 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
1458 #ifndef PRODUCT
1459 _oop_arraycopy_cnt++;
1460 #endif
1461
1462 if (num == 0) return;
1463 BarrierSet* bs = Universe::heap()->barrier_set();
1464 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
1465 assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
1466 if (UseCompressedOops) {
1467 bs->write_ref_array_pre((narrowOop*)dst, num);
1468 Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
1469 } else {
1470 bs->write_ref_array_pre((oop*)dst, num);
1471 Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
1472 }
1473 bs->write_ref_array(dst, num);
1474 JRT_END
1475
1476
1477 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1478 // had to return int instead of bool, otherwise there may be a mismatch
1479 // between the C calling convention and the Java one.
1480 // e.g., on x86, GCC may clear only %al when returning a bool false, but
1481 // JVM takes the whole %eax as the return value, which may misinterpret
1482 // the return value as a boolean true.
1483
1484 assert(mirror != NULL, "should null-check on mirror before calling");
1485 Klass* k = java_lang_Class::as_Klass(mirror);
1486 return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
1487 JRT_END
1488
1489 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
1490 ResourceMark rm;
1491
1492 assert(!TieredCompilation, "incompatible with tiered compilation");
1493
1494 RegisterMap reg_map(thread, false);
1495 frame runtime_frame = thread->last_frame();
1529
1530 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1531
1532 JRT_END
1533
1534 #ifndef PRODUCT
1535 void Runtime1::print_statistics() {
1536 tty->print_cr("C1 Runtime statistics:");
1537 tty->print_cr(" _resolve_invoke_virtual_cnt: %d", SharedRuntime::_resolve_virtual_ctr);
1538 tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
1539 tty->print_cr(" _resolve_invoke_static_cnt: %d", SharedRuntime::_resolve_static_ctr);
1540 tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr);
1541 tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr);
1542 tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt);
1543 tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt);
1544 tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_stub_cnt);
1545 tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_stub_cnt);
1546 tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_stub_cnt);
1547 tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_stub_cnt);
1548 tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt);
1549 tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt);
1550 tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_stub_cnt);
1551 tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt);
1552 tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);
1553 tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);
1554
1555 tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt);
1556 tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);
1557 tty->print_cr(" _new_instance_slowcase_cnt: %d", _new_instance_slowcase_cnt);
1558 tty->print_cr(" _new_multi_array_slowcase_cnt: %d", _new_multi_array_slowcase_cnt);
1559 tty->print_cr(" _monitorenter_slowcase_cnt: %d", _monitorenter_slowcase_cnt);
1560 tty->print_cr(" _monitorexit_slowcase_cnt: %d", _monitorexit_slowcase_cnt);
1561 tty->print_cr(" _patch_code_slowcase_cnt: %d", _patch_code_slowcase_cnt);
1562
1563 tty->print_cr(" _throw_range_check_exception_count: %d:", _throw_range_check_exception_count);
1564 tty->print_cr(" _throw_index_exception_count: %d:", _throw_index_exception_count);
1565 tty->print_cr(" _throw_div0_exception_count: %d:", _throw_div0_exception_count);
1566 tty->print_cr(" _throw_null_pointer_exception_count: %d:", _throw_null_pointer_exception_count);
1567 tty->print_cr(" _throw_class_cast_exception_count: %d:", _throw_class_cast_exception_count);
1568 tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count);
1569 tty->print_cr(" _throw_array_store_exception_count: %d:", _throw_array_store_exception_count);
|
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/codeBuffer.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Defs.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "classfile/systemDictionary.hpp"
34 #include "classfile/vmSymbols.hpp"
35 #include "code/codeBlob.hpp"
36 #include "code/compiledIC.hpp"
37 #include "code/pcDesc.hpp"
38 #include "code/scopeDesc.hpp"
39 #include "code/vtableStubs.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/c1BarrierSetCodeGen.hpp"
43 #include "gc/shared/collectedHeap.hpp"
44 #include "interpreter/bytecode.hpp"
45 #include "interpreter/interpreter.hpp"
46 #include "logging/log.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "oops/objArrayKlass.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "runtime/access.inline.hpp"
53 #include "runtime/atomic.hpp"
54 #include "runtime/biasedLocking.hpp"
55 #include "runtime/compilationPolicy.hpp"
56 #include "runtime/interfaceSupport.hpp"
57 #include "runtime/javaCalls.hpp"
58 #include "runtime/sharedRuntime.hpp"
59 #include "runtime/threadCritical.hpp"
60 #include "runtime/vframe.hpp"
61 #include "runtime/vframeArray.hpp"
62 #include "runtime/vm_version.hpp"
63 #include "utilities/copy.hpp"
64 #include "utilities/events.hpp"
65
66
67 // Implementation of StubAssembler
68
69 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
70 _name = name;
71 _must_gc_arguments = false;
72 _frame_size = no_frame_size;
90
91
92 void StubAssembler::set_num_rt_args(int args) {
93 if (_num_rt_args == 0) {
94 _num_rt_args = args;
95 }
96 assert(_num_rt_args == args, "can't change the number of args");
97 }
98
99 // Implementation of Runtime1
100
// One generated CodeBlob per StubID; populated by generate_blob_for()
// during Runtime1::initialize() and looked up via blob_for().
101 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
// Human-readable stub names, index-parallel with _blobs (expanded from
// the RUNTIME1_STUBS x-macro list).
102 const char *Runtime1::_blob_names[] = {
103 RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
104 };
105
106 #ifndef PRODUCT
107 // statistics
// Debug-build counters for slow-path entries and thrown exceptions;
// reported by Runtime1::print_statistics().
108 int Runtime1::_generic_arraycopy_cnt = 0;
109 int Runtime1::_primitive_arraycopy_cnt = 0;
110 int Runtime1::_generic_arraycopystub_cnt = 0;
111 int Runtime1::_arraycopy_slowcase_cnt = 0;
112 int Runtime1::_arraycopy_checkcast_cnt = 0;
113 int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
114 int Runtime1::_new_type_array_slowcase_cnt = 0;
115 int Runtime1::_new_object_array_slowcase_cnt = 0;
116 int Runtime1::_new_instance_slowcase_cnt = 0;
117 int Runtime1::_new_multi_array_slowcase_cnt = 0;
118 int Runtime1::_monitorenter_slowcase_cnt = 0;
119 int Runtime1::_monitorexit_slowcase_cnt = 0;
120 int Runtime1::_patch_code_slowcase_cnt = 0;
121 int Runtime1::_throw_range_check_exception_count = 0;
122 int Runtime1::_throw_index_exception_count = 0;
123 int Runtime1::_throw_div0_exception_count = 0;
124 int Runtime1::_throw_null_pointer_exception_count = 0;
125 int Runtime1::_throw_class_cast_exception_count = 0;
126 int Runtime1::_throw_incompatible_class_change_error_count = 0;
127 int Runtime1::_throw_array_store_exception_count = 0;
128 int Runtime1::_throw_count = 0;
129
161 JavaThread* thread = JavaThread::current();
162 RegisterMap reg_map(thread, false);
163 frame runtime_frame = thread->last_frame();
164 frame caller_frame = runtime_frame.sender(&reg_map);
165 assert(caller_frame.is_compiled_frame(), "must be compiled");
166 return caller_frame.is_deoptimized_frame();
167 }
168
169 // Stress deoptimization
170 static void deopt_caller() {
171 if ( !caller_is_deopted()) {
172 JavaThread* thread = JavaThread::current();
173 RegisterMap reg_map(thread, false);
174 frame runtime_frame = thread->last_frame();
175 frame caller_frame = runtime_frame.sender(®_map);
176 Deoptimization::deoptimize_frame(thread, caller_frame.id());
177 assert(caller_is_deopted(), "Must be deoptimized");
178 }
179 }
180
// Adapter closure: routes generate_blob()'s code-generation callback to
// Runtime1::generate_code_for() for one specific StubID.
181 class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
182 private:
// The stub id whose code this closure generates.
183 Runtime1::StubID _id;
184 public:
185 StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
186 virtual OopMapSet* generate_code(StubAssembler* sasm) {
187 return Runtime1::generate_code_for(_id, sasm);
188 }
189 };
190
// Generate one runtime stub into 'buffer_blob' using the supplied code
// generation closure, and wrap the result in a RuntimeStub.  When
// 'expect_oop_map' is set, the generated code must produce oop maps.
// Returns the created blob (asserted non-NULL).
191 CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
192 ResourceMark rm;
193 // create code buffer for code storage
194 CodeBuffer code(buffer_blob);
195
196 OopMapSet* oop_maps;
197 int frame_size;
198 bool must_gc_arguments;
199
200 Compilation::setup_code_buffer(&code, 0);
201
202 // create assembler for code generation
203 StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
204 // generate code for runtime stub
205 oop_maps = cl->generate_code(sasm);
206 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
207 "if stub has an oop map it must have a valid frame size");
208 assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");
209
210 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
211 sasm->align(BytesPerWord);
212 // make sure all code is in code buffer
213 sasm->flush();
214
// Frame size and GC-argument flag were recorded on the assembler while
// the stub's code was generated; copy them into the blob.
215 frame_size = sasm->frame_size();
216 must_gc_arguments = sasm->must_gc_arguments();
217 // create blob - distinguish a few special cases
218 CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
219 &code,
220 CodeOffsets::frame_never_safe,
221 frame_size,
222 oop_maps,
223 must_gc_arguments);
224 assert(blob != NULL, "blob must exist");
225 return blob;
226 }
227
// Generate the Runtime1 stub for 'id' and install it in _blobs[id].
// Delegates the actual emission to generate_blob() via a StubID closure.
228 void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
229 assert(0 <= id && id < number_of_ids, "illegal stub id");
// In debug builds, clear the oop-map expectation for the stubs that are
// known not to need one; all others must produce an oop map.
230 bool expect_oop_map = true;
231 #ifdef ASSERT
232 // Make sure that stubs that need oopmaps have them
233 switch (id) {
234 // These stubs don't need to have an oopmap
235 case dtrace_object_alloc_id:
236 case slow_subtype_check_id:
237 case fpu2long_stub_id:
238 case unwind_exception_id:
239 case counter_overflow_id:
240 #if defined(SPARC) || defined(PPC32)
241 case handle_exception_nofpu_id: // Unused on sparc
242 #endif
243 expect_oop_map = false;
244 break;
245 }
246 #endif
247 StubIDStubAssemblerCodeGenClosure cl(id);
248 CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
249 // install blob
250 _blobs[id] = blob;
251 }
252
// One-time setup: run platform-dependent initialization, generate and
// install every Runtime1 stub, then let the active GC barrier set
// generate its own C1 runtime stubs into the same buffer blob.
253 void Runtime1::initialize(BufferBlob* blob) {
254 // platform-dependent initialization
255 initialize_pd();
256 // generate stubs
257 for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
258 // printing
259 #ifndef PRODUCT
// Debug aid: with -XX:+PrintSimpleStubs, dump each generated blob and
// its oop maps (when present).
260 if (PrintSimpleStubs) {
261 ResourceMark rm;
262 for (int id = 0; id < number_of_ids; id++) {
263 _blobs[id]->print();
264 if (_blobs[id]->oop_maps() != NULL) {
265 _blobs[id]->oop_maps()->print();
266 }
267 }
268 }
269 #endif
// GC-specific stubs (e.g. barrier slow paths) are generated by the
// barrier set's C1 code generator rather than hard-coded here.
270 C1BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c1_code_gen();
271 code_gen->generate_c1_runtime_stubs(blob);
272 }
273
274 CodeBlob* Runtime1::blob_for(StubID id) {
275 assert(0 <= id && id < number_of_ids, "illegal stub id");
276 return _blobs[id];
277 }
278
279
280 const char* Runtime1::name_for(StubID id) {
281 assert(0 <= id && id < number_of_ids, "illegal stub id");
282 return _blob_names[id];
283 }
284
285 const char* Runtime1::name_for_address(address entry) {
286 for (int id = 0; id < number_of_ids; id++) {
287 if (entry == entry_for((StubID)id)) return name_for((StubID)id);
288 }
289
290 #define FUNCTION_CASE(a, f) \
291 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f
292
293 FUNCTION_CASE(entry, os::javaTimeMillis);
1220 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1221 relocInfo::none, rtype);
1222 }
1223 #endif
1224 }
1225
1226 } else {
1227 ICache::invalidate_range(copy_buff, *byte_count);
1228 NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1229 }
1230 }
1231 }
1232 }
1233
1234 // If we are patching in a non-perm oop, make sure the nmethod
1235 // is on the right list.
1236 if (ScavengeRootsInCode) {
1237 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
1238 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1239 guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1240
1241 // Since we've patched some oops in the nmethod,
1242 // (re)register it with the heap.
1243 Universe::heap()->register_nmethod(nm);
1244 }
1245 JRT_END
1246
1247 #else // DEOPTIMIZE_WHEN_PATCHING
1248
1249 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
1250 RegisterMap reg_map(thread, false);
1251
1252 NOT_PRODUCT(_patch_code_slowcase_cnt++;)
1253 if (TracePatching) {
1254 tty->print_cr("Deoptimizing because patch is needed");
1255 }
1256
1257 frame runtime_frame = thread->last_frame();
1258 frame caller_frame = runtime_frame.sender(&reg_map);
1259
1366 JRT_END
1367
1368
// Tracing hook invoked on basic-block entry: prints the block id to tty.
1369 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1370 // for now we just print out the block id
1371 tty->print("%d ", block_id);
1372 JRT_END
1373
1374
1375 // Array copy return codes.
// Shared by Runtime1::arraycopy and obj_arraycopy_work below; ac_failed
// tells the caller to fall back to the checked/slow copy path.
1376 enum {
1377 ac_failed = -1, // arraycopy failed
1378 ac_ok = 0 // arraycopy succeeded
1379 };
1380
1381
1382 // Below length is the # elements copied.
// Copies 'length' oops through the HeapAccess API, which applies the
// GC barriers appropriate for the active collector.  Returns ac_ok on
// success, ac_failed when the source element type is not a subtype of
// the destination's (caller falls back to a checked copy).
// NOTE(review): decorator semantics assumed from names — DEST_CONJOINT
// vs. DEST_DISJOINT select overlapping vs. non-overlapping copy;
// confirm against the Access API documentation.
1383 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1384 oopDesc* dst, T* dst_addr,
1385 int length) {
1386 if (src == dst) {
// Same array: regions may overlap, so use the conjoint (memmove-like) copy.
1387 HeapAccess<DEST_CONJOINT | DEST_COVARIANT | ACCESS_ATOMIC>::oop_copy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
1388 return ac_ok;
1389 } else {
// Different arrays: only safe without per-element checks when every
// source element is statically a subtype of the destination element type.
1390 Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
1391 Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
1392 if (stype == bound || stype->is_subtype_of(bound)) {
1393 // Elements are guaranteed to be subtypes, so no check necessary
1394 HeapAccess<DEST_DISJOINT | DEST_COVARIANT | ACCESS_ATOMIC>::oop_copy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
1395 return ac_ok;
1396 }
1397 }
1398 return ac_failed;
1399 }
1400
1401 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
1402 // and we did not copy anything
1403 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
1404 #ifndef PRODUCT
1405 _generic_arraycopy_cnt++; // Slow-path oop array copy
1406 #endif
1407
1408 if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
1409 if (!dst->is_array() || !src->is_array()) return ac_failed;
1410 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1411 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1412
1413 if (length == 0) return ac_ok;
1414 if (src->is_typeArray()) {
1432 oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
1433 oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
1434 return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
1435 }
1436 }
1437 return ac_failed;
1438 JRT_END
1439
1440
// Raw byte copy of primitive array data; 'length' is in bytes (see the
// conjoint_jbytes call).  Bounds checking is the caller's responsibility.
1441 JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
1442 #ifndef PRODUCT
1443 _primitive_arraycopy_cnt++;
1444 #endif
1445
1446 if (length == 0) return;
1447 // Not guaranteed to be word atomic, but that doesn't matter
1448 // for anything but an oop array, which is covered by oop_arraycopy.
1449 Copy::conjoint_jbytes(src, dst, length);
1450 JRT_END
1451
1452
1453 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1454 // had to return int instead of bool, otherwise there may be a mismatch
1455 // between the C calling convention and the Java one.
1456 // e.g., on x86, GCC may clear only %al when returning a bool false, but
1457 // JVM takes the whole %eax as the return value, which may misinterpret
1458 // the return value as a boolean true.
1459
1460 assert(mirror != NULL, "should null-check on mirror before calling");
1461 Klass* k = java_lang_Class::as_Klass(mirror);
1462 return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
1463 JRT_END
1464
1465 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
1466 ResourceMark rm;
1467
1468 assert(!TieredCompilation, "incompatible with tiered compilation");
1469
1470 RegisterMap reg_map(thread, false);
1471 frame runtime_frame = thread->last_frame();
1505
1506 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1507
1508 JRT_END
1509
1510 #ifndef PRODUCT
1511 void Runtime1::print_statistics() {
1512 tty->print_cr("C1 Runtime statistics:");
1513 tty->print_cr(" _resolve_invoke_virtual_cnt: %d", SharedRuntime::_resolve_virtual_ctr);
1514 tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
1515 tty->print_cr(" _resolve_invoke_static_cnt: %d", SharedRuntime::_resolve_static_ctr);
1516 tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr);
1517 tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr);
1518 tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt);
1519 tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt);
1520 tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_stub_cnt);
1521 tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_stub_cnt);
1522 tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_stub_cnt);
1523 tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_stub_cnt);
1524 tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt);
1525 tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_stub_cnt);
1526 tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt);
1527 tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);
1528 tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);
1529
1530 tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt);
1531 tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);
1532 tty->print_cr(" _new_instance_slowcase_cnt: %d", _new_instance_slowcase_cnt);
1533 tty->print_cr(" _new_multi_array_slowcase_cnt: %d", _new_multi_array_slowcase_cnt);
1534 tty->print_cr(" _monitorenter_slowcase_cnt: %d", _monitorenter_slowcase_cnt);
1535 tty->print_cr(" _monitorexit_slowcase_cnt: %d", _monitorexit_slowcase_cnt);
1536 tty->print_cr(" _patch_code_slowcase_cnt: %d", _patch_code_slowcase_cnt);
1537
1538 tty->print_cr(" _throw_range_check_exception_count: %d:", _throw_range_check_exception_count);
1539 tty->print_cr(" _throw_index_exception_count: %d:", _throw_index_exception_count);
1540 tty->print_cr(" _throw_div0_exception_count: %d:", _throw_div0_exception_count);
1541 tty->print_cr(" _throw_null_pointer_exception_count: %d:", _throw_null_pointer_exception_count);
1542 tty->print_cr(" _throw_class_cast_exception_count: %d:", _throw_class_cast_exception_count);
1543 tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count);
1544 tty->print_cr(" _throw_array_store_exception_count: %d:", _throw_array_store_exception_count);
|