1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)c1_Runtime1.cpp 1.245 08/11/07 15:47:09 JVM"
3 #endif
4 /*
5 * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
154 ResourceMark rm;
155 // create code buffer for code storage
156 CodeBuffer code(get_buffer_blob()->instructions_begin(),
157 get_buffer_blob()->instructions_size());
158
159 setup_code_buffer(&code, 0);
160
161 // create assembler for code generation
162 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
163 // generate code for runtime stub
164 OopMapSet* oop_maps;
165 oop_maps = generate_code_for(id, sasm);
166 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
167 "if stub has an oop map it must have a valid frame size");
168
169 #ifdef ASSERT
170 // Make sure that stubs that need oopmaps have them
171 switch (id) {
172 // These stubs don't need to have an oopmap
173 case dtrace_object_alloc_id:
174 case slow_subtype_check_id:
175 case fpu2long_stub_id:
176 case unwind_exception_id:
177 #ifndef TIERED
178 case counter_overflow_id: // Not generated outside the tiered world
179 #endif
180 #ifdef SPARC
181 case handle_exception_nofpu_id: // Unused on sparc
182 #endif
183 break;
184
185 // All other stubs should have oopmaps
186 default:
187 assert(oop_maps != NULL, "must have an oopmap");
188 }
189 #endif
190
191 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
192 sasm->align(BytesPerWord);
193 // make sure all code is in code buffer
322 // Note: no handle for klass needed since they are not used
323 // anymore after new_objArray() and no GC can happen before.
324 // (This may have to change if this code changes!)
325 assert(oop(array_klass)->is_klass(), "not a class");
326 klassOop elem_klass = objArrayKlass::cast(array_klass)->element_klass();
327 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
328 thread->set_vm_result(obj);
329 // This is pretty rare but this runtime patch is stressful to deoptimization
330 // if we deoptimize here so force a deopt to stress the path.
331 if (DeoptimizeALot) {
332 deopt_caller();
333 }
334 JRT_END
335
336
// Slow-path runtime entry: allocate a multi-dimensional array.
// 'klass' is the array klass, 'rank' the number of dimensions, and 'dims'
// the per-dimension sizes as pushed by the compiled caller.  The new array
// is handed back to compiled code through the thread's vm_result.
337 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klass, int rank, jint* dims))
338 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)
339
340 assert(oop(klass)->is_klass(), "not a class");
341 assert(rank >= 1, "rank must be nonzero");
342 #ifdef _LP64
343 // In 64 bit mode, the sizes are stored in the top 32 bits
344 // of each 64 bit stack entry.
345 // dims is actually an intptr_t * because the arguments
346 // are pushed onto a 64 bit stack.
347 // We must create an array of jints to pass to multi_allocate.
348 // We reuse the current stack because it will be popped
349 // after this bytecode is completed.
350 if ( rank > 1 ) {
351 int index;
// Compact the jint sizes from every other 64-bit slot into a dense
// jint array, in place; slot 0 is already in the right position.
352 for ( index = 1; index < rank; index++ ) { // First size is ok
353 dims[index] = dims[index*2];
354 }
355 }
356 #endif
// CHECK: returns immediately to the caller if the allocation raised
// an exception (e.g. OutOfMemoryError / NegativeArraySizeException).
357 oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
358 thread->set_vm_result(obj);
359 JRT_END
360
361
// Debugging entry: called when compiled code lands in a Runtime1 stub
// whose code generation is not implemented.  Only logs the stub id;
// it does not abort the VM.
362 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
363 tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
364 JRT_END
365
366
// Runtime entry used by compiled code to raise java.lang.ArrayStoreException
// when an array store check fails.  THROW sets the pending exception and
// returns to the caller.
367 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread))
368 THROW(vmSymbolHandles::java_lang_ArrayStoreException());
369 JRT_END
370
371
372 JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
373 if (JvmtiExport::can_post_exceptions()) {
374 vframeStream vfst(thread, true);
375 address bcp = vfst.method()->bcp_from(vfst.bci());
376 JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
1067 {
1068 // Enter VM mode
1069
1070 ResetNoHandleMark rnhm;
1071 patch_code(thread, access_field_patching_id);
1072 }
1073 // Back in JAVA, use no oops DON'T safepoint
1074
1075 // Return true if calling code is deoptimized
1076
1077 return caller_is_deopted();
1078 JRT_END
1079
1080
// Tracing hook: prints the id of the basic block being entered.
// JRT_LEAF: must not block, GC, or throw.
1081 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1082 // for now we just print out the block id
1083 tty->print("%d ", block_id);
1084 JRT_END
1085
1086
1087 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
1088 // and we did not copy anything
1089 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
1090 #ifndef PRODUCT
1091 _generic_arraycopy_cnt++; // Slow-path oop array copy
1092 #endif
1093
1094 enum {
1095 ac_failed = -1, // arraycopy failed
1096 ac_ok = 0 // arraycopy succeeded
1097 };
1098
1099 if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
1100 if (!dst->is_array() || !src->is_array()) return ac_failed;
1101 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1102 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1103
1104 if (length == 0) return ac_ok;
1105 if (src->is_typeArray()) {
1106 const klassOop klass_oop = src->klass();
1107 if (klass_oop != dst->klass()) return ac_failed;
1108 typeArrayKlass* klass = typeArrayKlass::cast(klass_oop);
1109 const int l2es = klass->log2_element_size();
1110 const int ihs = klass->array_header_in_bytes() / wordSize;
1111 char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
1112 char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
1113 // Potential problem: memmove is not guaranteed to be word atomic
1114 // Revisit in Merlin
1115 memmove(dst_addr, src_addr, length << l2es);
1116 return ac_ok;
1117 } else if (src->is_objArray() && dst->is_objArray()) {
1118 oop* src_addr = objArrayOop(src)->obj_at_addr(src_pos);
1119 oop* dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos);
1120 // For performance reasons, we assume we are using a card marking write
1121 // barrier. The assert will fail if this is not the case.
1122 // Note that we use the non-virtual inlineable variant of write_ref_array.
1123 BarrierSet* bs = Universe::heap()->barrier_set();
1124 assert(bs->has_write_ref_array_opt(),
1125 "Barrier set must have ref array opt");
1126 if (src == dst) {
1127 // same object, no check
1128 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1129 bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
1130 (HeapWord*)(dst_addr + length)));
1131 return ac_ok;
1132 } else {
1133 klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
1134 klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
1135 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
1136 // Elements are guaranteed to be subtypes, so no check necessary
1137 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1138 bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
1139 (HeapWord*)(dst_addr + length)));
1140 return ac_ok;
1141 }
1142 }
1143 }
1144 return ac_failed;
1145 JRT_END
1146
1147
1148 JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
1149 #ifndef PRODUCT
1150 _primitive_arraycopy_cnt++;
1151 #endif
1152
1153 if (length == 0) return;
1154 // Not guaranteed to be word atomic, but that doesn't matter
1155 // for anything but an oop array, which is covered by oop_arraycopy.
1156 Copy::conjoint_bytes(src, dst, length);
1157 JRT_END
1158
1159 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
1160 #ifndef PRODUCT
1161 _oop_arraycopy_cnt++;
|
1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)c1_Runtime1.cpp 1.245 08/11/07 15:47:09 JVM"
3 #endif
4 /*
5 * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
154 ResourceMark rm;
155 // create code buffer for code storage
156 CodeBuffer code(get_buffer_blob()->instructions_begin(),
157 get_buffer_blob()->instructions_size());
158
159 setup_code_buffer(&code, 0);
160
161 // create assembler for code generation
162 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
163 // generate code for runtime stub
164 OopMapSet* oop_maps;
165 oop_maps = generate_code_for(id, sasm);
166 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
167 "if stub has an oop map it must have a valid frame size");
168
169 #ifdef ASSERT
170 // Make sure that stubs that need oopmaps have them
171 switch (id) {
172 // These stubs don't need to have an oopmap
173 case dtrace_object_alloc_id:
174 case g1_pre_barrier_slow_id:
175 case g1_post_barrier_slow_id:
176 case slow_subtype_check_id:
177 case fpu2long_stub_id:
178 case unwind_exception_id:
179 #ifndef TIERED
180 case counter_overflow_id: // Not generated outside the tiered world
181 #endif
182 #ifdef SPARC
183 case handle_exception_nofpu_id: // Unused on sparc
184 #endif
185 break;
186
187 // All other stubs should have oopmaps
188 default:
189 assert(oop_maps != NULL, "must have an oopmap");
190 }
191 #endif
192
193 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
194 sasm->align(BytesPerWord);
195 // make sure all code is in code buffer
324 // Note: no handle for klass needed since they are not used
325 // anymore after new_objArray() and no GC can happen before.
326 // (This may have to change if this code changes!)
327 assert(oop(array_klass)->is_klass(), "not a class");
328 klassOop elem_klass = objArrayKlass::cast(array_klass)->element_klass();
329 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
330 thread->set_vm_result(obj);
331 // This is pretty rare but this runtime patch is stressful to deoptimization
332 // if we deoptimize here so force a deopt to stress the path.
333 if (DeoptimizeALot) {
334 deopt_caller();
335 }
336 JRT_END
337
338
// Slow-path runtime entry: allocate a multi-dimensional array.
// 'klass' is the array klass, 'rank' the number of dimensions, and 'dims'
// the per-dimension sizes.  NOTE(review): this revision passes 'dims'
// straight through -- presumably the caller now delivers a dense jint
// array (the earlier _LP64 in-place repacking was removed); verify at
// the call sites.  Result is returned via the thread's vm_result.
339 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klass, int rank, jint* dims))
340 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)
341
342 assert(oop(klass)->is_klass(), "not a class");
343 assert(rank >= 1, "rank must be nonzero");
// CHECK: returns immediately to the caller if the allocation raised
// an exception.
344 oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
345 thread->set_vm_result(obj);
346 JRT_END
347
348
// Debugging entry: called when compiled code lands in a Runtime1 stub
// whose code generation is not implemented.  Only logs the stub id;
// it does not abort the VM.
349 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
350 tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
351 JRT_END
352
353
// Runtime entry used by compiled code to raise java.lang.ArrayStoreException
// when an array store check fails.  THROW sets the pending exception and
// returns to the caller.
354 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread))
355 THROW(vmSymbolHandles::java_lang_ArrayStoreException());
356 JRT_END
357
358
359 JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
360 if (JvmtiExport::can_post_exceptions()) {
361 vframeStream vfst(thread, true);
362 address bcp = vfst.method()->bcp_from(vfst.bci());
363 JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
1054 {
1055 // Enter VM mode
1056
1057 ResetNoHandleMark rnhm;
1058 patch_code(thread, access_field_patching_id);
1059 }
1060 // Back in JAVA, use no oops DON'T safepoint
1061
1062 // Return true if calling code is deoptimized
1063
1064 return caller_is_deopted();
1065 JRT_END
1066
1067
// Tracing hook: prints the id of the basic block being entered.
// JRT_LEAF: must not block, GC, or throw.
1068 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1069 // for now we just print out the block id
1070 tty->print("%d ", block_id);
1071 JRT_END
1072
1073
1074 // Array copy return codes.
// ac_failed tells the compiled caller to fall back to the slow,
// exception-throwing arraycopy path; ac_ok means the copy was done here.
1075 enum {
1076 ac_failed = -1, // arraycopy failed
1077 ac_ok = 0 // arraycopy succeeded
1078 };
1079
1080
// Copies 'length' object references from src_addr to dst_addr and applies
// the card-marking write barrier over the destination range.  T is the
// in-heap reference width at the call site (narrowOop under compressed
// oops, oop otherwise).  Returns ac_ok on success, ac_failed when the
// destination's element type does not accept the source's elements.
1081 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1082 oopDesc* dst, T* dst_addr,
1083 int length) {
1084
1085 // For performance reasons, we assume we are using a card marking write
1086 // barrier. The assert will fail if this is not the case.
1087 // Note that we use the non-virtual inlineable variant of write_ref_array.
1088 BarrierSet* bs = Universe::heap()->barrier_set();
1089 assert(bs->has_write_ref_array_opt(),
1090 "Barrier set must have ref array opt");
1091 if (src == dst) {
1092 // same object, no check
// Barrier is applied after the copy so the dirtied cards cover the
// newly written destination range.
1093 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1094 bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
1095 (HeapWord*)(dst_addr + length)));
1096 return ac_ok;
1097 } else {
// Distinct arrays: the copy is statically safe only if every source
// element is a subtype of the destination's element type.
1098 klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
1099 klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
1100 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
1101 // Elements are guaranteed to be subtypes, so no check necessary
1102 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1103 bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
1104 (HeapWord*)(dst_addr + length)));
1105 return ac_ok;
1106 }
1107 }
// Subtype check failed: let the slow path do per-element store checks.
1108 return ac_failed;
1109 }
1110
1111 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
1112 // and we did not copy anything
// JRT_LEAF: no safepoint, no GC, no exceptions may occur in here; all
// failure cases simply return ac_failed so the caller can take the slow,
// exception-throwing path.
1113 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
1114 #ifndef PRODUCT
1115 _generic_arraycopy_cnt++; // Slow-path oop array copy
1116 #endif
1117
// Nulls and negative arguments cannot be handled on this fast path.
1118 if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
1119 if (!dst->is_array() || !src->is_array()) return ac_failed;
// Bounds checks: the unsigned comparisons also reject pos+length sums
// that wrapped around int range.
1120 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1121 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1122
1123 if (length == 0) return ac_ok;
1124 if (src->is_typeArray()) {
// Primitive arrays: element types must match exactly.
1125 const klassOop klass_oop = src->klass();
1126 if (klass_oop != dst->klass()) return ac_failed;
1127 typeArrayKlass* klass = typeArrayKlass::cast(klass_oop);
// Compute raw byte addresses from header size and log2 element size.
1128 const int l2es = klass->log2_element_size();
1129 const int ihs = klass->array_header_in_bytes() / wordSize;
1130 char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
1131 char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
1132 // Potential problem: memmove is not guaranteed to be word atomic
1133 // Revisit in Merlin
1134 memmove(dst_addr, src_addr, length << l2es);
1135 return ac_ok;
1136 } else if (src->is_objArray() && dst->is_objArray()) {
// Object arrays: dispatch on the in-heap reference width so
// obj_arraycopy_work is instantiated with the matching element type.
1137 if (UseCompressedOops) { // will need for tiered
1138 narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
1139 narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
1140 return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
1141 } else {
1142 oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
1143 oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
1144 return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
1145 }
1146 }
1147 return ac_failed;
1148 JRT_END
1149
1150
// Copy 'length' bytes between primitive arrays from 'src' to 'dst'.
// JRT_LEAF: must not block, GC, or throw.
1151 JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
1152 #ifndef PRODUCT
1153 _primitive_arraycopy_cnt++;
1154 #endif
1155
1156 if (length == 0) return;
1157 // Not guaranteed to be word atomic, but that doesn't matter
1158 // for anything but an oop array, which is covered by oop_arraycopy.
1159 Copy::conjoint_bytes(src, dst, length);
1160 JRT_END
1161
1162 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
1163 #ifndef PRODUCT
1164 _oop_arraycopy_cnt++;
|