2223 result = (oop) compare_to;
2224 }
2225 }
2226 #endif
2227 if (result != NULL) {
2228 // Initialize object (if nonzero size and need) and then the header
2229 if (need_zero ) {
2230 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
2231 obj_size -= sizeof(oopDesc) / oopSize;
2232 if (obj_size > 0 ) {
2233 memset(to_zero, 0, obj_size * HeapWordSize);
2234 }
2235 }
2236 if (UseBiasedLocking) {
2237 result->set_mark(ik->prototype_header());
2238 } else {
2239 result->set_mark(markOopDesc::prototype());
2240 }
2241 result->set_klass_gap(0);
2242 result->set_klass(k_entry);
2243 SET_STACK_OBJECT(result, 0);
2244 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2245 }
2246 }
2247 }
2248 // Slow case allocation
2249 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2250 handle_exception);
2251 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2252 THREAD->set_vm_result(NULL);
2253 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2254 }
2255 CASE(_anewarray): {
2256 u2 index = Bytes::get_Java_u2(pc+1);
2257 jint size = STACK_INT(-1);
2258 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
2259 handle_exception);
2260 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2261 THREAD->set_vm_result(NULL);
2262 UPDATE_PC_AND_CONTINUE(3);
2263 }
2264 CASE(_multianewarray): {
2265 jint dims = *(pc+3);
2266 jint size = STACK_INT(-1);
2267 // stack grows down, dimensions are up!
2268 jint *dimarray =
2269 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
2270 Interpreter::stackElementWords-1];
2271 //adjust pointer to start of stack element
2272 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
2273 handle_exception);
2274 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
2275 THREAD->set_vm_result(NULL);
2276 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
2277 }
// checkcast: a null objectref passes trivially (guard below); otherwise the
// operand is left on the stack and only its type is checked.
2278 CASE(_checkcast):
2279 if (STACK_OBJECT(-1) != NULL) {
2280 VERIFY_OOP(STACK_OBJECT(-1));
2281 u2 index = Bytes::get_Java_u2(pc+1);
2282 // Constant pool may have actual klass or unresolved klass. If it is
2283 // unresolved we must resolve it.
2284 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2285 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2286 }
2287 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2288 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
2289 //
2290 // Check for compatibilty. This check must not GC!!
2291 // Seems way more expensive now that we must dispatch.
2292 //
2293 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
// NOTE(review): the listing jumps from source line 2293 to 2676 here; the
// subtype-failure branch and the intervening bytecode cases are not visible
// in this chunk.
2676 
// Tail of an invoke dispatch: record the callee Method* and its entry point
// in the interpreter state so the frame manager can transfer control to it.
// (The CASE label for this invoke is above this chunk — presumably one of
// the invoke* bytecodes; confirm against the full file.)
2677 istate->set_callee(callee);
2678 istate->set_callee_entry_point(callee->from_interpreted_entry());
2679 #ifdef VM_JVMTI
// In interp-only (debugging) mode, force the interpreted entry so JVMTI
// interpreter events keep firing for the callee.
2680 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2681 istate->set_callee_entry_point(callee->interpreter_entry());
2682 }
2683 #endif /* VM_JVMTI */
2684 istate->set_bcp_advance(3);
2685 UPDATE_PC_AND_RETURN(0); // I'll be back...
2686 }
2687 }
2688
2689 /* Allocate memory for a new java object. */
2690
2691 CASE(_newarray): {
2692 BasicType atype = (BasicType) *(pc+1);
2693 jint size = STACK_INT(-1);
2694 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
2695 handle_exception);
2696 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2697 THREAD->set_vm_result(NULL);
2698
2699 UPDATE_PC_AND_CONTINUE(2);
2700 }
2701
2702 /* Throw an exception. */
2703
// athrow: route the thrown oop through the common pending-exception path so
// exception dispatch is handled in one place (handle_exception).
2704 CASE(_athrow): {
2705 oop except_oop = STACK_OBJECT(-1);
// CHECK_NULL raises NullPointerException machinery if the thrown ref is null.
2706 CHECK_NULL(except_oop);
2707 // set pending_exception so we use common code
2708 THREAD->set_pending_exception(except_oop, NULL, 0);
2709 goto handle_exception;
2710 }
2711 
2712 /* goto and jsr. They are exactly the same except jsr pushes
2713 * the address of the next instruction first.
2714 */
2715 
// NOTE(review): the listing jumps from source line 2715 to 2909 here; the
// goto/jsr cases themselves are not visible in this chunk. What follows is
// the tail of the early-return value switch inside handle_Early_Return.
// Doubles occupy two stack slots (MORE_STACK(2)); oops/arrays occupy one.
2909 case T_DOUBLE:
2910 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
2911 MORE_STACK(2);
2912 break;
2913 case T_ARRAY:
2914 case T_OBJECT:
2915 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
2916 MORE_STACK(1);
2917 break;
2918 }
2919 
// Clear the JVMTI early-return state so it is not re-triggered.
2920 ts->clr_earlyret_value();
2921 ts->set_earlyret_oop(NULL);
2922 ts->clr_earlyret_pending();
2923 
2924 // Fall through to handle_return.
2925 
2926 } // handle_Early_Return
2927 
2927
2928 handle_return: {
2929 DECACHE_STATE();
2930
2931 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
2932 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
2933 Handle original_exception(THREAD, THREAD->pending_exception());
2934 Handle illegal_state_oop(THREAD, NULL);
2935
2936 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
2937 // in any following VM entries from freeing our live handles, but illegal_state_oop
2938 // isn't really allocated yet and so doesn't become live until later and
2939 // in unpredicatable places. Instead we must protect the places where we enter the
2940 // VM. It would be much simpler (and safer) if we could allocate a real handle with
2941 // a NULL oop in it and then overwrite the oop later as needed. This isn't
2942 // unfortunately isn't possible.
2943
2944 THREAD->clear_pending_exception();
2945
2946 //
2947 // As far as we are concerned we have returned. If we have a pending exception
2948 // that will be returned as this invocation's result. However if we get any
|
// NOTE(review): fragment begins mid _new fast path; 'result'/'compare_to'
// presumably come from a CAS allocation retry above this chunk — confirm
// against the full file.
2223 result = (oop) compare_to;
2224 }
2225 }
2226 #endif
2227 if (result != NULL) {
2228 // Initialize object (if nonzero size and need) and then the header
2229 if (need_zero ) {
// Zero only the instance-field words: skip the object header
// (sizeof(oopDesc) bytes, expressed in heap words).
2230 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
2231 obj_size -= sizeof(oopDesc) / oopSize;
2232 if (obj_size > 0 ) {
2233 memset(to_zero, 0, obj_size * HeapWordSize);
2234 }
2235 }
// Mark word: biased-locking prototype from the klass when enabled,
// otherwise the neutral prototype.
2236 if (UseBiasedLocking) {
2237 result->set_mark(ik->prototype_header());
2238 } else {
2239 result->set_mark(markOopDesc::prototype());
2240 }
2241 result->set_klass_gap(0);
2242 result->set_klass(k_entry);
2243 // Must prevent reordering of stores for object initialization
2244 // with stores that publish the new object.
2245 OrderAccess::storestore();
2246 SET_STACK_OBJECT(result, 0);
2247 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2248 }
2249 }
2250 }
2251 // Slow case allocation
2252 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2253 handle_exception);
2254 // Must prevent reordering of stores for object initialization
2255 // with stores that publish the new object.
2256 OrderAccess::storestore();
2257 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2258 THREAD->set_vm_result(NULL);
2259 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2260 }
// anewarray: top of stack holds the length; the runtime allocates an object
// array of the class at constant-pool 'index'. The length slot is then
// overwritten with the array reference (net TOS change is zero).
2261 CASE(_anewarray): {
2262 u2 index = Bytes::get_Java_u2(pc+1);
2263 jint size = STACK_INT(-1);
2264 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
2265 handle_exception);
2266 // Must prevent reordering of stores for object initialization
2267 // with stores that publish the new object.
2268 OrderAccess::storestore();
2269 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2270 THREAD->set_vm_result(NULL);
2271 UPDATE_PC_AND_CONTINUE(3);
2272 }
// multianewarray: 'dims' dimension sizes sit on the stack; pass the runtime
// a pointer to the lowest-addressed dimension element, then replace the
// dimension slots with the single array reference.
2273 CASE(_multianewarray): {
2274 jint dims = *(pc+3);
// NOTE(review): 'size' is read but otherwise unused here; the dimension
// values are consumed through 'dimarray' below.
2275 jint size = STACK_INT(-1);
2276 // stack grows down, dimensions are up!
2277 jint *dimarray =
2278 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
2279 Interpreter::stackElementWords-1];
2280 //adjust pointer to start of stack element
2281 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
2282 handle_exception);
2283 // Must prevent reordering of stores for object initialization
2284 // with stores that publish the new object.
2285 OrderAccess::storestore();
2286 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
2287 THREAD->set_vm_result(NULL);
2288 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
2289 }
// checkcast: a null objectref passes trivially (guard below); otherwise the
// operand is left on the stack and only its type is checked.
2290 CASE(_checkcast):
2291 if (STACK_OBJECT(-1) != NULL) {
2292 VERIFY_OOP(STACK_OBJECT(-1));
2293 u2 index = Bytes::get_Java_u2(pc+1);
2294 // Constant pool may have actual klass or unresolved klass. If it is
2295 // unresolved we must resolve it.
2296 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2297 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2298 }
2299 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2300 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
2301 //
2302 // Check for compatibilty. This check must not GC!!
2303 // Seems way more expensive now that we must dispatch.
2304 //
2305 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
// NOTE(review): the listing jumps from source line 2305 to 2688 here; the
// subtype-failure branch and the intervening bytecode cases are not visible
// in this chunk.
2688 
// Tail of an invoke dispatch: record the callee Method* and its entry point
// in the interpreter state so the frame manager can transfer control to it.
2689 istate->set_callee(callee);
2690 istate->set_callee_entry_point(callee->from_interpreted_entry());
2691 #ifdef VM_JVMTI
// In interp-only (debugging) mode, force the interpreted entry so JVMTI
// interpreter events keep firing for the callee.
2692 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2693 istate->set_callee_entry_point(callee->interpreter_entry());
2694 }
2695 #endif /* VM_JVMTI */
2696 istate->set_bcp_advance(3);
2697 UPDATE_PC_AND_RETURN(0); // I'll be back...
2698 }
2699 }
2700
2701 /* Allocate memory for a new java object. */
2702
// newarray: allocate a primitive-typed array; element type is encoded in the
// bytecode operand, length is on top of the stack. The length slot is then
// overwritten with the array reference (net TOS change is zero).
2703 CASE(_newarray): {
2704 BasicType atype = (BasicType) *(pc+1);
2705 jint size = STACK_INT(-1);
2706 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
2707 handle_exception);
2708 // Must prevent reordering of stores for object initialization
2709 // with stores that publish the new object.
2710 OrderAccess::storestore();
2711 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2712 THREAD->set_vm_result(NULL);
2713 
2714 UPDATE_PC_AND_CONTINUE(2);
2715 }
2716
2717 /* Throw an exception. */
2718
// athrow: route the thrown oop through the common pending-exception path so
// exception dispatch is handled in one place (handle_exception).
2719 CASE(_athrow): {
2720 oop except_oop = STACK_OBJECT(-1);
// CHECK_NULL raises NullPointerException machinery if the thrown ref is null.
2721 CHECK_NULL(except_oop);
2722 // set pending_exception so we use common code
2723 THREAD->set_pending_exception(except_oop, NULL, 0);
2724 goto handle_exception;
2725 }
2726 
2727 /* goto and jsr. They are exactly the same except jsr pushes
2728 * the address of the next instruction first.
2729 */
2730 
// NOTE(review): the listing jumps from source line 2730 to 2924 here; the
// goto/jsr cases themselves are not visible in this chunk. What follows is
// the tail of the early-return value switch inside handle_Early_Return.
// Doubles occupy two stack slots (MORE_STACK(2)); oops/arrays occupy one.
2924 case T_DOUBLE:
2925 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
2926 MORE_STACK(2);
2927 break;
2928 case T_ARRAY:
2929 case T_OBJECT:
2930 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
2931 MORE_STACK(1);
2932 break;
2933 }
2934 
// Clear the JVMTI early-return state so it is not re-triggered.
2935 ts->clr_earlyret_value();
2936 ts->set_earlyret_oop(NULL);
2937 ts->clr_earlyret_pending();
2938 
2939 // Fall through to handle_return.
2940 
2941 } // handle_Early_Return
2942 
2942
2943 handle_return: {
2944 // A storestore barrier is required to order initialization of
2945 // final fields with publishing the reference to the object that
2946 // holds the field. Without the barrier the value of final fields
2947 // can be observed to change.
2948 OrderAccess::storestore();
2949 
2950 DECACHE_STATE();
2951 
// Errors/exit events are suppressed when this frame is being popped or
// force-returned by JVMTI rather than returning normally.
2952 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
2953 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
// Capture any pending exception in a Handle before clearing it, so it
// survives subsequent VM entries below.
2954 Handle original_exception(THREAD, THREAD->pending_exception());
2955 Handle illegal_state_oop(THREAD, NULL);
2956 
2957 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
2958 // in any following VM entries from freeing our live handles, but illegal_state_oop
2959 // isn't really allocated yet and so doesn't become live until later and
2960 // in unpredicatable places. Instead we must protect the places where we enter the
2961 // VM. It would be much simpler (and safer) if we could allocate a real handle with
2962 // a NULL oop in it and then overwrite the oop later as needed. This isn't
2963 // unfortunately isn't possible.
2964 
2965 THREAD->clear_pending_exception();
2966 
2967 //
2968 // As far as we are concerned we have returned. If we have a pending exception
2969 // that will be returned as this invocation's result. However if we get any
|