src/cpu/sparc/vm/assembler_sparc.cpp
*** old/src/cpu/sparc/vm/assembler_sparc.cpp	Mon Apr 26 07:45:19 2010
--- new/src/cpu/sparc/vm/assembler_sparc.cpp	Mon Apr 26 07:45:18 2010

*** 1,7 **** --- 1,7 ----
  /*
! * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
! * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 2331,2340 **** --- 2331,2352 ----
      movcc(greater, false, xcc, 1, Rresult);
  }
  #endif
+ void MacroAssembler::load_sized_value(Address src, Register dst,
+                                       int size_in_bytes, bool is_signed) {
+   switch (size_in_bytes) {
+   case 8: ldx(src, dst); break;
+   case 4: ld( src, dst); break;
+   case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
+   case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
+   default: ShouldNotReachHere();
+   }
+ }
+
+
  void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                  FloatRegister Fa, FloatRegister Fb,
                                  Register Rresult) {

    fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
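For orientation, a hedged usage sketch of the new load_sized_value helper; the base register, field offset, and destination below are illustrative placeholders, not taken from this change:

    // Hypothetical call site inside a MacroAssembler member: load a signed
    // 16-bit field at a made-up offset of 12 from the object in G3 into O0.
    load_sized_value(Address(G3, 12), O0, /*size_in_bytes*/ 2, /*is_signed*/ true);   // emits ldsh (sign-extend)
    load_sized_value(Address(G3, 12), O0, /*size_in_bytes*/ 2, /*is_signed*/ false);  // emits lduh (zero-extend)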
*** 2623,2666 **** --- 2635,2753 ----
    return RegisterOrConstant(tmp);
  }

! void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
-   assert(dest.register_or_noreg() != G0, "lost side effect");
!   if ((src.is_constant() && src.as_constant() == 0) ||
!       (src.is_register() && src.as_register() == G0)) {
      // do nothing
!   } else if (dest.is_register()) {
      add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
    } else if (src.is_constant()) {
!     intptr_t res = dest.as_constant() + src.as_constant();
      dest = RegisterOrConstant(res); // side effect seen by caller
    } else {
      assert(temp != noreg, "cannot handle constant += register");
!     add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
      dest = RegisterOrConstant(temp); // side effect seen by caller
! void MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant& d, Register temp) {
!   assert(d.register_or_noreg() != G0, "lost side effect");
!   if ((s2.is_constant() && s2.as_constant() == 0) ||
!       (s2.is_register() && s2.as_register() == G0)) {
+     // Do nothing, just move value.
!     if (s1.is_register()) {
+       if (d.is_constant()) {
+         assert(temp != noreg, "temp register required");
!         d = temp;
+       }
+       mov(s1.as_register(), d.as_register());
+     } else {
!       d = s1;
+     }
+   }
+
+   if (s1.is_register()) {
+     if (d.is_constant()) {
+       assert(temp != noreg, "temp register required");
+       d = temp;
+     }
+     andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+   } else {
+     if (s2.is_register()) {
+       assert(temp != noreg, "cannot handle constant & ~register");
+       if (d.is_constant()) {
+         d = temp;
+       }
+       set(s1.as_constant(), temp);
+       andn(temp, s2.as_register(), d.as_register());
+     } else {
+       intptr_t res = s1.as_constant() & ~s2.as_constant();
+       d = res;
+     }
    }
  }

! void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
-   assert(dest.register_or_noreg() != G0, "lost side effect");
!   if (!is_simm13(src.constant_or_zero())) src = (src.as_constant() & 0xFF);
    if ((src.is_constant() && src.as_constant() == 0) ||
!       (src.is_register() && src.as_register() == G0)) {
      // do nothing
    } else if (dest.is_register()) {
!     sll_ptr(dest.as_register(), src, dest.as_register());
    } else if (src.is_constant()) {
!     intptr_t res = dest.as_constant() << src.as_constant();
      dest = RegisterOrConstant(res); // side effect seen by caller
    } else {
      assert(temp != noreg, "cannot handle constant <<= register");
      set(dest.as_constant(), temp);
      sll_ptr(temp, src, temp);
      dest = RegisterOrConstant(temp); // side effect seen by caller
! void MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant& d, Register temp) {
!   assert(d.register_or_noreg() != G0, "lost side effect");
!   if ((s2.is_constant() && s2.as_constant() == 0) ||
+       (s2.is_register() && s2.as_register() == G0)) {
+     // Do nothing, just move value.
!     if (s1.is_register()) {
+       if (d.is_constant()) {
+         assert(temp != noreg, "temp register required");
!         d = temp;
+       }
+       mov(s1.as_register(), d.as_register());
+     } else {
+       d = s1;
+     }
+   }
+
+   if (s1.is_register()) {
+     if (d.is_constant()) {
+       assert(temp != noreg, "temp register required");
+       d = temp;
+     }
+     add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+   } else {
+     if (s2.is_register()) {
+       if (d.is_constant()) {
+         assert(temp != noreg, "temp register required");
+         d = temp;
+       }
+       add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
+     } else {
+       intptr_t res = s1.as_constant() + s2.as_constant();
+       d = res;
+     }
+   }
+ }
+
+ void MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant& d, Register temp) {
+   assert(d.register_or_noreg() != G0, "lost side effect");
+   if (!is_simm13(s2.constant_or_zero()))
+     s2 = (s2.as_constant() & 0xFF);
+   if ((s2.is_constant() && s2.as_constant() == 0) ||
+       (s2.is_register() && s2.as_register() == G0)) {
+     // Do nothing, just move value.
+     if (s1.is_register()) {
+       if (d.is_constant()) {
+         assert(temp != noreg, "temp register required");
+         d = temp;
+       }
+       mov(s1.as_register(), d.as_register());
+     } else {
+       d = s1;
+     }
+   }
+
+   if (s1.is_register()) {
+     if (d.is_constant()) {
+       assert(temp != noreg, "temp register required");
+       d = temp;
+     }
+     sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+   } else {
+     if (s2.is_register()) {
+       assert(temp != noreg, "cannot handle constant << register");
+       if (d.is_constant()) {
+         d = temp;
+       }
+       set(s1.as_constant(), temp);
+       sll_ptr(temp, s2.as_register(), d.as_register());
+     } else {
+       intptr_t res = s1.as_constant() << s2.as_constant();
+       d = res;
+     }
    }
  }

  // Look up the method for a megamorphic invokeinterface call.
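As a reading aid, a hedged sketch of the revised calling convention for these regcon_* helpers; the register names and constants below are illustrative placeholders written as if inside a MacroAssembler member, not part of the change:

    // Old form updated its first argument in place:   dest = dest << src
    //   regcon_sll_ptr(dest, src, temp);
    // New form is three-operand:                       d = s1 << s2
    //   regcon_sll_ptr(s1, s2, d, temp);
    // If both inputs are constants the result stays a constant; if a register
    // result is needed while d currently holds a constant, d is redirected to
    // temp (hence the asserts against G0 and a missing temp register).
    RegisterOrConstant idx = RegisterOrConstant(G4);   // placeholder input
    RegisterOrConstant off = idx;                      // receives the result
    regcon_sll_ptr(idx, 3, off, L0);                   // off = idx << 3
    regcon_inc_ptr(off, 16, off, L0);                  // off = off + 16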
*** 2706,2717 **** --- 2793,2804 ----
    }
    add(recv_klass, scan_temp, scan_temp);

    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    RegisterOrConstant itable_offset = itable_index;
!   regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
!   regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
!   regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
!   regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
    add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);

    // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
    //   if (scan->interface() == intf) {
    //     result = (klass + scan->offset() + itable_index);
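For concreteness, the displacement being built in this hunk is, symbolically (nothing new is added here):

    // itable_offset = itable_index * (itableMethodEntry::size() * wordSize)
    //               + itableMethodEntry::method_offset_in_bytes();
    // The shift form works because exact_log2 requires (and asserts) that the
    // per-entry byte size is a power of two.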
*** 2803,2813 **** --- 2890,2900 ----
    bool need_slow_path = (must_load_sco || super_check_offset.constant_or_zero() == sco_offset);

    assert_different_registers(sub_klass, super_klass, temp_reg);
    if (super_check_offset.is_register()) {
!     assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
    } else if (must_load_sco) {
      assert(temp2_reg != noreg, "supply either a temp or a register offset");
    }
*** 2853,2862 **** --- 2940,2951 ----
    // Check the supertype display:
    if (must_load_sco) {
      // The super check offset is always positive...
      lduw(super_klass, sco_offset, temp2_reg);
      super_check_offset = RegisterOrConstant(temp2_reg);
+     // super_check_offset is register.
+     assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
    }
    ld_ptr(sub_klass, super_check_offset, temp_reg);
    cmp(super_klass, temp_reg);

    // This check has worked decisively for primary supers.
*** 3012,3061 **** --- 3101,3173 ----
    bind(L_fallthrough);
  }

  void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type) {
+   if (UseCompressedOops) unimplemented("coop");  // field accesses must decode
    assert_different_registers(mtype_reg, mh_reg, temp_reg);
    // compare method type against that of the receiver
    RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
    ld_ptr(mh_reg, mhtype_offset, temp_reg);
    cmp(temp_reg, mtype_reg);
    br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
    delayed()->nop();
  }

! void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
+ // A method handle has a "vmslots" field which gives the size of its
+ // argument list in JVM stack slots.  This field is either located directly
+ // in every method handle, or else is indirectly accessed through the
+ // method handle's MethodType.  This macro hides the distinction.
+ void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+                                                 Register temp_reg) {
+   assert_different_registers(vmslots_reg, mh_reg, temp_reg);
+   if (UseCompressedOops) unimplemented("coop");  // field accesses must decode
+   // load mh.type.form.vmslots
+   if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
+     // hoist vmslots into every mh to avoid dependent load chain
+     ld(    Address(mh_reg,    delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
+   } else {
+     Register temp2_reg = vmslots_reg;
+     ld_ptr(Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
+     ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
+     ld(    Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
+   }
+ }
+
+
+ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
    assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
    assert_different_registers(mh_reg, temp_reg);
+   if (UseCompressedOops) unimplemented("coop");  // field accesses must decode
+
    // pick out the interpreted side of the handler
    ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);

    // off we go...
    ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
    jmp(temp_reg, 0);

    // for the various stubs which take control at this point,
    // see MethodHandles::generate_method_handle_stub
-   // (Can any caller use this delay slot?  If so, add an option for supression.)
+   // Some callers can fill the delay slot.
+   if (emit_delayed_nop) {
      delayed()->nop();
+   }
  }
+

  RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, int extra_slot_offset) {
    // cf. TemplateTable::prepare_invoke(), if (load_receiver).
!   int stackElementSize = Interpreter::stackElementWords() * wordSize;
!   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
-   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
-   assert(offset1 - offset == stackElementSize, "correct arithmetic");
!   int stackElementSize = Interpreter::stackElementSize();
!   int offset = extra_slot_offset * stackElementSize;
    if (arg_slot.is_constant()) {
      offset += arg_slot.as_constant() * stackElementSize;
      return offset;
    } else {
      Register temp = arg_slot.as_register();
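As a reading aid for the new load_method_handle_vmslots helper, the two access paths it hides look roughly like this; the field spellings are illustrative, the real offsets come from the java_dyn_* accessors used above:

    // Direct path (vmslots hoisted into every MethodHandle):
    //   vmslots = mh.vmslots;                // single load
    // Indirect path (through the MethodType and its form):
    //   vmslots = mh.type.form.vmslots;      // three dependent loads, as emitted above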
*** 3065,3074 **** --- 3177,3191 ----
      return temp;
    }
  }

+ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+                                          int extra_slot_offset) {
+   return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
+ }
+

  void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg, Label& done, Label* slow_case, BiasedLockingCounters* counters) {
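For context, a hedged sketch of what the rewritten argument_offset computes and how the new argument_address wrapper might be used; the slot number and destination register are illustrative placeholders, not from this change:

    // offset(arg_slot) = (extra_slot_offset + arg_slot) * stackElementSize,
    // measured in bytes from Gargs (the interpreter argument base register).
    // Hypothetical use inside a MacroAssembler member: load argument slot 2.
    Address arg2 = argument_address(RegisterOrConstant(2), 0);
    ld_ptr(arg2, O1);   // O1 is a placeholder destination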
