< prev index next >

src/cpu/sparc/vm/macroAssembler_sparc.cpp

Print this page

        

*** 294,325 ****
      mov(G1, L1);                // avoid clobbering G1
                                  // G2 saved below
      mov(G3, L3);                // avoid clobbering G3
      mov(G4, L4);                // avoid clobbering G4
      mov(G5_method, L5);         // avoid clobbering G5_method
- #if defined(COMPILER2) && !defined(_LP64)
-     // Save & restore possible 64-bit Long arguments in G-regs
-     srlx(G1,32,L0);
-     srlx(G4,32,L6);
- #endif
      call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
      delayed()->mov(G2_thread, O0);

      mov(L1, G1);                // Restore G1
                                  // G2 restored below
      mov(L3, G3);                // restore G3
      mov(L4, G4);                // restore G4
      mov(L5, G5_method);         // restore G5_method
- #if defined(COMPILER2) && !defined(_LP64)
-     // Save & restore possible 64-bit Long arguments in G-regs
-     sllx(L0,32,G2);             // Move old high G1 bits high in G2
-     srl(G1, 0,G1);              // Clear current high G1 bits
-     or3 (G1,G2,G1);             // Recover 64-bit G1
-     sllx(L6,32,G2);             // Move old high G4 bits high in G2
-     srl(G4, 0,G4);              // Clear current high G4 bits
-     or3 (G4,G2,G4);             // Recover 64-bit G4
- #endif
      restore(O0, 0, G2_thread);
    }
  }
--- 294,311 ----
*** 385,395 ****
    if (last_Java_pc->is_valid()) {
      st_ptr(last_Java_pc, pc_addr);
    }

- #ifdef _LP64
  #ifdef ASSERT
    // Make sure that we have an odd stack
    Label StackOk;
    andcc(last_java_sp, 0x01, G0);
    br(Assembler::notZero, false, Assembler::pt, StackOk);
--- 371,380 ----
*** 398,410 ****
    bind(StackOk);
  #endif // ASSERT
    assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
    add( last_java_sp, STACK_BIAS, G4_scratch );
    st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
- #else
-   st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
- #endif // _LP64
  }

  void MacroAssembler::reset_last_Java_frame(void) {
    assert_not_delayed();
--- 383,392 ----
*** 656,680 ****
    }
  }

  void MacroAssembler::card_table_write(jbyte* byte_map_base, Register tmp, Register obj) {
- #ifdef _LP64
    srlx(obj, CardTableModRefBS::card_shift, obj);
- #else
-   srl(obj, CardTableModRefBS::card_shift, obj);
- #endif
    assert(tmp != obj, "need separate temp reg");
    set((address) byte_map_base, tmp);
    stb(G0, tmp, obj);
  }

  void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
    address save_pc;
    int shiftcnt;
- #ifdef _LP64
  # ifdef CHECK_DELAY
    assert_not_delayed((char*) "cannot put two instructions in delay slot");
  # endif
    v9_dep();
    save_pc = pc();
--- 638,657 ----
*** 717,729 ****
    if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                             addrlit.rtype() != relocInfo::runtime_call_type)) {
      while (pc() < (save_pc + (7 * BytesPerInstWord)))
        nop();
    }
- #else
-   Assembler::sethi(addrlit.value(), d, addrlit.rspec());
- #endif
  }

  void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
    internal_sethi(addrlit, d, false);
--- 694,703 ----
*** 734,744 ****
    internal_sethi(addrlit, d, true);
  }

  int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
- #ifdef _LP64
    if (worst_case) return 7;
    intptr_t iaddr = (intptr_t) a;
    int msb32 = (int) (iaddr >> 32);
    int lsb32 = (int) (iaddr);
    int count;
--- 708,717 ----
*** 754,766 ****
        if ((lsb32 >> 20) & 0xfff) count += 2;
        if ((lsb32 >> 10) & 0x3ff) count += 2;
      }
    }
    return count;
- #else
-   return 1;
- #endif
  }

  int MacroAssembler::worst_case_insts_for_set() {
    return insts_for_sethi(NULL, true) + 1;
  }
--- 727,736 ----
*** 1486,1500 ****
    bind( no_extras );
  }

  void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
- #ifdef _LP64
    add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
- #else
-   add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
- #endif
    bclr(1, Rresult);
    sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
  }
--- 1456,1466 ----
*** 1529,1554 ****
  // Compares a pointer register with zero and branches on null.
  // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
  void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
    assert_not_delayed();
- #ifdef _LP64
    bpr( rc_z, a, p, s1, L );
- #else
-   tst(s1);
-   br ( zero, a, p, L );
- #endif
  }

  void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
    assert_not_delayed();
- #ifdef _LP64
    bpr( rc_nz, a, p, s1, L );
- #else
-   tst(s1);
-   br ( notZero, a, p, L );
- #endif
  }

  // Compare registers and branch with nop in delay slot or cbcond without delay slot.
  // Compare integer (32 bit) values (icc only).
--- 1495,1510 ----
*** 1860,1877 ****
    clr(Rout_high);
    bind( done );
  }

- #ifdef _LP64
  void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
    cmp(Ra, Rb);
    mov(-1, Rresult);
    movcc(equal, false, xcc, 0, Rresult);
    movcc(greater, false, xcc, 1, Rresult);
  }
- #endif

  void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
    switch (size_in_bytes) {
    case 8: ld_long(src, dst); break;
--- 1816,1831 ----
*** 2666,2678 ****
      cas_ptr(mark_addr.base(), Rmark, Rscratch);
      // if compare/exchange succeeded we found an unlocked object and we now have locked it
      // hence we are done
      cmp(Rmark, Rscratch);
- #ifdef _LP64
      sub(Rscratch, STACK_BIAS, Rscratch);
- #endif
      brx(Assembler::equal, false, Assembler::pt, done);
      delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot
      // we did not find an unlocked object so see if this is a recursive case
      // sub(Rscratch, SP, Rscratch);
--- 2620,2630 ----
*** 2714,2726 ****
      brx(Assembler::equal, false, Assembler::pt, done);
      delayed()->sub(Rscratch, SP, Rscratch);

      // Stack-lock attempt failed - check for recursive stack-lock.
      // See the comments below about how we might remove this case.
- #ifdef _LP64
      sub(Rscratch, STACK_BIAS, Rscratch);
- #endif
      assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
      andcc(Rscratch, 0xfffff003, Rscratch);
      br(Assembler::always, false, Assembler::pt, done);
      delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
--- 2666,2676 ----
*** 2798,2810 ****
      // and showed a performance *increase*. In the same experiment I eliminated
      // the fast-path stack-lock code from the interpreter and always passed
      // control to the "slow" operators in synchronizer.cpp.

      // RScratch contains the fetched obj->mark value from the failed CAS.
- #ifdef _LP64
      sub(Rscratch, STACK_BIAS, Rscratch);
- #endif
      sub(Rscratch, SP, Rscratch);
      assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
      andcc(Rscratch, 0xfffff003, Rscratch);
      if (counters != NULL) {
        // Accounting needs the Rscratch register
--- 2748,2758 ----
*** 3718,3732 ****
  #define __ masm.
    address start = __ pc();
    Label not_already_dirty, restart, refill, young_card;

- #ifdef _LP64
    __ srlx(O0, CardTableModRefBS::card_shift, O0);
- #else
-   __ srl(O0, CardTableModRefBS::card_shift, O0);
- #endif
    AddressLiteral addrlit(byte_map_base);
    __ set(addrlit, O1); // O1 := <card table base>
    __ ldub(O0, O1, O2); // O2 := [O0 + O1]

    __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
--- 3666,3676 ----
*** 3824,3838 ****
    G1SATBCardTableLoggingModRefBS* bs =
      barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());

    if (G1RSBarrierRegionFilter) {
      xor3(store_addr, new_val, tmp);
- #ifdef _LP64
      srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
- #else
-     srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
- #endif

      // XXX Should I predict this taken or not? Does it matter?
      cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
    }
--- 3768,3778 ----
< prev index next >