src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp

 580       }
 581       __ add(Rdividend, Rscratch, Rscratch);
 582       __ sra(Rscratch, log2_intptr(divisor), Rresult);
 583       return;
 584     } else {
 585       if (divisor == 2) {
 586         __ srl(Rdividend, 31, Rscratch);
 587       } else {
 588         __ sra(Rdividend, 31, Rscratch);
 589         __ and3(Rscratch, divisor - 1, Rscratch);
 590       }
 591       __ add(Rdividend, Rscratch, Rscratch);
 592       __ andn(Rscratch, divisor - 1, Rscratch);
 593       __ sub(Rdividend, Rscratch, Rresult);
 594       return;
 595     }
 596   }
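
The constant-divisor path above is the usual bias-and-shift trick for signed division and remainder by a power of two. A minimal C++ sketch of the arithmetic (illustrative names, not the emitted code; assumes arithmetic right shift on signed ints, as sra provides):

    #include <cstdint>

    // d = 1 << k, with Java's round-toward-zero semantics; mirrors the
    // sra/add and add/andn/sub sequences emitted above.
    int32_t idiv_pow2(int32_t x, int k) {
      int32_t bias = (x >> 31) & ((1 << k) - 1);  // d-1 for negative x, else 0
      return (x + bias) >> k;                     // sra by log2_intptr(d)
    }

    int32_t irem_pow2(int32_t x, int k) {
      int32_t bias = (x >> 31) & ((1 << k) - 1);    // same bias as the div path
      int32_t qd   = (x + bias) & ~((1 << k) - 1);  // andn clears the low k bits
      return x - qd;                                // remainder keeps x's sign
    }

For example, idiv_pow2(-7, 2) is -1 and irem_pow2(-7, 2) is -3, matching Java's -7 / 4 and -7 % 4.
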
 597 
 598   __ sra(Rdividend, 31, Rscratch);
 599   __ wry(Rscratch);
 600   if (!VM_Version::v9_instructions_work()) {
 601     // v9 doesn't require these nops
 602     __ nop();
 603     __ nop();
 604     __ nop();
 605     __ nop();
 606   }
 607 
 608   add_debug_info_for_div0_here(op->info());
 609 
 610   if (Rdivisor != noreg) {
 611     __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 612   } else {
 613     assert(Assembler::is_simm13(divisor), "can only handle simm13");
 614     __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 615   }
 616 
 617   Label skip;
 618   __ br(Assembler::overflowSet, true, Assembler::pn, skip);
 619   __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
 620   __ bind(skip);
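
The overflowSet branch with the annulled sethi covers the one signed 32-bit division that overflows, Integer.MIN_VALUE / -1, whose Java result is defined to be Integer.MIN_VALUE (0x80000000). A sketch of the semantics being produced (illustrative names; division by zero is trapped separately via add_debug_info_for_div0_here):

    #include <cstdint>

    int32_t java_idiv(int32_t a, int32_t b) {           // b != 0
      if (a == INT32_MIN && b == -1) return INT32_MIN;  // the sethi(0x80000000) clamp
      return a / b;
    }

    int32_t java_irem(int32_t a, int32_t b) {           // b != 0
      if (a == INT32_MIN && b == -1) return 0;          // MIN_VALUE % -1 == 0 in Java
      return a - (a / b) * b;  // the smul/sub tail below computes a - q*b for lir_irem
    }
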
 621 
 622   if (op->code() == lir_irem) {
 623     if (Rdivisor != noreg) {
 624       __ smul(Rscratch, Rdivisor, Rscratch);
 625     } else {
 626       __ smul(Rscratch, divisor, Rscratch);


 635   assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
 636   if (op->block() != NULL)  _branch_target_blocks.append(op->block());
 637   if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
 638 #endif
 639   assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
 640 
 641   if (op->cond() == lir_cond_always) {
 642     __ br(Assembler::always, false, Assembler::pt, *(op->label()));
 643   } else if (op->code() == lir_cond_float_branch) {
 644     assert(op->ublock() != NULL, "must have unordered successor");
 645     bool is_unordered = (op->ublock() == op->block());
 646     Assembler::Condition acond;
 647     switch (op->cond()) {
 648       case lir_cond_equal:         acond = Assembler::f_equal;    break;
 649       case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
 650       case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
 651       case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
 652       case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
 653       case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
 654       default :                         ShouldNotReachHere();
 655     };
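
The is_unordered selection above matters because a NaN operand makes the floating-point compare "unordered", and a plain ordered condition would then fall through no matter which successor is supposed to handle NaN. A small illustration of the Java-level behaviour being encoded (plain C++, not emitter code):

    void float_branch_example(float a, float b) {
      bool lt = (a < b);    // false whenever a or b is NaN (unordered)
      bool ge = (a >= b);   // also false for NaN operands
      // Both ordered tests fail on NaN, so the emitter switches to the
      // f_unorderedOr... form whenever the unordered successor is the same
      // block this conditional branch targets.
      (void)lt; (void)ge;
    }
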
 656 
 657     if (!VM_Version::v9_instructions_work()) {
 658       __ nop();
 659     }
 660     __ fb( acond, false, Assembler::pn, *(op->label()));
 661   } else {
 662     assert (op->code() == lir_branch, "just checking");
 663 
 664     Assembler::Condition acond;
 665     switch (op->cond()) {
 666       case lir_cond_equal:        acond = Assembler::equal;                break;
 667       case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
 668       case lir_cond_less:         acond = Assembler::less;                 break;
 669       case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
 670       case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
 671       case lir_cond_greater:      acond = Assembler::greater;              break;
 672       case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
 673       case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
 674       default:                         ShouldNotReachHere();
 675     };
 676 
 677     // sparc has different condition codes for testing 32-bit
 678     // vs. 64-bit values.  We could always test xcc if we could


 708       break;
 709     }
 710     case Bytecodes::_i2d:
 711     case Bytecodes::_i2f: {
 712       bool is_double = (code == Bytecodes::_i2d);
 713       FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
 714       FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
 715       FloatRegister rsrc = op->in_opr()->as_float_reg();
 716       if (rsrc != rdst) {
 717         __ fmov(FloatRegisterImpl::S, rsrc, rdst);
 718       }
 719       __ fitof(w, rdst, rdst);
 720       break;
 721     }
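
For _i2f/_i2d the 32-bit integer pattern is already sitting in a float register, so the code only copies it into the destination register (when needed) and lets fitof reinterpret and convert it in place. A rough C++ model of the single-precision case (i2f_model is an illustrative name; memcpy stands in for the register move):

    #include <cstdint>
    #include <cstring>

    float i2f_model(int32_t i) {
      float raw;                          // plays the role of the FP register
      std::memcpy(&raw, &i, sizeof raw);  // fmov: copy the raw 32-bit pattern
      int32_t bits;
      std::memcpy(&bits, &raw, sizeof bits);
      return static_cast<float>(bits);    // fitos: treat the pattern as an int and convert
    }
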
 722     case Bytecodes::_f2i:{
 723       FloatRegister rsrc = op->in_opr()->as_float_reg();
 724       Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
 725       Label L;
 726       // result must be 0 if value is NaN; test by comparing value to itself
 727       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
 728       if (!VM_Version::v9_instructions_work()) {
 729         __ nop();
 730       }
 731       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
 732       __ delayed()->st(G0, addr); // annulled if the contents of rsrc are not NaN
 733       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
 734       // move integer result from float register to int register
 735       __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
 736       __ bind (L);
 737       break;
 738     }
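
The annulled st(G0, addr) exists because Java defines (int) of NaN as 0, which the bare convert instruction does not produce on its own. A hedged sketch of the semantics (out-of-range saturation, which the hardware convert handles, is omitted):

    #include <cmath>
    #include <cstdint>

    int32_t f2i_model(float f) {
      if (std::isnan(f)) return 0;      // the annulled st(G0, addr) path
      return static_cast<int32_t>(f);   // ftoi, then the converted value is stored
    }
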
 739     case Bytecodes::_l2i: {
 740       Register rlo  = op->in_opr()->as_register_lo();
 741       Register rhi  = op->in_opr()->as_register_hi();
 742       Register rdst = dst->as_register();
 743 #ifdef _LP64
 744       __ sra(rlo, 0, rdst);
 745 #else
 746       __ mov(rlo, rdst);
 747 #endif
 748       break;
 749     }
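
_l2i keeps only the low 32 bits of the long; on _LP64 the value lives in one 64-bit register, so sra(rlo, 0, rdst) is used to sign-extend that low word rather than a plain mov. Equivalent C++ (l2i_model is an illustrative name):

    #include <cstdint>

    int32_t l2i_model(int64_t v) {
      return static_cast<int32_t>(v);   // keep the low 32 bits, sign preserved
    }
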
 750     case Bytecodes::_d2f:


3217 
3218     // (extended to allow indexed as well as constant displaced for JSR-166)
3219     Register idx = noreg; // contains either constant offset or index
3220 
3221     int disp = mem_addr->disp();
3222     if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3223       if (!Assembler::is_simm13(disp)) {
3224         idx = O7;
3225         __ set(disp, idx);
3226       }
3227     } else {
3228       assert(disp == 0, "not both indexed and disp");
3229       idx = mem_addr->index()->as_register();
3230     }
3231 
3232     int null_check_offset = -1;
3233 
3234     Register base = mem_addr->base()->as_register();
3235     if (src->is_register() && dest->is_address()) {
3236       // G4 is high half, G5 is low half
3237       if (VM_Version::v9_instructions_work()) {
3238         // clear the top bits of G5, and scale up G4
3239         __ srl (src->as_register_lo(),  0, G5);
3240         __ sllx(src->as_register_hi(), 32, G4);
3241         // combine the two halves into the 64 bits of G4
3242         __ or3(G4, G5, G4);
3243         null_check_offset = __ offset();
3244         if (idx == noreg) {
3245           __ stx(G4, base, disp);
3246         } else {
3247           __ stx(G4, base, idx);
3248         }
3249       } else {
3250         __ mov (src->as_register_hi(), G4);
3251         __ mov (src->as_register_lo(), G5);
3252         null_check_offset = __ offset();
3253         if (idx == noreg) {
3254           __ std(G4, base, disp);
3255         } else {
3256           __ std(G4, base, idx);
3257         }
3258       }
3259     } else if (src->is_address() && dest->is_register()) {
3260       null_check_offset = __ offset();
3261       if (VM_Version::v9_instructions_work()) {
3262         if (idx == noreg) {
3263           __ ldx(base, disp, G5);
3264         } else {
3265           __ ldx(base, idx, G5);
3266         }
3267         __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3268         __ mov (G5, dest->as_register_lo());     // copy low half into lo
3269       } else {
3270         if (idx == noreg) {
3271           __ ldd(base, disp, G4);
3272         } else {
3273           __ ldd(base, idx, G4);
3274         }
3275         // G4 is high half, G5 is low half
3276         __ mov (G4, dest->as_register_hi());
3277         __ mov (G5, dest->as_register_lo());
3278       }
3279     } else {
3280       Unimplemented();
3281     }
3282     if (info != NULL) {
3283       add_debug_info_for_null_check(null_check_offset, info);
3284     }
3285 
3286   } else {
3287     // use normal move for all other volatiles since they don't need
3288     // special handling to remain atomic.
3289     move_op(src, dest, type, lir_patch_none, info, false, false, false);
3290   }
3291 }
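
The point of the srl/sllx/or3 + stx store and the ldx + srax/mov load above is that a volatile Java long must be accessed as one indivisible 64-bit memory operation, never as two 32-bit halves that another thread could observe separately. A rough user-level analogue (hypothetical helper names, not HotSpot code):

    #include <atomic>
    #include <cstdint>

    std::atomic<int64_t> cell;   // stands in for the volatile long field

    void store_volatile_long(int32_t hi, uint32_t lo) {
      int64_t v = (static_cast<int64_t>(hi) << 32) | lo;  // the sllx/srl/or3 combination
      cell.store(v, std::memory_order_relaxed);           // a single 64-bit stx
    }

    int64_t load_volatile_long() {
      return cell.load(std::memory_order_relaxed);        // a single 64-bit ldx
    }
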
3292 
3293 void LIR_Assembler::membar() {
3294   // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
3295   __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3296 }
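
Under TSO the hardware already keeps loads ordered with loads and stores ordered with stores, so StoreLoad is the only ordering that needs an explicit fence, e.g. between a volatile store and a later volatile load. A minimal illustration (not HotSpot code):

    #include <atomic>

    void storeload_example(std::atomic<int>& flag, std::atomic<int>& other, int& r) {
      flag.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad membar
      r = other.load(std::memory_order_relaxed);            // may not pass the prior store
    }
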
3297 
3298 void LIR_Assembler::membar_acquire() {
3299   // no-op on TSO




 580       }
 581       __ add(Rdividend, Rscratch, Rscratch);
 582       __ sra(Rscratch, log2_intptr(divisor), Rresult);
 583       return;
 584     } else {
 585       if (divisor == 2) {
 586         __ srl(Rdividend, 31, Rscratch);
 587       } else {
 588         __ sra(Rdividend, 31, Rscratch);
 589         __ and3(Rscratch, divisor - 1, Rscratch);
 590       }
 591       __ add(Rdividend, Rscratch, Rscratch);
 592       __ andn(Rscratch, divisor - 1, Rscratch);
 593       __ sub(Rdividend, Rscratch, Rresult);
 594       return;
 595     }
 596   }
 597 
 598   __ sra(Rdividend, 31, Rscratch);
 599   __ wry(Rscratch);
 600 
 601   add_debug_info_for_div0_here(op->info());
 602 
 603   if (Rdivisor != noreg) {
 604     __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 605   } else {
 606     assert(Assembler::is_simm13(divisor), "can only handle simm13");
 607     __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 608   }
 609 
 610   Label skip;
 611   __ br(Assembler::overflowSet, true, Assembler::pn, skip);
 612   __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
 613   __ bind(skip);
 614 
 615   if (op->code() == lir_irem) {
 616     if (Rdivisor != noreg) {
 617       __ smul(Rscratch, Rdivisor, Rscratch);
 618     } else {
 619       __ smul(Rscratch, divisor, Rscratch);


 628   assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
 629   if (op->block() != NULL)  _branch_target_blocks.append(op->block());
 630   if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
 631 #endif
 632   assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
 633 
 634   if (op->cond() == lir_cond_always) {
 635     __ br(Assembler::always, false, Assembler::pt, *(op->label()));
 636   } else if (op->code() == lir_cond_float_branch) {
 637     assert(op->ublock() != NULL, "must have unordered successor");
 638     bool is_unordered = (op->ublock() == op->block());
 639     Assembler::Condition acond;
 640     switch (op->cond()) {
 641       case lir_cond_equal:         acond = Assembler::f_equal;    break;
 642       case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
 643       case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
 644       case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
 645       case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
 646       case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
 647       default :                         ShouldNotReachHere();




 648     }
 649     __ fb( acond, false, Assembler::pn, *(op->label()));
 650   } else {
 651     assert (op->code() == lir_branch, "just checking");
 652 
 653     Assembler::Condition acond;
 654     switch (op->cond()) {
 655       case lir_cond_equal:        acond = Assembler::equal;                break;
 656       case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
 657       case lir_cond_less:         acond = Assembler::less;                 break;
 658       case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
 659       case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
 660       case lir_cond_greater:      acond = Assembler::greater;              break;
 661       case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
 662       case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
 663       default:                         ShouldNotReachHere();
 664     };
 665 
 666     // sparc has different condition codes for testing 32-bit
 667     // vs. 64-bit values.  We could always test xcc if we could


 697       break;
 698     }
 699     case Bytecodes::_i2d:
 700     case Bytecodes::_i2f: {
 701       bool is_double = (code == Bytecodes::_i2d);
 702       FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
 703       FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
 704       FloatRegister rsrc = op->in_opr()->as_float_reg();
 705       if (rsrc != rdst) {
 706         __ fmov(FloatRegisterImpl::S, rsrc, rdst);
 707       }
 708       __ fitof(w, rdst, rdst);
 709       break;
 710     }
 711     case Bytecodes::_f2i:{
 712       FloatRegister rsrc = op->in_opr()->as_float_reg();
 713       Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
 714       Label L;
 715       // result must be 0 if value is NaN; test by comparing value to itself
 716       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);



 717       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
 718       __ delayed()->st(G0, addr); // annulled if the contents of rsrc are not NaN
 719       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
 720       // move integer result from float register to int register
 721       __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
 722       __ bind (L);
 723       break;
 724     }
 725     case Bytecodes::_l2i: {
 726       Register rlo  = op->in_opr()->as_register_lo();
 727       Register rhi  = op->in_opr()->as_register_hi();
 728       Register rdst = dst->as_register();
 729 #ifdef _LP64
 730       __ sra(rlo, 0, rdst);
 731 #else
 732       __ mov(rlo, rdst);
 733 #endif
 734       break;
 735     }
 736     case Bytecodes::_d2f:


3203 
3204     // (extended to allow indexed as well as constant displaced for JSR-166)
3205     Register idx = noreg; // contains either constant offset or index
3206 
3207     int disp = mem_addr->disp();
3208     if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3209       if (!Assembler::is_simm13(disp)) {
3210         idx = O7;
3211         __ set(disp, idx);
3212       }
3213     } else {
3214       assert(disp == 0, "not both indexed and disp");
3215       idx = mem_addr->index()->as_register();
3216     }
3217 
3218     int null_check_offset = -1;
3219 
3220     Register base = mem_addr->base()->as_register();
3221     if (src->is_register() && dest->is_address()) {
3222       // G4 is high half, G5 is low half

3223       // clear the top bits of G5, and scale up G4
3224       __ srl (src->as_register_lo(),  0, G5);
3225       __ sllx(src->as_register_hi(), 32, G4);
3226       // combine the two halves into the 64 bits of G4
3227       __ or3(G4, G5, G4);
3228       null_check_offset = __ offset();
3229       if (idx == noreg) {
3230         __ stx(G4, base, disp);
3231       } else {
3232         __ stx(G4, base, idx);
3233       }
3234     } else if (src->is_address() && dest->is_register()) {
3235       null_check_offset = __ offset();

3236       if (idx == noreg) {
3237         __ ldx(base, disp, G5);
3238       } else {
3239         __ ldx(base, idx, G5);
3240       }
3241       __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3242       __ mov (G5, dest->as_register_lo());     // copy low half into lo
3243     } else {
3244       Unimplemented();
3245     }
3246     if (info != NULL) {
3247       add_debug_info_for_null_check(null_check_offset, info);
3248     }
3249 
3250   } else {
3251     // use normal move for all other volatiles since they don't need
3252     // special handling to remain atomic.
3253     move_op(src, dest, type, lir_patch_none, info, false, false, false);
3254   }
3255 }
3256 
3257 void LIR_Assembler::membar() {
3258   // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
3259   __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3260 }
3261 
3262 void LIR_Assembler::membar_acquire() {
3263   // no-op on TSO