/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1BarrierSetCodeGen.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
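// In debug builds the "__" shorthand records the C++ source file and line of
// each emitted LIR instruction; in product builds it simply appends to the
// generator's LIR list.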

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::rax_opr;                                        break;
    case objectTag: opr = FrameMap::rax_oop_opr;                                    break;
    case longTag:   opr = FrameMap::long0_opr;                                      break;
    case floatTag:  opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
    case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    // there is no immediate move of word values in assembler_i486.?pp
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == NULL) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
         (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == NULL;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const* constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing it once for the
    // store and again for the card mark.
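    // e.g. tmp = array + base_offset + index * elem_size is computed once
    // with leal; the store and the card mark then both address [tmp]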
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
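  // Strength-reduce x * c when c is adjacent to a power of two:
  //   c == 7: result = (x << 3) - x   (is_power_of_2(c + 1))
  //   c == 9: result = (x << 3) + x   (is_power_of_2(c - 1))
  // tmp keeps a copy of the original value across the in-place shift.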
  if (tmp->is_valid()) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_intptr(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_intptr(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);
  __ negate(value.result(), reg);

  set_result(x, round_item(reg));
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

  // do not load right operand if it is a constant. only 0 and 1 are
  // loaded because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != NULL, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");

    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }

  if (must_load_both) {
    // frem and drem also destroy the right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register() || must_load_right) {
    right.load_item();
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
    __ move(right.result(), fpu1); // order of left and right operand is important!
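    // the remainder is computed as fpu0 = fpu0 rem fpu1, so the divisor is
    // loaded first and the dividend second, leaving the dividend in fpu0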
    __ move(left.result(), fpu0);
    __ rem (fpu0, fpu1, fpu0);
    __ move(fpu0, reg);

  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
  }

  set_result(x, round_item(reg));
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    address entry = NULL;
    switch (x->op()) {
      case Bytecodes::_lrem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
        break; // the check whether the divisor is 0 is done elsewhere
      case Bytecodes::_ldiv:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
        break; // the check whether the divisor is 0 is done elsewhere
      case Bytecodes::_lmul:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
        break;
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}




// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax: dividend                        min_int
    //         reg: divisor  (may not be rax/rdx)   -1
    //
    // output: rax: quotient  (= rax idiv reg)      min_int
    //         rdx: remainder (= rax irem reg)      0

    // rax and rdx will be destroyed

    // Note: does this invalidate the spec ???
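    // e.g. min_jint / -1 must yield min_jint per the JVM spec, but x86 idiv
    // traps on that operand pair, so the back end special-cases it when
    // emitting the division below.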
    LIRItem right(x->y(), this);
    LIRItem left(x->x(), this);  // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register (not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        int iconst = right_arg->get_jint_constant();
        if (iconst > 0) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}



// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

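// x86 cmpxchg requires the expected (compare) value in rax, or rdx:rax for
// the 32-bit long case, so cmp_value is force-loaded into fixed registers.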
LIR_Opr LIRGenerator::cas(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == T_OBJECT || type == T_ARRAY) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr, cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr, cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr, cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(new_value.type()));
  return result;
}

LIR_Opr LIRGenerator::swap(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

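// fmad/fmaf emit a fused multiply-add, computing a * b + c with a single
// rounding, which is what the Math.fma contract requires.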
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "needs FMA instruction support");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_fmaD:  __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
    case vmIntrinsics::_fmaF:  __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
    default:                   ShouldNotReachHere();
  }

}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
  if (UseSSE < 2) {
    value.set_destroys_register();
  }
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_dabs:   __ abs (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsqrt:  __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    default:                    ShouldNotReachHere();
  }

  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

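  // For each intrinsic, prefer the generated stub when it exists and fall
  // back to the SharedRuntime C implementation otherwise; the 32-bit path
  // additionally routes the result through fpu0.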
#ifndef _LP64
  LIR_Opr tmp = FrameMap::fpu0_double_opr;
  result_reg = tmp;
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }
#else
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }
#endif // _LP64
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
#endif // _LP64

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
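      // updateBytes is passed a Java byte[], so indexing starts past the
      // array header; updateByteBuffer is passed a raw memory address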
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

#ifndef _LP64
      if (!is_updateBytes) { // buf is a raw long address
        base_op = new_register(T_INT);
        __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this);                   // Object
  LIRItem aOffset(x->argument_at(1), this);             // long
  LIRItem b(x->argument_at(2), this);                   // Object
  LIRItem bOffset(x->argument_at(3), this);             // long
  LIRItem length(x->argument_at(4), this);              // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = a.result();

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = b.result();

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif


  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

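  // the stub takes two raw addresses plus the element count and log2 scale,
  // so each (base, offset) pair is flattened into a pointer with leal below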
  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
LIR_Opr fixed_register_for(BasicType type) {
  switch (type) {
    case T_FLOAT:  return FrameMap::fpu0_float_opr;
    case T_DOUBLE: return FrameMap::fpu0_double_opr;
    case T_INT:    return FrameMap::rax_opr;
    case T_LONG:   return FrameMap::long0_opr;
    default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
}

void LIRGenerator::do_Convert(Convert* x) {
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;

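  // e.g. _f2i: the SSE cvttss2si instruction produces min_int for NaN and
  // out-of-range inputs, so needs_stub installs a ConversionStub that fixes
  // those cases up to the results the JVM spec requires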
  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

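  // Runtime1::new_multi_array_id expects (klass, rank, varargs): the
  // dimension sizes were stored on the stack above, and rsp is passed
  // as the varargs pointer.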
  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs, floats and doubles cannot handle constants on the right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  It
    // always has to be moved through spill slot since there's no
    // quick way to pack the value into an SSE register.
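    // i.e. cpu long -> stack spill slot -> FPU/XMM register -> one 64-bit store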
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    __ load(address, result, info);
  }
}