/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciValueKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }

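// Results come back in the conventional x86 registers: rax for int/oop,
// rdx:rax (long0) for long, and xmm0 when the SSE level covers the width
// (UseSSE >= 1 for float, UseSSE >= 2 for double); otherwise the value is
// returned on the x87 stack top (fpu0).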
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::rax_opr;                                                   break;
    case objectTag:  opr = FrameMap::rax_oop_opr;                                               break;
    case longTag:    opr = FrameMap::long0_opr;                                                 break;
    case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    // there is no immediate move of word values in assembler_i486.?pp
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == NULL) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
    (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == NULL;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  NOT_LP64( if (SafepointMechanism::uses_thread_local_poll()) { return new_register(T_ADDRESS); } )
  return LIR_OprFact::illegalOpr;
}

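// x86 addressing modes encode base + index*scale + disp with a sign-extended
// 32-bit displacement. On 64-bit, folding a constant index into the
// displacement can overflow that range, so the fold is only kept when
// (jlong)((jint)c) == c; otherwise the index is materialized in a register.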
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const* constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}

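// Strength-reduce multiplication by a constant next to a power of two using
//   x * (2^n - 1) = (x << n) - x      e.g.  x * 7 = (x << 3) - x
//   x * (2^n + 1) = (x << n) + x      e.g.  x * 9 = (x << 3) + x
// trading the imul for a shift plus an add/sub. The caller supplies a
// temporary that keeps the original value of 'left' live across the shift.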
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_jint(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_jint(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

void LIRGenerator::flattened_array_store_check(LIR_Opr value, ciKlass* element_klass, CodeEmitInfo* store_check_info) {
  LIR_Opr tmp1 = new_register(T_METADATA);
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;

#ifdef _LP64
  if (!UseCompressedClassPointers) {
    tmp2 = new_register(T_METADATA);
    __ metadata2reg(element_klass->constant_encoding(), tmp2);
  }
#endif

  __ flattened_store_check(value, element_klass, tmp1, tmp2, store_check_info);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking || x->maybe_valuetype()) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeStub* throw_imse_stub = x->maybe_valuetype() ?
      new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id,
                              LIR_OprFact::illegalOpr, state_for(x))
    : NULL;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_imse_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

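// Floating-point negation only needs the sign bit flipped, and -0.0 is
// exactly a set sign bit, so the constant loaded below serves as an XOR
// mask. On AVX > 2 without AVX-512VL the mask has to be materialized in a
// temporary register for the back end to use.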
// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
    if (x->type()->tag() == doubleTag) {
      tmp = new_register(T_DOUBLE);
      __ move(LIR_OprFact::doubleConst(-0.0), tmp);
    } else if (x->type()->tag() == floatTag) {
      tmp = new_register(T_FLOAT);
      __ move(LIR_OprFact::floatConst(-0.0), tmp);
    }
  }
#endif
  __ negate(value.result(), reg, tmp);

  set_result(x, round_item(reg));
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

  // do not load the right operand if it is a constant: only 0 and 1 are
  // loaded, because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != NULL, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");

    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }

  if (must_load_both) {
    // frem and drem also destroy the right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register() || must_load_right) {
    right.load_item();
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
    __ move(right.result(), fpu1); // order of left and right operand is important!
    __ move(left.result(),  fpu0);
    __ rem (fpu0, fpu1, fpu0);
    __ move(fpu0, reg);

  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
  }

  set_result(x, round_item(reg));
}

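// Long division and remainder are not inlined but routed through runtime
// leaf calls. Note the argument order below: the dividend is forced into
// cc->at(1) and the divisor moved into cc->at(0), matching
// SharedRuntime::ldiv/lrem, which take the divisor as their first argument.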
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // the divisor-is-zero check is done above
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // the divisor-is-zero check is done above
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

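// Integer division follows the hardware idiv convention documented in the
// table below: the dividend is forced into rax, the quotient comes back in
// rax and the remainder in rdx. The min_int / -1 column is there because
// idiv raises the same #DE fault for that overflowing quotient as for a
// zero divisor, so the back end has to special-case a min_int dividend.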
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax: dividend                       min_int
    //         reg: divisor  (may not be rax/rdx)  -1
    //
    // output: rax: quotient  (= rax idiv reg)     min_int
    //         rdx: remainder (= rax irem reg)     0

    // rax and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x(), this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fixed
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register (not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

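// Variable shift counts must live in cl on x86, which is why
// shiftCountOpr() is rcx. Only immediate counts may stay as constants,
// except for longs, where the count is always forced into the register.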
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

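// Compare-and-swap compiles down to lock cmpxchg, which hard-wires the
// expected value and the returned old value to rax; that is why cmp_value
// is forced into rax below. Longs on 32-bit go through cmpxchg8b, whose
// edx:eax / ecx:ebx operands correspond to the long0/long1 register pairs.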
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == T_OBJECT || type == T_ARRAY) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, T_INT);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                  ShouldNotReachHere();
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
  if (UseSSE < 2) {
    value.set_destroys_register();
  }
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && (!VM_Version::supports_avx512vl()) &&
      (x->id() == vmIntrinsics::_dabs)) {
    tmp = new_register(T_DOUBLE);
    __ move(LIR_OprFact::doubleConst(-0.0), tmp);
  }
#endif

  switch (x->id()) {
  case vmIntrinsics::_dabs:  __ abs(calc_input, calc_result, tmp);                      break;
  case vmIntrinsics::_dsqrt: __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr); break;
  default:                   ShouldNotReachHere();
  }

  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}

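// Each libm intrinsic prefers the stub generated at VM startup
// (StubRoutines::dexp() and friends) and falls back to the C entry point in
// SharedRuntime when no stub is available. On 32-bit, dsin and dcos also
// require SSE2, presumably because those stubs operate on xmm registers.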
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

#ifndef _LP64
  LIR_Opr tmp = FrameMap::fpu0_double_opr;
  result_reg = tmp;
  switch (x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
#else
  switch (x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
#endif // _LP64
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
#endif // LP64

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

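// CRC32 intrinsics: a single-value update is emitted inline, while the
// byte-range forms compute an effective address and hand it to the
// StubRoutines::updateBytesCRC32() leaf call. The assert below reflects
// that the stub is built around carry-less multiply (CLMUL) folding.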
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

#ifndef _LP64
      if (!is_updateBytes) { // long b raw address
        base_op = new_register(T_INT);
        __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif

      if (is_updateBytes) {
        base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this);                   // Object
  LIRItem aOffset(x->argument_at(1), this);             // long
  LIRItem b(x->argument_at(2), this);                   // Object
  LIRItem bOffset(x->argument_at(3), this);             // long
  LIRItem length(x->argument_at(4), this);              // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif

  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
LIR_Opr fixed_register_for(BasicType type) {
  switch (type) {
    case T_FLOAT:  return FrameMap::fpu0_float_opr;
    case T_DOUBLE: return FrameMap::fpu0_double_opr;
    case T_INT:    return FrameMap::rax_opr;
    case T_LONG:   return FrameMap::long0_opr;
    default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
}

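// f2i and d2i need a ConversionStub because the SSE truncating converts
// (cvttss2si/cvttsd2si) return the "integer indefinite" value 0x80000000
// for NaN and out-of-range inputs, while Java requires 0 for NaN and
// saturation to min/max; the stub repairs those cases. round_result forces
// an x87 result through memory so its extended precision is rounded back
// to the declared float/double width.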
void LIRGenerator::do_Convert(Convert* x) {
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;

  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

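// new_instance emits the inline allocation fast path using the temps passed
// in (the klass travels in rdx, the result comes back in rax) and branches
// to a Runtime1 slow-path stub when the inline path cannot allocate.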
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewValueTypeInstance(NewValueTypeInstance* x) {
  // Mapping to do_NewInstance (same code)
  CodeEmitInfo* info = state_for(x, x->state());
  x->set_to_object_type();
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  ciKlass* obj = (ciKlass*) x->exact_type();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_never_null());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  if (x->is_never_null()) {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path);
  } else {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
  }

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_never_null()) {
    __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
  }

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_never_null());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

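// For long compares only eql, neq, lss and geq are handled directly; gtr
// and leq are rewritten by mirroring the condition and swapping the
// operands, which is why xin/yin below may end up pointing at each other's
// items.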
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

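// On 64-bit, r15 is permanently reserved for the current JavaThread, so the
// thread pointer is available as a register; 32-bit has no reserved thread
// register and must fetch it with get_thread().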
LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif //
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  It
    // always has to be moved through spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    __ load(address, result, info);
  }
}