src/cpu/sparc/vm/templateTable_sparc.cpp

First, the section before the change, with 32-bit and 64-bit paths selected by #ifdef _LP64:

 231 void TemplateTable::shouldnotreachhere() {
 232   transition(vtos, vtos);
 233   __ stop("shouldnotreachhere bytecode");
 234 }
 235 
 236 void TemplateTable::aconst_null() {
 237   transition(vtos, atos);
 238   __ clr(Otos_i);
 239 }
 240 
 241 
 242 void TemplateTable::iconst(int value) {
 243   transition(vtos, itos);
 244   __ set(value, Otos_i);
 245 }
 246 
 247 
 248 void TemplateTable::lconst(int value) {
 249   transition(vtos, ltos);
 250   assert(value >= 0, "check this code");
 251 #ifdef _LP64
 252   __ set(value, Otos_l);
 253 #else
 254   __ set(value, Otos_l2);
 255   __ clr( Otos_l1);
 256 #endif
 257 }
 258 
 259 
 260 void TemplateTable::fconst(int value) {
 261   transition(vtos, ftos);
 262   static float zero = 0.0, one = 1.0, two = 2.0;
 263   float* p;
 264   switch( value ) {
 265    default: ShouldNotReachHere();
 266    case 0:  p = &zero;  break;
 267    case 1:  p = &one;   break;
 268    case 2:  p = &two;   break;
 269   }
 270   AddressLiteral a(p);
 271   __ sethi(a, G3_scratch);
 272   __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
 273 }
 274 
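For reference, the sethi/low10 pair above is the standard SPARC idiom for materializing a full 32-bit address: sethi writes the upper 22 bits into the scratch register, and the load supplies the remaining 10 bits as its immediate offset. A minimal sketch of the split (helper names are illustrative, not part of this file):

    // sethi fills bits 31..10 of the address; low10() yields bits 9..0, which
    // ride along as the immediate offset of the ldf above.
    uint32_t sethi_bits(uint32_t addr) { return addr & ~0x3ffu; }
    uint32_t low10_bits(uint32_t addr) { return addr &  0x3ffu; }
    // effective address of the load: sethi_bits(addr) + low10_bits(addr) == addr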
 275 
 276 void TemplateTable::dconst(int value) {


 389   transition(vtos, vtos);
 390   Label Long, exit;
 391 
 392   __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
 393   __ get_cpool_and_tags(O0, O2);
 394 
 395   const int base_offset = ConstantPool::header_size() * wordSize;
 396   const int tags_offset = Array<u1>::base_offset_in_bytes();
 397   // get type from tags
 398   __ add(O2, tags_offset, O2);
 399   __ ldub(O2, O1, O2);
 400 
 401   __ sll(O1, LogBytesPerWord, O1);
 402   __ add(O0, O1, G3_scratch);
 403 
 404   __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
 405   // A double can be placed at word-aligned locations in the constant pool.
 406   // Check out Conversions.java for an example.
 407   // Also ConstantPool::header_size() is 20, which makes it very difficult
 408   // to double-align a double in the constant pool.  SG, 11/7/97
 409 #ifdef _LP64
 410   __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
 411 #else
 412   FloatRegister f = Ftos_d;
 413   __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
 414   __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
 415          f->successor());
 416 #endif
 417   __ push(dtos);
 418   __ ba_short(exit);
 419 
 420   __ bind(Long);
 421 #ifdef _LP64
 422   __ ldx(G3_scratch, base_offset, Otos_l);
 423 #else
 424   __ ld(G3_scratch, base_offset, Otos_l);
 425   __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
 426 #endif
 427   __ push(ltos);
 428 
 429   __ bind(exit);
 430 }
 431 
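The index arithmetic above locates a two-slot constant: the entry sits index * wordSize past the constant-pool base, and the value itself a further base_offset bytes in, past the ConstantPool header. Schematically (cp_entry_value is a hypothetical helper; the other names are the listing's own):

    address cp_entry_value(address cpool_base, int index) {
      const int base_offset = ConstantPool::header_size() * wordSize;
      // G3_scratch = O0 + (O1 << LogBytesPerWord); the ldf/ldx then add base_offset
      return cpool_base + ((intptr_t)index << LogBytesPerWord) + base_offset;
    }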
 432 void TemplateTable::locals_index(Register reg, int offset) {
 433   __ ldub( at_bcp(offset), reg );
 434 }
 435 
 436 void TemplateTable::locals_index_wide(Register reg) {
 437   // offset is 2, not 1, because Lbcp points to wide prefix code
 438   __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
 439 }
 440 
 441 void TemplateTable::iload() {
 442   iload_internal();
 443 }
 444 
 445 void TemplateTable::nofast_iload() {
 446   iload_internal(may_not_rewrite);


1111   switch (op) {
1112    case  add:  __  add(O1, Otos_i, Otos_i);  break;
1113    case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
1114      // %%%%% Mul may not exist: better to call .mul?
1115    case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
1116    case _and:  __ and3(O1, Otos_i, Otos_i);  break;
1117    case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
1118    case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
1119    case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
1120    case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
1121    case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
1122    default: ShouldNotReachHere();
1123   }
1124 }
1125 
1126 
1127 void TemplateTable::lop2(Operation op) {
1128   transition(ltos, ltos);
1129   __ pop_l(O2);
1130   switch (op) {
1131 #ifdef _LP64
1132    case  add:  __  add(O2, Otos_l, Otos_l);  break;
1133    case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
1134    case _and:  __ and3(O2, Otos_l, Otos_l);  break;
1135    case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
1136    case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
1137 #else
1138    case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
1139    case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
1140    case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
1141    case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
1142    case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
1143 #endif
1144    default: ShouldNotReachHere();
1145   }
1146 }
1147 
1148 
1149 void TemplateTable::idiv() {
1150   // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
1151   // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1152 
1153   transition(itos, itos);
1154   __ pop_i(O1); // get 1st op
1155 
 1156   // Y forms the upper 32 bits of the 64-bit dividend for sdiv; set it to 0 or all ones
1157   __ wry(G0);
1158   __ mov(~0, G3_scratch);
1159 
1160   __ tst(O1);
1161      Label neg;
1162   __ br(Assembler::negative, true, Assembler::pn, neg);
1163   __ delayed()->wry(G3_scratch);
1164   __ bind(neg);
1165 
1166      Label ok;
1167   __ tst(Otos_i);
1168   __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1169 
1170   const int min_int = 0x80000000;
1171   Label regular;
1172   __ cmp(Otos_i, -1);
1173   __ br(Assembler::notEqual, false, Assembler::pt, regular);
1174 #ifdef _LP64
1175   // Don't put set in delay slot
1176   // Set will turn into multiple instructions in 64 bit mode
1177   __ delayed()->nop();
1178   __ set(min_int, G4_scratch);
1179 #else
1180   __ delayed()->set(min_int, G4_scratch);
1181 #endif
1182   Label done;
1183   __ cmp(O1, G4_scratch);
1184   __ br(Assembler::equal, true, Assembler::pt, done);
1185   __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)
1186 
1187   __ bind(regular);
1188   __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1189   __ bind(done);
1190 }
1191 
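Two details above are worth spelling out. On 32-bit SPARC, sdiv divides the 64-bit value formed by Y and the first operand, so Y is preloaded with the dividend's sign extension (zero or all ones). The min_int comparison then short-circuits the one quotient that overflows, Integer.MIN_VALUE / -1. A C-level sketch of the semantics (java_idiv is a hypothetical helper; jint and min_jint follow HotSpot's type names):

    jint java_idiv(jint dividend, jint divisor) {
      // divisor == 0 has already trapped to _throw_ArithmeticException_entry
      if (dividend == min_jint && divisor == -1) {
        return min_jint;  // the one overflowing quotient; the JVM spec says it wraps
      }
      return dividend / divisor;
    }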
1192 
1193 void TemplateTable::irem() {
1194   transition(itos, itos);
1195   __ mov(Otos_i, O2); // save divisor
1196   idiv();                               // %%%% Hack: exploits fact that idiv leaves dividend in O1
1197   __ smul(Otos_i, O2, Otos_i);
1198   __ sub(O1, Otos_i, Otos_i);
1199 }
1200 
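irem leans on idiv leaving the dividend in O1 and applies the usual remainder identity, rem = dividend - (dividend / divisor) * divisor. As a sketch, reusing the hypothetical java_idiv above (the unsigned arithmetic sidesteps C overflow on min_jint % -1):

    jint java_irem(jint dividend, jint divisor) {
      jint q = java_idiv(dividend, divisor);
      return (jint)((juint)dividend - (juint)q * (juint)divisor);
    }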
1201 
1202 void TemplateTable::lmul() {
1203   transition(ltos, ltos);
1204   __ pop_l(O2);
1205 #ifdef _LP64
1206   __ mulx(Otos_l, O2, Otos_l);
1207 #else
1208   __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
1209 #endif
1210 
1211 }
1212 
1213 
1214 void TemplateTable::ldiv() {
1215   transition(ltos, ltos);
1216 
1217   // check for zero
1218   __ pop_l(O2);
1219 #ifdef _LP64
1220   __ tst(Otos_l);
1221   __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1222   __ sdivx(O2, Otos_l, Otos_l);
1223 #else
1224   __ orcc(Otos_l1, Otos_l2, G0);
1225   __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1226   __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1227 #endif
1228 }
1229 
1230 
1231 void TemplateTable::lrem() {
1232   transition(ltos, ltos);
1233 
1234   // check for zero
1235   __ pop_l(O2);
1236 #ifdef _LP64
1237   __ tst(Otos_l);
1238   __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1239   __ sdivx(O2, Otos_l, Otos_l2);
1240   __ mulx (Otos_l2, Otos_l, Otos_l2);
1241   __ sub  (O2, Otos_l2, Otos_l);
1242 #else
1243   __ orcc(Otos_l1, Otos_l2, G0);
1244   __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1245   __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1246 #endif
1247 }
1248 
1249 
1250 void TemplateTable::lshl() {
1251   transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1252 
1253   __ pop_l(O2);                          // shift value in O2, O3
1254 #ifdef _LP64
1255   __ sllx(O2, Otos_i, Otos_l);
1256 #else
1257   __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1258 #endif
1259 }
1260 
1261 
1262 void TemplateTable::lshr() {
1263   transition(itos, ltos); // %%%% see lshl comment
1264 
1265   __ pop_l(O2);                          // shift value in O2, O3
1266 #ifdef _LP64
1267   __ srax(O2, Otos_i, Otos_l);
1268 #else
1269   __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1270 #endif
1271 }
1272 
1273 
1274 
1275 void TemplateTable::lushr() {
1276   transition(itos, ltos); // %%%% see lshl comment
1277 
1278   __ pop_l(O2);                          // shift value in O2, O3
1279 #ifdef _LP64
1280   __ srlx(O2, Otos_i, Otos_l);
1281 #else
1282   __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1283 #endif
1284 }
1285 
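Java defines long shifts to use only the low six bits of the count, and the V9 extended shifts sllx/srax/srlx already truncate the count mod 64, so no explicit mask needs to be emitted. In C terms (sketch; assumes the usual arithmetic >> on signed values):

    jlong  jvm_lshl (jlong v,  jint s) { return (jlong)((julong)v << (s & 63)); }
    jlong  jvm_lshr (jlong v,  jint s) { return v >> (s & 63); }  // arithmetic
    julong jvm_lushr(julong v, jint s) { return v >> (s & 63); }  // logical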
1286 
1287 void TemplateTable::fop2(Operation op) {
1288   transition(ftos, ftos);
1289   switch (op) {
1290    case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1291    case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1292    case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1293    case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1294    case  rem:
1295      assert(Ftos_f == F0, "just checking");
1296 #ifdef _LP64
1297      // LP64 calling conventions use F1, F3 for passing 2 floats
1298      __ pop_f(F1);
1299      __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
1300 #else
1301      __ pop_i(O0);
1302      __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
1303      __ ld( __ d_tmp, O1 );
1304 #endif
1305      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1306      assert( Ftos_f == F0, "fix this code" );
1307      break;
1308 
1309    default: ShouldNotReachHere();
1310   }
1311 }
1312 
1313 
1314 void TemplateTable::dop2(Operation op) {
1315   transition(dtos, dtos);
1316   switch (op) {
1317    case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1318    case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1319    case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1320    case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1321    case  rem:
1322 #ifdef _LP64
1323      // Pass arguments in D0, D2
1324      __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1325      __ pop_d( F0 );
1326 #else
1327      // Pass arguments in O0O1, O2O3
1328      __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1329      __ ldd( __ d_tmp, O2 );
1330      __ pop_d(Ftos_f);
1331      __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1332      __ ldd( __ d_tmp, O0 );
1333 #endif
1334      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1335      assert( Ftos_d == F0, "fix this code" );
1336      break;
1337 
1338    default: ShouldNotReachHere();
1339   }
1340 }
1341 
1342 
1343 void TemplateTable::ineg() {
1344   transition(itos, itos);
1345   __ neg(Otos_i);
1346 }
1347 
1348 
1349 void TemplateTable::lneg() {
1350   transition(ltos, ltos);
1351 #ifdef _LP64
1352   __ sub(G0, Otos_l, Otos_l);
1353 #else
1354   __ lneg(Otos_l1, Otos_l2);
1355 #endif
1356 }
1357 
1358 
1359 void TemplateTable::fneg() {
1360   transition(ftos, ftos);
1361   __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
1362 }
1363 
1364 
1365 void TemplateTable::dneg() {
1366   transition(dtos, dtos);
1367   __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
1368 }
1369 
1370 
1371 void TemplateTable::iinc() {
1372   transition(vtos, vtos);
1373   locals_index(G3_scratch);
1374   __ ldsb(Lbcp, 2, O2);  // load constant
1375   __ access_local_int(G3_scratch, Otos_i);


1420       case Bytecodes::_i2s: tos_out = itos; break;
1421       case Bytecodes::_i2l: // fall through
1422       case Bytecodes::_f2l: // fall through
1423       case Bytecodes::_d2l: tos_out = ltos; break;
1424       case Bytecodes::_i2f: // fall through
1425       case Bytecodes::_l2f: // fall through
1426       case Bytecodes::_d2f: tos_out = ftos; break;
1427       case Bytecodes::_i2d: // fall through
1428       case Bytecodes::_l2d: // fall through
1429       case Bytecodes::_f2d: tos_out = dtos; break;
1430       default             : ShouldNotReachHere();
1431     }
1432     transition(tos_in, tos_out);
1433   #endif
1434 
1435 
1436   // Conversion
1437   Label done;
1438   switch (bytecode()) {
1439    case Bytecodes::_i2l:
1440 #ifdef _LP64
1441     // Sign extend the 32 bits
1442     __ sra ( Otos_i, 0, Otos_l );
1443 #else
1444     __ addcc(Otos_i, 0, Otos_l2);
1445     __ br(Assembler::greaterEqual, true, Assembler::pt, done);
1446     __ delayed()->clr(Otos_l1);
1447     __ set(~0, Otos_l1);
1448 #endif
1449     break;
1450 
1451    case Bytecodes::_i2f:
1452     __ st(Otos_i, __ d_tmp );
1453     __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
1454     __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1455     break;
1456 
1457    case Bytecodes::_i2d:
1458     __ st(Otos_i, __ d_tmp);
1459     __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
1460     __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1461     break;
1462 
1463    case Bytecodes::_i2b:
1464     __ sll(Otos_i, 24, Otos_i);
1465     __ sra(Otos_i, 24, Otos_i);
1466     break;
1467 
1468    case Bytecodes::_i2c:
1469     __ sll(Otos_i, 16, Otos_i);
1470     __ srl(Otos_i, 16, Otos_i);
1471     break;
1472 
1473    case Bytecodes::_i2s:
1474     __ sll(Otos_i, 16, Otos_i);
1475     __ sra(Otos_i, 16, Otos_i);
1476     break;
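    // The three shift pairs above are Java's narrowing casts; in C terms (sketch):
    //   i2b: (int8_t)x    sll 24 / sra 24 sign-extends bit 7
    //   i2c: (uint16_t)x  sll 16 / srl 16 zero-extends
    //   i2s: (int16_t)x   sll 16 / sra 16 sign-extends bit 15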
1477 
1478    case Bytecodes::_l2i:
1479 #ifndef _LP64
1480     __ mov(Otos_l2, Otos_i);
1481 #else
1482     // Sign-extend into the high 32 bits
1483     __ sra(Otos_l, 0, Otos_i);
1484 #endif
1485     break;
1486 
1487    case Bytecodes::_l2f:
1488    case Bytecodes::_l2d:
1489     __ st_long(Otos_l, __ d_tmp);
1490     __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1491 
1492     if (bytecode() == Bytecodes::_l2f) {
1493       __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1494     } else {
1495       __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1496     }
1497     break;
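    // No direct GPR-to-FPR move is available to this code, so the long takes a
    // round trip through the d_tmp stack slot before fxtof converts it.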
1498 
1499   case Bytecodes::_f2i:  {
1500       Label isNaN;
1501       // result must be 0 if value is NaN; test by comparing value to itself
1502       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1503       __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1504       __ delayed()->clr(Otos_i);                                     // NaN
1505       __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1506       __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1507       __ ld(__ d_tmp, Otos_i);
1508       __ bind(isNaN);
1509     }
1510     break;
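    // Only NaN compares unordered with itself, so fcmp(Ftos_f, Ftos_f) isolates
    // the NaN case; the annulled branch keeps the 0 written by clr(Otos_i).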
1511 
1512    case Bytecodes::_f2l:
1513     // must uncache tos
1514     __ push_f();
1515 #ifdef _LP64
1516     __ pop_f(F1);
1517 #else
1518     __ pop_i(O0);
1519 #endif
1520     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1521     break;
1522 
1523    case Bytecodes::_f2d:
1524     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1525     break;
1526 
1527    case Bytecodes::_d2i:
1528    case Bytecodes::_d2l:
1529     // must uncache tos
1530     __ push_d();
1531 #ifdef _LP64
1532     // LP64 calling conventions pass first double arg in D0
1533     __ pop_d( Ftos_d );
1534 #else
1535     __ pop_i( O0 );
1536     __ pop_i( O1 );
1537 #endif
1538     __ call_VM_leaf(Lscratch,
1539         bytecode() == Bytecodes::_d2i
1540           ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1541           : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1542     break;
1543 
1544     case Bytecodes::_d2f:
1545       __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1546     break;
1547 
1548     default: ShouldNotReachHere();
1549   }
1550   __ bind(done);
1551 }
1552 
1553 
1554 void TemplateTable::lcmp() {
1555   transition(ltos, itos);
1556 
1557 #ifdef _LP64
1558   __ pop_l(O1); // pop off value 1, value 2 is in O0
1559   __ lcmp( O1, Otos_l, Otos_i );
1560 #else
1561   __ pop_l(O2); // cmp O2,3 to O0,1
1562   __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
1563 #endif
1564 }
1565 
1566 
1567 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1568 
1569   if (is_float) __ pop_f(F2);
1570   else          __ pop_d(F2);
1571 
1572   assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");
1573 
1574   __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1575 }
1576 
1577 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1578   // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1579   __ verify_thread();
1580 
1581   const Register O2_bumped_count = O2;
1582   __ profile_taken_branch(G3_scratch, O2_bumped_count);
1583 


1739 }
1740 
1741 
1742 void TemplateTable::if_acmp(Condition cc) {
1743   transition(atos, vtos);
1744   __ pop_ptr(O1);
1745   __ verify_oop(O1);
1746   __ verify_oop(Otos_i);
1747   __ cmp(O1, Otos_i);
1748   __ if_cmp(ccNot(cc), true);
1749 }
1750 
1751 
1752 
1753 void TemplateTable::ret() {
1754   transition(vtos, vtos);
1755   locals_index(G3_scratch);
1756   __ access_local_returnAddress(G3_scratch, Otos_i);
1757   // Otos_i contains the bci, compute the bcp from that
1758 
1759 #ifdef _LP64
1760 #ifdef ASSERT
1761   // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1762   // the result.  The return address (really a BCI) was stored with an
1763   // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
1764   // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1765   // loaded value.
1766   { Label zzz ;
1767      __ set (65536, G3_scratch) ;
1768      __ cmp (Otos_i, G3_scratch) ;
1769      __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1770      __ delayed()->nop();
1771      __ stop("BCI is in the wrong register half?");
1772      __ bind (zzz) ;
1773   }
1774 #endif
1775 #endif
1776 
1777   __ profile_ret(vtos, Otos_i, G4_scratch);
1778 
1779   __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1780   __ add(G3_scratch, Otos_i, G3_scratch);
1781   __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1782   __ dispatch_next(vtos);
1783 }
1784 
1785 
1786 void TemplateTable::wide_ret() {
1787   transition(vtos, vtos);
1788   locals_index_wide(G3_scratch);
1789   __ access_local_returnAddress(G3_scratch, Otos_i);
1790   // Otos_i contains the bci, compute the bcp from that
1791 
1792   __ profile_ret(vtos, Otos_i, G4_scratch);
1793 
1794   __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1795   __ add(G3_scratch, Otos_i, G3_scratch);
1796   __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1797   __ dispatch_next(vtos);
1798 }
1799 
1800 
1801 void TemplateTable::tableswitch() {
1802   transition(itos, vtos);
1803   Label default_case, continue_execution;
1804 
1805   // align bcp
1806   __ add(Lbcp, BytesPerInt, O1);
1807   __ and3(O1, -BytesPerInt, O1);
1808   // load lo, hi
 1809   __ ld(O1, 1 * BytesPerInt, O2);       // low bound
 1810   __ ld(O1, 2 * BytesPerInt, O3);       // high bound
1811 #ifdef _LP64
1812   // Sign extend the 32 bits
1813   __ sra ( Otos_i, 0, Otos_i );
1814 #endif /* _LP64 */
1815 
1816   // check against lo & hi
1817   __ cmp( Otos_i, O2);
1818   __ br( Assembler::less, false, Assembler::pn, default_case);
1819   __ delayed()->cmp( Otos_i, O3 );
1820   __ br( Assembler::greater, false, Assembler::pn, default_case);
1821   // lookup dispatch offset
1822   __ delayed()->sub(Otos_i, O2, O2);
1823   __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1824   __ sll(O2, LogBytesPerInt, O2);
1825   __ add(O2, 3 * BytesPerInt, O2);
1826   __ ba(continue_execution);
1827   __ delayed()->ld(O1, O2, O2);
1828   // handle default
1829   __ bind(default_case);
1830   __ profile_switch_default(O3);
1831   __ ld(O1, 0, O2); // get default offset
1832   // continue execution
1833   __ bind(continue_execution);
1834   __ add(Lbcp, O2, Lbcp);


3383     // check if we can allocate in the TLAB
3384     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject
3385     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3386     __ add(RoldTopValue, Roffset, RnewTopValue);
3387 
3388     // if there is enough space, we do not CAS and do not clear
3389     __ cmp(RnewTopValue, RendValue);
3390     if(ZeroTLAB) {
3391       // the fields have already been cleared
3392       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3393     } else {
3394       // initialize both the header and fields
3395       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3396     }
3397     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3398 
3399     if (allow_shared_alloc) {
3400       // Check if tlab should be discarded (refill_waste_limit >= free)
3401       __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3402       __ sub(RendValue, RoldTopValue, RfreeValue);
3403 #ifdef _LP64
3404       __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3405 #else
3406       __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3407 #endif
3408       __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3409 
3410       // increment waste limit to prevent getting stuck on this slow path
3411       if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
3412         __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3413       } else {
 3414         // set64 does not use the temp register if the given constant fits in 32 bits,
 3415         // so any register will do here; if the constant were wider, using G0 would
 3416         // silently discard its upper 32 bits.
3417         __ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
3418         __ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
3419       }
3420       __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3421     } else {
3422       // No allocation in the shared eden.
3423       __ ba_short(slow_case);
3424     }
3425   }
3426 
 3427   // Allocation in the shared Eden


The same section after the change, with each #ifdef _LP64 resolved to its 64-bit branch:

 231 void TemplateTable::shouldnotreachhere() {
 232   transition(vtos, vtos);
 233   __ stop("shouldnotreachhere bytecode");
 234 }
 235 
 236 void TemplateTable::aconst_null() {
 237   transition(vtos, atos);
 238   __ clr(Otos_i);
 239 }
 240 
 241 
 242 void TemplateTable::iconst(int value) {
 243   transition(vtos, itos);
 244   __ set(value, Otos_i);
 245 }
 246 
 247 
 248 void TemplateTable::lconst(int value) {
 249   transition(vtos, ltos);
 250   assert(value >= 0, "check this code");

 251   __ set(value, Otos_l);




 252 }
 253 
 254 
 255 void TemplateTable::fconst(int value) {
 256   transition(vtos, ftos);
 257   static float zero = 0.0, one = 1.0, two = 2.0;
 258   float* p;
 259   switch( value ) {
 260    default: ShouldNotReachHere();
 261    case 0:  p = &zero;  break;
 262    case 1:  p = &one;   break;
 263    case 2:  p = &two;   break;
 264   }
 265   AddressLiteral a(p);
 266   __ sethi(a, G3_scratch);
 267   __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
 268 }
 269 
 270 
 271 void TemplateTable::dconst(int value) {


 384   transition(vtos, vtos);
 385   Label Long, exit;
 386 
 387   __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
 388   __ get_cpool_and_tags(O0, O2);
 389 
 390   const int base_offset = ConstantPool::header_size() * wordSize;
 391   const int tags_offset = Array<u1>::base_offset_in_bytes();
 392   // get type from tags
 393   __ add(O2, tags_offset, O2);
 394   __ ldub(O2, O1, O2);
 395 
 396   __ sll(O1, LogBytesPerWord, O1);
 397   __ add(O0, O1, G3_scratch);
 398 
 399   __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
 400   // A double can be placed at word-aligned locations in the constant pool.
 401   // Check out Conversions.java for an example.
 402   // Also ConstantPool::header_size() is 20, which makes it very difficult
 403   // to double-align a double in the constant pool.  SG, 11/7/97

 404   __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);






 405   __ push(dtos);
 406   __ ba_short(exit);
 407 
 408   __ bind(Long);

 409   __ ldx(G3_scratch, base_offset, Otos_l);




 410   __ push(ltos);
 411 
 412   __ bind(exit);
 413 }
 414 
 415 void TemplateTable::locals_index(Register reg, int offset) {
 416   __ ldub( at_bcp(offset), reg );
 417 }
 418 
 419 void TemplateTable::locals_index_wide(Register reg) {
 420   // offset is 2, not 1, because Lbcp points to wide prefix code
 421   __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
 422 }
 423 
 424 void TemplateTable::iload() {
 425   iload_internal();
 426 }
 427 
 428 void TemplateTable::nofast_iload() {
 429   iload_internal(may_not_rewrite);


1094   switch (op) {
1095    case  add:  __  add(O1, Otos_i, Otos_i);  break;
1096    case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
1097      // %%%%% Mul may not exist: better to call .mul?
1098    case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
1099    case _and:  __ and3(O1, Otos_i, Otos_i);  break;
1100    case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
1101    case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
1102    case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
1103    case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
1104    case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
1105    default: ShouldNotReachHere();
1106   }
1107 }
1108 
1109 
1110 void TemplateTable::lop2(Operation op) {
1111   transition(ltos, ltos);
1112   __ pop_l(O2);
1113   switch (op) {

1114    case  add:  __  add(O2, Otos_l, Otos_l);  break;
1115    case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
1116    case _and:  __ and3(O2, Otos_l, Otos_l);  break;
1117    case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
1118    case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;







1119    default: ShouldNotReachHere();
1120   }
1121 }
1122 
1123 
1124 void TemplateTable::idiv() {
1125   // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
1126   // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1127 
1128   transition(itos, itos);
1129   __ pop_i(O1); // get 1st op
1130 
 1131   // Y forms the upper 32 bits of the 64-bit dividend for sdiv; set it to 0 or all ones
1132   __ wry(G0);
1133   __ mov(~0, G3_scratch);
1134 
1135   __ tst(O1);
1136      Label neg;
1137   __ br(Assembler::negative, true, Assembler::pn, neg);
1138   __ delayed()->wry(G3_scratch);
1139   __ bind(neg);
1140 
1141      Label ok;
1142   __ tst(Otos_i);
1143   __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1144 
1145   const int min_int = 0x80000000;
1146   Label regular;
1147   __ cmp(Otos_i, -1);
1148   __ br(Assembler::notEqual, false, Assembler::pt, regular);

1149   // Don't put set in delay slot
1150   // Set will turn into multiple instructions in 64 bit mode
1151   __ delayed()->nop();
1152   __ set(min_int, G4_scratch);



1153   Label done;
1154   __ cmp(O1, G4_scratch);
1155   __ br(Assembler::equal, true, Assembler::pt, done);
1156   __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)
1157 
1158   __ bind(regular);
1159   __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1160   __ bind(done);
1161 }
1162 
1163 
1164 void TemplateTable::irem() {
1165   transition(itos, itos);
1166   __ mov(Otos_i, O2); // save divisor
1167   idiv();                               // %%%% Hack: exploits fact that idiv leaves dividend in O1
1168   __ smul(Otos_i, O2, Otos_i);
1169   __ sub(O1, Otos_i, Otos_i);
1170 }
1171 
1172 
1173 void TemplateTable::lmul() {
1174   transition(ltos, ltos);
1175   __ pop_l(O2);

1176   __ mulx(Otos_l, O2, Otos_l);



1177 
1178 }
1179 
1180 
1181 void TemplateTable::ldiv() {
1182   transition(ltos, ltos);
1183 
1184   // check for zero
1185   __ pop_l(O2);

1186   __ tst(Otos_l);
1187   __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1188   __ sdivx(O2, Otos_l, Otos_l);





1189 }
1190 
1191 
1192 void TemplateTable::lrem() {
1193   transition(ltos, ltos);
1194 
1195   // check for zero
1196   __ pop_l(O2);

1197   __ tst(Otos_l);
1198   __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1199   __ sdivx(O2, Otos_l, Otos_l2);
1200   __ mulx (Otos_l2, Otos_l, Otos_l2);
1201   __ sub  (O2, Otos_l2, Otos_l);





1202 }
1203 
1204 
1205 void TemplateTable::lshl() {
1206   transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1207 
1208   __ pop_l(O2);                          // shift value in O2, O3

1209   __ sllx(O2, Otos_i, Otos_l);



1210 }
1211 
1212 
1213 void TemplateTable::lshr() {
1214   transition(itos, ltos); // %%%% see lshl comment
1215 
1216   __ pop_l(O2);                          // shift value in O2, O3

1217   __ srax(O2, Otos_i, Otos_l);



1218 }
1219 
1220 
1221 
1222 void TemplateTable::lushr() {
1223   transition(itos, ltos); // %%%% see lshl comment
1224 
1225   __ pop_l(O2);                          // shift value in O2, O3

1226   __ srlx(O2, Otos_i, Otos_l);



1227 }
1228 
1229 
1230 void TemplateTable::fop2(Operation op) {
1231   transition(ftos, ftos);
1232   switch (op) {
1233    case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1234    case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1235    case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1236    case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
1237    case  rem:
1238      assert(Ftos_f == F0, "just checking");

1239      // LP64 calling conventions use F1, F3 for passing 2 floats
1240      __ pop_f(F1);
1241      __ fmov(FloatRegisterImpl::S, Ftos_f, F3);





1242      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1243      assert( Ftos_f == F0, "fix this code" );
1244      break;
1245 
1246    default: ShouldNotReachHere();
1247   }
1248 }
1249 
1250 
1251 void TemplateTable::dop2(Operation op) {
1252   transition(dtos, dtos);
1253   switch (op) {
1254    case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1255    case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1256    case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1257    case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
1258    case  rem:

1259      // Pass arguments in D0, D2
1260      __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1261      __ pop_d( F0 );








1262      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1263      assert( Ftos_d == F0, "fix this code" );
1264      break;
1265 
1266    default: ShouldNotReachHere();
1267   }
1268 }
1269 
1270 
1271 void TemplateTable::ineg() {
1272   transition(itos, itos);
1273   __ neg(Otos_i);
1274 }
1275 
1276 
1277 void TemplateTable::lneg() {
1278   transition(ltos, ltos);

1279   __ sub(G0, Otos_l, Otos_l);



1280 }
1281 
1282 
1283 void TemplateTable::fneg() {
1284   transition(ftos, ftos);
1285   __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
1286 }
1287 
1288 
1289 void TemplateTable::dneg() {
1290   transition(dtos, dtos);
1291   __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
1292 }
1293 
1294 
1295 void TemplateTable::iinc() {
1296   transition(vtos, vtos);
1297   locals_index(G3_scratch);
1298   __ ldsb(Lbcp, 2, O2);  // load constant
1299   __ access_local_int(G3_scratch, Otos_i);


1344       case Bytecodes::_i2s: tos_out = itos; break;
1345       case Bytecodes::_i2l: // fall through
1346       case Bytecodes::_f2l: // fall through
1347       case Bytecodes::_d2l: tos_out = ltos; break;
1348       case Bytecodes::_i2f: // fall through
1349       case Bytecodes::_l2f: // fall through
1350       case Bytecodes::_d2f: tos_out = ftos; break;
1351       case Bytecodes::_i2d: // fall through
1352       case Bytecodes::_l2d: // fall through
1353       case Bytecodes::_f2d: tos_out = dtos; break;
1354       default             : ShouldNotReachHere();
1355     }
1356     transition(tos_in, tos_out);
1357   #endif
1358 
1359 
1360   // Conversion
1361   Label done;
1362   switch (bytecode()) {
1363    case Bytecodes::_i2l:

1364     // Sign extend the 32 bits
1365     __ sra ( Otos_i, 0, Otos_l );






1366     break;
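    // sra with a zero shift count is the standard V9 idiom for sign-extending
    // the low 32 bits of a register to all 64 bits.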
1367 
1368    case Bytecodes::_i2f:
1369     __ st(Otos_i, __ d_tmp );
1370     __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
1371     __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1372     break;
1373 
1374    case Bytecodes::_i2d:
1375     __ st(Otos_i, __ d_tmp);
1376     __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
1377     __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1378     break;
1379 
1380    case Bytecodes::_i2b:
1381     __ sll(Otos_i, 24, Otos_i);
1382     __ sra(Otos_i, 24, Otos_i);
1383     break;
1384 
1385    case Bytecodes::_i2c:
1386     __ sll(Otos_i, 16, Otos_i);
1387     __ srl(Otos_i, 16, Otos_i);
1388     break;
1389 
1390    case Bytecodes::_i2s:
1391     __ sll(Otos_i, 16, Otos_i);
1392     __ sra(Otos_i, 16, Otos_i);
1393     break;
1394 
1395    case Bytecodes::_l2i:



1396     // Sign-extend into the high 32 bits
1397     __ sra(Otos_l, 0, Otos_i);

1398     break;
1399 
1400    case Bytecodes::_l2f:
1401    case Bytecodes::_l2d:
1402     __ st_long(Otos_l, __ d_tmp);
1403     __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1404 
1405     if (bytecode() == Bytecodes::_l2f) {
1406       __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1407     } else {
1408       __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1409     }
1410     break;
1411 
1412   case Bytecodes::_f2i:  {
1413       Label isNaN;
1414       // result must be 0 if value is NaN; test by comparing value to itself
1415       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1416       __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1417       __ delayed()->clr(Otos_i);                                     // NaN
1418       __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1419       __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1420       __ ld(__ d_tmp, Otos_i);
1421       __ bind(isNaN);
1422     }
1423     break;
1424 
1425    case Bytecodes::_f2l:
1426     // must uncache tos
1427     __ push_f();

1428     __ pop_f(F1);



1429     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1430     break;
1431 
1432    case Bytecodes::_f2d:
1433     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1434     break;
1435 
1436    case Bytecodes::_d2i:
1437    case Bytecodes::_d2l:
1438     // must uncache tos
1439     __ push_d();

1440     // LP64 calling conventions pass first double arg in D0
1441     __ pop_d( Ftos_d );




1442     __ call_VM_leaf(Lscratch,
1443         bytecode() == Bytecodes::_d2i
1444           ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1445           : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1446     break;
1447 
1448     case Bytecodes::_d2f:
1449       __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1450     break;
1451 
1452     default: ShouldNotReachHere();
1453   }
1454   __ bind(done);
1455 }
1456 
1457 
1458 void TemplateTable::lcmp() {
1459   transition(ltos, itos);
1460 

1461   __ pop_l(O1); // pop off value 1, value 2 is in O0
1462   __ lcmp( O1, Otos_l, Otos_i );




1463 }
1464 
1465 
1466 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1467 
1468   if (is_float) __ pop_f(F2);
1469   else          __ pop_d(F2);
1470 
1471   assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");
1472 
1473   __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1474 }
1475 
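float_cmp folds the fcmpl/fcmpg (and dcmpl/dcmpg) pairs into one routine: unordered_result selects the answer a NaN operand produces, -1 for the l-forms and +1 for the g-forms. The intended semantics, sketched in C (jvm_fcmp is a hypothetical helper):

    jint jvm_fcmp(jfloat a, jfloat b, jint unordered_result) {
      if (a != a || b != b) return unordered_result;  // NaN is unordered
      return (a < b) ? -1 : (a > b) ? 1 : 0;
    }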
1476 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1477   // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1478   __ verify_thread();
1479 
1480   const Register O2_bumped_count = O2;
1481   __ profile_taken_branch(G3_scratch, O2_bumped_count);
1482 


1638 }
1639 
1640 
1641 void TemplateTable::if_acmp(Condition cc) {
1642   transition(atos, vtos);
1643   __ pop_ptr(O1);
1644   __ verify_oop(O1);
1645   __ verify_oop(Otos_i);
1646   __ cmp(O1, Otos_i);
1647   __ if_cmp(ccNot(cc), true);
1648 }
1649 
1650 
1651 
1652 void TemplateTable::ret() {
1653   transition(vtos, vtos);
1654   locals_index(G3_scratch);
1655   __ access_local_returnAddress(G3_scratch, Otos_i);
1656   // Otos_i contains the bci, compute the bcp from that
1657 

1658 #ifdef ASSERT
1659   // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1660   // the result.  The return address (really a BCI) was stored with an
1661   // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
1662   // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1663   // loaded value.
1664   { Label zzz ;
1665      __ set (65536, G3_scratch) ;
1666      __ cmp (Otos_i, G3_scratch) ;
1667      __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1668      __ delayed()->nop();
1669      __ stop("BCI is in the wrong register half?");
1670      __ bind (zzz) ;
1671   }
1672 #endif

1673 
1674   __ profile_ret(vtos, Otos_i, G4_scratch);
1675 
1676   __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1677   __ add(G3_scratch, Otos_i, G3_scratch);
1678   __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1679   __ dispatch_next(vtos);
1680 }
1681 
1682 
1683 void TemplateTable::wide_ret() {
1684   transition(vtos, vtos);
1685   locals_index_wide(G3_scratch);
1686   __ access_local_returnAddress(G3_scratch, Otos_i);
1687   // Otos_i contains the bci, compute the bcp from that
1688 
1689   __ profile_ret(vtos, Otos_i, G4_scratch);
1690 
1691   __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1692   __ add(G3_scratch, Otos_i, G3_scratch);
1693   __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1694   __ dispatch_next(vtos);
1695 }
1696 
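Both ret and wide_ret above rebuild the bcp from the stored bci with the same three instructions. The computation, written out (bcp_from_bci is a hypothetical helper; the offsets are the real ConstMethod ones):

    address bcp_from_bci(Method* m, int bci) {
      ConstMethod* cm = m->constMethod();        // ld_ptr(Lmethod, const_offset)
      return (address)cm + in_bytes(ConstMethod::codes_offset()) + bci;
    }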
1697 
1698 void TemplateTable::tableswitch() {
1699   transition(itos, vtos);
1700   Label default_case, continue_execution;
1701 
1702   // align bcp
1703   __ add(Lbcp, BytesPerInt, O1);
1704   __ and3(O1, -BytesPerInt, O1);
1705   // load lo, hi
 1706   __ ld(O1, 1 * BytesPerInt, O2);       // low bound
 1707   __ ld(O1, 2 * BytesPerInt, O3);       // high bound

1708   // Sign extend the 32 bits
1709   __ sra ( Otos_i, 0, Otos_i );

1710 
1711   // check against lo & hi
1712   __ cmp( Otos_i, O2);
1713   __ br( Assembler::less, false, Assembler::pn, default_case);
1714   __ delayed()->cmp( Otos_i, O3 );
1715   __ br( Assembler::greater, false, Assembler::pn, default_case);
1716   // lookup dispatch offset
1717   __ delayed()->sub(Otos_i, O2, O2);
1718   __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1719   __ sll(O2, LogBytesPerInt, O2);
1720   __ add(O2, 3 * BytesPerInt, O2);
1721   __ ba(continue_execution);
1722   __ delayed()->ld(O1, O2, O2);
1723   // handle default
1724   __ bind(default_case);
1725   __ profile_switch_default(O3);
1726   __ ld(O1, 0, O2); // get default offset
1727   // continue execution
1728   __ bind(continue_execution);
1729   __ add(Lbcp, O2, Lbcp);
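
The add/and3 pair at the top of tableswitch implements the bytecode's alignment rule: after the opcode comes padding to a 4-byte boundary, then the default, lo, and hi words, then hi - lo + 1 jump offsets, which is why the dispatch index is biased by 3 * BytesPerInt. A sketch of the alignment step (hypothetical helper):

    uintptr_t tableswitch_base(uintptr_t bcp) {
      // round up past the 1-byte opcode to the next 4-byte boundary
      return (bcp + BytesPerInt) & ~(uintptr_t)(BytesPerInt - 1);
    }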


3278     // check if we can allocate in the TLAB
3279     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject
3280     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3281     __ add(RoldTopValue, Roffset, RnewTopValue);
3282 
3283     // if there is enough space, we do not CAS and do not clear
3284     __ cmp(RnewTopValue, RendValue);
3285     if(ZeroTLAB) {
3286       // the fields have already been cleared
3287       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3288     } else {
3289       // initialize both the header and fields
3290       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3291     }
3292     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3293 
3294     if (allow_shared_alloc) {
3295       // Check if tlab should be discarded (refill_waste_limit >= free)
3296       __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3297       __ sub(RendValue, RoldTopValue, RfreeValue);

3298       __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);



3299       __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3300 
3301       // increment waste limit to prevent getting stuck on this slow path
3302       if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
3303         __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3304       } else {
 3305         // set64 does not use the temp register if the given constant fits in 32 bits,
 3306         // so any register will do here; if the constant were wider, using G0 would
 3307         // silently discard its upper 32 bits.
3308         __ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
3309         __ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
3310       }
3311       __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3312     } else {
3313       // No allocation in the shared eden.
3314       __ ba_short(slow_case);
3315     }
3316   }
3317 
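The sequence above is a classic bump-pointer fast path: a TLAB is private to its thread, so a compare against tlab_end plus a plain store of the new top suffice, with no CAS. Its shape in pseudo-C (accessor names are hypothetical, not HotSpot's):

    HeapWord* tlab_allocate(JavaThread* t, size_t size_in_bytes) {
      HeapWord* top     = t->tlab_top();                           // RoldTopValue
      HeapWord* end     = t->tlab_end();                           // RendValue
      HeapWord* new_top = (HeapWord*)((char*)top + size_in_bytes); // + Roffset
      if (new_top <= end) {          // unsigned compare, branch above
        t->set_tlab_top(new_top);    // store done in the branch delay slot
        return top;                  // old top becomes the new object
      }
      return NULL;  // slow path: discard/refill the TLAB or allocate in shared eden
    }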
3318   // Allocation in the shared Eden

