396 assert(ranges[rp].hi() == highest, "");
397 if( highest != max_jint
398 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
399 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
400 }
401 assert(rp < rnum, "not too many ranges");
402
403 // Safepoint in case backward branch observed
404 if( makes_backward_branch && UseLoopSafepoints )
405 add_safepoint();
406
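  // At this point ranges[0..rp] partition the remaining key space up to
  // max_jint, with any gap routed to the default destination; hand the whole
  // set to jump_switch_ranges() to emit the compare tree and/or jump table.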
407 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
408 }
409
410 //----------------------------create_jump_tables-------------------------------
411 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
412 // Are jumptables enabled
413 if (!UseJumpTables) return false;
414
415 // Are jumptables supported
416 if (!Matcher::has_match_rule(Op_Jump)) return false;
417
418 // Don't make jump table if profiling
419 if (method_data_update()) return false;
420
421 // Decide if a guard is needed to lop off big ranges at either (or
422 // both) end(s) of the input set. We'll call this the default target
423 // even though we can't be sure that it is the true "default".
424
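  // For a hypothetical case set like {min_jint..-1 -> default, 0..9 -> dense
  // targets, 10..max_jint -> default}, the two huge end ranges are the
  // "outliers" considered here: a single (unsigned) guard test can send them
  // to the default target so a jump table only has to span the dense middle.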
425 bool needs_guard = false;
426 int default_dest;
427 int64_t total_outlier_size = 0;
428 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
429 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
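  // The sizes are computed in 64 bits because hi() - lo() + 1 can overflow a
  // 32-bit int when a range spans (nearly) the whole int domain.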
430
431 if (lo->dest() == hi->dest()) {
432 total_outlier_size = hi_size + lo_size;
433 default_dest = lo->dest();
434 } else if (lo_size > hi_size) {
435 total_outlier_size = lo_size;
436 default_dest = lo->dest();
773 merge(jsr_bci);
774 }
775
776 // Handle ret bytecode
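// The preceding flow analysis has already split jsr/ret paths so that each
// ret block sees exactly one return target; do_ret() simply merges into it.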
777 void Parse::do_ret() {
778 // Find to whom we return.
779 assert(block()->num_successors() == 1, "a ret can only go one place now");
780 Block* target = block()->successor_at(0);
781 assert(!target->is_ready(), "our arrival must be expected");
782 profile_ret(target->flow()->start());
783 int pnum = target->next_path_num();
784 merge_common(target, pnum);
785 }
786
787 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
788 if (btest != BoolTest::eq && btest != BoolTest::ne) {
789 // Only ::eq and ::ne are supported for profile injection.
790 return false;
791 }
792 if (test->is_Cmp() &&
793 test->in(1)->Opcode() == Op_ProfileBoolean) {
794 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
795 int false_cnt = profile->false_count();
796 int true_cnt = profile->true_count();
797
798     // Which count maps to taken vs. not-taken depends on the actual test operation (::eq or ::ne).
799 // No need to scale the counts because profile injection was designed
800 // to feed exact counts into VM.
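    // Presumably the injected check compares the ProfileBoolean result against
    // zero, so BoolTest::eq corresponds to the boolean having been false
    // (taken == false_cnt) and BoolTest::ne to it having been true.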
801 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt;
802 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;
803
804 profile->consume();
805 return true;
806 }
807 return false;
808 }
809 //--------------------------dynamic_branch_prediction--------------------------
810 // Try to gather dynamic branch prediction behavior. Return a probability
811 // of the branch being taken and set the "cnt" field. Returns -1.0
812 // if we need to use static prediction for some reason.
813 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1216 tcon = tval;
1217 val = c->in(2);
1218 tval = _gvn.type(val);
1219 btest = BoolTest(btest).commute();
1220 have_con = true;
1221 } else {
1222 // Do we have two constants? Then leave well enough alone.
1223 have_con = false;
1224 }
1225 }
1226 if (!have_con) // remaining adjustments need a con
1227 return;
1228
1229 sharpen_type_after_if(btest, con, tcon, val, tval);
1230 }
1231
1232
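// Peel a (possibly compressed) klass load: given a LoadKlass, or a DecodeNKlass
// of a LoadNKlass, recover the base object whose _klass header field was read.
// Returns NULL if the node does not match that shape or the base is not a
// Java object pointer.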
1233 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
1234 Node* ldk;
1235 if (n->is_DecodeNKlass()) {
1236 if (n->in(1)->Opcode() != Op_LoadNKlass) {
1237 return NULL;
1238 } else {
1239 ldk = n->in(1);
1240 }
1241 } else if (n->Opcode() != Op_LoadKlass) {
1242 return NULL;
1243 } else {
1244 ldk = n;
1245 }
1246 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
1247
1248 Node* adr = ldk->in(MemNode::Address);
1249 intptr_t off = 0;
1250 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
1251 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
1252 return NULL;
1253 const TypePtr* tp = gvn->type(obj)->is_ptr();
1254 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
1255 return NULL;
1256
1257 return obj;
1258 }
1259
1260 void Parse::sharpen_type_after_if(BoolTest::mask btest,
1261 Node* con, const Type* tcon,
1353 cast = ccast;
1354 }
1355
1356 if (cast != NULL) { // Here's the payoff.
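      // Replace every occurrence of the original value in the current JVM
      // state (locals, stack, etc.) with the cast node, so everything parsed
      // after this test sees the sharpened type.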
1357 replace_in_map(val, cast);
1358 }
1359 }
1360
1361 /**
1362 * Use speculative type to optimize CmpP node: if comparison is
1363 * against the low level class, cast the object to the speculative
1364 * type if any. CmpP should then go away.
1365 *
1366 * @param c expected CmpP node
1367  * @return result of CmpP on the object cast to its speculative type
1368 *
1369 */
1370 Node* Parse::optimize_cmp_with_klass(Node* c) {
1371 // If this is transformed by the _gvn to a comparison with the low
1372 // level klass then we may be able to use speculation
1373 if (c->Opcode() == Op_CmpP &&
1374 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1375 c->in(2)->is_Con()) {
1376 Node* load_klass = NULL;
1377 Node* decode = NULL;
1378 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1379 decode = c->in(1);
1380 load_klass = c->in(1)->in(1);
1381 } else {
1382 load_klass = c->in(1);
1383 }
1384 if (load_klass->in(2)->is_AddP()) {
1385 Node* addp = load_klass->in(2);
1386 Node* obj = addp->in(AddPNode::Address);
1387 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1388 if (obj_type->speculative_type_not_null() != NULL) {
1389 ciKlass* k = obj_type->speculative_type();
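        // maybe_cast_profiled_obj() may emit a type check that can trap; bump
        // the stack pointer back over the compare's two (already popped)
        // operands so the JVM state recorded for such a trap stays consistent,
        // then restore it.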
1390 inc_sp(2);
1391 obj = maybe_cast_profiled_obj(obj, k);
1392 dec_sp(2);
1393         // Make the CmpP use the cast obj
1394 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1395 load_klass = load_klass->clone();
1396 load_klass->set_req(2, addp);
1397 load_klass = _gvn.transform(load_klass);
1398 if (decode != NULL) {
1850 push( d );
1851 break;
1852
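  // Note: each float case below runs the new node through GVN and then through
  // precision_rounding(), which is a no-op unless the platform/method
  // combination requires an explicit rounding step (e.g., strictfp on an FPU
  // that evaluates float arithmetic in a wider precision).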
1853 case Bytecodes::_fmul:
1854 b = pop();
1855 a = pop();
1856 c = _gvn.transform( new MulFNode(a,b) );
1857 d = precision_rounding(c);
1858 push( d );
1859 break;
1860
1861 case Bytecodes::_fdiv:
1862 b = pop();
1863 a = pop();
1864 c = _gvn.transform( new DivFNode(0,a,b) );
1865 d = precision_rounding(c);
1866 push( d );
1867 break;
1868
1869 case Bytecodes::_frem:
1870 if (Matcher::has_match_rule(Op_ModF)) {
1871 // Generate a ModF node.
1872 b = pop();
1873 a = pop();
1874 c = _gvn.transform( new ModFNode(0,a,b) );
1875 d = precision_rounding(c);
1876 push( d );
1877 }
1878 else {
1879 // Generate a call.
1880 modf();
1881 }
1882 break;
1883
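  // fcmpl: CmpF3 yields -1/0/+1 and is assumed to treat an unordered (NaN)
  // comparison as "less than", which matches the fcmpl contract; fcmpg (below)
  // must bias unordered results the other way.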
1884 case Bytecodes::_fcmpl:
1885 b = pop();
1886 a = pop();
1887 c = _gvn.transform( new CmpF3Node( a, b));
1888 push(c);
1889 break;
1890 case Bytecodes::_fcmpg:
1985 c = _gvn.transform( new MulDNode(a,b) );
1986 d = dprecision_rounding(c);
1987 push_pair( d );
1988 break;
1989
1990 case Bytecodes::_ddiv:
1991 b = pop_pair();
1992 a = pop_pair();
1993 c = _gvn.transform( new DivDNode(0,a,b) );
1994 d = dprecision_rounding(c);
1995 push_pair( d );
1996 break;
1997
1998 case Bytecodes::_dneg:
1999 a = pop_pair();
2000 b = _gvn.transform(new NegDNode (a));
2001 push_pair(b);
2002 break;
2003
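  // drem: emit a ModD node when the platform claims a match rule for it;
  // otherwise fall back to the runtime call generated by modd().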
2004 case Bytecodes::_drem:
2005 if (Matcher::has_match_rule(Op_ModD)) {
2006 // Generate a ModD node.
2007 b = pop_pair();
2008 a = pop_pair();
2009 // a % b
2010
2011 c = _gvn.transform( new ModDNode(0,a,b) );
2012 d = dprecision_rounding(c);
2013 push_pair( d );
2014 }
2015 else {
2016 // Generate a call.
2017 modd();
2018 }
2019 break;
2020
2021 case Bytecodes::_dcmpl:
2022 b = pop_pair();
2023 a = pop_pair();
2024 c = _gvn.transform( new CmpD3Node( a, b));
2025 push(c);