< prev index next >

src/share/vm/opto/compile.cpp

Print this page
rev 10955 : undo
rev 10974 : 8154826: AArch64: take advantage better of base + shifted offset addressing mode
Summary: reshape the address subtree so it fits the AArch64 "base + shifted offset" addressing mode
Reviewed-by:
rev 10979 : 8154943: AArch64: redundant address computation instructions with vectorization
Summary: eliminate duplicated ConvI2L (i2l) nodes so redundant address computations can be removed
Reviewed-by:

*** 3261,3270 **** --- 3261,3303 ---- Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt); n->subsume_by(iff, this); frc._tests.push(iff); break; } + case Op_ConvI2L: { + #ifdef AARCH64 + // Code generation on ARM doesn't need accurate ConvI2L + // types. Widening the type can help remove redundant address + // computations. + n->as_Type()->set_type(TypeLong::INT); + ResourceMark rm; + Node_List wq; + wq.push(n); + for (uint next = 0; next < wq.size(); next++) { + Node *m = wq.at(next); + + for(;;) { + Node* k = m->find_similar(m->Opcode()); + if (k == NULL || + !(k->Opcode() == Op_ConvI2L || + k->Opcode() == Op_LShiftL || + k->Opcode() == Op_AddL || + k->Opcode() == Op_SubL || + k->Opcode() == Op_AddP)) { + break; + } + for (DUIterator_Fast imax, i = k->fast_outs(imax); i < imax; i++) { + Node* u = k->fast_out(i); + assert(!wq.contains(u), "shouldn't process one node several times"); + wq.push(u); + } + k->subsume_by(m, this); + } + } + #endif + break; + } default: assert( !n->is_Call(), "" ); assert( !n->is_Mem(), "" ); assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN"); break;
< prev index next >