      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (block->_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != block ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
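    // The memop can move to the test block only if every one of its
    // inputs is already available there, i.e. each input's block
    // dominates the null-check block. Walk the inputs and verify this.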
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) {
        vidx = j;
        // Ignore DecodeN val which could be hoisted to where needed.
        if( is_decoden ) continue;
      }
      // Block of memory-op input
      Block* inb = get_block_for_node(mach->in(j));
      if (mach->in(j)->is_Con() && inb == get_block_for_node(mach)) {
        // Ignore constant loads scheduled in the same block (we can simply hoist them as well).
        continue;
      }
      Block *b = block;         // Start from null check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = get_block_for_node(mach);
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // 'mach' is the faulting use we are trying to hoist;
      // 'n' below might be a load that blocks the hoisting.
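      // An anti-dependent load must execute before the store so that it
      // observes the pre-store memory state; moving the store above such
      // a load would reorder them, so we bail out if we find one.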
      while( b != block ) {
        uint k;
        for( k = 1; k < b->number_of_nodes(); k++ ) {
          Node *n = b->get_node(k);
    // ... (elided: remainder of the anti-dependence scan and the
    // selection of the best hoisting candidate into 'best') ...

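    // If 'val' is a DecodeN (compressed-oop decode), it produces the
    // address that actually faults, so it must be moved into the test
    // block before the memory op itself can be hoisted there.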
    if( is_decoden ) {
      // Check if we need to hoist decodeHeapOop_not_null first.
      Block *valb = get_block_for_node(val);
      if( block != valb && block->_dom_depth < valb->_dom_depth ) {
        // Hoist it up to the end of the test block.
        valb->find_remove(val);
        block->add_inst(val);
        map_node_to_block(val, block);
        // DecodeN on x86 may kill flags. Check for flag-killing projections
        // that also need to be hoisted.
        for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
          Node* n = val->fast_out(j);
          if( n->is_MachProj() ) {
            get_block_for_node(n)->find_remove(n);
            block->add_inst(n);
            map_node_to_block(n, block);
          }
        }
      }
    } else {
      // Hoist constant load inputs as well.
      for (uint i = 1; i < best->req(); ++i) {
        Node* n = best->in(i);
        if (n->is_Con() && get_block_for_node(n) == get_block_for_node(best)) {
          get_block_for_node(n)->find_remove(n);
          block->add_inst(n);
          map_node_to_block(n, block);
          // Constant loads may kill flags (for example, when XORing a register).
          // Check for flag-killing projections that also need to be hoisted.
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* proj = n->fast_out(j);
            if (proj->is_MachProj()) {
              get_block_for_node(proj)->find_remove(proj);
              block->add_inst(proj);
              map_node_to_block(proj, block);
            }
          }
        }
      }
    }

    // Hoist the memory candidate up to the end of the test block.
    Block *old_block = get_block_for_node(best);
    old_block->find_remove(best);
    block->add_inst(best);
    map_node_to_block(best, block);
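    // 'best' now sits at the end of the test block; the fault it can
    // raise will later stand in for the explicit null test.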

    // Move the control dependence if it is pinned to not-null block.
    // Don't change it in other cases: NULL or dominating control.
    if (best->in(0) == not_null_block->head()) {
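      // 'proj' is the projection of the null-check If; proj->in(0) is
      // the If itself, so proj->in(0)->in(0) is the control entering
      // the test, which dominates the hoisted memory op.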
      // Set it to control edge of null check.
      best->set_req(0, proj->in(0)->in(0));
    }

    // Check for flag-killing projections that also need to be hoisted.
    // The DU iteration is safe because no edges on 'best' are updated.
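    // A MachProj models an extra output of the instruction (such as a
    // clobbered flags register) and must be scheduled in the same block
    // as its defining node, so it moves along with 'best'.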
    for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
      Node* n = best->fast_out(j);
      if( n->is_MachProj() ) {
        get_block_for_node(n)->find_remove(n);
        block->add_inst(n);
        map_node_to_block(n, block);
      }
    }