389 if (_table != (Node**)badAddress) clear();
390 }
391
// Assignment: transfer the contents of another NodeHash into this one.
// NOTE(review): the shallow memcpy copies every field bit-for-bit; this is
// only safe while NodeHash is trivially copyable (no virtuals, no
// self-referential members) -- confirm if the class layout changes.
void NodeHash::operator=(const NodeHash& nh) {
  // Unlock all nodes upon replacement of table.  Self-assignment is a no-op.
  if (&nh == this) return;
  // Release this object's current table (via clear()) unless it has already
  // been poisoned with badAddress by a previous transfer.
  if (_table != (Node**)badAddress) clear();
  // Bitwise transfer of the whole object, including the table pointer.
  memcpy(this, &nh, sizeof(*this));
  // Do not increment hash_lock counts again.
  // Instead, be sure we never again use the source table: poison the source's
  // table pointer so any later use of 'nh' faults on badAddress.
  ((NodeHash*)&nh)->_table = (Node**)badAddress;
}
401
402
403 #endif
404
405
406 //=============================================================================
407 //------------------------------PhaseRemoveUseless-----------------------------
// 1) Use a breadth-first walk to collect useful nodes reachable from root.
409 PhaseRemoveUseless::PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist ) : Phase(Remove_Useless),
410 _useful(Thread::current()->resource_area()) {
411
412 // Implementation requires 'UseLoopSafepoints == true' and an edge from root
413 // to each SafePointNode at a backward branch. Inserted in add_safepoint().
414 if( !UseLoopSafepoints || !OptoRemoveUseless ) return;
415
416 // Identify nodes that are reachable from below, useful.
417 C->identify_useful_nodes(_useful);
418 // Update dead node list
419 C->update_dead_node_list(_useful);
420
421 // Remove all useless nodes from PhaseValues' recorded types
422 // Must be done before disconnecting nodes to preserve hash-table-invariant
423 gvn->remove_useless_nodes(_useful.member_set());
424
425 // Remove all useless nodes from future worklist
426 worklist->remove_useless_nodes(_useful.member_set());
427
428 // Disconnect 'useless' nodes that are adjacent to useful nodes
429 C->remove_useless_nodes(_useful);
430
431 // Remove edges from "root" to each SafePoint at a backward branch.
432 // They were inserted during parsing (see add_safepoint()) to make infinite
433 // loops without calls or exceptions visible to root, i.e., useful.
434 Node *root = C->root();
435 if( root != NULL ) {
436 for( uint i = root->req(); i < root->len(); ++i ) {
437 Node *n = root->in(i);
438 if( n != NULL && n->is_SafePoint() ) {
439 root->rm_prec(i);
440 --i;
441 }
442 }
443 }
444 }
445
446
447 //=============================================================================
448 //------------------------------PhaseTransform---------------------------------
// Set up the per-phase arena and the node/type side tables; all allocations
// made by this phase are backed by the current thread's resource area.
PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
  _arena(Thread::current()->resource_area()),
  _nodes(_arena),
  _types(_arena)
{
  init_con_caches();  // pre-populate the caches of commonly used constants
#ifndef PRODUCT
  // Debug-only progress/transform counters start cleared.
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
  // Force allocation for currently existing nodes
  _types.map(C->unique(), NULL);
}
463
464 //------------------------------PhaseTransform---------------------------------
465 PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
1898 }
1899 }
1900 i -= uses_found; // we deleted 1 or more copies of this edge
1901 }
1902 }
1903
1904 //=============================================================================
1905 //-----------------------------------------------------------------------------
1906 void Type_Array::grow( uint i ) {
1907 if( !_max ) {
1908 _max = 1;
1909 _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
1910 _types[0] = NULL;
1911 }
1912 uint old = _max;
1913 while( i >= _max ) _max <<= 1; // Double to fit
1914 _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
1915 memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
1916 }
1917
1918 //------------------------------dump-------------------------------------------
1919 #ifndef PRODUCT
1920 void Type_Array::dump() const {
1921 uint max = Size();
1922 for( uint i = 0; i < max; i++ ) {
1923 if( _types[i] != NULL ) {
1924 tty->print(" %d\t== ", i); _types[i]->dump(); tty->cr();
1925 }
1926 }
1927 }
1928 #endif
|
389 if (_table != (Node**)badAddress) clear();
390 }
391
// Assignment: transfer the contents of another NodeHash into this one.
// NOTE(review): the shallow memcpy copies every field bit-for-bit; this is
// only safe while NodeHash is trivially copyable (no virtuals, no
// self-referential members) -- confirm if the class layout changes.
void NodeHash::operator=(const NodeHash& nh) {
  // Unlock all nodes upon replacement of table.  Self-assignment is a no-op.
  if (&nh == this) return;
  // Release this object's current table (via clear()) unless it has already
  // been poisoned with badAddress by a previous transfer.
  if (_table != (Node**)badAddress) clear();
  // Bitwise transfer of the whole object, including the table pointer.
  memcpy(this, &nh, sizeof(*this));
  // Do not increment hash_lock counts again.
  // Instead, be sure we never again use the source table: poison the source's
  // table pointer so any later use of 'nh' faults on badAddress.
  ((NodeHash*)&nh)->_table = (Node**)badAddress;
}
401
402
403 #endif
404
405
406 //=============================================================================
407 //------------------------------PhaseRemoveUseless-----------------------------
// 1) Use a breadth-first walk to collect useful nodes reachable from root.
409 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num) : Phase(phase_num),
410 _useful(Thread::current()->resource_area()) {
411
412 // Implementation requires 'UseLoopSafepoints == true' and an edge from root
413 // to each SafePointNode at a backward branch. Inserted in add_safepoint().
414 if( !UseLoopSafepoints || !OptoRemoveUseless ) return;
415
416 // Identify nodes that are reachable from below, useful.
417 C->identify_useful_nodes(_useful);
418 // Update dead node list
419 C->update_dead_node_list(_useful);
420
421 // Remove all useless nodes from PhaseValues' recorded types
422 // Must be done before disconnecting nodes to preserve hash-table-invariant
423 gvn->remove_useless_nodes(_useful.member_set());
424
425 // Remove all useless nodes from future worklist
426 worklist->remove_useless_nodes(_useful.member_set());
427
428 // Disconnect 'useless' nodes that are adjacent to useful nodes
429 C->remove_useless_nodes(_useful);
430
431 // Remove edges from "root" to each SafePoint at a backward branch.
432 // They were inserted during parsing (see add_safepoint()) to make infinite
433 // loops without calls or exceptions visible to root, i.e., useful.
434 Node *root = C->root();
435 if( root != NULL ) {
436 for( uint i = root->req(); i < root->len(); ++i ) {
437 Node *n = root->in(i);
438 if( n != NULL && n->is_SafePoint() ) {
439 root->rm_prec(i);
440 --i;
441 }
442 }
443 }
444 }
445
446 //=============================================================================
447 //------------------------------PhaseRenumberLive------------------------------
448 // First, remove useless nodes (equivalent to identifying live nodes).
449 // Then, renumber live nodes.
450 //
451 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
452 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
453 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
454 // value in the range [0, x).
455 //
456 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
457 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
458 //
459 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
460 // (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
461 // processed. The worklist is updated with the new node IDs.
462 // (2) Type information (the field PhaseGVN::_types) maps type information to each
463 // node ID. The mapping is updated to use the new node IDs as well.
464 //
465 // The PhaseRenumberLive phase does not preserve the order of elements in the worklist.
466 //
467 // Other data structures used by the compiler are not updated. The hash table for value
468 // numbering (the field PhaseGVN::_table) is not updated because computing the hash
469 // values is not based on node IDs. The field PhaseGVN::_nodes is not updated either
470 // because it is empty wherever PhaseRenumberLive is used.
// Remove useless nodes (via the base class), then assign each surviving node
// a fresh, dense ID in [0, live_node_count) while keeping the worklist and
// the GVN type table consistent with the new IDs.
// NOTE(review): 'phase_num' is accepted but not forwarded; the base phase
// number is chosen from the RenumberLiveNodes flag instead -- confirm that
// this is intentional.
PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn, Unique_Node_List* worklist, PhaseNumber phase_num) :
  PhaseRemoveUseless(gvn, worklist, RenumberLiveNodes ? Renumber_Live : Remove_Useless) {

  assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
  uint old_unique_count = C->unique();  // NOTE(review): currently unused in this body
  uint live_node_count = C->live_nodes();
  uint worklist_size = worklist->size();

  // Storage for the updated type information.
  Type_Array new_type_array(Thread::current()->resource_area());

  // Storage for holding the worklist with the updated IDs.
  Unique_Node_List new_worklist(Thread::current()->resource_area());

  // Process live nodes in the worklist.
  uint current_idx = 0; // The current new node ID. Incremented after every assignment.
  for (uint i = 0; i < _useful.size(); i++) {
    Node* n = _useful.at(i);
    // Carry the node's type (possibly NULL) over to its new ID slot.
    const Type* type = gvn->type_or_null(n);
    new_type_array.map(current_idx, type);

    // Membership is tested and the node removed BEFORE set_idx() below;
    // Unique_Node_List membership appears to be keyed by the node's index,
    // so the order matters -- confirm against its implementation.
    bool in_worklist = false;
    if (worklist->member(n)) {
      worklist->remove(n);
      in_worklist = true;
    }

    n->set_idx(current_idx); // Update node ID.

    if (in_worklist) {
      new_worklist.push(n);
    }

    current_idx++;
  }

  assert(worklist->size() == 0, "original worklist must be empty");
  assert(worklist_size == new_worklist.size(), "the new worklist must have the same size as the original worklist");
  assert(live_node_count == current_idx, "all live nodes must be processed");

  // Push nodes from the temporary worklist to the compiler's work
  // list. The worklist usually contains only a few items (rarely
  // more than 10), so pushing individual nodes has a negligible
  // effect on performance.
  for (uint i = 0; i < worklist_size; i++) {
    worklist->push(new_worklist.pop());
  }

  // Replace the compiler's type information with the updated type information.
  // The number of nodes for which type information is stored can be large (up
  // to several tens of thousands), therefore type information is updated with
  // a memory copy.
  gvn->types().replace_with(&new_type_array);

  // Update the unique node count of the compilation to the number of currently live nodes.
  C->set_unique(live_node_count);

  // Set the dead node count to 0 and reset dead node list.
  C->reset_dead_node_list();
}
531
532 //=============================================================================
533 //------------------------------PhaseTransform---------------------------------
// Set up the per-phase arena and the node/type side tables; all allocations
// made by this phase are backed by the current thread's resource area.
PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
  _arena(Thread::current()->resource_area()),
  _nodes(_arena),
  _types(_arena)
{
  init_con_caches();  // pre-populate the caches of commonly used constants
#ifndef PRODUCT
  // Debug-only progress/transform counters start cleared.
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
  // Force allocation for currently existing nodes
  _types.map(C->unique(), NULL);
}
548
549 //------------------------------PhaseTransform---------------------------------
550 PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
1983 }
1984 }
1985 i -= uses_found; // we deleted 1 or more copies of this edge
1986 }
1987 }
1988
1989 //=============================================================================
1990 //-----------------------------------------------------------------------------
1991 void Type_Array::grow( uint i ) {
1992 if( !_max ) {
1993 _max = 1;
1994 _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
1995 _types[0] = NULL;
1996 }
1997 uint old = _max;
1998 while( i >= _max ) _max <<= 1; // Double to fit
1999 _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
2000 memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
2001 }
2002
2003 void Type_Array::replace_with(Type_Array* ta) {
2004 assert(ta->Size() <= Size(), "size of current type must be larger than the size of the type copied in");
2005 if (ta->Size() <= Size()) {
2006 memcpy(_types, ta->_types, ta->_max * sizeof(Type*));
2007 uint diff = Size() - ta->Size();
2008 if (diff > 0) {
2009 memset(&_types[ta->Size()], 0, diff * sizeof(Type*));
2010 }
2011 }
2012 }
2013 //------------------------------dump-------------------------------------------
2014 #ifndef PRODUCT
2015 void Type_Array::dump() const {
2016 uint max = Size();
2017 for( uint i = 0; i < max; i++ ) {
2018 if( _types[i] != NULL ) {
2019 tty->print(" %d\t== ", i); _types[i]->dump(); tty->cr();
2020 }
2021 }
2022 }
2023 #endif
|