
src/share/vm/opto/graphKit.hpp

Old version:
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false);

  // All-in-one pre_barrier, store, post_barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array. We use imprecise for objects. We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value. QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise,
                  MemNode::MemOrd mo);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt,
                            MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt,
                             MemNode::MemOrd mo);

  // For the few cases where the barriers need special help
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of the array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

New version:
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false);

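  // Illustrative sketch (hypothetical helper, not part of the original
  // header): how a GraphKit subclass might call the adr_type flavor of
  // make_load for a plain int field. The name example_load_int_field and
  // its parameters are assumptions for illustration only.
  Node* example_load_int_field(Node* obj, ciField* field) {
    Node* adr = basic_plus_adr(obj, obj, field->offset_in_bytes());
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    // A non-volatile field gets the default unordered semantics; the new
    // unaligned/mismatched flags keep their defaults because the access
    // type matches the declared field type. An Unsafe access that reads a
    // differently-typed slot would pass mismatched == true instead.
    return make_load(control(), adr, TypeInt::INT, T_INT, adr_type,
                     MemNode::unordered);
  }
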
  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false);

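  // Illustrative sketch (hypothetical helper): a store through the adr_type
  // flavor above. MemNode::release orders this store after the writes that
  // produced the value it publishes, as the comment above requires for
  // stores that may become externally visible; require_atomic_access asks
  // for an indivisible long store (it only matters for T_LONG).
  Node* example_store_volatile_long(Node* adr, const TypePtr* adr_type, Node* val) {
    return store_to_memory(control(), adr, val, T_LONG, adr_type,
                           MemNode::release, /* require_atomic_access */ true);
  }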

  // All-in-one pre_barrier, store, post_barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array. We use imprecise for objects. We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value. QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise,
                  MemNode::MemOrd mo,
                  bool mismatched = false);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt,
                            MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt,
                             MemNode::MemOrd mo,
                             bool mismatched = false);

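  // Illustrative sketch (hypothetical helper): a field store picks the
  // imprecise card mark described above (GC rescans from the object
  // header), whereas store_oop_to_array marks only the card covering the
  // element's address.
  Node* example_store_oop_field(Node* obj, Node* adr, const TypePtr* adr_type,
                                Node* val, const TypeOopPtr* val_type) {
    // Expands to store_oop(..., use_precise == false, ...).
    return store_oop_to_object(control(), obj, adr, adr_type, val, val_type,
                               T_OBJECT, MemNode::unordered);
  }
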
  // For the few cases where the barriers need special help
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

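  // Illustrative sketch (hypothetical helper, modeled on what an unsafe
  // compare-and-swap intrinsic needs): emitting the barriers around a raw
  // oop store that bypasses store_oop. With do_load == true the pre-barrier
  // loads the old value itself, so pre_val is passed as NULL.
  void example_barriers_around_raw_store(Node* obj, Node* adr, uint adr_idx,
                                         Node* newval, const TypeOopPtr* val_type,
                                         Node* raw_store) {
    pre_barrier(true /* do_load */, control(), obj, adr, adr_idx,
                newval, val_type, NULL /* pre_val */, T_OBJECT);
    post_barrier(control(), raw_store, obj, adr, adr_idx,
                 newval, T_OBJECT, true /* use_precise */);
  }
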
  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of the array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
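
  // Illustrative sketch (hypothetical helper) combining the two calls
  // above. Passing the range-check control pins the element address, so a
  // load hung off it cannot float above the check.
  Node* example_load_int_element(Node* ary, Node* idx, Node* range_check_ctrl) {
    const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
    Node* adr = array_element_address(ary, idx, T_INT, arytype->size(),
                                      range_check_ctrl);
    return make_load(control(), adr, TypeInt::INT, T_INT, arytype,
                     MemNode::unordered);
  }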

