src/share/vm/opto/graphKit.hpp

Print this page
rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.


 493 
 494   // Get the entire memory state (probably a MergeMemNode), and reset it
 495   // (The resetting prevents somebody from using the dangling Node pointer.)
 496   Node* reset_memory();
 497 
 498   // Get the entire memory state, asserted to be a MergeMemNode.
 499   MergeMemNode* merged_memory() {
 500     Node* mem = map_not_null()->memory();
         // Parse-time memory is kept pre-split per alias class, so the
         // whole-memory node must already be a MergeMemNode.
 501     assert(mem->is_MergeMem(), "parse memory is always pre-split");
 502     return mem->as_MergeMem();
 503   }
 504 
 505   // Set the entire memory state; produce a new MergeMemNode.
 506   void set_all_memory(Node* newmem);
 507 
 508   // Create a memory projection from the call, then set_all_memory.
 509   void set_all_memory_call(Node* call, bool separate_io_proj = false);
 510 
 511   // Create a LoadNode, reading from the parser's memory state.
 512   // (Note:  require_atomic_access is useful only with T_LONG.)




 513   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 514                   bool require_atomic_access = false) {
 515     // This version computes alias_index from bottom_type
         // Convenience overload: derives the address type from the address
         // node itself and delegates to the TypePtr* factory below.
 516     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
 517                      require_atomic_access);
 518   }
 519   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {

 520     // This version computes alias_index from an address type
         // adr_type must be non-NULL here; callers without one should use
         // the bottom_type overload above.
 521     assert(adr_type != NULL, "use other make_load factory");
 522     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
 523                      require_atomic_access);
 524   }
 525   // This is the base version which is given an alias index.
 526   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);

 527 
 528   // Create & transform a StoreNode and store the effect into the
 529   // parser's memory state.






 530   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 531                         const TypePtr* adr_type,
 532                         bool require_atomic_access = false) {

 533     // This version computes alias_index from an address type
         // Delegates to the alias-index base factory after resolving the
         // index through the compilation's alias table.
 534     assert(adr_type != NULL, "use other store_to_memory factory");
 535     return store_to_memory(ctl, adr, val, bt,
 536                            C->get_alias_index(adr_type),
 537                            require_atomic_access);
 538   }
 539   // This is the base version which is given alias index
 540   // Return the new StoreXNode
 541   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 542                         int adr_idx,
 543                         bool require_atomic_access = false);

 544 
 545 
 546   // All in one pre-barrier, store, post_barrier
 547   // Insert a write-barrier'd store.  This is to let generational GC
 548   // work; we have to flag all oop-stores before the next GC point.
 549   //
 550   // It comes in 3 flavors of store to an object, array, or unknown.
 551   // We use precise card marks for arrays to avoid scanning the entire
 552   // array. We use imprecise for object. We use precise for unknown
 553   // since we don't know if we have an array or an object or even
 554   // where the object starts.
 555   //
 556   // If val==NULL, it is taken to be a completely unknown value. QQQ
 557 
 558   Node* store_oop(Node* ctl,
 559                   Node* obj,   // containing obj
 560                   Node* adr,  // actual address to store val at
 561                   const TypePtr* adr_type,
 562                   Node* val,
 563                   const TypeOopPtr* val_type,
 564                   BasicType bt,
 565                   bool use_precise);

 566 
 567   Node* store_oop_to_object(Node* ctl,
 568                             Node* obj,   // containing obj
 569                             Node* adr,  // actual address to store val at
 570                             const TypePtr* adr_type,
 571                             Node* val,
 572                             const TypeOopPtr* val_type,
 573                             BasicType bt) {
         // use_precise == false: imprecise card marks suffice for stores
         // into plain objects (see flavor comment above).
 574     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);

 575   }
 576 
 577   Node* store_oop_to_array(Node* ctl,
 578                            Node* obj,   // containing obj
 579                            Node* adr,  // actual address to store val at
 580                            const TypePtr* adr_type,
 581                            Node* val,
 582                            const TypeOopPtr* val_type,
 583                            BasicType bt) {
         // use_precise == true: precise card marks avoid scanning the whole
         // array at GC time (see flavor comment above).
 584     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);

 585   }
 586 
 587   // Could be an array or object we don't know at compile time (unsafe ref.)
 588   Node* store_oop_to_unknown(Node* ctl,
 589                              Node* obj,   // containing obj
 590                              Node* adr,  // actual address to store val at
 591                              const TypePtr* adr_type,
 592                              Node* val,
 593                              BasicType bt);

 594 
 595   // For the few cases where the barriers need special help
 596   void pre_barrier(bool do_load, Node* ctl,
 597                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
 598                    Node* pre_val,
 599                    BasicType bt);
 600 
 601   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
 602                     Node* val, BasicType bt, bool use_precise);
 603 
 604   // Return addressing for an array element.
 605   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 606                               // Optional constraint on the array size:
 607                               const TypeInt* sizetype = NULL);
 608 
 609   // Return a load of array element at idx.
 610   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
 611 
 612   //---------------- Dtrace support --------------------
 613   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);




 493 
 494   // Get the entire memory state (probably a MergeMemNode), and reset it
 495   // (The resetting prevents somebody from using the dangling Node pointer.)
 496   Node* reset_memory();
 497 
 498   // Get the entire memory state, asserted to be a MergeMemNode.
 499   MergeMemNode* merged_memory() {
 500     Node* mem = map_not_null()->memory();
         // Parse-time memory is kept pre-split per alias class, so the
         // whole-memory node must already be a MergeMemNode.
 501     assert(mem->is_MergeMem(), "parse memory is always pre-split");
 502     return mem->as_MergeMem();
 503   }
 504 
 505   // Set the entire memory state; produce a new MergeMemNode.
 506   void set_all_memory(Node* newmem);
 507 
 508   // Create a memory projection from the call, then set_all_memory.
 509   void set_all_memory_call(Node* call, bool separate_io_proj = false);
 510 
 511   // Create a LoadNode, reading from the parser's memory state.
 512   // (Note:  require_atomic_access is useful only with T_LONG.)
 513   //
 514   // We choose the unordered semantics by default because we have
 515   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
 516   // of volatile fields.
 517   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 518                   bool require_atomic_access, LoadNode::Sem sem) {
 519     // This version computes alias_index from bottom_type
         // The requested ordering semantics `sem` is threaded through
         // unchanged to the TypePtr* factory below.
 520     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
 521                      require_atomic_access, sem);
 522   }
 523   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
 524                   bool require_atomic_access, LoadNode::Sem sem) {
 525     // This version computes alias_index from an address type
         // adr_type must be non-NULL here; callers without one should use
         // the bottom_type overload above.
 526     assert(adr_type != NULL, "use other make_load factory");
 527     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
 528                      require_atomic_access, sem);
 529   }
 530   // This is the base version which is given an alias index.
 531   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
 532                   bool require_atomic_access, LoadNode::Sem sem);
 533 
 534   // Create & transform a StoreNode and store the effect into the
 535   // parser's memory state.
 536   //
 537   // We must ensure that stores of object references will be visible
 538   // only after the object's initialization. So the clients of this
 539   // procedure must indicate that the store requires `release'
 540   // semantics, if the stored value is an object reference that might
 541   // point to a new object and may become externally visible.
 542   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 543                         const TypePtr* adr_type,
 544                         bool require_atomic_access,
 545                         StoreNode::Sem sem) {
 546     // This version computes alias_index from an address type
         // Resolves the alias index and forwards, including the ordering
         // semantics, to the alias-index base factory.
 547     assert(adr_type != NULL, "use other store_to_memory factory");
 548     return store_to_memory(ctl, adr, val, bt,
 549                            C->get_alias_index(adr_type),
 550                            require_atomic_access, sem);
 551   }
 552   // This is the base version which is given alias index
 553   // Return the new StoreXNode
 554   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 555                         int adr_idx,
 556                         bool require_atomic_access,
 557                         StoreNode::Sem);
 558 
 559 
 560   // All in one pre-barrier, store, post_barrier
 561   // Insert a write-barrier'd store.  This is to let generational GC
 562   // work; we have to flag all oop-stores before the next GC point.
 563   //
 564   // It comes in 3 flavors of store to an object, array, or unknown.
 565   // We use precise card marks for arrays to avoid scanning the entire
 566   // array. We use imprecise for object. We use precise for unknown
 567   // since we don't know if we have an array or an object or even
 568   // where the object starts.
 569   //
 570   // If val==NULL, it is taken to be a completely unknown value. QQQ
 571 
 572   Node* store_oop(Node* ctl,
 573                   Node* obj,   // containing obj
 574                   Node* adr,  // actual address to store val at
 575                   const TypePtr* adr_type,
 576                   Node* val,
 577                   const TypeOopPtr* val_type,
 578                   BasicType bt,
 579                   bool use_precise,
 580                   StoreNode::Sem sem);
 581 
 582   Node* store_oop_to_object(Node* ctl,
 583                             Node* obj,   // containing obj
 584                             Node* adr,  // actual address to store val at
 585                             const TypePtr* adr_type,
 586                             Node* val,
 587                             const TypeOopPtr* val_type,
 588                             BasicType bt,
 589                             StoreNode::Sem sem) {
         // use_precise == false: imprecise card marks suffice for stores
         // into plain objects (see flavor comment above).
 590     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, sem);
 591   }
 592 
 593   Node* store_oop_to_array(Node* ctl,
 594                            Node* obj,   // containing obj
 595                            Node* adr,  // actual address to store val at
 596                            const TypePtr* adr_type,
 597                            Node* val,
 598                            const TypeOopPtr* val_type,
 599                            BasicType bt,
 600                            StoreNode::Sem sem) {
         // use_precise == true: precise card marks avoid scanning the whole
         // array at GC time (see flavor comment above).
 601     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, sem);
 602   }
 603 
 604   // Could be an array or object we don't know at compile time (unsafe ref.)
 605   Node* store_oop_to_unknown(Node* ctl,
 606                              Node* obj,   // containing obj
 607                              Node* adr,  // actual address to store val at
 608                              const TypePtr* adr_type,
 609                              Node* val,
 610                              BasicType bt,
 611                              StoreNode::Sem sem);
 612 
 613   // For the few cases where the barriers need special help
 614   void pre_barrier(bool do_load, Node* ctl,
 615                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
 616                    Node* pre_val,
 617                    BasicType bt);
 618 
 619   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
 620                     Node* val, BasicType bt, bool use_precise);
 621 
 622   // Return addressing for an array element.
 623   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 624                               // Optional constraint on the array size:
 625                               const TypeInt* sizetype = NULL);
 626 
 627   // Return a load of array element at idx.
 628   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
 629 
 630   //---------------- Dtrace support --------------------
 631   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);