src/share/vm/opto/graphKit.hpp


rev 9120 : 8136473: failed: no mismatched stores, except on raw memory: StoreB StoreI
Summary: Mismatched stores on same slice possible with Unsafe.Put*Unaligned methods
Reviewed-by:

--- old/src/share/vm/opto/graphKit.hpp
 496   MergeMemNode* merged_memory() {
 497     Node* mem = map_not_null()->memory();
 498     assert(mem->is_MergeMem(), "parse memory is always pre-split");
 499     return mem->as_MergeMem();
 500   }
 501 
 502   // Set the entire memory state; produce a new MergeMemNode.
 503   void set_all_memory(Node* newmem);
 504 
 505   // Create a memory projection from the call, then set_all_memory.
 506   void set_all_memory_call(Node* call, bool separate_io_proj = false);
 507 
 508   // Create a LoadNode, reading from the parser's memory state.
 509   // (Note:  require_atomic_access is useful only with T_LONG.)
 510   //
 511   // We choose the unordered semantics by default because we have
 512   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
 513   // of volatile fields.
 514   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 515                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
 516                   bool require_atomic_access = false) {
 517     // This version computes alias_index from bottom_type
 518     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
 519                      mo, control_dependency, require_atomic_access);
 520   }
 521   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
 522                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
 523                   bool require_atomic_access = false) {
 524     // This version computes alias_index from an address type
 525     assert(adr_type != NULL, "use other make_load factory");
 526     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
 527                      mo, control_dependency, require_atomic_access);
 528   }
 529   // This is the base version which is given an alias index.
 530   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
 531                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
 532                   bool require_atomic_access = false);
 533 
 534   // Create & transform a StoreNode and store the effect into the
 535   // parser's memory state.
 536   //
 537   // We must ensure that stores of object references will be visible
 538   // only after the object's initialization. So the clients of this
 539   // procedure must indicate that the store requires `release'
 540   // semantics, if the stored value is an object reference that might
 541   // point to a new object and may become externally visible.
 542   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 543                         const TypePtr* adr_type,
 544                         MemNode::MemOrd mo,
 545                         bool require_atomic_access = false) {
 546     // This version computes alias_index from an address type
 547     assert(adr_type != NULL, "use other store_to_memory factory");
 548     return store_to_memory(ctl, adr, val, bt,
 549                            C->get_alias_index(adr_type),
 550                            mo, require_atomic_access);
 551   }
 552   // This is the base version which is given an alias index.
 553   // Return the new StoreXNode
 554   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 555                         int adr_idx,
 556                         MemNode::MemOrd,
 557                         bool require_atomic_access = false);
 558 
 559 
 560   // All in one pre-barrier, store, post_barrier
 561   // Insert a write-barrier'd store.  This is to let generational GC
 562   // work; we have to flag all oop-stores before the next GC point.
 563   //
 564   // It comes in 3 flavors of store to an object, array, or unknown.
 565   // We use precise card marks for arrays to avoid scanning the entire
 566   // array. We use imprecise for objects. We use precise for unknown
 567   // since we don't know if we have an array or an object or even
 568   // where the object starts.
 569   //
 570   // If val==NULL, it is taken to be a completely unknown value. QQQ
 571 
 572   Node* store_oop(Node* ctl,
 573                   Node* obj,   // containing obj
 574                   Node* adr,   // actual address to store val at
 575                   const TypePtr* adr_type,
 576                   Node* val,
 577                   const TypeOopPtr* val_type,
 578                   BasicType bt,
 579                   bool use_precise,
 580                   MemNode::MemOrd mo);
 581 
 582   Node* store_oop_to_object(Node* ctl,
 583                             Node* obj,   // containing obj
 584                             Node* adr,   // actual address to store val at
 585                             const TypePtr* adr_type,
 586                             Node* val,
 587                             const TypeOopPtr* val_type,
 588                             BasicType bt,
 589                             MemNode::MemOrd mo) {
 590     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
 591   }
 592 
 593   Node* store_oop_to_array(Node* ctl,
 594                            Node* obj,   // containing obj
 595                            Node* adr,   // actual address to store val at
 596                            const TypePtr* adr_type,
 597                            Node* val,
 598                            const TypeOopPtr* val_type,
 599                            BasicType bt,
 600                            MemNode::MemOrd mo) {
 601     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
 602   }
 603 
 604   // Could be an array or object we don't know at compile time (unsafe ref.)
 605   Node* store_oop_to_unknown(Node* ctl,
 606                              Node* obj,   // containing obj
 607                              Node* adr,   // actual address to store val at
 608                              const TypePtr* adr_type,
 609                              Node* val,
 610                              BasicType bt,
 611                              MemNode::MemOrd mo);
 612 
 613   // For the few cases where the barriers need special help
 614   void pre_barrier(bool do_load, Node* ctl,
 615                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
 616                    Node* pre_val,
 617                    BasicType bt);
 618 
 619   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
 620                     Node* val, BasicType bt, bool use_precise);
 621 
 622   // Return addressing for an array element.
 623   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 624                               // Optional constraint on the array size:
 625                               const TypeInt* sizetype = NULL);
 626 
 627   // Return a load of array element at idx.
 628   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
 629 
 630   //---------------- Dtrace support --------------------
 631   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);

+++ new/src/share/vm/opto/graphKit.hpp

 496   MergeMemNode* merged_memory() {
 497     Node* mem = map_not_null()->memory();
 498     assert(mem->is_MergeMem(), "parse memory is always pre-split");
 499     return mem->as_MergeMem();
 500   }
 501 
 502   // Set the entire memory state; produce a new MergeMemNode.
 503   void set_all_memory(Node* newmem);
 504 
 505   // Create a memory projection from the call, then set_all_memory.
 506   void set_all_memory_call(Node* call, bool separate_io_proj = false);
 507 
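As a usage note (a sketch, not code from this patch): the memory-state setters above are typically used right after a call node is created, so that all later loads and stores depend on the call's memory effects. The helper name below is hypothetical:

    // Sketch only: re-root the kit's memory state at a call's memory
    // projection so subsequent memory operations observe its effects.
    static void example_after_call(GraphKit& kit, CallNode* call) {
      kit.set_all_memory_call(call, /*separate_io_proj=*/ false);
    }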
 508   // Create a LoadNode, reading from the parser's memory state.
 509   // (Note:  require_atomic_access is useful only with T_LONG.)
 510   //
 511   // We choose the unordered semantics by default because we have
 512   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
 513   // of volatile fields.
 514   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 515                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
 516                   bool require_atomic_access = false, bool unaligned = false,
 517                   bool mismatched = false) {
 518     // This version computes alias_index from bottom_type
 519     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
 520                      mo, control_dependency, require_atomic_access,
 521                      unaligned, mismatched);
 522   }
 523   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
 524                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
 525                   bool require_atomic_access = false, bool unaligned = false,
 526                   bool mismatched = false) {
 527     // This version computes alias_index from an address type
 528     assert(adr_type != NULL, "use other make_load factory");
 529     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
 530                      mo, control_dependency, require_atomic_access,
 531                      unaligned, mismatched);
 532   }
 533   // This is the base version which is given an alias index.
 534   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
 535                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
 536                   bool require_atomic_access = false, bool unaligned = false,
 537                   bool mismatched = false);
 538 
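For illustration (a hedged sketch, not code from this patch): with the new flags, an intrinsic such as Unsafe.getIntUnaligned can mark a load as both unaligned and mismatched, e.g. an int-sized read from a byte[] slice. The helper name and arguments are hypothetical:

    // Sketch only: an unordered, possibly unaligned int load that does
    // not match the declared element width of the slice it reads from.
    static Node* example_unaligned_load(GraphKit& kit, Node* adr,
                                        const TypePtr* adr_type) {
      return kit.make_load(kit.control(), adr, TypeInt::INT, T_INT, adr_type,
                           MemNode::unordered, LoadNode::DependsOnlyOnTest,
                           /*require_atomic_access=*/ false,
                           /*unaligned=*/ true,
                           /*mismatched=*/ true);
    }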
 539   // Create & transform a StoreNode and store the effect into the
 540   // parser's memory state.
 541   //
 542   // We must ensure that stores of object references will be visible
 543   // only after the object's initialization. So the clients of this
 544   // procedure must indicate that the store requires `release'
 545   // semantics, if the stored value is an object reference that might
 546   // point to a new object and may become externally visible.
 547   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 548                         const TypePtr* adr_type,
 549                         MemNode::MemOrd mo,
 550                         bool require_atomic_access = false,
 551                         bool unaligned = false,
 552                         bool mismatched = false) {
 553     // This version computes alias_index from an address type
 554     assert(adr_type != NULL, "use other store_to_memory factory");
 555     return store_to_memory(ctl, adr, val, bt,
 556                            C->get_alias_index(adr_type),
 557                            mo, require_atomic_access,
 558                            unaligned, mismatched);
 559   }
 560   // This is the base version which is given an alias index.
 561   // Return the new StoreXNode
 562   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 563                         int adr_idx,
 564                         MemNode::MemOrd,
 565                         bool require_atomic_access = false,
 566                         bool unaligned = false,
 567                         bool mismatched = false);
 568 
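The store side is the situation 8136473 describes: a StoreI emitted for Unsafe.putIntUnaligned into a byte[] lands on the byte slice, so the caller must now flag the access as mismatched. A hypothetical sketch:

    // Sketch only: an int-sized store into memory whose slice was
    // declared with a different element width (e.g. a byte[]).
    static Node* example_unaligned_store(GraphKit& kit, Node* adr,
                                         const TypePtr* adr_type, Node* val) {
      return kit.store_to_memory(kit.control(), adr, val, T_INT, adr_type,
                                 MemNode::unordered,
                                 /*require_atomic_access=*/ false,
                                 /*unaligned=*/ true,
                                 /*mismatched=*/ true);
    }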
 569 
 570   // All in one pre-barrier, store, post_barrier
 571   // Insert a write-barrier'd store.  This is to let generational GC
 572   // work; we have to flag all oop-stores before the next GC point.
 573   //
 574   // It comes in 3 flavors of store to an object, array, or unknown.
 575   // We use precise card marks for arrays to avoid scanning the entire
 576   // array. We use imprecise for objects. We use precise for unknown
 577   // since we don't know if we have an array or an object or even
 578   // where the object starts.
 579   //
 580   // If val==NULL, it is taken to be a completely unknown value. QQQ
 581 
 582   Node* store_oop(Node* ctl,
 583                   Node* obj,   // containing obj
 584                   Node* adr,   // actual address to store val at
 585                   const TypePtr* adr_type,
 586                   Node* val,
 587                   const TypeOopPtr* val_type,
 588                   BasicType bt,
 589                   bool use_precise,
 590                   MemNode::MemOrd mo,
 591                   bool mismatched = false);
 592 
 593   Node* store_oop_to_object(Node* ctl,
 594                             Node* obj,   // containing obj
 595                             Node* adr,   // actual address to store val at
 596                             const TypePtr* adr_type,
 597                             Node* val,
 598                             const TypeOopPtr* val_type,
 599                             BasicType bt,
 600                             MemNode::MemOrd mo) {
 601     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
 602   }
 603 
 604   Node* store_oop_to_array(Node* ctl,
 605                            Node* obj,   // containing obj
 606                            Node* adr,   // actual adress to store val at
 607                            const TypePtr* adr_type,
 608                            Node* val,
 609                            const TypeOopPtr* val_type,
 610                            BasicType bt,
 611                            MemNode::MemOrd mo) {
 612     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
 613   }
 614 
 615   // Could be an array or object we don't know at compile time (unsafe ref.)
 616   Node* store_oop_to_unknown(Node* ctl,
 617                              Node* obj,   // containing obj
 618                              Node* adr,   // actual address to store val at
 619                              const TypePtr* adr_type,
 620                              Node* val,
 621                              BasicType bt,
 622                              MemNode::MemOrd mo,
 623                              bool mismatched = false);
 624 
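To make the flavor choice concrete, here is a minimal sketch (all names illustrative): known array stores take the precise variant, field stores the imprecise one, and references whose destination kind is unknown at compile time go through store_oop_to_unknown, which after this change can also be marked mismatched:

    // Sketch only: choosing a store_oop flavor by destination kind.
    static Node* example_oop_store(GraphKit& kit, Node* obj, Node* adr,
                                   const TypePtr* adr_type, Node* val,
                                   const TypeOopPtr* val_type) {
      if (adr_type->isa_aryptr() != NULL) {
        // Known array element: precise card mark.
        return kit.store_oop_to_array(kit.control(), obj, adr, adr_type,
                                      val, val_type, T_OBJECT,
                                      MemNode::unordered);
      }
      // Array or object unknown at compile time (e.g. an Unsafe access).
      return kit.store_oop_to_unknown(kit.control(), obj, adr, adr_type,
                                      val, T_OBJECT, MemNode::unordered,
                                      /*mismatched=*/ false);
    }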
 625   // For the few cases where the barriers need special help
 626   void pre_barrier(bool do_load, Node* ctl,
 627                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
 628                    Node* pre_val,
 629                    BasicType bt);
 630 
 631   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
 632                     Node* val, BasicType bt, bool use_precise);
 633 
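For orientation (a sketch against the signatures above, not the store_oop implementation itself): the standalone entry points let a caller spell out the pre-barrier / store / post-barrier sequence that store_oop otherwise bundles:

    // Sketch only: manual GC barriers around a raw oop store.
    static void example_manual_barriers(GraphKit& kit, Node* obj, Node* adr,
                                        uint adr_idx, Node* val,
                                        const TypeOopPtr* val_type) {
      Node* ctl = kit.control();
      // Pre-barrier; do_load=true means the barrier loads the old value.
      kit.pre_barrier(/*do_load=*/ true, ctl, obj, adr, adr_idx, val,
                      val_type, /*pre_val=*/ NULL, T_OBJECT);
      Node* store = kit.store_to_memory(ctl, adr, val, T_OBJECT, adr_idx,
                                        MemNode::unordered);
      // Post-barrier (card mark); use_precise=true for array elements.
      kit.post_barrier(ctl, store, obj, adr, adr_idx, val, T_OBJECT,
                       /*use_precise=*/ true);
    }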
 634   // Return addressing for an array element.
 635   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 636                               // Optional constraint on the array size:
 637                               const TypeInt* sizetype = NULL);
 638 
 639   // Return a load of array element at idx.
 640   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
 641 
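A final usage sketch (hypothetical helper): array_element_address followed by a load is the usual two-step idiom; load_array_element wraps the same pattern when the array type is known up front:

    // Sketch only: read ary[idx] as an int.
    static Node* example_array_read(GraphKit& kit, Node* ary, Node* idx) {
      Node* adr = kit.array_element_address(ary, idx, T_INT);
      // This make_load overload derives the alias index from adr's
      // bottom type.
      return kit.make_load(kit.control(), adr, TypeInt::INT, T_INT,
                           MemNode::unordered);
    }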
 642   //---------------- Dtrace support --------------------
 643   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);

