
src/share/vm/opto/idealKit.hpp

rev 8502 : 8046943: JEP 246: Leverage CPU Instructions for GHASH and RSA
Summary: Add montgomeryMultiply intrinsic
Reviewed-by: kvn


 193   Node* CmpL(Node* l, Node* r) { return transform(new CmpLNode(l, r)); }
 194 
 195   // TLS
 196   Node* thread()  {  return gvn().transform(new ThreadLocalNode()); }
 197 
 198   // Pointers
 199 
 200   // Raw address should be transformed regardless of the 'delay_transform' flag
 201   // to produce canonical form CastX2P(offset).
 202   Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new AddPNode(base, ptr, off)); }
 203 
 204   Node* CmpP(Node* l, Node* r) { return transform(new CmpPNode(l, r)); }
 205 #ifdef _LP64
 206   Node* XorX(Node* l, Node* r) { return transform(new XorLNode(l, r)); }
 207 #else // _LP64
 208   Node* XorX(Node* l, Node* r) { return transform(new XorINode(l, r)); }
 209 #endif // _LP64
 210   Node* URShiftX(Node* l, Node* r) { return transform(new URShiftXNode(l, r)); }
 211   Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
 212   Node* CastPX(Node* ctl, Node* p) { return transform(new CastP2XNode(ctl, p)); }



 213 
 214   // Memory operations
 215 
 216   // This is the base version which is given an alias index.
 217   Node* load(Node* ctl,
 218              Node* adr,
 219              const Type* t,
 220              BasicType bt,
 221              int adr_idx,
 222              bool require_atomic_access = false);
 223 
 224   // Return the new StoreXNode
 225   Node* store(Node* ctl,
 226               Node* adr,
 227               Node* val,
 228               BasicType bt,
 229               int adr_idx,
 230               MemNode::MemOrd mo,
 231               bool require_atomic_access = false);
 232 




 193   Node* CmpL(Node* l, Node* r) { return transform(new CmpLNode(l, r)); }
 194 
 195   // TLS
 196   Node* thread()  {  return gvn().transform(new ThreadLocalNode()); }
 197 
 198   // Pointers
 199 
 200   // Raw address should be transformed regardless of the 'delay_transform' flag
 201   // to produce canonical form CastX2P(offset).
 202   Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new AddPNode(base, ptr, off)); }
 203 
 204   Node* CmpP(Node* l, Node* r) { return transform(new CmpPNode(l, r)); }
 205 #ifdef _LP64
 206   Node* XorX(Node* l, Node* r) { return transform(new XorLNode(l, r)); }
 207 #else // _LP64
 208   Node* XorX(Node* l, Node* r) { return transform(new XorINode(l, r)); }
 209 #endif // _LP64
 210   Node* URShiftX(Node* l, Node* r) { return transform(new URShiftXNode(l, r)); }
 211   Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
 212   Node* CastPX(Node* ctl, Node* p) { return transform(new CastP2XNode(ctl, p)); }
 213   Node* CastII(Node* ctl, const Type* type, bool carry_dependency = false) {
 214     return transform(new CastIINode(ctl, type, carry_dependency));
 215   }
 216 
 217   // Memory operations
 218 
 219   // This is the base version which is given an alias index.
 220   Node* load(Node* ctl,
 221              Node* adr,
 222              const Type* t,
 223              BasicType bt,
 224              int adr_idx,
 225              bool require_atomic_access = false);
 226 
 227   // Return the new StoreXNode
 228   Node* store(Node* ctl,
 229               Node* adr,
 230               Node* val,
 231               BasicType bt,
 232               int adr_idx,
 233               MemNode::MemOrd mo,
 234               bool require_atomic_access = false);
 235 
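The functional change in this hunk is the new CastII wrapper at lines 213-215 of the updated file. A minimal, hedged sketch of a call site follows; the IdealKit instance kit, the node n, and the narrowed range are illustrative assumptions rather than identifiers from this patch, and the arguments are simply forwarded to CastIINode's constructor.

  // Hedged sketch: building a CastIINode through the new wrapper.
  // 'kit' is an IdealKit and 'n' an assumed Node*; the range is illustrative.
  const Type* narrowed = TypeInt::make(0, 255, Type::WidenMin);
  Node* casted = kit.CastII(n, narrowed, /*carry_dependency=*/ true);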


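More generally, the helpers shown above are thin wrappers that allocate an ideal node and GVN-transform it. The sketch below shows, under stated assumptions, how the pointer and memory helpers might be composed when expanding an intrinsic; kit, base, and alias_idx are illustrative names not taken from this change, and an LP64 build is assumed so that ConX and XorX produce long-sized nodes.

  // Hedged sketch: load a long at base + 16, flip its bits, store it back.
  // 'kit', 'base', and 'alias_idx' are assumed to be in scope; LP64 assumed.
  Node* off = kit.ConX(16);                                   // constant offset
  Node* adr = kit.AddP(base, base, off);                      // base + 16
  Node* v   = kit.load(kit.ctrl(), adr, TypeLong::LONG, T_LONG, alias_idx);
  Node* m   = kit.XorX(v, kit.ConX(-1));                      // bitwise NOT via XorL
  kit.store(kit.ctrl(), adr, m, T_LONG, alias_idx, MemNode::unordered);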