< prev index next >

src/share/vm/runtime/vm_version.hpp

Print this page




  81   static const char* vm_vendor();
  82   // VM version information string printed by launcher (java -version)
  83   static const char* vm_info_string();
  84   static const char* vm_release();
  85   static const char* vm_platform_string();
  86   static const char* vm_build_user();
  87 
       // Numeric components of the VM version. Each asserts that version
       // initialization has completed (_initialized) before returning the
       // cached value, so callers cannot read an unparsed/zero field.
  88   static int vm_major_version()               { assert(_initialized, "not initialized"); return _vm_major_version; }
  89   static int vm_minor_version()               { assert(_initialized, "not initialized"); return _vm_minor_version; }
  90   static int vm_build_number()                { assert(_initialized, "not initialized"); return _vm_build_number; }
  91 
  92   // Gets the jvm_version_info.jvm_version defined in jvm.h
  93   static unsigned int jvm_version();
  94 
  95   // Internal version providing additional build information
  96   static const char* internal_vm_info_string();
  97   static const char* jre_release_version();
  98 
  99   // does HW support an 8-byte compare-exchange operation?
 100   static bool supports_cx8()  {

       // Compile-time fast path: platforms that define SUPPORTS_NATIVE_CX8
       // always have a native 8-byte CAS, so no runtime probe is needed.
 101 #ifdef SUPPORTS_NATIVE_CX8
 102     return true;
 103 #else
         // Otherwise fall back to the runtime-detected CPU feature flag.
         // NOTE(review): unlike the version accessors, this read is not
         // guarded by an _initialized assert in this version.
 104     return _supports_cx8;
 105 #endif
 106   }
 107   // does HW support atomic get-and-set or atomic get-and-add?  Used
 108   // to guide intrinsification decisions for Unsafe atomic ops
       // Each getter returns a runtime-detected CPU capability flag; the
       // 4/8 suffix presumably denotes the operand width in bytes
       // (32-/64-bit) — confirm against the platform feature-detection code.
       // NOTE(review): no _initialized assert here in this version.
 109   static bool supports_atomic_getset4()  {return _supports_atomic_getset4;}
 110   static bool supports_atomic_getset8()  {return _supports_atomic_getset8;}
 111   static bool supports_atomic_getadd4()  {return _supports_atomic_getadd4;}
 112   static bool supports_atomic_getadd8()  {return _supports_atomic_getadd8;}
 113 
       // Logical processors (hardware threads) per CPU package, read from a
       // cached field — presumably filled in by platform-specific
       // initialization, which is not visible in this chunk.
 114   static unsigned int logical_processors_per_package() {
 115     return _logical_processors_per_package;
 116   }
 117 
       // Cached L1 data cache line size. NOTE(review): unit (bytes) is
       // assumed from the name — confirm against the platform code that
       // sets _L1_data_cache_line_size.
 118   static unsigned int L1_data_cache_line_size() {
 119     return _L1_data_cache_line_size;
 120   }
 121 
 122   // Need a space at the end of TLAB for prefetch instructions
 123   // which may fault when accessing memory outside of heap.
       // Returns the cached reservation size (presumably bytes — confirm
       // where _reserve_for_allocation_prefetch is set).
 124   static int reserve_for_allocation_prefetch() {
 125     return _reserve_for_allocation_prefetch;
 126   }
 127 
 128   // ARCH specific policy for the BiasedLocking
       // Default policy: always allow biased locking. NOTE(review): the
       // comment above says "ARCH specific", so architecture ports
       // presumably provide their own version — confirm elsewhere.
 129   static bool use_biased_locking()  { return true; }
 130 
 131   // Number of page sizes efficiently supported by the hardware.  Most chips now
 132   // support two sizes, thus this default implementation.  Processor-specific


  81   static const char* vm_vendor();
  82   // VM version information string printed by launcher (java -version)
  83   static const char* vm_info_string();
  84   static const char* vm_release();
  85   static const char* vm_platform_string();
  86   static const char* vm_build_user();
  87 
       // Numeric components of the VM version. Each asserts that version
       // initialization has completed (_initialized) before returning the
       // cached value, so callers cannot read an unparsed/zero field.
  88   static int vm_major_version()               { assert(_initialized, "not initialized"); return _vm_major_version; }
  89   static int vm_minor_version()               { assert(_initialized, "not initialized"); return _vm_minor_version; }
  90   static int vm_build_number()                { assert(_initialized, "not initialized"); return _vm_build_number; }
  91 
  92   // Gets the jvm_version_info.jvm_version defined in jvm.h
  93   static unsigned int jvm_version();
  94 
  95   // Internal version providing additional build information
  96   static const char* internal_vm_info_string();
  97   static const char* jre_release_version();
  98 
  99   // does HW support an 8-byte compare-exchange operation?
 100   static bool supports_cx8()  {
         // Guard against querying before capability detection has run and
         // populated _supports_cx8; runs on both branches for consistency.
 101     assert(_initialized, "not initialized");
       // Compile-time fast path: platforms that define SUPPORTS_NATIVE_CX8
       // always have a native 8-byte CAS, so no runtime probe is needed.
 102 #ifdef SUPPORTS_NATIVE_CX8
 103     return true;
 104 #else
         // Otherwise fall back to the runtime-detected CPU feature flag.
 105     return _supports_cx8;
 106 #endif
 107   }
 108   // does HW support atomic get-and-set or atomic get-and-add?  Used
 109   // to guide intrinsification decisions for Unsafe atomic ops
       // Each getter asserts _initialized before reading its runtime-detected
       // capability flag. The 4/8 suffix presumably denotes the operand width
       // in bytes (32-/64-bit) — confirm against the platform detection code.
 110   static bool supports_atomic_getset4()  { assert(_initialized, "not initialized"); return _supports_atomic_getset4;}
 111   static bool supports_atomic_getset8()  { assert(_initialized, "not initialized"); return _supports_atomic_getset8;}
 112   static bool supports_atomic_getadd4()  { assert(_initialized, "not initialized"); return _supports_atomic_getadd4;}
 113   static bool supports_atomic_getadd8()  { assert(_initialized, "not initialized"); return _supports_atomic_getadd8;}
 114 
       // Logical processors (hardware threads) per CPU package, read from a
       // cached field — presumably filled in by platform-specific
       // initialization, which is not visible in this chunk.
 115   static unsigned int logical_processors_per_package() {
 116     return _logical_processors_per_package;
 117   }
 118 
       // Cached L1 data cache line size. NOTE(review): unit (bytes) is
       // assumed from the name — confirm against the platform code that
       // sets _L1_data_cache_line_size.
 119   static unsigned int L1_data_cache_line_size() {
 120     return _L1_data_cache_line_size;
 121   }
 122 
 123   // Need a space at the end of TLAB for prefetch instructions
 124   // which may fault when accessing memory outside of heap.
       // Returns the cached reservation size (presumably bytes — confirm
       // where _reserve_for_allocation_prefetch is set).
 125   static int reserve_for_allocation_prefetch() {
 126     return _reserve_for_allocation_prefetch;
 127   }
 128 
 129   // ARCH specific policy for the BiasedLocking
       // Default policy: always allow biased locking. NOTE(review): the
       // comment above says "ARCH specific", so architecture ports
       // presumably provide their own version — confirm elsewhere.
 130   static bool use_biased_locking()  { return true; }
 131 
 132   // Number of page sizes efficiently supported by the hardware.  Most chips now
 133   // support two sizes, thus this default implementation.  Processor-specific
< prev index next >