src/share/vm/utilities/globalDefinitions.hpp

 183 #endif
 184 };
 185 
 186 // Analogous opaque struct for metadata allocated from
 187 // metaspaces.
 188 class MetaWord {
 189  private:
 190   char* i;
 191 };
 192 
 193 // HeapWordSize must be 2^LogHeapWordSize.
 194 const int HeapWordSize        = sizeof(HeapWord);
 195 #ifdef _LP64
 196 const int LogHeapWordSize     = 3;
 197 #else
 198 const int LogHeapWordSize     = 2;
 199 #endif
 200 const int HeapWordsPerLong    = BytesPerLong / HeapWordSize;
 201 const int LogHeapWordsPerLong = LogBytesPerLong - LogHeapWordSize;
 202 
 203 // The larger HeapWordSize for 64bit requires larger heaps
 204 // for the same application running in 64bit.  See bug 4967770.
 205 // Only the minimum alignment, to a heap word boundary, is done here.
 206 // Other parts of the memory system may require additional alignment
 207 // and are responsible for those alignments.
 208 #ifdef _LP64
 209 #define ScaleForWordSize(x) align_down_((x) * 13 / 10, HeapWordSize)
 210 #else
 211 #define ScaleForWordSize(x) (x)
 212 #endif
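
// For illustration, assuming the LP64 constants above (HeapWordSize == 8): the macro
// scales a request by 13/10 and then rounds down to a heap word multiple, e.g.
//   ScaleForWordSize(100) => align_down_(100 * 13 / 10, 8) => align_down_(130, 8) => 128
// On 32-bit builds the value is passed through unchanged.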
 213 
 214 // The minimum number of native machine words necessary to contain "byte_size"
 215 // bytes.
 216 inline size_t heap_word_size(size_t byte_size) {
 217   return (byte_size + (HeapWordSize-1)) >> LogHeapWordSize;
 218 }
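
// Worked example (illustration only), assuming LP64 (HeapWordSize == 8, LogHeapWordSize == 3):
//   heap_word_size(17) == (17 + 7) >> 3 == 3   // 3 heap words (24 bytes) cover 17 bytes
//   heap_word_size(16) == (16 + 7) >> 3 == 2   // exact multiples are not rounded up further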
 219 
 220 //-------------------------------------------
 221 // Constant for jlong (standardized by C++11)
 222 
 223 // Build a 64bit integer constant
 224 #define CONST64(x)  (x ## LL)
 225 #define UCONST64(x) (x ## ULL)
 226 
 227 const jlong min_jlong = CONST64(0x8000000000000000);
 228 const jlong max_jlong = CONST64(0x7fffffffffffffff);
 229 
 230 const size_t K                  = 1024;
 231 const size_t M                  = K*K;
 232 const size_t G                  = M*K;
 233 const size_t HWperKB            = K / sizeof(HeapWord);


 478 
 479 // Machine dependent stuff
 480 
 481 // The maximum size of the code cache.  Can be overridden by targets.
 482 #define CODE_CACHE_SIZE_LIMIT (2*G)
 483 // Allow targets to reduce the default size of the code cache.
 484 #define CODE_CACHE_DEFAULT_LIMIT CODE_CACHE_SIZE_LIMIT
 485 
 486 #include CPU_HEADER(globalDefinitions)
 487 
 488 // To ensure the IRIW property on processors that are not multiple copy
 489 // atomic, sync instructions must be issued between volatile reads to
 490 // enforce their ordering, instead of after volatile stores.
 491 // (See "A Tutorial Introduction to the ARM and POWER Relaxed Memory Models"
 492 // by Luc Maranget, Susmit Sarkar and Peter Sewell, INRIA/Cambridge)
 493 #ifdef CPU_NOT_MULTIPLE_COPY_ATOMIC
 494 const bool support_IRIW_for_not_multiple_copy_atomic_cpu = true;
 495 #else
 496 const bool support_IRIW_for_not_multiple_copy_atomic_cpu = false;
 497 #endif
 498 
 499 // The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
 500 // Note: this value must be a power of 2
 501 
 502 #define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)
 503 
 504 // Signed variants of alignment helpers.  There are two versions of each, a macro
 505 // for use in places like enum definitions that require compile-time constant
 506 // expressions and a function for all other places so as to get type checking.
 507 
 508 // Using '(what) & ~align_mask(alignment)' to align 'what' down is broken when
 509 // 'alignment' is an unsigned int and 'what' is a wider type. The & operation
 510 // will widen the inverted mask, and not sign extend it, leading to a mask with
 511 // zeros in the most significant bits. The use of align_mask_widened() solves
 512 // this problem.
 513 #define align_mask(alignment) ((alignment) - 1)
 514 #define widen_to_type_of(what, type_carrier) (true ? (what) : (type_carrier))
 515 #define align_mask_widened(alignment, type_carrier) widen_to_type_of(align_mask(alignment), (type_carrier))
 516 
 517 #define align_down_(size, alignment) ((size) & ~align_mask_widened((alignment), (size)))
 518 
 519 #define align_up_(size, alignment) (align_down_((size) + align_mask(alignment), (alignment)))
 520 
 521 #define is_aligned_(size, alignment) ((size) == (align_up_((size), (alignment))))
 522 
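// Standalone sketch (illustration only, hypothetical values) of the widening problem
// described above and of how the conditional-operator trick in align_down_ avoids it:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t     what      = 0x100000010ULL;  // a value that does not fit in 32 bits
  unsigned int alignment = 16;

  // Naive form: ~(alignment - 1) is the 32-bit mask 0xFFFFFFF0, which is zero-extended,
  // not sign-extended, when widened for the '&', so the upper 32 bits of 'what' are lost.
  uint64_t broken = what & ~(alignment - 1);              // yields 0x10

  // align_down_ form: 'true ? mask : what' first widens the mask to the type of 'what',
  // so the inversion produces all-ones in the upper bits and they survive the '&'.
  uint64_t ok = what & ~(true ? (alignment - 1) : what);  // yields 0x100000010

  printf("broken = 0x%llx, ok = 0x%llx\n", (unsigned long long)broken, (unsigned long long)ok);
  return 0;
}
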
 523 // Temporary declaration until this file has been restructured.
 524 template <typename T>
 525 bool is_power_of_2_t(T x) {
 526   return (x != T(0)) && ((x & (x - 1)) == T(0));
 527 }
 528 
 529 // Helpers to align sizes and check for alignment
 530 
 531 template <typename T, typename A>
 532 inline T align_up(T size, A alignment) {
 533   assert(is_power_of_2_t(alignment), "must be a power of 2: " UINT64_FORMAT, (uint64_t)alignment);
 534 
 535   T ret = align_up_(size, alignment);
 536   assert(is_aligned_(ret, alignment), "must be aligned: " UINT64_FORMAT, (uint64_t)ret);
 537 
 538   return ret;
 539 }
 540 
 541 template <typename T, typename A>
 542 inline T align_down(T size, A alignment) {
 543   assert(is_power_of_2_t(alignment), "must be a power of 2: " UINT64_FORMAT, (uint64_t)alignment);
 544 
 545   T ret = align_down_(size, alignment);
 546   assert(is_aligned_(ret, alignment), "must be aligned: " UINT64_FORMAT, (uint64_t)ret);
 547 
 548   return ret;
 549 }
 550 
 551 template <typename T, typename A>
 552 inline bool is_aligned(T size, A alignment) {
 553   assert(is_power_of_2_t(alignment), "must be a power of 2: " UINT64_FORMAT, (uint64_t)alignment);
 554 
 555   return is_aligned_(size, alignment);
 556 }
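
// Usage sketch (illustration only):
//   align_up((size_t)1000, 256)   == 1024   // next multiple of 256
//   align_down((size_t)1000, 256) ==  768   // previous multiple of 256
//   is_aligned((size_t)1000, 256) == false,  is_aligned((size_t)1024, 256) == true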
 557 
 558 // Align down with a lower bound. If the aligning results in 0, return 'alignment'.
 559 template <typename T, typename A>
 560 inline T align_down_bounded(T size, A alignment) {
 561   T aligned_size = align_down(size, alignment);
 562   return aligned_size > 0 ? aligned_size : alignment;
 563 }
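
// Illustration of the lower bound: align_down(100, 256) == 0, so
// align_down_bounded(100, 256) returns the alignment, 256, rather than 0.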
 564 
 565 // Helpers to align pointers and check for alignment.
 566 
 567 template <typename T, typename A>
 568 inline T* align_up(T* ptr, A alignment) {
 569   return (T*)align_up((uintptr_t)ptr, alignment);
 570 }
 571 
 572 template <typename T, typename A>
 573 inline T* align_down(T* ptr, A alignment) {
 574   return (T*)align_down((uintptr_t)ptr, alignment);
 575 }
 576 
 577 template <typename T, typename A>
 578 inline bool is_aligned(T* ptr, A alignment) {
 579   return is_aligned((uintptr_t)ptr, alignment);
 580 }
 581 
 582 // Align metaspace objects by rounding up to natural word boundary
 583 template <typename T>
 584 inline T align_metadata_size(T size) {
 585   return align_up(size, 1);
 586 }
 587 
 588 // Align objects in the Java Heap by rounding up their size, in HeapWord units.
 589 template <typename T>
 590 inline T align_object_size(T word_size) {
 591   return align_up(word_size, MinObjAlignment);
 592 }
 593 
 594 inline bool is_object_aligned(size_t word_size) {
 595   return is_aligned(word_size, MinObjAlignment);
 596 }
 597 
 598 inline bool is_object_aligned(const void* addr) {
 599   return is_aligned(addr, MinObjAlignmentInBytes);
 600 }
 601 
 602 // Pad out certain offsets to jlong alignment, in HeapWord units.
 603 template <typename T>
 604 inline T align_object_offset(T offset) {
 605   return align_up(offset, HeapWordsPerLong);
 606 }
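
// Illustration only: HeapWordsPerLong is 2 on 32-bit builds and 1 on LP64, so e.g.
//   align_object_offset(5) == 6 on 32-bit (offsets are padded to an even number of words)
//   align_object_offset(5) == 5 on LP64   (every heap word is already jlong aligned)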
 607 
 608 // Clamp an address to be within a specific page:
 609 // 1. If addr is on the page, it is returned as is.
 610 // 2. If addr is above page_address, the start of the *next* page is returned.
 611 // 3. Otherwise (addr is below page_address), the start of the page is returned.
 612 template <typename T>
 613 inline T* clamp_address_in_page(T* addr, T* page_address, size_t page_size) {
 614   if (align_down(addr, page_size) == align_down(page_address, page_size)) {
 615     // address is in the specified page, just return it as is
 616     return addr;
 617   } else if (addr > page_address) {
 618     // address is above specified page, return start of next page
 619     return align_down(page_address, page_size) + page_size;
 620   } else {
 621     // address is below specified page, return start of page
 622     return align_down(page_address, page_size);
 623   }
 624 }
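
// Worked example (hypothetical addresses, T == char, page_size == 0x1000,
// page_address == (char*)0x5000):
//   clamp_address_in_page((char*)0x5800, page_address, 0x1000) == (char*)0x5800  // case 1: on the page
//   clamp_address_in_page((char*)0x7123, page_address, 0x1000) == (char*)0x6000  // case 2: start of *next* page
//   clamp_address_in_page((char*)0x2345, page_address, 0x1000) == (char*)0x5000  // case 3: start of the page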
 625 
 626 
 627 // The expected size in bytes of a cache line, used to pad data structures.
 628 #ifndef DEFAULT_CACHE_LINE_SIZE
 629   #define DEFAULT_CACHE_LINE_SIZE 64
 630 #endif
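
// Illustration only: one common use of this constant is to pad a hot field so that
// unrelated data cannot share its cache line (the struct below is hypothetical,
// not part of this file).

struct PaddedCounter {
  volatile int _value;
  char         _pad[DEFAULT_CACHE_LINE_SIZE - sizeof(int)];  // fill the rest of the line
};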
 631 
 632 
 633 //----------------------------------------------------------------------------------------------------
 634 // Utility macros for compilers
 635 // used to silence compiler warnings
 636 
 637 #define Unused_Variable(var) var
 638 
 639 
 640 //----------------------------------------------------------------------------------------------------
 641 // Miscellaneous
 642 
 643 // 6302670 Eliminate Hotspot __fabsf dependency
 644 // All fabs() callers should call this function instead, which will implicitly
 645 // convert the operand to double, avoiding a dependency on __fabsf which
