src/os/linux/vm/os_linux.cpp

Old version (before hsx-rt.8007074):

2703   int err = os::Linux::commit_memory_impl(addr, size, exec);
2704   if (err != 0) {
2705     // the caller wants all commit errors to exit with the specified mesg:
2706     warn_fail_commit_memory(addr, size, exec, err);
2707     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2708   }
2709 }
2710 
2711 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2712 #ifndef MAP_HUGETLB
2713 #define MAP_HUGETLB 0x40000
2714 #endif
2715 
2716 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2717 #ifndef MADV_HUGEPAGE
2718 #define MADV_HUGEPAGE 14
2719 #endif
2720 
2721 int os::Linux::commit_memory_impl(char* addr, size_t size,
2722                                   size_t alignment_hint, bool exec) {
2723   int err;
2724   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2725     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2726     uintptr_t res =
2727       (uintptr_t) ::mmap(addr, size, prot,
2728                          MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
2729                          -1, 0);
2730     if (res != (uintptr_t) MAP_FAILED) {
2731       if (UseNUMAInterleaving) {
2732         numa_make_global(addr, size);
2733       }
2734       return 0;
2735     }
2736 
2737     err = errno;  // save errno from mmap() call above
2738 
2739     if (!recoverable_mmap_error(err)) {
2740       // However, it is not clear that this loss of our reserved mapping
2741       // happens with large pages on Linux or that we cannot recover
2742       // from the loss. For now, we just issue a warning and we don't
2743       // call vm_exit_out_of_memory(). This issue is being tracked by
2744       // JBS-8007074.
2745       warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2746 //    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
2747 //                          "committing reserved memory.");
2748     }
2749     // Fall through and try to use small pages
2750   }
2751 
2752   err = os::Linux::commit_memory_impl(addr, size, exec);
2753   if (err == 0) {
2754     realign_memory(addr, size, alignment_hint);
2755   }
2756   return err;
2757 }
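
For readers outside HotSpot: the commit path above operates on address space that was reserved earlier with an inaccessible mapping, and committing means mapping real memory over it with MAP_FIXED. A minimal standalone sketch of that reserve/commit pattern (plain Linux mmap, not HotSpot's actual helpers):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t size = 16 * 1024 * 1024;

      // Reserve: claim the address range with no access rights and no
      // swap accounting; nothing else can mmap into it.
      void* raw = ::mmap(NULL, size, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      assert(raw != MAP_FAILED);
      char* base = (char*) raw;

      // Commit: map usable anonymous memory over part of the reservation.
      // MAP_FIXED replaces the PROT_NONE pages at that exact address.
      void* p = ::mmap(base, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      assert(p == base);

      base[0] = 1;            // the committed page is now usable
      ::munmap(base, size);   // release the whole reservation
      return 0;
    }
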
2758 
2759 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2760                           bool exec) {
2761   return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2762 }
2763 
2764 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2765                                   size_t alignment_hint, bool exec,
2766                                   const char* mesg) {
2767   assert(mesg != NULL, "mesg must be specified");
2768   int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
2769   if (err != 0) {
2770     // the caller wants all commit errors to exit with the specified mesg:
2771     warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2772     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2773   }
2774 }
2775 
2776 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2777   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2778     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2779     // be supported or the memory may already be backed by huge pages.
2780     ::madvise(addr, bytes, MADV_HUGEPAGE);
2781   }
2782 }
2783 
2784 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2785   // This method works by doing an mmap over an existing mapping, effectively discarding
2786   // the existing pages. However it won't work for SHM-based large pages that cannot be
2787   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
2788   // small pages on top of the SHM segment. This method always works for small pages, so we
2789   // allow that in any case.
2790   if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
2791     commit_memory(addr, bytes, alignment_hint, !ExecMem);
2792   }
2793 }
2794 
2795 void os::numa_make_global(char *addr, size_t bytes) {
2796   Linux::numa_interleave_memory(addr, bytes);
2797 }
2798 
2799 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2800   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2801 }
2802 
2803 bool os::numa_topology_changed()   { return false; }
2804 
2805 size_t os::numa_get_groups_num() {
2806   int max_node = Linux::numa_max_node();
2807   return max_node > 0 ? max_node + 1 : 1;
2808 }
2809 
2810 int os::numa_get_group_id() {


3140   switch (prot) {
3141   case MEM_PROT_NONE: p = PROT_NONE; break;
3142   case MEM_PROT_READ: p = PROT_READ; break;
3143   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3144   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3145   default:
3146     ShouldNotReachHere();
3147   }
3148   // is_committed is unused.
3149   return linux_mprotect(addr, bytes, p);
3150 }
3151 
3152 bool os::guard_memory(char* addr, size_t size) {
3153   return linux_mprotect(addr, size, PROT_NONE);
3154 }
3155 
3156 bool os::unguard_memory(char* addr, size_t size) {
3157   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
3158 }
3159 
3160 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
3161   bool result = false;
3162   void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
3163                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
3164                   -1, 0);
3165 
3166   if (p != MAP_FAILED) {
3167     // We don't know if this really is a huge page or not.
3168     FILE *fp = fopen("/proc/self/maps", "r");
3169     if (fp) {
3170       while (!feof(fp)) {
3171         char chars[257];
3172         long x = 0;
3173         if (fgets(chars, sizeof(chars), fp)) {
3174           if (sscanf(chars, "%lx-%*x", &x) == 1
3175               && x == (long)p) {
3176             if (strstr (chars, "hugepage")) {
3177               result = true;
3178               break;
3179             }
3180           }
3181         }
3182       }
3183       fclose(fp);
3184     }
3185     munmap (p, page_size);
3186     if (result)
3187       return true;
3188   }
3189 
3190   if (warn) {
3191     warning("HugeTLBFS is not supported by the operating system.");
3192   }
3193 
3194   return result;
3195 }
3196 
3197 /*
3198 * Set the coredump_filter bits to include largepages in core dump (bit 6)
3199 *
3200 * From the coredump_filter documentation:
3201 *
3202 * - (bit 0) anonymous private memory
3203 * - (bit 1) anonymous shared memory
3204 * - (bit 2) file-backed private memory
3205 * - (bit 3) file-backed shared memory
3206 * - (bit 4) ELF header pages in file-backed private memory areas (it is
3207 *           effective only if the bit 2 is cleared)
3208 * - (bit 5) hugetlb private memory
3209 * - (bit 6) hugetlb shared memory
3210 */


3218 
3219   if (fscanf(f, "%lx", &cdm) != 1) {
3220     fclose(f);
3221     return;
3222   }
3223 
3224   rewind(f);
3225 
3226   if ((cdm & LARGEPAGES_BIT) == 0) {
3227     cdm |= LARGEPAGES_BIT;
3228     fprintf(f, "%#lx", cdm);
3229   }
3230 
3231   fclose(f);
3232 }
3233 
3234 // Large page support
3235 
3236 static size_t _large_page_size = 0;
3237 
3238 void os::large_page_init() {
3239   if (!UseLargePages) {
3240     UseHugeTLBFS = false;
3241     UseSHM = false;
3242     return;
3243   }
3244 
3245   if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
3246     // If UseLargePages is specified on the command line try both methods,
3247     // if it's default, then try only HugeTLBFS.
3248     if (FLAG_IS_DEFAULT(UseLargePages)) {
3249       UseHugeTLBFS = true;
3250     } else {
3251       UseHugeTLBFS = UseSHM = true;
3252     }
3253   }
3254 
3255   if (LargePageSizeInBytes) {
3256     _large_page_size = LargePageSizeInBytes;
3257   } else {
3258     // large_page_size on Linux is used to round up heap size. x86 uses either
3259     // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
3260     // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
3261     // page as large as 256M.
3262     //
3263     // Here we try to figure out page size by parsing /proc/meminfo and looking
3264     // for a line with the following format:
3265     //    Hugepagesize:     2048 kB
3266     //
3267     // If we can't determine the value (e.g. /proc is not mounted, or the text
3268     // format has been changed), we'll use the largest page size supported by
3269     // the processor.
3270 
3271 #ifndef ZERO
3272     _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
3273                        ARM_ONLY(2 * M) PPC_ONLY(4 * M);
3274 #endif // ZERO
3275 
3276     FILE *fp = fopen("/proc/meminfo", "r");
3277     if (fp) {
3278       while (!feof(fp)) {
3279         int x = 0;
3280         char buf[16];
3281         if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
3282           if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
3283             _large_page_size = x * K;
3284             break;
3285           }
3286         } else {
3287           // skip to next line
3288           for (;;) {
3289             int ch = fgetc(fp);
3290             if (ch == EOF || ch == (int)'\n') break;
3291           }
3292         }
3293       }
3294       fclose(fp);
3295     }
3296   }
3297 
3298   // print a warning if any large page related flag is specified on command line
3299   bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
3300 
3301   const size_t default_page_size = (size_t)Linux::page_size();
3302   if (_large_page_size > default_page_size) {
3303     _page_sizes[0] = _large_page_size;
3304     _page_sizes[1] = default_page_size;
3305     _page_sizes[2] = 0;
3306   }
3307   UseHugeTLBFS = UseHugeTLBFS &&
3308                  Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
3309 
3310   if (UseHugeTLBFS)
3311     UseSHM = false;
3312 
3313   UseLargePages = UseHugeTLBFS || UseSHM;
3314 
3315   set_coredump_filter();
3316 }
3317 
3318 #ifndef SHM_HUGETLB
3319 #define SHM_HUGETLB 04000
3320 #endif
3321 
3322 char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
3323   // "exec" is passed in but not used.  Creating the shared image for
3324   // the code cache doesn't have an SHM_X executable permission to check.
3325   assert(UseLargePages && UseSHM, "only for SHM large pages");
3326 
3327   key_t key = IPC_PRIVATE;
3328   char *addr;
3329 
3330   bool warn_on_failure = UseLargePages &&
3331                         (!FLAG_IS_DEFAULT(UseLargePages) ||
3332                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
3333                         );
3334   char msg[128];
3335 
3336   // Create a large shared memory region to attach to based on size.
3337   // Currently, size is the total size of the heap
3338   int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
3339   if (shmid == -1) {
3340      // Possible reasons for shmget failure:
3341      // 1. shmmax is too small for Java heap.
3342      //    > check shmmax value: cat /proc/sys/kernel/shmmax
3343      //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
3344      // 2. not enough large page memory.
3345      //    > check available large pages: cat /proc/meminfo
3346      //    > increase amount of large pages:
3347      //          echo new_value > /proc/sys/vm/nr_hugepages
3348      //      Note 1: different Linux distributions may use different names for this property,
3349      //            e.g. on Redhat AS-3 it is "hugetlb_pool".
3350      //      Note 2: it's possible there's enough physical memory available but
3351      //            the pages are so fragmented after a long run that they can't


3359   }
3360 
3361   // attach to the region
3362   addr = (char*)shmat(shmid, req_addr, 0);
3363   int err = errno;
3364 
3365   // Remove shmid. If shmat() is successful, the actual shared memory segment
3366   // will be deleted when it's detached by shmdt() or when the process
3367   // terminates. If shmat() is not successful this will remove the shared
3368   // segment immediately.
3369   shmctl(shmid, IPC_RMID, NULL);
3370 
3371   if ((intptr_t)addr == -1) {
3372      if (warn_on_failure) {
3373        jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3374        warning(msg);
3375      }
3376      return NULL;
3377   }
3378 
3379   if ((addr != NULL) && UseNUMAInterleaving) {
3380     numa_make_global(addr, bytes);
3381   }
3382 
3383   // The memory is committed
3384   MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
3385 
3386   return addr;
3387 }
3388 
3389 bool os::release_memory_special(char* base, size_t bytes) {
3390   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
3391   // detaching the SHM segment will also delete it, see reserve_memory_special()
3392   int rslt = shmdt(base);
3393   if (rslt == 0) {
3394     tkr.record((address)base, bytes);
3395     return true;
3396   } else {
3397     tkr.discard();
3398     return false;
3399   }
3400 }
3401 
3402 size_t os::large_page_size() {
3403   return _large_page_size;
3404 }
3405 
3406 // HugeTLBFS allows applications to commit large page memory on demand;
3407 // with SysV SHM the entire memory region must be allocated as shared
3408 // memory.
3409 bool os::can_commit_large_page_memory() {
3410   return UseHugeTLBFS;
3411 }
3412 
3413 bool os::can_execute_large_page_memory() {
3414   return UseHugeTLBFS;
3415 }
3416 
3417 // Reserve memory at an arbitrary address, only if that area is
3418 // available (and not reserved for something else).
3419 
3420 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3421   const int max_tries = 10;
3422   char* base[max_tries];
3423   size_t size[max_tries];
3424   const size_t gap = 0x000000;
3425 
3426   // Assert only that the size is a multiple of the page size, since
3427   // that's all that mmap requires, and since that's all we really know
3428   // about at this low abstraction level.  If we need higher alignment,
3429   // we can either pass an alignment to this method or verify alignment
3430   // in one of the methods further up the call chain.  See bug 5044738.
3431   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3432 
3433   // Repeatedly allocate blocks until the block is allocated at the
3434   // right spot. Give up after max_tries. Note that reserve_memory() will


4546         vm_page_size()));
4547 
4548   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4549 
4550   Linux::libpthread_init();
4551   if (PrintMiscellaneous && (Verbose || WizardMode)) {
4552      tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
4553           Linux::glibc_version(), Linux::libpthread_version(),
4554           Linux::is_floating_stack() ? "floating stack" : "fixed stack");
4555   }
4556 
4557   if (UseNUMA) {
4558     if (!Linux::libnuma_init()) {
4559       UseNUMA = false;
4560     } else {
4561       if ((Linux::numa_max_node() < 1)) {
4562         // There's only one node (they start from 0), disable NUMA.
4563         UseNUMA = false;
4564       }
4565     }
4566     // With SHM large pages we cannot uncommit a page, so there's no way
4567     // we can make the adaptive lgrp chunk resizing work. If the user specified
4568     // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
4569     // disable adaptive resizing.
4570     if (UseNUMA && UseLargePages && UseSHM) {
4571       if (!FLAG_IS_DEFAULT(UseNUMA)) {
4572         if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
4573           UseLargePages = false;
4574         } else {
4575           warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
4576           UseAdaptiveSizePolicy = false;
4577           UseAdaptiveNUMAChunkSizing = false;
4578         }
4579       } else {
4580         UseNUMA = false;
4581       }
4582     }
4583     if (!UseNUMA && ForceNUMA) {
4584       UseNUMA = true;
4585     }
4586   }
4587 
4588   if (MaxFDLimit) {
4589     // set the number of file descriptors to max. print out error
4590     // if getrlimit/setrlimit fails but continue regardless.
4591     struct rlimit nbr_files;
4592     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4593     if (status != 0) {
4594       if (PrintMiscellaneous && (Verbose || WizardMode))
4595         perror("os::init_2 getrlimit failed");
4596     } else {
4597       nbr_files.rlim_cur = nbr_files.rlim_max;
4598       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4599       if (status != 0) {
4600         if (PrintMiscellaneous && (Verbose || WizardMode))


5831     }
5832   }
5833 }
5834 
5835 //
5836 // See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
5837 //
5838 void MemNotifyThread::start() {
5839   int    fd;
5840   fd = open ("/dev/mem_notify", O_RDONLY, 0);
5841   if (fd < 0) {
5842       return;
5843   }
5844 
5845   if (memnotify_thread() == NULL) {
5846     new MemNotifyThread(fd);
5847   }
5848 }
5849 
5850 #endif // JAVASE_EMBEDDED

New version (hsx-rt.8007074):

2703   int err = os::Linux::commit_memory_impl(addr, size, exec);
2704   if (err != 0) {
2705     // the caller wants all commit errors to exit with the specified mesg:
2706     warn_fail_commit_memory(addr, size, exec, err);
2707     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2708   }
2709 }
2710 
2711 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2712 #ifndef MAP_HUGETLB
2713 #define MAP_HUGETLB 0x40000
2714 #endif
2715 
2716 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2717 #ifndef MADV_HUGEPAGE
2718 #define MADV_HUGEPAGE 14
2719 #endif
2720 
2721 int os::Linux::commit_memory_impl(char* addr, size_t size,
2722                                   size_t alignment_hint, bool exec) {
2723   int err = os::Linux::commit_memory_impl(addr, size, exec);
2724   if (err == 0) {
2725     realign_memory(addr, size, alignment_hint);
2726   }
2727   return err;
2728 }
2729 
2730 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2731                           bool exec) {
2732   return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2733 }
2734 
2735 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2736                                   size_t alignment_hint, bool exec,
2737                                   const char* mesg) {
2738   assert(mesg != NULL, "mesg must be specified");
2739   int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
2740   if (err != 0) {
2741     // the caller wants all commit errors to exit with the specified mesg:
2742     warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2743     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2744   }
2745 }
2746 
2747 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2748   if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2749     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2750     // be supported or the memory may already be backed by huge pages.
2751     ::madvise(addr, bytes, MADV_HUGEPAGE);
2752   }
2753 }
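
The madvise(MADV_HUGEPAGE) hint can be exercised outside the VM as well; a minimal sketch (assumes a kernel built with transparent huge page support and MADV_HUGEPAGE available in <sys/mman.h>):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    // Ask the kernel to back [addr, addr+bytes) with transparent huge pages.
    // The hint is advisory: old kernels fail with EINVAL, and the region
    // keeps working with small pages either way, which is why the VM above
    // ignores the return value.
    static void hint_huge_pages(void* addr, size_t bytes) {
      if (::madvise(addr, bytes, MADV_HUGEPAGE) != 0) {
        perror("madvise(MADV_HUGEPAGE)");  // informational only
      }
    }
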
2754 
2755 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2756   // This method works by doing an mmap over an existing mapping, effectively discarding
2757   // the existing pages. However it won't work for SHM-based large pages that cannot be
2758   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
2759   // small pages on top of the SHM segment. This method always works for small pages, so we
2760   // allow that in any case.
2761   if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
2762     commit_memory(addr, bytes, alignment_hint, !ExecMem);
2763   }
2764 }
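
The "mmap over an existing mapping" technique the comment describes can be shown in isolation; a sketch for an ordinary anonymous small-page region (hypothetical helper, not the HotSpot API):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    // Discard the contents of [addr, addr+bytes) while keeping the address
    // range reserved: mapping fresh anonymous memory over it returns the
    // old backing pages to the kernel; the next touch demand-zeroes new ones.
    static void discard_pages(char* addr, size_t bytes) {
      void* p = ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      assert(p == (void*) addr);
    }
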
2765 
2766 void os::numa_make_global(char *addr, size_t bytes) {
2767   Linux::numa_interleave_memory(addr, bytes);
2768 }
2769 
2770 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2771   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2772 }
2773 
2774 bool os::numa_topology_changed()   { return false; }
2775 
2776 size_t os::numa_get_groups_num() {
2777   int max_node = Linux::numa_max_node();
2778   return max_node > 0 ? max_node + 1 : 1;
2779 }
2780 
2781 int os::numa_get_group_id() {


3111   switch (prot) {
3112   case MEM_PROT_NONE: p = PROT_NONE; break;
3113   case MEM_PROT_READ: p = PROT_READ; break;
3114   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3115   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3116   default:
3117     ShouldNotReachHere();
3118   }
3119   // is_committed is unused.
3120   return linux_mprotect(addr, bytes, p);
3121 }
3122 
3123 bool os::guard_memory(char* addr, size_t size) {
3124   return linux_mprotect(addr, size, PROT_NONE);
3125 }
3126 
3127 bool os::unguard_memory(char* addr, size_t size) {
3128   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
3129 }
3130 
3131 bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
3132   bool result = false;
3133   void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
3134                  MAP_ANONYMOUS|MAP_PRIVATE,
3135                  -1, 0);
3136   if (p != MAP_FAILED) {
3137     void *aligned_p = align_ptr_up(p, page_size);
3138 
3139     result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
3140 
3141     munmap(p, page_size * 2);
3142   }
3143 
3144   if (warn && !result) {
3145     warning("TransparentHugePages is not supported by the operating system.");
3146   }
3147 
3148   return result;
3149 }
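
The probe above over-allocates page_size * 2 bytes so that an address aligned to page_size is guaranteed to exist inside the mapping. align_ptr_up() is a HotSpot utility; for a power-of-two size it reduces to the usual bitmask, as this hypothetical equivalent shows:

    #include <cstdint>
    #include <cstddef>

    // Hypothetical stand-in for HotSpot's align_ptr_up(): round p up to the
    // next multiple of 'alignment', which must be a power of two.
    static inline void* align_up(void* p, size_t alignment) {
      uintptr_t v = (uintptr_t) p;
      return (void*) ((v + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }
    // Example: align_up((void*) 0x7f0000201000, 2 * 1024 * 1024)
    //          == (void*) 0x7f0000400000
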
3150 
3151 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
3152   bool result = false;
3153   void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
3154                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
3155                  -1, 0);
3156 
3157   if (p != MAP_FAILED) {
3158     // We don't know if this really is a huge page or not.
3159     FILE *fp = fopen("/proc/self/maps", "r");
3160     if (fp) {
3161       while (!feof(fp)) {
3162         char chars[257];
3163         long x = 0;
3164         if (fgets(chars, sizeof(chars), fp)) {
3165           if (sscanf(chars, "%lx-%*x", &x) == 1
3166               && x == (long)p) {
3167             if (strstr (chars, "hugepage")) {
3168               result = true;
3169               break;
3170             }
3171           }
3172         }
3173       }
3174       fclose(fp);
3175     }
3176     munmap(p, page_size);
3177   }
3178 
3179   if (warn && !result) {
3180     warning("HugeTLBFS is not supported by the operating system.");
3181   }
3182 
3183   return result;
3184 }
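
The /proc/self/maps scan works because the kernel labels hugetlb mappings; on typical kernels an anonymous MAP_HUGETLB region appears along these lines (exact fields vary by kernel version):

    2aaaaac00000-2aaaab000000 rw-p 00000000 00:0f 4194320    /anon_hugepage (deleted)

The code matches only the start address and the substring "hugepage", so it tolerates such formatting differences.
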
3185 
3186 /*
3187 * Set the coredump_filter bits to include largepages in core dump (bit 6)
3188 *
3189 * From the coredump_filter documentation:
3190 *
3191 * - (bit 0) anonymous private memory
3192 * - (bit 1) anonymous shared memory
3193 * - (bit 2) file-backed private memory
3194 * - (bit 3) file-backed shared memory
3195 * - (bit 4) ELF header pages in file-backed private memory areas (it is
3196 *           effective only if the bit 2 is cleared)
3197 * - (bit 5) hugetlb private memory
3198 * - (bit 6) hugetlb shared memory
3199 */


3207 
3208   if (fscanf(f, "%lx", &cdm) != 1) {
3209     fclose(f);
3210     return;
3211   }
3212 
3213   rewind(f);
3214 
3215   if ((cdm & LARGEPAGES_BIT) == 0) {
3216     cdm |= LARGEPAGES_BIT;
3217     fprintf(f, "%#lx", cdm);
3218   }
3219 
3220   fclose(f);
3221 }
3222 
3223 // Large page support
3224 
3225 static size_t _large_page_size = 0;
3226 
3227 size_t os::Linux::find_large_page_size() {
3228   size_t large_page_size = 0;
3229 
3230   // large_page_size on Linux is used to round up heap size. x86 uses either
3231   // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
3232   // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
3233   // page as large as 256M.
3234   //
3235   // Here we try to figure out page size by parsing /proc/meminfo and looking
3236   // for a line with the following format:
3237   //    Hugepagesize:     2048 kB
3238   //
3239   // If we can't determine the value (e.g. /proc is not mounted, or the text
3240   // format has been changed), we'll use the largest page size supported by
3241   // the processor.
3242 
3243 #ifndef ZERO
3244   large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
3245                      ARM_ONLY(2 * M) PPC_ONLY(4 * M);
3246 #endif // ZERO
3247 
3248   FILE *fp = fopen("/proc/meminfo", "r");
3249   if (fp) {
3250     while (!feof(fp)) {
3251       int x = 0;
3252       char buf[16];
3253       if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
3254         if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
3255           large_page_size = x * K;
3256           break;
3257         }
3258       } else {
3259         // skip to next line
3260         for (;;) {
3261           int ch = fgetc(fp);
3262           if (ch == EOF || ch == (int)'\n') break;
3263         }
3264       }
3265     }
3266     fclose(fp);
3267   }
3268 
3269   if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
3270     warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
3271         SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
3272         proper_unit_for_byte_size(large_page_size));
3273   }
3274 
3275   return large_page_size;
3276 }
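
For example, on a stock x86_64 system /proc/meminfo contains a line such as

    Hugepagesize:       2048 kB

which the loop parses as x == 2048: fscanf's literal space matches the run of blanks, fgets then consumes the trailing " kB\n", and large_page_size becomes 2048 * K = 2 MB.
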
3277 
3278 size_t os::Linux::setup_large_page_size() {
3279   _large_page_size = Linux::find_large_page_size();
3280   size_t default_page_size = (size_t)Linux::page_size();
3281   if (_large_page_size > default_page_size) {
3282     _page_sizes[0] = _large_page_size;
3283     _page_sizes[1] = default_page_size;
3284     _page_sizes[2] = 0;
3285   }
3286 
3287   return _large_page_size;
3288 }
3289 
3290 bool os::Linux::setup_large_page_type(size_t page_size) {
3291   if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
3292       FLAG_IS_DEFAULT(UseSHM) &&
3293       FLAG_IS_DEFAULT(UseTransparentHugePages)) {
3294     // If UseLargePages is specified on the command line try all methods,
3295     // if it's default, then try only UseTransparentHugePages.
3296     if (FLAG_IS_DEFAULT(UseLargePages)) {
3297       UseTransparentHugePages = true;
3298     } else {
3299       UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
3300     }
3301   }
3302 
3303   if (UseTransparentHugePages) {
3304     bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
3305     if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
3306       UseHugeTLBFS = false;
3307       UseSHM = false;
3308       return true;
3309     }
3310     UseTransparentHugePages = false;
3311   }
3312 
3313   if (UseHugeTLBFS) {
3314     bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
3315     if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
3316       UseSHM = false;
3317       return true;
3318     }
3319     UseHugeTLBFS = false;
3320   }
3321 
3322   return UseSHM;
3323 }
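
The resulting precedence is UseTransparentHugePages first, then UseHugeTLBFS, then UseSHM as the last resort; explicitly requesting one method leaves the earlier ones disabled. Illustrative command lines (behavior as implied by the code above):

    # All method flags left default, UseLargePages set explicitly:
    # probe THP, then HugeTLBFS, then fall back to SHM.
    java -XX:+UseLargePages -Xmx4g ...

    # A specific method requested: only HugeTLBFS is probed; if the probe
    # fails, large pages stay off (UseSHM is still false by default).
    java -XX:+UseLargePages -XX:+UseHugeTLBFS -Xmx4g ...
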
3324 
3325 void os::large_page_init() {
3326   if (!UseLargePages) {
3327     UseHugeTLBFS = false;
3328     UseTransparentHugePages = false;
3329     UseSHM = false;
3330     return;
3331   }
3332 
3333   size_t large_page_size = Linux::setup_large_page_size();
3334   UseLargePages          = Linux::setup_large_page_type(large_page_size);
3335 
3336   set_coredump_filter();
3337 }
3338 
3339 #ifndef SHM_HUGETLB
3340 #define SHM_HUGETLB 04000
3341 #endif
3342 
3343 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3344   // "exec" is passed in but not used.  Creating the shared image for
3345   // the code cache doesn't have an SHM_X executable permission to check.
3346   assert(UseLargePages && UseSHM, "only for SHM large pages");
3347   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
3348 
3349   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3350     return NULL; // Fallback to small pages.
3351   }
3352 
3353   key_t key = IPC_PRIVATE;
3354   char *addr;
3355 
3356   bool warn_on_failure = UseLargePages &&
3357                         (!FLAG_IS_DEFAULT(UseLargePages) ||
3358                          !FLAG_IS_DEFAULT(UseSHM) ||
3359                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
3360                         );
3361   char msg[128];
3362 
3363   // Create a large shared memory region to attach to based on size.
3364   // Currently, size is the total size of the heap
3365   int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
3366   if (shmid == -1) {
3367      // Possible reasons for shmget failure:
3368      // 1. shmmax is too small for Java heap.
3369      //    > check shmmax value: cat /proc/sys/kernel/shmmax
3370      //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
3371      // 2. not enough large page memory.
3372      //    > check available large pages: cat /proc/meminfo
3373      //    > increase amount of large pages:
3374      //          echo new_value > /proc/sys/vm/nr_hugepages
3375      //      Note 1: different Linux distributions may use different names for this property,
3376      //            e.g. on Redhat AS-3 it is "hugetlb_pool".
3377      //      Note 2: it's possible there's enough physical memory available but
3378      //            the pages are so fragmented after a long run that they can't


3386   }
3387 
3388   // attach to the region
3389   addr = (char*)shmat(shmid, req_addr, 0);
3390   int err = errno;
3391 
3392   // Remove shmid. If shmat() is successful, the actual shared memory segment
3393   // will be deleted when it's detached by shmdt() or when the process
3394   // terminates. If shmat() is not successful this will remove the shared
3395   // segment immediately.
3396   shmctl(shmid, IPC_RMID, NULL);
3397 
3398   if ((intptr_t)addr == -1) {
3399      if (warn_on_failure) {
3400        jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3401        warning(msg);
3402      }
3403      return NULL;
3404   }
3405 
3406   return addr;
3407 }
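
The shmctl(IPC_RMID) immediately after shmat() is a classic SysV idiom: a segment marked for removal is deleted automatically when its last attachment goes away, so it cannot leak even if the process dies without calling shmdt(). A standalone sketch of the same lifecycle with ordinary (non-hugetlb) shared memory:

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <cstddef>

    // Create a private segment, attach it, and mark it for deletion so the
    // kernel reclaims it on the last detach (or on process exit).
    static void* private_shm(size_t bytes) {
      int shmid = ::shmget(IPC_PRIVATE, bytes, IPC_CREAT | SHM_R | SHM_W);
      if (shmid == -1) return NULL;
      void* addr = ::shmat(shmid, NULL, 0);
      ::shmctl(shmid, IPC_RMID, NULL);        // delete on last detach
      return (addr == (void*) -1) ? NULL : addr;
    }
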
3408 
3409 static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
3410   assert(error == ENOMEM, "Only expect to fail if no memory is available");
3411 
3412   bool warn_on_failure = UseLargePages &&
3413       (!FLAG_IS_DEFAULT(UseLargePages) ||
3414        !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
3415        !FLAG_IS_DEFAULT(LargePageSizeInBytes));
3416 
3417   if (warn_on_failure) {
3418     char msg[128];
3419     jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
3420         PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
3421     warning(msg);
3422   }
3423 }
3424 
3425 char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
3426   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
3427   assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
3428   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
3429 
3430   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
3431   char* addr = (char*)::mmap(req_addr, bytes, prot,
3432                              MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
3433                              -1, 0);
3434 
3435   if (addr == MAP_FAILED) {
3436     warn_on_large_pages_failure(req_addr, bytes, errno);
3437     return NULL;
3438   }
3439 
3440   assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
3441 
3442   return addr;
3443 }
3444 
3445 char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3446   size_t large_page_size = os::large_page_size();
3447 
3448   assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
3449 
3450   // Allocate small pages.
3451 
3452   char* start;
3453   if (req_addr != NULL) {
3454     assert(is_ptr_aligned(req_addr, alignment), "Must be");
3455     assert(is_size_aligned(bytes, alignment), "Must be");
3456     start = os::reserve_memory(bytes, req_addr);
3457     assert(start == NULL || start == req_addr, "Must be");
3458   } else {
3459     start = os::reserve_memory_aligned(bytes, alignment);
3460   }
3461 
3462   if (start == NULL) {
3463     return NULL;
3464   }
3465 
3466   assert(is_ptr_aligned(start, alignment), "Must be");
3467 
3468   // os::reserve_memory_special will record this memory area.
3469   // Need to release it here to prevent overlapping reservations.
3470   MemTracker::record_virtual_memory_release((address)start, bytes);
3471 
3472   char* end = start + bytes;
3473 
3474   // Find the regions of the allocated chunk that can be promoted to large pages.
3475   char* lp_start = (char*)align_ptr_up(start, large_page_size);
3476   char* lp_end   = (char*)align_ptr_down(end, large_page_size);
3477 
3478   size_t lp_bytes = lp_end - lp_start;
3479 
3480   assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
3481 
3482   if (lp_bytes == 0) {
3483     // The mapped region doesn't even span the start and the end of a large page.
3484     // Fall back to allocate a non-special area.
3485     ::munmap(start, end - start);
3486     return NULL;
3487   }
3488 
3489   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
3490 
3491 
3492   void* result;
3493 
3494   if (start != lp_start) {
3495     result = ::mmap(start, lp_start - start, prot,
3496                     MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
3497                     -1, 0);
3498     if (result == MAP_FAILED) {
3499       ::munmap(lp_start, end - lp_start);
3500       return NULL;
3501     }
3502   }
3503 
3504   result = ::mmap(lp_start, lp_bytes, prot,
3505                   MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
3506                   -1, 0);
3507   if (result == MAP_FAILED) {
3508     warn_on_large_pages_failure(req_addr, bytes, errno);
3509     // If the mmap above fails, the large pages region will be unmapped and we
3510     // have regions before and after with small pages. Release these regions.
3511     //
3512     // |  mapped  |  unmapped  |  mapped  |
3513     // ^          ^            ^          ^
3514     // start      lp_start     lp_end     end
3515     //
3516     ::munmap(start, lp_start - start);
3517     ::munmap(lp_end, end - lp_end);
3518     return NULL;
3519   }
3520 
3521   if (lp_end != end) {
3522     result = ::mmap(lp_end, end - lp_end, prot,
3523                     MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
3524                     -1, 0);
3525     if (result == MAP_FAILED) {
3526       ::munmap(start, lp_end - start);
3527       return NULL;
3528     }
3529   }
3530 
3531   return start;
3532 }
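
A worked example of the split, assuming a 2 MB large page size: reserving bytes = 5 MB at start = 0x7f0000100000 gives end = 0x7f0000600000, lp_start = 0x7f0000200000, and lp_end = 0x7f0000600000. The middle 4 MB is remapped with MAP_HUGETLB; the leading 1 MB keeps small pages, and since lp_end == end there is no trailing small-page piece.
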
3533 
3534 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3535   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
3536   assert(is_ptr_aligned(req_addr, alignment), "Must be");
3537   assert(is_power_of_2(alignment), "Must be");
3538   assert(is_power_of_2(os::large_page_size()), "Must be");
3539   assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
3540 
3541   if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
3542     return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
3543   } else {
3544     return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
3545   }
3546 }
3547 
3548 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3549   assert(UseLargePages, "only for large pages");
3550 
3551   char* addr;
3552   if (UseSHM) {
3553     addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
3554   } else {
3555     assert(UseHugeTLBFS, "must be");
3556     addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
3557   }
3558 
3559   if (addr != NULL) {
3560     if (UseNUMAInterleaving) {
3561       numa_make_global(addr, bytes);
3562     }
3563 
3564     // The memory is committed
3565     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
3566   }
3567 
3568   return addr;
3569 }
3570 
3571 bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
3572   // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
3573   return shmdt(base) == 0;
3574 }
3575 
3576 bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
3577   return pd_release_memory(base, bytes);
3578 }
3579 
3580 bool os::release_memory_special(char* base, size_t bytes) {
3581   assert(UseLargePages, "only for large pages");
3582 
3583   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
3584 
3585   bool res;
3586   if (UseSHM) {
3587     res = os::Linux::release_memory_special_shm(base, bytes);
3588   } else {
3589     assert(UseHugeTLBFS, "must be");
3590     res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
3591   }
3592 
3593   if (res) {
3594     tkr.record((address)base, bytes);
3595   } else {
3596     tkr.discard();
3597   }
3598 
3599   return res;
3600 }
3601 
3602 size_t os::large_page_size() {
3603   return _large_page_size;
3604 }
3605 
3606 // With SysV SHM the entire memory region must be allocated as shared
3607 // memory.
3608 // HugeTLBFS allows applications to commit large page memory on demand.
3609 // However, when committing memory with HugeTLBFS fails, the region
3610 // that was supposed to be committed will lose the old reservation
3611 // and allow other threads to steal that memory region. Because of this
3612 // behavior we can't commit HugeTLBFS memory.
3613 bool os::can_commit_large_page_memory() {
3614   return UseTransparentHugePages;
3615 }
3616 
3617 bool os::can_execute_large_page_memory() {
3618   return UseTransparentHugePages || UseHugeTLBFS;
3619 }
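
To make the hazard concrete, this is the failing sequence implied by the comment above (illustrative, not HotSpot code):

    // 1. T1 reserves [base, base+2M) as small PROT_NONE pages.
    // 2. T1 commits:
    //      ::mmap(base, 2M, prot,
    //             MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB, -1, 0)
    //    The old mapping is dropped, but the huge page allocation fails
    //    (e.g. the hugetlb pool is empty) and mmap returns MAP_FAILED.
    // 3. [base, base+2M) is now unmapped, so a concurrent ::mmap(NULL, ...)
    //    in T2 may legally be placed inside it -- the reservation is stolen.
    //
    // This is why can_commit_large_page_memory() is false for HugeTLBFS.
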
3620 
3621 // Reserve memory at an arbitrary address, only if that area is
3622 // available (and not reserved for something else).
3623 
3624 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3625   const int max_tries = 10;
3626   char* base[max_tries];
3627   size_t size[max_tries];
3628   const size_t gap = 0x000000;
3629 
3630   // Assert only that the size is a multiple of the page size, since
3631   // that's all that mmap requires, and since that's all we really know
3632   // about at this low abstraction level.  If we need higher alignment,
3633   // we can either pass an alignment to this method or verify alignment
3634   // in one of the methods further up the call chain.  See bug 5044738.
3635   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3636 
3637   // Repeatedly allocate blocks until the block is allocated at the
3638   // right spot. Give up after max_tries. Note that reserve_memory() will


4750         vm_page_size()));
4751 
4752   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4753 
4754   Linux::libpthread_init();
4755   if (PrintMiscellaneous && (Verbose || WizardMode)) {
4756      tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
4757           Linux::glibc_version(), Linux::libpthread_version(),
4758           Linux::is_floating_stack() ? "floating stack" : "fixed stack");
4759   }
4760 
4761   if (UseNUMA) {
4762     if (!Linux::libnuma_init()) {
4763       UseNUMA = false;
4764     } else {
4765       if ((Linux::numa_max_node() < 1)) {
4766         // There's only one node (they start from 0), disable NUMA.
4767         UseNUMA = false;
4768       }
4769     }
4770     // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
4771     // we can make the adaptive lgrp chunk resizing work. If the user specified
4772     // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
4773     // disable adaptive resizing.
4774     if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
4775       if (FLAG_IS_DEFAULT(UseNUMA)) {
4776         UseNUMA = false;
4777       } else {
4778         if (FLAG_IS_DEFAULT(UseLargePages) &&
4779             FLAG_IS_DEFAULT(UseSHM) &&
4780             FLAG_IS_DEFAULT(UseHugeTLBFS)) {
4781           UseLargePages = false;
4782         } else {
4783           warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
4784           UseAdaptiveSizePolicy = false;
4785           UseAdaptiveNUMAChunkSizing = false;
4786         }
4787       }
4788     }
4789     if (!UseNUMA && ForceNUMA) {
4790       UseNUMA = true;
4791     }
4792   }
4793 
4794   if (MaxFDLimit) {
4795     // set the number of file descriptors to max. print out error
4796     // if getrlimit/setrlimit fails but continue regardless.
4797     struct rlimit nbr_files;
4798     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4799     if (status != 0) {
4800       if (PrintMiscellaneous && (Verbose || WizardMode))
4801         perror("os::init_2 getrlimit failed");
4802     } else {
4803       nbr_files.rlim_cur = nbr_files.rlim_max;
4804       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4805       if (status != 0) {
4806         if (PrintMiscellaneous && (Verbose || WizardMode))


6037     }
6038   }
6039 }
6040 
6041 //
6042 // See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
6043 //
6044 void MemNotifyThread::start() {
6045   int    fd;
6046   fd = open ("/dev/mem_notify", O_RDONLY, 0);
6047   if (fd < 0) {
6048       return;
6049   }
6050 
6051   if (memnotify_thread() == NULL) {
6052     new MemNotifyThread(fd);
6053   }
6054 }
6055 
6056 #endif // JAVASE_EMBEDDED
6057 
6058 
6059 /////////////// Unit tests ///////////////
6060 
6061 #ifndef PRODUCT
6062 
6063 #define test_log(...) \
6064   do {\
6065     if (VerboseInternalVMTests) { \
6066       tty->print_cr(__VA_ARGS__); \
6067       tty->flush(); \
6068     }\
6069   } while (false)
6070 
6071 class TestReserveMemorySpecial : AllStatic {
6072  public:
6073   static void small_page_write(void* addr, size_t size) {
6074     size_t page_size = os::vm_page_size();
6075 
6076     char* end = (char*)addr + size;
6077     for (char* p = (char*)addr; p < end; p += page_size) {
6078       *p = 1;
6079     }
6080   }
6081 
6082   static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
6083     if (!UseHugeTLBFS) {
6084       return;
6085     }
6086 
6087     test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
6088 
6089     char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
6090 
6091     if (addr != NULL) {
6092       small_page_write(addr, size);
6093 
6094       os::Linux::release_memory_special_huge_tlbfs(addr, size);
6095     }
6096   }
6097 
6098   static void test_reserve_memory_special_huge_tlbfs_only() {
6099     if (!UseHugeTLBFS) {
6100       return;
6101     }
6102 
6103     size_t lp = os::large_page_size();
6104 
6105     for (size_t size = lp; size <= lp * 10; size += lp) {
6106       test_reserve_memory_special_huge_tlbfs_only(size);
6107     }
6108   }
6109 
6110   static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
6111     if (!UseHugeTLBFS) {
6112       return;
6113     }
6114 
6115     test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
6116         size, alignment);
6117 
6118     assert(size >= os::large_page_size(), "Incorrect input to test");
6119 
6120     char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
6121 
6122     if (addr != NULL) {
6123       small_page_write(addr, size);
6124 
6125       os::Linux::release_memory_special_huge_tlbfs(addr, size);
6126     }
6127   }
6128 
6129   static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
6130     size_t lp = os::large_page_size();
6131     size_t ag = os::vm_allocation_granularity();
6132 
6133     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
6134       test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
6135     }
6136   }
6137 
6138   static void test_reserve_memory_special_huge_tlbfs_mixed() {
6139     size_t lp = os::large_page_size();
6140     size_t ag = os::vm_allocation_granularity();
6141 
6142     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
6143     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
6144     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
6145     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
6146     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
6147     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
6148     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
6149     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
6150     test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
6151   }
6152 
6153   static void test_reserve_memory_special_huge_tlbfs() {
6154     if (!UseHugeTLBFS) {
6155       return;
6156     }
6157 
6158     test_reserve_memory_special_huge_tlbfs_only();
6159     test_reserve_memory_special_huge_tlbfs_mixed();
6160   }
6161 
6162   static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
6163     if (!UseSHM) {
6164       return;
6165     }
6166 
6167     test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
6168 
6169     char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
6170 
6171     if (addr != NULL) {
6172       assert(is_ptr_aligned(addr, alignment), "Check");
6173       assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
6174 
6175       small_page_write(addr, size);
6176 
6177       os::Linux::release_memory_special_shm(addr, size);
6178     }
6179   }
6180 
6181   static void test_reserve_memory_special_shm() {
6182     size_t lp = os::large_page_size();
6183     size_t ag = os::vm_allocation_granularity();
6184 
6185     for (size_t size = ag; size < lp * 3; size += ag) {
6186       for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
6187         test_reserve_memory_special_shm(size, alignment);
6188       }
6189     }
6190   }
6191 
6192   static void test() {
6193     test_reserve_memory_special_huge_tlbfs();
6194     test_reserve_memory_special_shm();
6195   }
6196 };
6197 
6198 void TestReserveMemorySpecial_test() {
6199   TestReserveMemorySpecial::test();
6200 }
6201 
6202 #endif
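
These tests compile only in non-product builds (#ifndef PRODUCT). In a debug build they are reachable through HotSpot's internal test driver, for example:

    java -XX:+ExecuteInternalVMTests -XX:+VerboseInternalVMTests -version

(VerboseInternalVMTests is the flag consulted by the test_log macro above.)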