/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"

#ifdef LINUX

#include <sys/mman.h>

#include "runtime/os.hpp"
#include "unittest.hpp"

namespace {
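  // Touch the first byte of every small page in [addr, addr + size) so that
  // the whole range is committed and demonstrably writable.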
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

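  // RAII wrapper around the hugetlbfs reservation helpers. Deriving
  // privately from os::Linux gives the test access to the non-public
  // static helpers; the destructor releases the mapping so that every
  // test iteration cleans up after itself.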
  class HugeTlbfsMemory : private ::os::Linux {
    char* const _ptr;
    const size_t _size;
   public:
    static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
      return os::Linux::reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
    }
    static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
      return os::Linux::reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
    }
    HugeTlbfsMemory(char* const ptr, size_t size) : _ptr(ptr), _size(size) { }
    ~HugeTlbfsMemory() {
      if (_ptr != NULL) {
        os::Linux::release_memory_special_huge_tlbfs(_ptr, _size);
      }
    }
  };

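  // Analogous RAII wrapper for the SysV shared memory (UseSHM) reservation
  // path.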
  class ShmMemory : private ::os::Linux {
    char* const _ptr;
    const size_t _size;
   public:
    static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
      return os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
    }
    ShmMemory(char* const ptr, size_t size) : _ptr(ptr), _size(size) { }
    ~ShmMemory() {
      os::Linux::release_memory_special_shm(_ptr, _size);
    }
  };

  // We have to use these wrapper functions because gtest's _PRED macros
  // accept neither is_aligned nor (is_aligned<size_t, size_t>).
  static bool is_size_aligned(size_t size, size_t alignment) {
    return is_aligned(size, alignment);
  }
  static bool is_ptr_aligned(char* ptr, size_t alignment) {
    return is_aligned(ptr, alignment);
  }

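  // Reserve an SHM-backed special mapping, check its alignment and touch
  // every small page in it. The reservation itself is allowed to fail
  // (e.g. when no large pages are available), in which case the checks
  // are skipped.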
  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
    ASSERT_TRUE(UseSHM) << "must be used only when UseSHM is true";
    char* addr = ShmMemory::reserve_memory_special_shm(size, alignment, NULL, false);
    if (addr != NULL) {
      ShmMemory mr(addr, size);
      EXPECT_PRED2(is_ptr_aligned, addr, alignment);
      EXPECT_PRED2(is_ptr_aligned, addr, os::large_page_size());

      small_page_write(addr, size);
    }
  }
}

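// Reserve purely hugetlbfs-backed memory for sizes that are multiples of
// the large page size. A NULL result is not an error: the reservation may
// legitimately fail when not enough large pages are configured.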
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_only) {
  if (!UseHugeTLBFS) {
    return;
  }
  size_t lp = os::large_page_size();

  for (size_t size = lp; size <= lp * 10; size += lp) {
    char* addr = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_only(size, NULL, false);

    if (addr != NULL) {
      HugeTlbfsMemory mr(addr, size);
      small_page_write(addr, size);
    }
  }
}

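// Reserve mixed (large plus small page) mappings for sizes that need not be
// large-page aligned, over every power-of-two alignment that divides the
// size, without requesting a specific address.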
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_without_addr) {
  if (!UseHugeTLBFS) {
    return;
  }
  size_t lp = os::large_page_size();
  size_t ag = os::vm_allocation_granularity();

  // sizes to test
  const size_t sizes[] = {
    lp, lp + ag, lp + lp / 2, lp * 2,
    lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
    lp * 10, lp * 10 + lp / 2
  };
  const int num_sizes = sizeof(sizes) / sizeof(size_t);
  for (int i = 0; i < num_sizes; i++) {
    const size_t size = sizes[i];
    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
      char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
      if (p != NULL) {
        HugeTlbfsMemory mr(p, size);
        EXPECT_PRED2(is_ptr_aligned, p, alignment) << " size = " << size;
        small_page_write(p, size);
      }
    }
  }
}

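// Same as above, but first probe for a free address range with an anonymous
// mmap, unmap it again, and pass addresses inside that range as a "good"
// req_addr. A successful reservation must be placed exactly at req_addr.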
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_good_req_addr) {
  if (!UseHugeTLBFS) {
    return;
  }
  size_t lp = os::large_page_size();
  size_t ag = os::vm_allocation_granularity();

  // sizes to test
  const size_t sizes[] = {
    lp, lp + ag, lp + lp / 2, lp * 2,
    lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
    lp * 10, lp * 10 + lp / 2
  };
  const int num_sizes = sizeof(sizes) / sizeof(size_t);

  // Pre-allocate an area as large as the largest allocation
  // and aligned to the largest alignment we will be testing.
  const size_t mapping_size = sizes[num_sizes - 1] * 2;
  char* const mapping = (char*) ::mmap(NULL, mapping_size,
      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
      -1, 0);
  ASSERT_TRUE(mapping != MAP_FAILED) << " mmap failed, mapping_size = " << mapping_size;
  // Unmap the mapping; addresses within it will serve as "good" req_addr values.
  ::munmap(mapping, mapping_size);

  for (int i = 0; i < num_sizes; i++) {
    const size_t size = sizes[i];
    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
      char* const req_addr = align_up(mapping, alignment);
      char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
      if (p != NULL) {
        HugeTlbfsMemory mr(p, size);
        ASSERT_EQ(req_addr, p) << " size = " << size << ", alignment = " << alignment;
        small_page_write(p, size);
      }
    }
  }
}

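// Counterpart of the previous test: keep the probe mapping in place so that
// req_addr points into an already occupied range. The reservation must then
// fail, because the API may not return memory at a different address.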
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_bad_req_addr) {
  if (!UseHugeTLBFS) {
    return;
  }
  size_t lp = os::large_page_size();
  size_t ag = os::vm_allocation_granularity();

  // sizes to test
  const size_t sizes[] = {
    lp, lp + ag, lp + lp / 2, lp * 2,
    lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
    lp * 10, lp * 10 + lp / 2
  };
  const int num_sizes = sizeof(sizes) / sizeof(size_t);

  // Pre-allocate an area as large as the largest allocation
  // and aligned to the largest alignment we will be testing.
  const size_t mapping_size = sizes[num_sizes - 1] * 2;
  char* const mapping = (char*) ::mmap(NULL, mapping_size,
      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
      -1, 0);
  ASSERT_TRUE(mapping != MAP_FAILED) << " mmap failed, mapping_size = " << mapping_size;
  // Leave the mapping intact; it will serve as a "bad" req_addr.

  class MappingHolder {
    char* const _mapping;
    size_t _size;
   public:
    MappingHolder(char* mapping, size_t size) : _mapping(mapping), _size(size) { }
    ~MappingHolder() {
      ::munmap(_mapping, _size);
    }
  } holder(mapping, mapping_size);

  for (int i = 0; i < num_sizes; i++) {
    const size_t size = sizes[i];
    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
      char* const req_addr = align_up(mapping, alignment);
      char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
      HugeTlbfsMemory mr(p, size);
      // As the area around req_addr contains already existing mappings, the API
      // must return NULL; per its contract it cannot return a different address.
      EXPECT_TRUE(p == NULL) << " size = " << size
                             << ", alignment = " << alignment
                             << ", req_addr = " << req_addr
                             << ", p = " << p;
    }
  }
}

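// Reserve SysV SHM special memory over a range of sizes and over all
// power-of-two alignments that divide each size.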
TEST_VM(os_linux, reserve_memory_special_shm) {
  if (!UseSHM) {
    return;
  }
  size_t lp = os::large_page_size();
  size_t ag = os::vm_allocation_granularity();

  for (size_t size = ag; size < lp * 3; size += ag) {
    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
      EXPECT_NO_FATAL_FAILURE(test_reserve_memory_special_shm(size, alignment));
    }
  }
}

#endif // LINUX