1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 // Included early because the NMT flags don't include it.
  27 #include "utilities/macros.hpp"
  28 
  29 #if INCLUDE_NMT
  30 
  31 #include "runtime/thread.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "services/virtualMemoryTracker.hpp"
  34 #include "utilities/globalDefinitions.hpp"
  35 #include "unittest.hpp"
  36 
  37 
  38 class CommittedVirtualMemoryTest {
  39 public:
  40   static void test() {
  41     Thread* thr = Thread::current();
  42     address stack_end = thr->stack_end();
  43     size_t  stack_size = thr->stack_size();
  44 
  45     MemTracker::record_thread_stack(stack_end, stack_size);
  46 
  47     VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
  48 
  49     // snapshot current stack usage
  50     VirtualMemoryTracker::snapshot_thread_stacks();
  51 
  52     ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
  53     ASSERT_TRUE(rmr != NULL);
  54 
  55     ASSERT_EQ(rmr->base(), stack_end);
  56     ASSERT_EQ(rmr->size(), stack_size);
  57 
  58     CommittedRegionIterator iter = rmr->iterate_committed_regions();
  59     int i = 0;
  60     address i_addr = (address)&i;
  61     bool found_i_addr = false;
  62 
  63     // stack grows downward
  64     address stack_top = stack_end + stack_size;
  65     bool found_stack_top = false;
  66 
  67     for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
  68       if (region->base() + region->size() == stack_top) {
  69         ASSERT_TRUE(region->size() <= stack_size);
  70         found_stack_top = true;
  71       }
  72 
  73       if(i_addr < stack_top && i_addr >= region->base()) {
  74         found_i_addr = true;
  75       }
  76 
  77       i++;
  78     }
  79 
  80     // stack and guard pages may be contiguous as one region
  81     ASSERT_TRUE(i >= 1);
  82     ASSERT_TRUE(found_stack_top);
  83     ASSERT_TRUE(found_i_addr);
  84   }
  85 
  86   static void check_covered_pages(address addr, size_t size, address base, size_t touch_pages, int* page_num) {
  87     const size_t page_sz = os::vm_page_size();
  88     size_t index;
  89     for (index = 0; index < touch_pages; index ++) {
  90       address page_addr = base + page_num[index] * page_sz;
  91       // The range covers this page, marks the page
  92       if (page_addr >= addr && page_addr < addr + size) {
  93         page_num[index] = -1;
  94       }
  95     }
  96   }
  97 
  98   static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
  99     const size_t page_sz = os::vm_page_size();
 100     const size_t size = num_pages * page_sz;
 101     char* base = os::reserve_memory(size, NULL, page_sz, mtThreadStack);
 102     bool result = os::commit_memory(base, size, false);
 103     size_t index;
 104     ASSERT_NE(base, (char*)NULL);
 105     for (index = 0; index < touch_pages; index ++) {
 106       char* touch_addr = base + page_sz * page_num[index];
 107       *touch_addr = 'a';
 108     }
 109 
 110     address frame = (address)0x1235;
 111     NativeCallStack stack(&frame, 1);
 112     VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
 113 
 114     // trigger the test
 115     VirtualMemoryTracker::snapshot_thread_stacks();
 116 
 117     ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
 118     ASSERT_TRUE(rmr != NULL);
 119 
 120     bool precise_tracking_supported = false;
 121     CommittedRegionIterator iter = rmr->iterate_committed_regions();
 122     for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
 123       if (region->size() == size) {
 124         // platforms that do not support precise tracking.
 125         ASSERT_TRUE(iter.next() == NULL);
 126         break;
 127       } else {
 128         precise_tracking_supported = true;
 129         check_covered_pages(region->base(), region->size(), (address)base, touch_pages, page_num);
 130       }
 131     }
 132 
 133     if (precise_tracking_supported) {
 134       // All touched pages should be committed
 135       for (size_t index = 0; index < touch_pages; index ++) {
 136         ASSERT_EQ(page_num[index], -1);
 137       }
 138     }
 139 
 140     // Cleanup
 141     os::free_memory(base, size, page_sz);
 142     VirtualMemoryTracker::remove_released_region((address)base, size);
 143 
 144     rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
 145     ASSERT_TRUE(rmr == NULL);
 146   }
 147 
 148   static void test_committed_region() {
 149     // On Linux, we scan 1024 pages at a time.
 150     // Here, we test scenario that scans < 1024 pages.
 151     int small_range[] = {3, 9, 46};
 152     int mid_range[] = {0, 45, 100, 399, 400, 1000, 1031};
 153     int large_range[] = {100, 301, 1024, 2047, 2048, 2049, 2050, 3000};
 154 
 155     test_committed_region_impl(47, 3, small_range);
 156     test_committed_region_impl(1088, 5, mid_range);
 157     test_committed_region_impl(3074, 8, large_range);
 158   }
 159 
 160   static void test_partial_region() {
 161     bool   result;
 162     size_t committed_size;
 163     address committed_start;
 164     size_t index;
 165 
 166     const size_t page_sz = os::vm_page_size();
 167     const size_t num_pages = 4;
 168     const size_t size = num_pages * page_sz;
 169     char* base = os::reserve_memory(size, NULL, page_sz, mtTest);
 170     ASSERT_NE(base, (char*)NULL);
 171     result = os::commit_memory(base, size, false);
 172 
 173     ASSERT_TRUE(result);
 174     // touch all pages
 175     for (index = 0; index < num_pages; index ++) {
 176       *(base + index * page_sz) = 'a';
 177     }
 178 
 179     // Test whole range
 180     result = os::committed_in_range((address)base, size, committed_start, committed_size);
 181     ASSERT_TRUE(result);
 182     ASSERT_EQ(num_pages * page_sz, committed_size);
 183     ASSERT_EQ(committed_start, (address)base);
 184 
 185     // Test beginning of the range
 186     result = os::committed_in_range((address)base, 2 * page_sz, committed_start, committed_size);
 187     ASSERT_TRUE(result);
 188     ASSERT_EQ(2 * page_sz, committed_size);
 189     ASSERT_EQ(committed_start, (address)base);
 190 
 191     // Test end of the range
 192     result = os::committed_in_range((address)(base + page_sz), 3 * page_sz, committed_start, committed_size);
 193     ASSERT_TRUE(result);
 194     ASSERT_EQ(3 * page_sz, committed_size);
 195     ASSERT_EQ(committed_start, (address)(base + page_sz));
 196 
 197     // Test middle of the range
 198     result = os::committed_in_range((address)(base + page_sz), 2 * page_sz, committed_start, committed_size);
 199     ASSERT_TRUE(result);
 200     ASSERT_EQ(2 * page_sz, committed_size);
 201     ASSERT_EQ(committed_start, (address)(base + page_sz));
 202   }
 203 };
 204 
// Entry point: exercises NMT committed-virtual-memory tracking — thread-stack
// snapshotting, committed-region enumeration, and partial-range commit
// queries. Only runs when NMT is compiled in (see the INCLUDE_NMT guard).
TEST_VM(CommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
  // Bring the tracker up at detail level before any region is registered;
  // late_initialize() follows initialize(), matching the order used during
  // VM startup.
  // NOTE(review): these are normally driven by VM startup — confirm that
  // re-invoking them inside a gtest is safe if NMT is already enabled.
  VirtualMemoryTracker::initialize(NMT_detail);
  VirtualMemoryTracker::late_initialize(NMT_detail);

  CommittedVirtualMemoryTest::test();
  CommittedVirtualMemoryTest::test_committed_region();
  CommittedVirtualMemoryTest::test_partial_region();
}
 213 
 214 #endif // INCLUDE_NMT