/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zStat.hpp"
#include "logging/log.hpp"

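// Counters for page cache hits and misses. For small pages, an L1 hit is
// served from the allocating thread's NUMA-local list and an L2 hit from a
// remote NUMA node's list; medium and large page hits are counted as L1.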
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);

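// The page cache starts out empty. _available tracks the total number of
// bytes held by cached pages across all lists.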
ZPageCache::ZPageCache() :
    _available(0),
    _small(),
    _medium(),
    _large() {}

ZPage* ZPageCache::alloc_small_page() {
  const uint32_t numa_id = ZNUMA::id();
  const uint32_t numa_count = ZNUMA::count();

  // Try NUMA local page cache
  ZPage* const l1_page = _small.get(numa_id).remove_first();
  if (l1_page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return l1_page;
  }

  // Try NUMA remote page cache(s)
  uint32_t remote_numa_id = numa_id + 1;
  const uint32_t remote_numa_count = numa_count - 1;
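  // Visit the remaining NUMA nodes in order, wrapping around to node 0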
  for (uint32_t i = 0; i < remote_numa_count; i++) {
    if (remote_numa_id == numa_count) {
      remote_numa_id = 0;
    }

    ZPage* const l2_page = _small.get(remote_numa_id).remove_first();
    if (l2_page != NULL) {
      ZStatInc(ZCounterPageCacheHitL2);
      return l2_page;
    }

    remote_numa_id++;
  }

  ZStatInc(ZCounterPageCacheMiss);
  return NULL;
}

ZPage* ZPageCache::alloc_medium_page() {
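  // Medium pages are not NUMA-partitioned, so there is only a single list to try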
  ZPage* const l1_page = _medium.remove_first();
  if (l1_page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return l1_page;
  }

  ZStatInc(ZCounterPageCacheMiss);
  return NULL;
}

ZPage* ZPageCache::alloc_large_page(size_t size) {
  // Find a page with the right size
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* l1_page; iter.next(&l1_page);) {
    if (l1_page->size() == size) {
      // Page found
      _large.remove(l1_page);
      ZStatInc(ZCounterPageCacheHitL1);
      return l1_page;
    }
  }

  ZStatInc(ZCounterPageCacheMiss);
  return NULL;
}

ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
  ZPage* page;

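  // Dispatch on page type. The requested size only matters for large pages,
  // since small and medium pages have a fixed size.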
  if (type == ZPageTypeSmall) {
    page = alloc_small_page();
  } else if (type == ZPageTypeMedium) {
    page = alloc_medium_page();
  } else {
    page = alloc_large_page(size);
  }

  if (page != NULL) {
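    // Cache hit, update the amount of memory available in the cache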
    _available -= page->size();
  }

  return page;
}

void ZPageCache::free_page(ZPage* page) {
  const uint8_t type = page->type();
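  // Insert the page at the head of its list. Small pages are returned to the
  // list of the NUMA node that their memory belongs to.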
  if (type == ZPageTypeSmall) {
    _small.get(page->numa_id()).insert_first(page);
  } else if (type == ZPageTypeMedium) {
    _medium.insert_first(page);
  } else {
    _large.insert_first(page);
  }

  _available += page->size();
}

bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
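  // Flush candidates are taken from the tail of the list, i.e. the page that
  // was least recently inserted by free_page()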
  ZPage* const page = from->last();
  if (page == NULL || !cl->do_page(page)) {
    // Don't flush page
    return false;
  }

  // Flush page
  _available -= page->size();
  from->remove(page);
  to->insert_last(page);
  return true;
}

void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
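  // Keep flushing until the closure rejects a page or the list is empty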
  while (flush_list_inner(cl, from, to));
}

void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
  const uint32_t numa_count = ZNUMA::count();
  uint32_t numa_done = 0;
  uint32_t numa_next = 0;

  // Flush lists round-robin
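  // numa_done counts consecutive lists that had nothing to flush; once all
  // numa_count lists decline in a row, flushing is complete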
  while (numa_done < numa_count) {
    ZList<ZPage>* numa_list = from->addr(numa_next);
    if (++numa_next == numa_count) {
      numa_next = 0;
    }

    if (flush_list_inner(cl, numa_list, to)) {
      // Not done
      numa_done = 0;
    } else {
      // Done
      numa_done++;
    }
  }
}

void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
  // Prefer flushing large, then medium and last small pages
  flush_list(cl, &_large, to);
  flush_list(cl, &_medium, to);
  flush_per_numa_lists(cl, &_small, to);
}