< prev index next >

src/hotspot/share/gc/z/zPageCache.cpp

Print this page


   1 /*
   2  * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"

  25 #include "gc/z/zList.inline.hpp"
  26 #include "gc/z/zNUMA.hpp"
  27 #include "gc/z/zPage.inline.hpp"
  28 #include "gc/z/zPageCache.hpp"
  29 #include "gc/z/zStat.hpp"
  30 #include "gc/z/zValue.inline.hpp"

  31 #include "logging/log.hpp"

  32 
// Per-second operation counters for page cache allocation outcomes.
// L1 is a hit in the NUMA-local small-page list (see alloc_small_page());
// L2 is presumably a NUMA-remote hit and L3 a further fallback -- the code
// incrementing L2/L3 is outside this view, confirm against alloc_*_page().
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
  37 












  38 ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
  39     _requested(requested),
  40     _flushed(0) {}
  41 
  42 size_t ZPageCacheFlushClosure::overflushed() const {
  43   return _flushed > _requested ? _flushed - _requested : 0;
  44 }
  45 
  46 ZPageCache::ZPageCache() :
  47     _available(0),
  48     _small(),
  49     _medium(),
  50     _large() {}
  51 
  52 ZPage* ZPageCache::alloc_small_page() {
  53   const uint32_t numa_id = ZNUMA::id();
  54   const uint32_t numa_count = ZNUMA::count();
  55 
  56   // Try NUMA local page cache
  57   ZPage* const l1_page = _small.get(numa_id).remove_first();
  58   if (l1_page != NULL) {
  59     ZStatInc(ZCounterPageCacheHitL1);
  60     return l1_page;
  61   }
  62 
  63   // Try NUMA remote page cache(s)
  64   uint32_t remote_numa_id = numa_id + 1;
  65   const uint32_t remote_numa_count = numa_count - 1;
  66   for (uint32_t i = 0; i < remote_numa_count; i++) {
  67     if (remote_numa_id == numa_count) {


 144   ZPage* page;
 145 
 146   // Try allocate exact page
 147   if (type == ZPageTypeSmall) {
 148     page = alloc_small_page();
 149   } else if (type == ZPageTypeMedium) {
 150     page = alloc_medium_page();
 151   } else {
 152     page = alloc_large_page(size);
 153   }
 154 
 155   if (page == NULL) {
 156     // Try allocate potentially oversized page
 157     ZPage* const oversized = alloc_oversized_page(size);
 158     if (oversized != NULL) {
 159       if (size < oversized->size()) {
 160         // Split oversized page
 161         page = oversized->split(type, size);
 162 
 163         // Cache remainder
 164         free_page_inner(oversized);
 165       } else {
 166         // Re-type correctly sized page
 167         page = oversized->retype(type);
 168       }
 169     }
 170   }
 171 
 172   if (page != NULL) {
 173     _available -= page->size();
 174   } else {
 175     ZStatInc(ZCounterPageCacheMiss);
 176   }
 177 
 178   return page;
 179 }
 180 
 181 void ZPageCache::free_page_inner(ZPage* page) {
 182   const uint8_t type = page->type();
 183   if (type == ZPageTypeSmall) {
 184     _small.get(page->numa_id()).insert_first(page);
 185   } else if (type == ZPageTypeMedium) {
 186     _medium.insert_first(page);
 187   } else {
 188     _large.insert_first(page);
 189   }
 190 }
 191 
 192 void ZPageCache::free_page(ZPage* page) {
 193   free_page_inner(page);
 194   _available += page->size();
 195 }
 196 
 197 bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
 198   ZPage* const page = from->last();
 199   if (page == NULL || !cl->do_page(page)) {
 200     // Don't flush page
 201     return false;
 202   }
 203 
 204   // Flush page
 205   _available -= page->size();
 206   from->remove(page);
 207   to->insert_last(page);
 208   return true;
 209 }
 210 
 211 void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
 212   while (flush_list_inner(cl, from, to));
 213 }
 214 
 215 void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
 216   const uint32_t numa_count = ZNUMA::count();
 217   uint32_t numa_done = 0;
 218   uint32_t numa_next = 0;
 219 
 220   // Flush lists round-robin
 221   while (numa_done < numa_count) {
 222     ZList<ZPage>* numa_list = from->addr(numa_next);
 223     if (++numa_next == numa_count) {
 224       numa_next = 0;
 225     }
 226 
 227     if (flush_list_inner(cl, numa_list, to)) {
 228       // Not done
 229       numa_done = 0;
 230     } else {
 231       // Done
 232       numa_done++;
 233     }
 234   }
 235 }
 236 
 237 void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
 238   // Prefer flushing large, then medium and last small pages
 239   flush_list(cl, &_large, to);
 240   flush_list(cl, &_medium, to);
 241   flush_per_numa_lists(cl, &_small, to);




















































































 242 }
 243 
 244 void ZPageCache::pages_do(ZPageClosure* cl) const {
 245   // Small
 246   ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
 247   for (const ZList<ZPage>* list; iter_numa.next(&list);) {
 248     ZListIterator<ZPage> iter_small(list);
 249     for (ZPage* page; iter_small.next(&page);) {
 250       cl->do_page(page);
 251     }
 252   }
 253 
 254   // Medium
 255   ZListIterator<ZPage> iter_medium(&_medium);
 256   for (ZPage* page; iter_medium.next(&page);) {
 257     cl->do_page(page);
 258   }
 259 
 260   // Large
 261   ZListIterator<ZPage> iter_large(&_large);
   1 /*
   2  * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/z/zGlobals.hpp"
  26 #include "gc/z/zList.inline.hpp"
  27 #include "gc/z/zNUMA.hpp"
  28 #include "gc/z/zPage.inline.hpp"
  29 #include "gc/z/zPageCache.hpp"
  30 #include "gc/z/zStat.hpp"
  31 #include "gc/z/zValue.inline.hpp"
  32 #include "jfr/jfrEvents.hpp"
  33 #include "logging/log.hpp"
  34 #include "memory/allocation.hpp"
  35 
// Per-second operation counters for page cache allocation outcomes.
// L1 is a hit in the NUMA-local small-page list (see alloc_small_page());
// L2 is presumably a NUMA-remote hit and L3 a further fallback -- the code
// incrementing L2/L3 is outside this view, confirm against alloc_*_page().
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
  40 
  41 class ZPageCacheFlushClosure : public StackObj {
  42   friend class ZPageCache;
  43 
  44 protected:
  45   const size_t _requested;
  46   size_t       _flushed;
  47 
  48 public:
  49   ZPageCacheFlushClosure(size_t requested);
  50   virtual bool do_page(const ZPage* page) = 0;
  51 };
  52 
  53 ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
  54     _requested(requested),
  55     _flushed(0) {}
  56 




  57 ZPageCache::ZPageCache() :

  58     _small(),
  59     _medium(),
  60     _large() {}
  61 
  62 ZPage* ZPageCache::alloc_small_page() {
  63   const uint32_t numa_id = ZNUMA::id();
  64   const uint32_t numa_count = ZNUMA::count();
  65 
  66   // Try NUMA local page cache
  67   ZPage* const l1_page = _small.get(numa_id).remove_first();
  68   if (l1_page != NULL) {
  69     ZStatInc(ZCounterPageCacheHitL1);
  70     return l1_page;
  71   }
  72 
  73   // Try NUMA remote page cache(s)
  74   uint32_t remote_numa_id = numa_id + 1;
  75   const uint32_t remote_numa_count = numa_count - 1;
  76   for (uint32_t i = 0; i < remote_numa_count; i++) {
  77     if (remote_numa_id == numa_count) {


 154   ZPage* page;
 155 
 156   // Try allocate exact page
 157   if (type == ZPageTypeSmall) {
 158     page = alloc_small_page();
 159   } else if (type == ZPageTypeMedium) {
 160     page = alloc_medium_page();
 161   } else {
 162     page = alloc_large_page(size);
 163   }
 164 
 165   if (page == NULL) {
 166     // Try allocate potentially oversized page
 167     ZPage* const oversized = alloc_oversized_page(size);
 168     if (oversized != NULL) {
 169       if (size < oversized->size()) {
 170         // Split oversized page
 171         page = oversized->split(type, size);
 172 
 173         // Cache remainder
 174         free_page(oversized);
 175       } else {
 176         // Re-type correctly sized page
 177         page = oversized->retype(type);
 178       }
 179     }
 180   }
 181 
 182   if (page == NULL) {


 183     ZStatInc(ZCounterPageCacheMiss);
 184   }
 185 
 186   return page;
 187 }
 188 
 189 void ZPageCache::free_page(ZPage* page) {
 190   const uint8_t type = page->type();
 191   if (type == ZPageTypeSmall) {
 192     _small.get(page->numa_id()).insert_first(page);
 193   } else if (type == ZPageTypeMedium) {
 194     _medium.insert_first(page);
 195   } else {
 196     _large.insert_first(page);
 197   }
 198 }
 199 





 200 bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
 201   ZPage* const page = from->last();
 202   if (page == NULL || !cl->do_page(page)) {
 203     // Don't flush page
 204     return false;
 205   }
 206 
 207   // Flush page

 208   from->remove(page);
 209   to->insert_last(page);
 210   return true;
 211 }
 212 
 213 void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
 214   while (flush_list_inner(cl, from, to));
 215 }
 216 
 217 void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
 218   const uint32_t numa_count = ZNUMA::count();
 219   uint32_t numa_done = 0;
 220   uint32_t numa_next = 0;
 221 
 222   // Flush lists round-robin
 223   while (numa_done < numa_count) {
 224     ZList<ZPage>* numa_list = from->addr(numa_next);
 225     if (++numa_next == numa_count) {
 226       numa_next = 0;
 227     }
 228 
 229     if (flush_list_inner(cl, numa_list, to)) {
 230       // Not done
 231       numa_done = 0;
 232     } else {
 233       // Done
 234       numa_done++;
 235     }
 236   }
 237 }
 238 
 239 void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
 240   // Prefer flushing large, then medium and last small pages
 241   flush_list(cl, &_large, to);
 242   flush_list(cl, &_medium, to);
 243   flush_per_numa_lists(cl, &_small, to);
 244 
 245   if (cl->_flushed > cl->_requested) {
 246     // Overflushed, keep part of last page
 247     const size_t overflushed = cl->_flushed - cl->_requested;
 248     free_page(to->last()->split(overflushed));
 249     cl->_flushed -= overflushed;
 250   }
 251 }
 252 
 253 class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
 254 public:
 255   ZPageCacheFlushForAllocationClosure(size_t requested) :
 256       ZPageCacheFlushClosure(requested) {}
 257 
 258   virtual bool do_page(const ZPage* page) {
 259     if (_flushed < _requested) {
 260       // Flush page
 261       _flushed += page->size();
 262       return true;
 263     }
 264 
 265     // Don't flush page
 266     return false;
 267   }
 268 };
 269 
 270 void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
 271   EventZPageCacheFlush event;
 272 
 273   // Flush
 274   ZPageCacheFlushForAllocationClosure cl(requested);
 275   flush(&cl, to);
 276 
 277   // Send event
 278   event.commit(requested, true /* for_allocation */);
 279 }
 280 
 281 class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
 282 private:
 283   const uint64_t _now;
 284   const uint64_t _delay;
 285   uint64_t*      _timeout;
 286 
 287 public:
 288   ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay, uint64_t* timeout) :
 289       ZPageCacheFlushClosure(requested),
 290       _now(os::elapsedTime()),
 291       _delay(delay),
 292       _timeout(timeout) {}
 293 
 294   virtual bool do_page(const ZPage* page) {
 295     const uint64_t expires = page->last_used() + _delay;
 296     const uint64_t timeout = expires - MIN2(expires, _now);
 297 
 298     if (_flushed < _requested && timeout == 0) {
 299       // Flush page
 300       _flushed += page->size();
 301       return true;
 302     }
 303 
 304     // Record shortest non-expired timeout
 305     *_timeout = MIN2(*_timeout, timeout);
 306 
 307     // Don't flush page
 308     return false;
 309   }
 310 };
 311 
 312 size_t ZPageCache::flush_for_uncommit(size_t requested, uint64_t delay, uint64_t* timeout, ZList<ZPage>* to) {
 313   if (requested == 0) {
 314     // Nothing to flush
 315     return 0;
 316   }
 317 
 318   EventZPageCacheFlush event;
 319 
 320   // Flush
 321   ZPageCacheFlushForUncommitClosure cl(requested, delay, timeout);
 322   flush(&cl, to);
 323 
 324   // Send event
 325   event.commit(requested, false /* for_allocation */);
 326 
 327   return cl._flushed;
 328 }
 329 
 330 void ZPageCache::pages_do(ZPageClosure* cl) const {
 331   // Small
 332   ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
 333   for (const ZList<ZPage>* list; iter_numa.next(&list);) {
 334     ZListIterator<ZPage> iter_small(list);
 335     for (ZPage* page; iter_small.next(&page);) {
 336       cl->do_page(page);
 337     }
 338   }
 339 
 340   // Medium
 341   ZListIterator<ZPage> iter_medium(&_medium);
 342   for (ZPage* page; iter_medium.next(&page);) {
 343     cl->do_page(page);
 344   }
 345 
 346   // Large
 347   ZListIterator<ZPage> iter_large(&_large);
< prev index next >