1 /* 2 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zValue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"

// Cache hit tiers, as bumped by the allocation paths below:
//   L1 - exact page type found in the first-choice list (for small pages,
//        the NUMA-local list).
//   L2 - small page stolen from a NUMA-remote list.
//   L3 - satisfied by splitting or re-typing an oversized cached page.
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);

// Flush policy callback. flush() offers cached pages to do_page(), which
// returns true if the page should be flushed out of the cache and false to
// stop flushing from that list. Subclasses accumulate the number of bytes
// flushed in _flushed, compared against the _requested target.
// ZPageCache is a friend so flush() can adjust _flushed after an overflush.
class ZPageCacheFlushClosure : public StackObj {
  friend class ZPageCache;

protected:
  const size_t _requested;  // Requested number of bytes to flush
  size_t       _flushed;    // Bytes flushed so far

public:
  ZPageCacheFlushClosure(size_t requested);
  virtual bool do_page(const ZPage* page) = 0;
};

ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
    _requested(requested),
    _flushed(0) {}

ZPageCache::ZPageCache() :
    _small(),
    _medium(),
    _large(),
    _last_commit(0) {}

// Allocate a cached small page, preferring the current thread's NUMA node
// and falling back to scanning the other nodes' lists in ascending id order
// starting from the next node. Returns NULL if no small page is cached.
ZPage* ZPageCache::alloc_small_page() {
  const uint32_t numa_id = ZNUMA::id();
  const uint32_t numa_count = ZNUMA::count();

  // Try NUMA local page cache
  ZPage* const l1_page = _small.get(numa_id).remove_first();
  if (l1_page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return l1_page;
  }

  // Try NUMA remote page cache(s)
  uint32_t remote_numa_id = numa_id + 1;
  const uint32_t remote_numa_count = numa_count - 1;
  for (uint32_t i = 0; i < remote_numa_count; i++) {
    if (remote_numa_id == numa_count) {
      // Wrap around to node 0
      remote_numa_id = 0;
    }

    ZPage* const l2_page = _small.get(remote_numa_id).remove_first();
    if (l2_page != NULL) {
      ZStatInc(ZCounterPageCacheHitL2);
      return l2_page;
    }

    remote_numa_id++;
  }

  return NULL;
}

// Allocate a cached medium page, or NULL if none is cached.
ZPage* ZPageCache::alloc_medium_page() {
  ZPage* const page = _medium.remove_first();
  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return page;
  }

  return NULL;
}

// Allocate a cached large page of exactly the given size (large pages have
// per-page sizes, so a linear search is needed). Returns NULL if no exact
// match is cached.
ZPage* ZPageCache::alloc_large_page(size_t size) {
  // Find a page with the right size
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size == page->size()) {
      // Page found
      _large.remove(page);
      ZStatInc(ZCounterPageCacheHitL1);
      return page;
    }
  }

  return NULL;
}

// Take a medium page to satisfy a smaller request, or NULL if the request
// doesn't fit in a medium page (or none is cached).
ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) {
  if (size <= ZPageSizeMedium) {
    return _medium.remove_first();
  }

  return NULL;
}

// Take the first cached large page that is at least the requested size,
// or NULL if none fits.
ZPage* ZPageCache::alloc_oversized_large_page(size_t size) {
  // Find a page that is large enough
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size <= page->size()) {
      // Page found
      _large.remove(page);
      return page;
    }
  }

  return NULL;
}

// Allocate any cached page large enough for the request, preferring large
// pages over medium ones. Counts as an L3 cache hit.
ZPage* ZPageCache::alloc_oversized_page(size_t size) {
  ZPage* page = alloc_oversized_large_page(size);
  if (page == NULL) {
    page = alloc_oversized_medium_page(size);
  }

  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL3);
  }

  return page;
}

// Allocate a cached page of the given type and size. First tries an exact
// match; failing that, carves the request out of a larger cached page,
// returning the remainder to the cache. Returns NULL (and counts a miss)
// if the cache cannot satisfy the request.
ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
  ZPage* page;

  // Try allocate exact page
  if (type == ZPageTypeSmall) {
    page = alloc_small_page();
  } else if (type == ZPageTypeMedium) {
    page = alloc_medium_page();
  } else {
    page = alloc_large_page(size);
  }

  if (page == NULL) {
    // Try allocate potentially oversized page
    ZPage* const oversized = alloc_oversized_page(size);
    if (oversized != NULL) {
      if (size < oversized->size()) {
        // Split oversized page
        page = oversized->split(type, size);

        // Cache remainder
        free_page(oversized);
      } else {
        // Re-type correctly sized page
        page = oversized->retype(type);
      }
    }
  }

  if (page == NULL) {
    ZStatInc(ZCounterPageCacheMiss);
  }

  return page;
}

// Return a page to the cache. Pages are inserted at the head of their list,
// so each list is ordered most-recently-freed first; flushing (below) takes
// pages from the tail, i.e. the least recently freed ones.
void ZPageCache::free_page(ZPage* page) {
  const uint8_t type = page->type();
  if (type == ZPageTypeSmall) {
    _small.get(page->numa_id()).insert_first(page);
  } else if (type == ZPageTypeMedium) {
    _medium.insert_first(page);
  } else {
    _large.insert_first(page);
  }
}

// Offer the tail (least recently freed) page of 'from' to the closure.
// Moves it to 'to' and returns true if the closure accepted it; returns
// false if the list is empty or the closure declined.
bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
  ZPage* const page = from->last();
  if (page == NULL || !cl->do_page(page)) {
    // Don't flush page
    return false;
  }

  // Flush page
  from->remove(page);
  to->insert_last(page);
  return true;
}

// Flush pages from a single list until the closure declines (or the list
// is exhausted).
void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
  while (flush_list_inner(cl, from, to));
}

// Flush the per-NUMA small-page lists round-robin, one page at a time, so
// pages are drained evenly across nodes. Terminates once every list has
// declined/been empty numa_count times in a row.
void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
  const uint32_t numa_count = ZNUMA::count();
  uint32_t numa_done = 0;
  uint32_t numa_next = 0;

  // Flush lists round-robin
  while (numa_done < numa_count) {
    ZList<ZPage>* numa_list = from->addr(numa_next);
    if (++numa_next == numa_count) {
      numa_next = 0;
    }

    if (flush_list_inner(cl, numa_list, to)) {
      // Not done
      numa_done = 0;
    } else {
      // Done
      numa_done++;
    }
  }
}

// Flush pages out of the cache into 'to', driven by the closure's policy.
// Large pages are flushed first, then medium, then small. If the last
// flushed page pushed _flushed past the requested amount, the excess tail
// of that page is split off and put back in the cache so that exactly the
// requested number of bytes leaves the cache.
void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
  // Prefer flushing large, then medium and last small pages
  flush_list(cl, &_large, to);
  flush_list(cl, &_medium, to);
  flush_per_numa_lists(cl, &_small, to);

  if (cl->_flushed > cl->_requested) {
    // Overflushed, re-insert part of last page into the cache
    const size_t overflushed = cl->_flushed - cl->_requested;
    ZPage* const reinsert = to->last()->split(overflushed);
    free_page(reinsert);
    cl->_flushed -= overflushed;
  }
}

// Flush policy for allocation: flush unconditionally until the requested
// number of bytes has been reached.
class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't flush page
    return false;
  }
};

void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
  ZPageCacheFlushForAllocationClosure cl(requested);
  flush(&cl, to);
}

// Flush policy for uncommit: only flush pages whose uncommit delay has
// expired, up to the requested number of bytes. Tracks the shortest time
// until a declined page expires, so the caller knows when to retry.
class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;      // Current time, in seconds
  uint64_t*      _timeout;  // Out: seconds until the next page expires

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) :
      ZPageCacheFlushClosure(requested),
      _now(now),
      _timeout(timeout) {
    // Set initial timeout
    *_timeout = ZUncommitDelay;
  }

  virtual bool do_page(const ZPage* page) {
    const uint64_t expires = page->last_used() + ZUncommitDelay;
    if (expires > _now) {
      // Don't flush page, record shortest non-expired timeout
      *_timeout = MIN2(*_timeout, expires - _now);
      return false;
    }

    if (_flushed >= _requested) {
      // Don't flush page, requested amount flushed
      return false;
    }

    // Flush page
    _flushed += page->size();
    return true;
  }
};

// Flush up to 'requested' bytes of expired pages into 'to' for uncommit.
// Returns the number of bytes flushed, and sets *timeout to the number of
// seconds until the next uncommit attempt should be made. Uncommit is
// delayed entirely while the last commit is younger than ZUncommitDelay.
size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout) {
  const uint64_t now = os::elapsedTime();
  const uint64_t expires = _last_commit + ZUncommitDelay;
  if (expires > now) {
    // Delay uncommit, set next timeout
    *timeout = expires - now;
    return 0;
  }

  if (requested == 0) {
    // Nothing to flush, set next timeout
    *timeout = ZUncommitDelay;
    return 0;
  }

  ZPageCacheFlushForUncommitClosure cl(requested, now, timeout);
  flush(&cl, to);

  return cl._flushed;
}

// Record the time of the last page commit; used by flush_for_uncommit()
// to postpone uncommitting shortly after memory was committed.
void ZPageCache::set_last_commit() {
  _last_commit = os::elapsedTime();
}

// Apply the closure to every page in the cache (all NUMA small lists,
// then medium, then large).
void ZPageCache::pages_do(ZPageClosure* cl) const {
  // Small
  ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
  for (const ZList<ZPage>* list; iter_numa.next(&list);) {
    ZListIterator<ZPage> iter_small(list);
    for (ZPage* page; iter_small.next(&page);) {
      cl->do_page(page);
    }
  }

  // Medium
  ZListIterator<ZPage> iter_medium(&_medium);
  for (ZPage* page; iter_medium.next(&page);) {
    cl->do_page(page);
  }

  // Large
  ZListIterator<ZPage> iter_large(&_large);
  for (ZPage* page; iter_large.next(&page);) {
    cl->do_page(page);
  }
}