/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "memory/metaspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

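// Comparators used by the sorted linked lists below to keep committed and
// reserved regions ordered by their position in the address space.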
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

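// Returns the last node, searching from 'from', whose region ends at or before
// 'addr' (i.e. the node that a region starting at 'addr' would follow), or NULL
// if no such node exists.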
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = NULL;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == NULL) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

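// Record a committed range inside this reserved region, merging it with adjacent
// committed regions that carry the same call stack. Illustrative example with
// hypothetical addresses: if [0x1000, 0x2000) and [0x3000, 0x4000) are already
// committed with the same stack, committing [0x2000, 0x3000) collapses all three
// into a single committed region [0x1000, 0x4000).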
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

  if (next != NULL) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
      next = (prev != NULL ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

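// Remove [addr, addr + size) from the committed region held by 'node'. The range
// is contained in, but not equal to, that region: if it touches either end the
// region is trimmed in place; otherwise the region is split in two. Hypothetical
// example: removing [0x2000, 0x3000) from [0x1000, 0x4000) shrinks this region to
// [0x1000, 0x2000) and adds a new committed region [0x3000, 0x4000) right after it.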
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}

bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  CommittedMemoryRegion* crgn;

  while (head != NULL) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head already advanced; keep prev unchanged
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

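// Move the committed regions that start at or above 'addr' from this reserved
// region's list to 'rgn'. Used when remove_released_region() splits a reserved
// region in two.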
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

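// For a thread-stack region: returns the bottom of the uncommitted part of the
// stack, i.e. the region base if nothing low in the stack is committed, otherwise
// the top of the committed guard-page area. snapshot_thread_stacks() probes the
// range from this address up to the stack top for committed pages.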
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != NULL) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

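// Typical flow (illustrative): a mapping is registered with add_reserved_region(),
// pages that are later committed are recorded with add_committed_region(), and
// uncommits and unmaps are recorded with remove_uncommitted_region() and
// remove_released_region() respectively.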
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != NULL;
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapping reservation.
      // This can happen when the regions are thread stacks, because a
      // JNI-attached thread that exits without detaching from the VM
      // leaks its JavaThread object.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into it.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) are part of the Java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      ShouldNotReachHere();
      return false;
    }
  }
}

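// Tag an already-tracked reservation with a memory type. The lookup probes with a
// one-byte region at 'addr'; find() is expected to return the reserved region
// containing that address (the assert below re-checks containment).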
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  return result;
}

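// Record the release (unmap) of [addr, addr + size). If the range covers a whole
// reserved region the region is dropped; if it touches one end the region is
// trimmed; otherwise the region is split. Hypothetical example: releasing
// [0x2000, 0x3000) from a reservation [0x1000, 0x4000) leaves two reservations,
// [0x1000, 0x2000) and [0x3000, 0x4000).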
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
      if (committed_size > 0) {
        ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
        NativeCallStack ncs; // empty stack

        // Stack grows downward
        region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
      }
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

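// Iterate all tracked reserved regions under ThreadCritical, invoking the walker
// on each; stops early and returns false if the walker returns false.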
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that _reserved_regions has not been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for potential race with other thread calling transition
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}

// Metaspace Support
MetaspaceSnapshot::MetaspaceSnapshot() {
  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index ++) {
    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
    assert_valid_metadata_type(type);
    _reserved_in_bytes[type]  = 0;
    _committed_in_bytes[type] = 0;
    _used_in_bytes[type]      = 0;
    _free_in_bytes[type]      = 0;
  }
}

void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
  assert_valid_metadata_type(type);

  mss._reserved_in_bytes[type]   = MetaspaceAux::reserved_bytes(type);
  mss._committed_in_bytes[type]  = MetaspaceAux::committed_bytes(type);
  mss._used_in_bytes[type]       = MetaspaceAux::used_bytes(type);

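  // "Free" here is derived from MetaspaceAux: the unused part of the current
  // capacity plus the free-chunk and free-byte totals it reports.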
  size_t free_in_bytes = (MetaspaceAux::capacity_bytes(type) - MetaspaceAux::used_bytes(type))
                       + MetaspaceAux::free_chunks_total_bytes(type)
                       + MetaspaceAux::free_bytes(type);
  mss._free_in_bytes[type] = free_in_bytes;
}

void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
  snapshot(Metaspace::ClassType, mss);
  if (Metaspace::using_class_space()) {
    snapshot(Metaspace::NonClassType, mss);
  }
}