procrank 如何计算uss

发布时间:2023年12月18日

// Scans /proc/<pid>/pagemap for a single VMA and accumulates per-page memory
// statistics into |vma.usage| (all counters in kilobytes).
//
// pagemap_fd   - open fd for the process's pagemap file.
// vma          - VMA whose [start, end) range is scanned; vss/rss/pss/uss,
//                swap, thp and private/shared dirty/clean are updated in place.
// get_wss      - if true, only referenced (working-set) pages are counted.
// use_pageidle - if true, use the kernel idle-page tracking interface to
//                decide "referenced" instead of the KPF_REFERENCED flag.
// swap_only    - if true, account swap usage only; skip resident pages.
//
// Returns false on any read/accounting failure; on failure, partially
// collected swap_offsets_ are cleared so the object stays consistent.
bool ProcMemInfo::ReadVmaStats(int pagemap_fd, Vma& vma, bool get_wss, bool use_pageidle,
                               bool swap_only) {
    PageAcct& pinfo = PageAcct::Instance();
    if (get_wss && use_pageidle && !pinfo.InitPageAcct(true)) {
        LOG(ERROR) << "Failed to init idle page accounting";
        return false;
    }

    uint64_t pagesz_kb = getpagesize() / 1024;
    size_t num_pages = (vma.end - vma.start) / (pagesz_kb * 1024);
    size_t first_page = vma.start / (pagesz_kb * 1024);

    // Pagemap entries are read in chunks of up to kMaxPages: this bounds the
    // buffer size for huge VMAs while avoiding one pread64 syscall per page.
    std::vector<uint64_t> page_cache;
    size_t cur_page_cache_index = 0;
    size_t num_in_page_cache = 0;
    size_t num_leftover_pages = num_pages;
    for (size_t cur_page = first_page; cur_page < first_page + num_pages; ++cur_page) {
        // Refill the cache only when every previously read entry has been
        // consumed (index caught up with the number of cached entries).
        if (cur_page_cache_index == num_in_page_cache) {
            static constexpr size_t kMaxPages = 2048;
            num_leftover_pages -= num_in_page_cache;
            if (num_leftover_pages > kMaxPages) {
                num_in_page_cache = kMaxPages;
            } else {
                num_in_page_cache = num_leftover_pages;
            }
            page_cache.resize(num_in_page_cache);
            size_t total_bytes = page_cache.size() * sizeof(uint64_t);
            ssize_t bytes = pread64(pagemap_fd, page_cache.data(), total_bytes,
                                    cur_page * sizeof(uint64_t));
            // Check the error case explicitly before comparing sizes: a plain
            // `bytes != total_bytes` mixes signed and unsigned operands.
            if (bytes < 0 || static_cast<size_t>(bytes) != total_bytes) {
                if (bytes < 0) {
                    PLOG(ERROR) << "Failed to read page data at offset 0x" << std::hex
                                << cur_page * sizeof(uint64_t);
                } else {
                    LOG(ERROR) << "Failed to read page data at offset 0x" << std::hex
                               << cur_page * sizeof(uint64_t) << std::dec << " read bytes " << bytes
                               << " expected bytes " << total_bytes;
                }
                // Discard partially collected swap offsets so this failure
                // path matches the other error returns below.
                swap_offsets_.clear();
                return false;
            }
            cur_page_cache_index = 0;
        }

        uint64_t page_info = page_cache[cur_page_cache_index++];
        // Neither resident nor swapped: nothing to account for this page.
        if (!PAGE_PRESENT(page_info) && !PAGE_SWAPPED(page_info)) continue;

        if (PAGE_SWAPPED(page_info)) {
            vma.usage.swap += pagesz_kb;
            swap_offsets_.emplace_back(PAGE_SWAP_OFFSET(page_info));
            continue;
        }

        if (swap_only)
            continue;

        uint64_t page_frame = PAGE_PFN(page_info);
        uint64_t cur_page_flags;
        if (!pinfo.PageFlags(page_frame, &cur_page_flags)) {
            LOG(ERROR) << "Failed to get page flags for " << page_frame << " in process " << pid_;
            swap_offsets_.clear();
            return false;
        }

        if (KPAGEFLAG_THP(cur_page_flags)) {
            vma.usage.thp += pagesz_kb;
        }

        // skip unwanted pages from the count
        if ((cur_page_flags & pgflags_mask_) != pgflags_) continue;

        uint64_t cur_page_counts;
        if (!pinfo.PageMapCount(page_frame, &cur_page_counts)) {
            LOG(ERROR) << "Failed to get page count for " << page_frame << " in process " << pid_;
            swap_offsets_.clear();
            return false;
        }

        // Page was unmapped between the presence check at the beginning of the loop and here.
        if (cur_page_counts == 0) {
            continue;
        }

        bool is_dirty = !!(cur_page_flags & (1 << KPF_DIRTY));
        bool is_private = (cur_page_counts == 1);
        // Working set
        if (get_wss) {
            bool is_referenced = use_pageidle ? (pinfo.IsPageIdle(page_frame) == 1)
                                              : !!(cur_page_flags & (1 << KPF_REFERENCED));
            if (!is_referenced) {
                continue;
            }
            // This effectively makes vss = rss when the working set is requested.
            // The libpagemap implementation returns vss > rss for
            // working set, which doesn't make sense.
            vma.usage.vss += pagesz_kb;
        }

        vma.usage.rss += pagesz_kb;
        // USS counts only pages mapped by exactly one process.
        vma.usage.uss += is_private ? pagesz_kb : 0;
        // PSS divides each page's cost evenly among all processes mapping it.
        vma.usage.pss += pagesz_kb / cur_page_counts;
        if (is_private) {
            vma.usage.private_dirty += is_dirty ? pagesz_kb : 0;
            vma.usage.private_clean += is_dirty ? 0 : pagesz_kb;
        } else {
            vma.usage.shared_dirty += is_dirty ? pagesz_kb : 0;
            vma.usage.shared_clean += is_dirty ? 0 : pagesz_kb;
        }
    }
    // Non-WSS mode: VSS is simply the full size of the VMA, resident or not.
    if (!get_wss) {
        vma.usage.vss += pagesz_kb * num_pages;
    }
    return true;
}

为什么要使用条件cur_page_cache_index == num_in_page_cache

这个条件的含义是:只有当cur_page_cache_index等于num_in_page_cache时,才会执行缓存页映射数据的逻辑。这样的设计有几个考虑因素:

  1. Efficient Cache Usage:

    • 此条件表示缓存中的页表项已被全部消费完(注意:是"缓存耗尽",而不是"缓存已满")。只有当上一批读入的数据都处理完毕时,才会从 pagemap 文件中读取下一批;只要缓存中还有未处理的表项,就无需再次读取文件,这样可以节省系统资源。
  2. Minimize File Reads:

    • 避免在每次迭代时都从文件中读取数据,因为文件读取可能是相对较慢的操作。将数据缓存到本地,可以减少对文件的频繁访问,提高性能。
  3. Optimize I/O Operations:

    • pread64函数用于从文件中读取数据,而文件I/O是相对耗时的操作。只在必要时读取数据,可以降低系统的开销。
  4. Control Cache Size:

    • 通过限制缓存的大小(使用kMaxPages),可以防止过度使用内存。这是有意义的,因为在某些情况下,VMA范围可能很大,但您不想一次性将所有数据都加载到内存中。

这个条件的目的是确保在需要时才从文件中读取新的页映射数据,以最大程度地减少文件I/O操作,提高代码的效率。

参考

Android 内存优化方法 | Light.Moon

文章来源:https://blog.csdn.net/root45/article/details/134965980
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。