// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
    BINDER_DEBUG_USER_ERROR             = 1U << 0,
    BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
    BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
    BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
           uint, 0644);

#define binder_alloc_debug(mask, x...) \
    do { \
        if (binder_alloc_debug_mask & mask) \
            pr_info_ratelimited(x); \
    } while (0)
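
/*
 * Illustrative usage of the macro above (an editorial sketch, not part
 * of the original file). The debug_mask module parameter gates which
 * message classes reach the ratelimited printk:
 *
 *     binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 *                        "%d: mmap handled\n", alloc->pid);
 *
 * With the default mask (BINDER_DEBUG_USER_ERROR only) the line above
 * is suppressed; since the parameter is 0644 it can be raised at
 * runtime, e.g. via /sys/module/<module>/parameters/debug_mask.
 */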

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
    return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
    return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                       struct binder_buffer *buffer)
{
    if (list_is_last(&buffer->entry, &alloc->buffers))
        return alloc->buffer + alloc->buffer_size - buffer->user_data;
    return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
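
/*
 * Worked example for binder_alloc_buffer_size() (illustrative, not in
 * the original source): buffer sizes are implicit in address ordering.
 * If a 4096-byte mapping holds two buffers whose user_data start at
 * offsets 0 and 64, the helper reports 64 for the first buffer and
 * 4096 - 64 for the second: each buffer extends to the start of its
 * successor, and the last one extends to the end of the mapping.
 */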

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                      struct binder_buffer *new_buffer)
{
    struct rb_node **p = &alloc->free_buffers.rb_node;
    struct rb_node *parent = NULL;
    struct binder_buffer *buffer;
    size_t buffer_size;
    size_t new_buffer_size;

    BUG_ON(!new_buffer->free);

    new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: add free buffer, size %zd, at %pK\n",
              alloc->pid, new_buffer_size, new_buffer);

    while (*p) {
        parent = *p;
        buffer = rb_entry(parent, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        if (new_buffer_size < buffer_size)
            p = &parent->rb_left;
        else
            p = &parent->rb_right;
    }
    rb_link_node(&new_buffer->rb_node, parent, p);
    rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
        struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
    struct rb_node **p = &alloc->allocated_buffers.rb_node;
    struct rb_node *parent = NULL;
    struct binder_buffer *buffer;

    BUG_ON(new_buffer->free);

    while (*p) {
        parent = *p;
        buffer = rb_entry(parent, struct binder_buffer, rb_node);
        BUG_ON(buffer->free);

        if (new_buffer->user_data < buffer->user_data)
            p = &parent->rb_left;
        else if (new_buffer->user_data > buffer->user_data)
            p = &parent->rb_right;
        else
            BUG();
    }
    rb_link_node(&new_buffer->rb_node, parent, p);
    rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
        struct binder_alloc *alloc,
        uintptr_t user_ptr)
{
    struct rb_node *n = alloc->allocated_buffers.rb_node;
    struct binder_buffer *buffer;
    void __user *uptr;

    uptr = (void __user *)user_ptr;

    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        BUG_ON(buffer->free);

        if (uptr < buffer->user_data)
            n = n->rb_left;
        else if (uptr > buffer->user_data)
            n = n->rb_right;
        else {
            /*
             * Guard against user threads attempting to
             * free the buffer when in use by kernel or
             * after it's already been freed.
             */
            if (!buffer->allow_user_free)
                return ERR_PTR(-EPERM);
            buffer->allow_user_free = 0;
            return buffer;
        }
    }
    return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:  binder_alloc for this proc
 * @user_ptr:   User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Searches the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return:  Pointer to the matching buffer, NULL if no buffer matches, or
 *          ERR_PTR(-EPERM) if the buffer is not currently allowed to be freed
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                           uintptr_t user_ptr)
{
    struct binder_buffer *buffer;

    mutex_lock(&alloc->mutex);
    buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
    mutex_unlock(&alloc->mutex);
    return buffer;
}
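
/*
 * Illustrative caller sketch (not part of this file), assuming a
 * user-supplied pointer from a BC_FREE_BUFFER command; the helper name
 * is hypothetical:
 */
#if 0
static void example_handle_free_buffer(struct binder_alloc *alloc,
                       uintptr_t data_ptr)
{
    struct binder_buffer *buffer;

    buffer = binder_alloc_prepare_to_free(alloc, data_ptr);
    if (IS_ERR_OR_NULL(buffer)) {
        /*
         * NULL: no buffer matches; ERR_PTR(-EPERM): double free or
         * the buffer is still in use by the kernel.
         */
        return;
    }
    binder_alloc_free_buf(alloc, buffer);
}
#endif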

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                    void __user *start, void __user *end)
{
    void __user *page_addr;
    unsigned long user_page_addr;
    struct binder_lru_page *page;
    struct vm_area_struct *vma = NULL;
    struct mm_struct *mm = NULL;
    bool need_mm = false;

    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: %s pages %pK-%pK\n", alloc->pid,
             allocate ? "allocate" : "free", start, end);

    if (end <= start)
        return 0;

    trace_binder_update_page_range(alloc, allocate, start, end);

    if (allocate == 0)
        goto free_range;

    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
        if (!page->page_ptr) {
            need_mm = true;
            break;
        }
    }

    if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
        mm = alloc->vma_vm_mm;

    if (mm) {
        mmap_read_lock(mm);
        vma = vma_lookup(mm, alloc->vma_addr);
    }

    if (!vma && need_mm) {
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                   alloc->pid);
        goto err_no_vma;
    }

    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        int ret;
        bool on_lru;
        size_t index;

        index = (page_addr - alloc->buffer) / PAGE_SIZE;
        page = &alloc->pages[index];

        if (page->page_ptr) {
            trace_binder_alloc_lru_start(alloc, index);

            on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
            WARN_ON(!on_lru);

            trace_binder_alloc_lru_end(alloc, index);
            continue;
        }

        if (WARN_ON(!vma))
            goto err_page_ptr_cleared;

        trace_binder_alloc_page_start(alloc, index);
        page->page_ptr = alloc_page(GFP_KERNEL |
                        __GFP_HIGHMEM |
                        __GFP_ZERO);
        if (!page->page_ptr) {
            pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                alloc->pid, page_addr);
            goto err_alloc_page_failed;
        }
        page->alloc = alloc;
        INIT_LIST_HEAD(&page->lru);

        user_page_addr = (uintptr_t)page_addr;
        ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
        if (ret) {
            pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                   alloc->pid, user_page_addr);
            goto err_vm_insert_page_failed;
        }

        if (index + 1 > alloc->pages_high)
            alloc->pages_high = index + 1;

        trace_binder_alloc_page_end(alloc, index);
    }
    if (mm) {
        mmap_read_unlock(mm);
        mmput(mm);
    }
    return 0;

free_range:
    for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
        bool ret;
        size_t index;

        index = (page_addr - alloc->buffer) / PAGE_SIZE;
        page = &alloc->pages[index];

        trace_binder_free_lru_start(alloc, index);

        ret = list_lru_add(&binder_alloc_lru, &page->lru);
        WARN_ON(!ret);

        trace_binder_free_lru_end(alloc, index);
        if (page_addr == start)
            break;
        continue;

err_vm_insert_page_failed:
        __free_page(page->page_ptr);
        page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
        if (page_addr == start)
            break;
    }
err_no_vma:
    if (mm) {
        mmap_read_unlock(mm);
        mmput(mm);
    }
    return vma ? -ENOMEM : -ESRCH;
}
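
/*
 * Illustrative note (not in the original): the helper above is called
 * with allocate=1 to back a buffer's page range and allocate=0 to
 * return those pages, e.g.:
 *
 *     binder_update_page_range(alloc, 1, page_start, page_end);
 *     ...
 *     binder_update_page_range(alloc, 0, page_start, page_end);
 *
 * Freed pages are not released immediately: they are parked on
 * binder_alloc_lru and reclaimed later, by binder_alloc_free_page()
 * under memory pressure or when the mapping is torn down.
 */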


static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
        struct vm_area_struct *vma)
{
    unsigned long vm_start = 0;

    /*
     * Allow clearing the vma while holding just the read lock, so that
     * munmap can downgrade the write lock before freeing and closing
     * the file via binder_alloc_vma_close().
     */
    if (vma) {
        vm_start = vma->vm_start;
        mmap_assert_write_locked(alloc->vma_vm_mm);
    } else {
        mmap_assert_locked(alloc->vma_vm_mm);
    }

    alloc->vma_addr = vm_start;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
        struct binder_alloc *alloc)
{
    struct vm_area_struct *vma = NULL;

    if (alloc->vma_addr)
        vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);

    return vma;
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
    /*
     * Find the amount and size of buffers allocated by the current caller;
     * The idea is that once we cross the threshold, whoever is responsible
     * for the low async space is likely to try to send another async txn,
     * and at some point we'll catch them in the act. This is more efficient
     * than keeping a map per pid.
     */
    struct rb_node *n;
    struct binder_buffer *buffer;
    size_t total_alloc_size = 0;
    size_t num_buffers = 0;

    for (n = rb_first(&alloc->allocated_buffers); n != NULL;
         n = rb_next(n)) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        if (buffer->pid != pid)
            continue;
        if (!buffer->async_transaction)
            continue;
        total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
            + sizeof(struct binder_buffer);
        num_buffers++;
    }

    /*
     * Warn if this pid has more than 50 transactions, or more than 50% of
     * async space (which is 25% of total buffer size). Oneway spam is only
     * detected when the threshold is exceeded.
     */
    if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                 "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
                  alloc->pid, pid, num_buffers, total_alloc_size);
        if (!alloc->oneway_spam_detected) {
            alloc->oneway_spam_detected = true;
            return true;
        }
    }
    return false;
}
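
/*
 * Worked example (editorial, not in the original): with the maximum
 * 4 MiB mapping, free_async_space starts at 2 MiB (see
 * binder_alloc_mmap_handler()). The allocator only calls the check
 * above once free_async_space drops below buffer_size / 10 (~410 KiB),
 * and a pid is then flagged once it holds more than 50 async buffers
 * or more than buffer_size / 4 = 1 MiB, i.e. half of the original
 * async space.
 */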

static struct binder_buffer *binder_alloc_new_buf_locked(
                struct binder_alloc *alloc,
                size_t data_size,
                size_t offsets_size,
                size_t extra_buffers_size,
                int is_async,
                int pid)
{
    struct rb_node *n = alloc->free_buffers.rb_node;
    struct binder_buffer *buffer;
    size_t buffer_size;
    struct rb_node *best_fit = NULL;
    void __user *has_page_addr;
    void __user *end_page_addr;
    size_t size, data_offsets_size;
    int ret;

    mmap_read_lock(alloc->vma_vm_mm);
    if (!binder_alloc_get_vma(alloc)) {
        mmap_read_unlock(alloc->vma_vm_mm);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                   "%d: binder_alloc_buf, no vma\n",
                   alloc->pid);
        return ERR_PTR(-ESRCH);
    }
    mmap_read_unlock(alloc->vma_vm_mm);

    data_offsets_size = ALIGN(data_size, sizeof(void *)) +
        ALIGN(offsets_size, sizeof(void *));

    if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                "%d: got transaction with invalid size %zd-%zd\n",
                alloc->pid, data_size, offsets_size);
        return ERR_PTR(-EINVAL);
    }
    size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
    if (size < data_offsets_size || size < extra_buffers_size) {
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                "%d: got transaction with invalid extra_buffers_size %zd\n",
                alloc->pid, extra_buffers_size);
        return ERR_PTR(-EINVAL);
    }
    if (is_async &&
        alloc->free_async_space < size + sizeof(struct binder_buffer)) {
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "%d: binder_alloc_buf size %zd failed, no async space left\n",
                  alloc->pid, size);
        return ERR_PTR(-ENOSPC);
    }

    /* Pad 0-size buffers so they get assigned unique addresses */
    size = max(size, sizeof(void *));

    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);
        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        if (size < buffer_size) {
            best_fit = n;
            n = n->rb_left;
        } else if (size > buffer_size)
            n = n->rb_right;
        else {
            best_fit = n;
            break;
        }
    }
    if (best_fit == NULL) {
        size_t allocated_buffers = 0;
        size_t largest_alloc_size = 0;
        size_t total_alloc_size = 0;
        size_t free_buffers = 0;
        size_t largest_free_size = 0;
        size_t total_free_size = 0;

        for (n = rb_first(&alloc->allocated_buffers); n != NULL;
             n = rb_next(n)) {
            buffer = rb_entry(n, struct binder_buffer, rb_node);
            buffer_size = binder_alloc_buffer_size(alloc, buffer);
            allocated_buffers++;
            total_alloc_size += buffer_size;
            if (buffer_size > largest_alloc_size)
                largest_alloc_size = buffer_size;
        }
        for (n = rb_first(&alloc->free_buffers); n != NULL;
             n = rb_next(n)) {
            buffer = rb_entry(n, struct binder_buffer, rb_node);
            buffer_size = binder_alloc_buffer_size(alloc, buffer);
            free_buffers++;
            total_free_size += buffer_size;
            if (buffer_size > largest_free_size)
                largest_free_size = buffer_size;
        }
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                   alloc->pid, size);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                   total_alloc_size, allocated_buffers,
                   largest_alloc_size, total_free_size,
                   free_buffers, largest_free_size);
        return ERR_PTR(-ENOSPC);
    }
    if (n == NULL) {
        buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
        buffer_size = binder_alloc_buffer_size(alloc, buffer);
    }

    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
              alloc->pid, size, buffer, buffer_size);

    has_page_addr = (void __user *)
        (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
    WARN_ON(n && buffer_size != size);
    end_page_addr =
        (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
    if (end_page_addr > has_page_addr)
        end_page_addr = has_page_addr;
    ret = binder_update_page_range(alloc, 1, (void __user *)
        PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
    if (ret)
        return ERR_PTR(ret);

    if (buffer_size != size) {
        struct binder_buffer *new_buffer;

        new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!new_buffer) {
            pr_err("%s: %d failed to alloc new buffer struct\n",
                   __func__, alloc->pid);
            goto err_alloc_buf_struct_failed;
        }
        new_buffer->user_data = (u8 __user *)buffer->user_data + size;
        list_add(&new_buffer->entry, &buffer->entry);
        new_buffer->free = 1;
        binder_insert_free_buffer(alloc, new_buffer);
    }

    rb_erase(best_fit, &alloc->free_buffers);
    buffer->free = 0;
    buffer->allow_user_free = 0;
    binder_insert_allocated_buffer_locked(alloc, buffer);
    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: binder_alloc_buf size %zd got %pK\n",
              alloc->pid, size, buffer);
    buffer->data_size = data_size;
    buffer->offsets_size = offsets_size;
    buffer->async_transaction = is_async;
    buffer->extra_buffers_size = extra_buffers_size;
    buffer->pid = pid;
    buffer->oneway_spam_suspect = false;
    if (is_async) {
        alloc->free_async_space -= size + sizeof(struct binder_buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                 "%d: binder_alloc_buf size %zd async free %zd\n",
                  alloc->pid, size, alloc->free_async_space);
        if (alloc->free_async_space < alloc->buffer_size / 10) {
            /*
             * Start detecting spammers once we have less than 20%
             * of async space left (which is less than 10% of total
             * buffer size).
             */
            buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
        } else {
            alloc->oneway_spam_detected = false;
        }
    }
    return buffer;

err_alloc_buf_struct_failed:
    binder_update_page_range(alloc, 0, (void __user *)
                 PAGE_ALIGN((uintptr_t)buffer->user_data),
                 end_page_addr);
    return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 * @pid:                pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return:  The allocated buffer or an ERR_PTR() on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                       size_t data_size,
                       size_t offsets_size,
                       size_t extra_buffers_size,
                       int is_async,
                       int pid)
{
    struct binder_buffer *buffer;

    mutex_lock(&alloc->mutex);
    buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                         extra_buffers_size, is_async, pid);
    mutex_unlock(&alloc->mutex);
    return buffer;
}
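
/*
 * Illustrative caller sketch (the real caller is binder_transaction()
 * in binder.c; the helper name and sizes here are hypothetical):
 */
#if 0
static struct binder_buffer *example_alloc(struct binder_alloc *alloc)
{
    struct binder_buffer *buffer;

    /* 128 bytes of data, two 8-byte offsets, no extra objects: */
    buffer = binder_alloc_new_buf(alloc, 128, 2 * sizeof(u64), 0,
                      0 /* is_async */, current->tgid);
    if (IS_ERR(buffer))
        return NULL;    /* e.g. -EINVAL, -ENOSPC or -ESRCH */
    return buffer;
}
#endif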

static void __user *buffer_start_page(struct binder_buffer *buffer)
{
    return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
    return (void __user *)
        (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                      struct binder_buffer *buffer)
{
    struct binder_buffer *prev, *next = NULL;
    bool to_free = true;

    BUG_ON(alloc->buffers.next == &buffer->entry);
    prev = binder_buffer_prev(buffer);
    BUG_ON(!prev->free);
    if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
        to_free = false;
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                   "%d: merge free, buffer %pK share page with %pK\n",
                   alloc->pid, buffer->user_data,
                   prev->user_data);
    }

    if (!list_is_last(&buffer->entry, &alloc->buffers)) {
        next = binder_buffer_next(buffer);
        if (buffer_start_page(next) == buffer_start_page(buffer)) {
            to_free = false;
            binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                       "%d: merge free, buffer %pK share page with %pK\n",
                       alloc->pid,
                       buffer->user_data,
                       next->user_data);
        }
    }

    if (PAGE_ALIGNED(buffer->user_data)) {
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                   "%d: merge free, buffer start %pK is page aligned\n",
                   alloc->pid, buffer->user_data);
        to_free = false;
    }

    if (to_free) {
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                   alloc->pid, buffer->user_data,
                   prev->user_data,
                   next ? next->user_data : NULL);
        binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                     buffer_start_page(buffer) + PAGE_SIZE);
    }
    list_del(&buffer->entry);
    kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
                   struct binder_buffer *buffer)
{
    size_t size, buffer_size;

    buffer_size = binder_alloc_buffer_size(alloc, buffer);

    size = ALIGN(buffer->data_size, sizeof(void *)) +
        ALIGN(buffer->offsets_size, sizeof(void *)) +
        ALIGN(buffer->extra_buffers_size, sizeof(void *));

    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
              alloc->pid, buffer, size, buffer_size);

    BUG_ON(buffer->free);
    BUG_ON(size > buffer_size);
    BUG_ON(buffer->transaction != NULL);
    BUG_ON(buffer->user_data < alloc->buffer);
    BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

    if (buffer->async_transaction) {
        alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                 "%d: binder_free_buf size %zd async free %zd\n",
                  alloc->pid, size, alloc->free_async_space);
    }

    binder_update_page_range(alloc, 0,
        (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
        (void __user *)(((uintptr_t)
              buffer->user_data + buffer_size) & PAGE_MASK));

    rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
    buffer->free = 1;
    if (!list_is_last(&buffer->entry, &alloc->buffers)) {
        struct binder_buffer *next = binder_buffer_next(buffer);

        if (next->free) {
            rb_erase(&next->rb_node, &alloc->free_buffers);
            binder_delete_free_buffer(alloc, next);
        }
    }
    if (alloc->buffers.next != &buffer->entry) {
        struct binder_buffer *prev = binder_buffer_prev(buffer);

        if (prev->free) {
            binder_delete_free_buffer(alloc, buffer);
            rb_erase(&prev->rb_node, &alloc->free_buffers);
            buffer = prev;
        }
    }
    binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
                   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                struct binder_buffer *buffer)
{
    /*
     * We could eliminate the call to binder_alloc_clear_buf()
     * from binder_alloc_deferred_release() by moving this to
     * binder_alloc_free_buf_locked(). However, that could
     * increase contention for the alloc mutex if clear_on_free
     * is used frequently for large buffers. The mutex is not
     * needed for correctness here.
     */
    if (buffer->clear_on_free) {
        binder_alloc_clear_buf(alloc, buffer);
        buffer->clear_on_free = false;
    }
    mutex_lock(&alloc->mutex);
    binder_free_buf_locked(alloc, buffer);
    mutex_unlock(&alloc->mutex);
}
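
/*
 * Illustrative note (not in the original): callers holding sensitive
 * payloads can set clear_on_free so binder_alloc_clear_buf() zeroes
 * the buffer before it is recycled:
 *
 *     buffer->clear_on_free = true;
 *     ...
 *     binder_alloc_free_buf(alloc, buffer);
 *
 * As the comment above explains, the memset deliberately runs before
 * the alloc mutex is taken, so the lock is not held while clearing
 * large buffers.
 */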

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:  alloc structure for this proc
 * @vma:    vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                  struct vm_area_struct *vma)
{
    int ret;
    const char *failure_string;
    struct binder_buffer *buffer;

    mutex_lock(&binder_alloc_mmap_lock);
    if (alloc->buffer_size) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }
    alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
                   SZ_4M);
    mutex_unlock(&binder_alloc_mmap_lock);

    alloc->buffer = (void __user *)vma->vm_start;

    alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                   sizeof(alloc->pages[0]),
                   GFP_KERNEL);
    if (alloc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }

    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!buffer) {
        ret = -ENOMEM;
        failure_string = "alloc buffer struct";
        goto err_alloc_buf_struct_failed;
    }

    buffer->user_data = alloc->buffer;
    list_add(&buffer->entry, &alloc->buffers);
    buffer->free = 1;
    binder_insert_free_buffer(alloc, buffer);
    alloc->free_async_space = alloc->buffer_size / 2;
    binder_alloc_set_vma(alloc, vma);

    return 0;

err_alloc_buf_struct_failed:
    kfree(alloc->pages);
    alloc->pages = NULL;
err_alloc_pages_failed:
    alloc->buffer = NULL;
    mutex_lock(&binder_alloc_mmap_lock);
    alloc->buffer_size = 0;
err_already_mapped:
    mutex_unlock(&binder_alloc_mmap_lock);
    binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
               "%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end,
               failure_string, ret);
    return ret;
}
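
/*
 * Sketch of how the handler above is typically driven from the
 * driver's ->mmap() callback (simplified and hypothetical; see
 * binder_mmap() in binder.c for the real checks and vm_ops setup):
 */
#if 0
static int example_mmap(struct binder_alloc *alloc,
            struct vm_area_struct *vma)
{
    vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
    vma->vm_flags &= ~VM_MAYWRITE;
    return binder_alloc_mmap_handler(alloc, vma);
}
#endif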


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
    struct rb_node *n;
    int buffers, page_count;
    struct binder_buffer *buffer;

    buffers = 0;
    mutex_lock(&alloc->mutex);
    BUG_ON(alloc->vma_addr &&
           vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));

    while ((n = rb_first(&alloc->allocated_buffers))) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);

        /* Transaction should already have been freed */
        BUG_ON(buffer->transaction);

        if (buffer->clear_on_free) {
            binder_alloc_clear_buf(alloc, buffer);
            buffer->clear_on_free = false;
        }
        binder_free_buf_locked(alloc, buffer);
        buffers++;
    }

    while (!list_empty(&alloc->buffers)) {
        buffer = list_first_entry(&alloc->buffers,
                      struct binder_buffer, entry);
        WARN_ON(!buffer->free);

        list_del(&buffer->entry);
        WARN_ON_ONCE(!list_empty(&alloc->buffers));
        kfree(buffer);
    }

    page_count = 0;
    if (alloc->pages) {
        int i;

        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
            void __user *page_addr;
            bool on_lru;

            if (!alloc->pages[i].page_ptr)
                continue;

            on_lru = list_lru_del(&binder_alloc_lru,
                          &alloc->pages[i].lru);
            page_addr = alloc->buffer + i * PAGE_SIZE;
            binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%s: %d: page %d at %pK %s\n",
                     __func__, alloc->pid, i, page_addr,
                     on_lru ? "on lru" : "active");
            __free_page(alloc->pages[i].page_ptr);
            page_count++;
        }
        kfree(alloc->pages);
    }
    mutex_unlock(&alloc->mutex);
    if (alloc->vma_vm_mm)
        mmdrop(alloc->vma_vm_mm);

    binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
             "%s: %d buffers %d, pages %d\n",
             __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                struct binder_buffer *buffer)
{
    seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
           prefix, buffer->debug_id, buffer->user_data,
           buffer->data_size, buffer->offsets_size,
           buffer->extra_buffers_size,
           buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                  struct binder_alloc *alloc)
{
    struct rb_node *n;

    mutex_lock(&alloc->mutex);
    for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
        print_binder_buffer(m, "  buffer",
                    rb_entry(n, struct binder_buffer, rb_node));
    mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                  struct binder_alloc *alloc)
{
    struct binder_lru_page *page;
    int i;
    int active = 0;
    int lru = 0;
    int free = 0;

    mutex_lock(&alloc->mutex);
    /*
     * Make sure the binder_alloc is fully initialized, otherwise we might
     * read inconsistent state.
     */

    mmap_read_lock(alloc->vma_vm_mm);
    if (binder_alloc_get_vma(alloc) == NULL) {
        mmap_read_unlock(alloc->vma_vm_mm);
        goto uninitialized;
    }

    mmap_read_unlock(alloc->vma_vm_mm);
    for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
        page = &alloc->pages[i];
        if (!page->page_ptr)
            free++;
        else if (list_empty(&page->lru))
            active++;
        else
            lru++;
    }

uninitialized:
    mutex_unlock(&alloc->mutex);
    seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
    seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
    struct rb_node *n;
    int count = 0;

    mutex_lock(&alloc->mutex);
    for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
        count++;
    mutex_unlock(&alloc->mutex);
    return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
    binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance holding the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                       struct list_lru_one *lru,
                       spinlock_t *lock,
                       void *cb_arg)
    __must_hold(lock)
{
    struct mm_struct *mm = NULL;
    struct binder_lru_page *page = container_of(item,
                            struct binder_lru_page,
                            lru);
    struct binder_alloc *alloc;
    uintptr_t page_addr;
    size_t index;
    struct vm_area_struct *vma;

    alloc = page->alloc;
    if (!mutex_trylock(&alloc->mutex))
        goto err_get_alloc_mutex_failed;

    if (!page->page_ptr)
        goto err_page_already_freed;

    index = page - alloc->pages;
    page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

    mm = alloc->vma_vm_mm;
    if (!mmget_not_zero(mm))
        goto err_mmget;
    if (!mmap_read_trylock(mm))
        goto err_mmap_read_lock_failed;
    vma = binder_alloc_get_vma(alloc);

    list_lru_isolate(lru, item);
    spin_unlock(lock);

    if (vma) {
        trace_binder_unmap_user_start(alloc, index);

        zap_page_range(vma, page_addr, PAGE_SIZE);

        trace_binder_unmap_user_end(alloc, index);
    }
    mmap_read_unlock(mm);
    mmput_async(mm);

    trace_binder_unmap_kernel_start(alloc, index);

    __free_page(page->page_ptr);
    page->page_ptr = NULL;

    trace_binder_unmap_kernel_end(alloc, index);

    spin_lock(lock);
    mutex_unlock(&alloc->mutex);
    return LRU_REMOVED_RETRY;

err_mmap_read_lock_failed:
    mmput_async(mm);
err_mmget:
err_page_already_freed:
    mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
    return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
    return list_lru_count(&binder_alloc_lru);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
    return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                NULL, sc->nr_to_scan);
}

static struct shrinker binder_shrinker = {
    .count_objects = binder_shrink_count,
    .scan_objects = binder_shrink_scan,
    .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
    alloc->pid = current->group_leader->pid;
    alloc->vma_vm_mm = current->mm;
    mmgrab(alloc->vma_vm_mm);
    mutex_init(&alloc->mutex);
    INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
    int ret = list_lru_init(&binder_alloc_lru);

    if (ret == 0) {
        ret = register_shrinker(&binder_shrinker, "android-binder");
        if (ret)
            list_lru_destroy(&binder_alloc_lru);
    }
    return ret;
}
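
/*
 * Illustrative only: the function above runs once at module init
 * (from binder_init() in binder.c, roughly like this):
 */
#if 0
static int __init example_init(void)
{
    int ret = binder_alloc_shrinker_init();

    if (ret)
        return ret; /* lru or shrinker registration failed */
    /* ... register device nodes, debugfs entries, etc. ... */
    return 0;
}
#endif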

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
                struct binder_buffer *buffer,
                binder_size_t offset, size_t bytes)
{
    size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

    return buffer_size >= bytes &&
        offset <= buffer_size - bytes &&
        IS_ALIGNED(offset, sizeof(u32)) &&
        !buffer->free &&
        (!buffer->allow_user_free || !buffer->transaction);
}
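
/*
 * Worked example for check_buffer() (editorial): with an effective
 * buffer_size of 256 bytes, offset=248/bytes=8 passes (248 <= 256 - 8,
 * and 248 is 4-byte aligned); offset=252/bytes=8 fails the bounds
 * test, and offset=2 fails IS_ALIGNED(2, 4). The unsigned comparison
 * "offset <= buffer_size - bytes" is only evaluated after
 * "buffer_size >= bytes" holds, so the subtraction cannot wrap.
 */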

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                      struct binder_buffer *buffer,
                      binder_size_t buffer_offset,
                      pgoff_t *pgoffp)
{
    binder_size_t buffer_space_offset = buffer_offset +
        (buffer->user_data - alloc->buffer);
    pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
    size_t index = buffer_space_offset >> PAGE_SHIFT;
    struct binder_lru_page *lru_page;

    lru_page = &alloc->pages[index];
    *pgoffp = pgoff;
    return lru_page->page_ptr;
}
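
/*
 * Worked example (editorial): if buffer->user_data sits 0x1800 bytes
 * into the mapping and buffer_offset is 0x900, buffer_space_offset is
 * 0x2100. With 4 KiB pages that yields index = 2 and pgoff = 0x100,
 * so the caller gets alloc->pages[2].page_ptr and addresses byte
 * 0x100 within that page.
 */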

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
                   struct binder_buffer *buffer)
{
    size_t bytes = binder_alloc_buffer_size(alloc, buffer);
    binder_size_t buffer_offset = 0;

    while (bytes) {
        unsigned long size;
        struct page *page;
        pgoff_t pgoff;

        page = binder_alloc_get_page(alloc, buffer,
                         buffer_offset, &pgoff);
        size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
        memset_page(page, pgoff, 0, size);
        bytes -= size;
        buffer_offset += size;
    }
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
                 struct binder_buffer *buffer,
                 binder_size_t buffer_offset,
                 const void __user *from,
                 size_t bytes)
{
    if (!check_buffer(alloc, buffer, buffer_offset, bytes))
        return bytes;

    while (bytes) {
        unsigned long size;
        unsigned long ret;
        struct page *page;
        pgoff_t pgoff;
        void *kptr;

        page = binder_alloc_get_page(alloc, buffer,
                         buffer_offset, &pgoff);
        size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
        kptr = kmap_local_page(page) + pgoff;
        ret = copy_from_user(kptr, from, size);
        kunmap_local(kptr);
        if (ret)
            return bytes - size + ret;
        bytes -= size;
        from += size;
        buffer_offset += size;
    }
    return 0;
}
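
/*
 * Illustrative caller sketch (hypothetical helper): a short copy is
 * reported as -EFAULT, mirroring how binder.c consumes the
 * "bytes remaining" return convention of the function above:
 */
#if 0
static int example_copy_payload(struct binder_alloc *alloc,
                struct binder_buffer *buffer,
                const void __user *ubuf, size_t len)
{
    if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, ubuf, len))
        return -EFAULT; /* nonzero means bytes were left uncopied */
    return 0;
}
#endif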

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
                       bool to_buffer,
                       struct binder_buffer *buffer,
                       binder_size_t buffer_offset,
                       void *ptr,
                       size_t bytes)
{
    /* All copies must be 32-bit aligned and 32-bit size */
    if (!check_buffer(alloc, buffer, buffer_offset, bytes))
        return -EINVAL;

    while (bytes) {
        unsigned long size;
        struct page *page;
        pgoff_t pgoff;

        page = binder_alloc_get_page(alloc, buffer,
                         buffer_offset, &pgoff);
        size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
        if (to_buffer)
            memcpy_to_page(page, pgoff, ptr, size);
        else
            memcpy_from_page(ptr, page, pgoff, size);
        bytes -= size;
        pgoff = 0;
        ptr = ptr + size;
        buffer_offset += size;
    }
    return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
                struct binder_buffer *buffer,
                binder_size_t buffer_offset,
                void *src,
                size_t bytes)
{
    return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
                       src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
                  void *dest,
                  struct binder_buffer *buffer,
                  binder_size_t buffer_offset,
                  size_t bytes)
{
    return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
                       dest, bytes);
}