// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>
#include <linux/log2.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");

static bool force_bbm;
module_param(force_bbm, bool, 0444);
MODULE_PARM_DESC(force_bbm,
		 "Force Big Block Mode. Default is 0 (auto-selection)");

static unsigned long bbm_block_size;
module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
		 "Big Block size in bytes. Default is 0 (auto-detection).");

static bool bbm_safe_unplug = true;
module_param(bbm_safe_unplug, bool, 0444);
MODULE_PARM_DESC(bbm_safe_unplug,
		 "Use a safe unplug mechanism in BBM, avoiding long/endless loops");

/*
 * virtio-mem currently supports the following modes of operation:
 *
 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
 *   subblock size is determined based on the device block size and the
 *   pageblock size. Subblocks within a Linux memory block might either be
 *   plugged or unplugged. Memory is added to / removed from Linux MM in
 *   Linux memory block granularity.
 *
 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
 *   Memory is added to / removed from Linux MM in Big Block granularity.
 *
 * The mode is determined automatically based on the Linux memory block size
 * and the device block size.
 *
 * User space / core MM (auto onlining) is responsible for onlining added
 * Linux memory blocks - and for selecting a zone. Linux memory blocks are
 * always onlined separately, and all memory within a Linux memory block is
 * onlined to the same zone - virtio-mem relies on this behavior.
 */

/*
 * State of a Linux memory block in SBM.
 */
enum virtio_mem_sbm_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_SBM_MB_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_SBM_MB_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, onlined to a kernel zone. */
	VIRTIO_MEM_SBM_MB_KERNEL,
	/* Partially plugged, fully added to Linux, onlined to a kernel zone. */
	VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
	/* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
	VIRTIO_MEM_SBM_MB_MOVABLE,
	/* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
	VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
	VIRTIO_MEM_SBM_MB_COUNT
};

/*
 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
 */
enum virtio_mem_bbm_bb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_BBM_BB_UNUSED = 0,
	/* Plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_BBM_BB_PLUGGED,
	/* Plugged and added to Linux. */
	VIRTIO_MEM_BBM_BB_ADDED,
	/* All online parts are fake-offline, ready to remove. */
	VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
	VIRTIO_MEM_BBM_BB_COUNT
};
0104
0105 struct virtio_mem {
0106 struct virtio_device *vdev;
0107
0108
0109 bool unplug_all_required;
0110
0111
0112 struct work_struct wq;
0113 atomic_t wq_active;
0114 atomic_t config_changed;
0115
0116
0117 struct virtqueue *vq;
0118
0119
0120 wait_queue_head_t host_resp;
0121
0122
0123 struct virtio_mem_req req;
0124 struct virtio_mem_resp resp;
0125
0126
0127 uint64_t plugged_size;
0128
0129 uint64_t requested_size;
0130
0131
0132 uint64_t device_block_size;
0133
0134 int nid;
0135
0136 uint64_t addr;
0137
0138 uint64_t region_size;
0139
0140
0141 struct resource *parent_resource;
0142
0143
0144
0145
0146 const char *resource_name;
0147
0148 int mgid;

	/*
	 * We don't want to add too much memory if it's not getting onlined,
	 * to avoid running OOM. Besides this threshold, we allow to have at
	 * least two offline blocks at a time (whatever is bigger).
	 */
0155 #define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD (1024 * 1024 * 1024)
0156 atomic64_t offline_size;
0157 uint64_t offline_threshold;

	/* If set, the driver is in SBM, otherwise in BBM. */
0160 bool in_sbm;
0161
0162 union {
0163 struct {
0164
0165 unsigned long first_mb_id;
0166
0167 unsigned long last_usable_mb_id;
0168
0169 unsigned long next_mb_id;
0170
0171
0172 uint64_t sb_size;
0173
0174 uint32_t sbs_per_mb;
0175
0176
0177 unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];

			/*
			 * One byte state per memory block. Allocated via
			 * vmalloc. Resized (alloc+copy+free) on demand.
			 *
			 * With 128 MiB memory blocks, we have states for 512
			 * GiB of memory in one 4 KiB page.
			 */
			uint8_t *mb_states;

			/*
			 * Bitmap: one bit per subblock. Allocated via vmalloc.
			 * Resized (alloc+copy+free) on demand.
			 *
			 * A set bit means the corresponding subblock is
			 * plugged, otherwise unplugged.
			 *
			 * With 4 MiB subblocks, we manage 128 GiB of memory
			 * in one 4 KiB page.
			 */
			unsigned long *sb_states;
0199 } sbm;
0200
0201 struct {
0202
0203 unsigned long first_bb_id;
0204
0205 unsigned long last_usable_bb_id;
0206
0207 unsigned long next_bb_id;
0208
0209
0210 unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
0211
0212
0213 uint8_t *bb_states;
0214
0215
0216 uint64_t bb_size;
0217 } bbm;
0218 };

	/*
	 * Mutex that protects the sbm.mb_count, sbm.mb_states,
	 * sbm.sb_states, bbm.bb_count, and bbm.bb_states.
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 *
	 * In kdump mode, used to serialize requests, last_block_addr and
	 * last_block_plugged.
	 */
0231 struct mutex hotplug_mutex;
0232 bool hotplug_active;
0233
0234
0235 bool broken;
0236
0237
0238 bool in_kdump;
0239
0240
0241 spinlock_t removal_lock;
0242 bool removing;
0243
0244
0245 struct hrtimer retry_timer;
0246 unsigned int retry_timer_ms;
0247 #define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000
0248 #define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000
0249
0250
0251 struct notifier_block memory_notifier;
0252
0253 #ifdef CONFIG_PROC_VMCORE
0254
0255 struct vmcore_cb vmcore_cb;
0256 uint64_t last_block_addr;
0257 bool last_block_plugged;
0258 #endif
0259
0260
0261 struct list_head next;
0262 };
0263
/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
0268 static DEFINE_MUTEX(virtio_mem_mutex);
0269 static LIST_HEAD(virtio_mem_devices);
0270
0271 static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
0272 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
0273 unsigned long nr_pages);
0274 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
0275 unsigned long nr_pages);
0276 static void virtio_mem_retry(struct virtio_mem *vm);
0277 static int virtio_mem_create_resource(struct virtio_mem *vm);
0278 static void virtio_mem_delete_resource(struct virtio_mem *vm);
0279
0280
0281
0282
0283
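/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */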
0284 static int register_virtio_mem_device(struct virtio_mem *vm)
0285 {
0286 int rc = 0;
0287
0288
0289 mutex_lock(&virtio_mem_mutex);
0290 if (list_empty(&virtio_mem_devices))
0291 rc = set_online_page_callback(&virtio_mem_online_page_cb);
0292 if (!rc)
0293 list_add_rcu(&vm->next, &virtio_mem_devices);
0294 mutex_unlock(&virtio_mem_mutex);
0295
0296 return rc;
0297 }
0298
0299
0300
0301
0302
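/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */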
0303 static void unregister_virtio_mem_device(struct virtio_mem *vm)
0304 {
0305
0306 mutex_lock(&virtio_mem_mutex);
0307 list_del_rcu(&vm->next);
0308 if (list_empty(&virtio_mem_devices))
0309 restore_online_page_callback(&virtio_mem_online_page_cb);
0310 mutex_unlock(&virtio_mem_mutex);
0311
0312 synchronize_rcu();
0313 }
0314
0315
0316
0317
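/*
 * Calculate the memory block id of a given address.
 */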
0318 static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
0319 {
0320 return addr / memory_block_size_bytes();
0321 }
0322
0323
0324
0325
0326 static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
0327 {
0328 return mb_id * memory_block_size_bytes();
0329 }
0330
0331
0332
0333
0334 static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
0335 uint64_t addr)
0336 {
0337 return addr / vm->bbm.bb_size;
0338 }
0339
0340
0341
0342
0343 static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
0344 unsigned long bb_id)
0345 {
0346 return bb_id * vm->bbm.bb_size;
0347 }
0348
0349
0350
0351
0352 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
0353 unsigned long addr)
0354 {
0355 const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
0356 const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
0357
0358 return (addr - mb_addr) / vm->sbm.sb_size;
0359 }
0360
0361
0362
0363
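/*
 * Set the state of a big block, taking care of the state counter.
 */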
0364 static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
0365 unsigned long bb_id,
0366 enum virtio_mem_bbm_bb_state state)
0367 {
0368 const unsigned long idx = bb_id - vm->bbm.first_bb_id;
0369 enum virtio_mem_bbm_bb_state old_state;
0370
0371 old_state = vm->bbm.bb_states[idx];
0372 vm->bbm.bb_states[idx] = state;
0373
0374 BUG_ON(vm->bbm.bb_count[old_state] == 0);
0375 vm->bbm.bb_count[old_state]--;
0376 vm->bbm.bb_count[state]++;
0377 }
0378
0379
0380
0381
0382 static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
0383 unsigned long bb_id)
0384 {
0385 return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
0386 }
0387
0388
0389
0390
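/*
 * Prepare the big block state array for the next big block.
 */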
0391 static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
0392 {
0393 unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
0394 unsigned long new_bytes = old_bytes + 1;
0395 int old_pages = PFN_UP(old_bytes);
0396 int new_pages = PFN_UP(new_bytes);
0397 uint8_t *new_array;
0398
0399 if (vm->bbm.bb_states && old_pages == new_pages)
0400 return 0;
0401
0402 new_array = vzalloc(new_pages * PAGE_SIZE);
0403 if (!new_array)
0404 return -ENOMEM;
0405
0406 mutex_lock(&vm->hotplug_mutex);
0407 if (vm->bbm.bb_states)
0408 memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
0409 vfree(vm->bbm.bb_states);
0410 vm->bbm.bb_states = new_array;
0411 mutex_unlock(&vm->hotplug_mutex);
0412
0413 return 0;
0414 }
0415
0416 #define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
0417 for (_bb_id = vm->bbm.first_bb_id; \
0418 _bb_id < vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
0419 _bb_id++) \
0420 if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
0421
0422 #define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
0423 for (_bb_id = vm->bbm.next_bb_id - 1; \
0424 _bb_id >= vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
0425 _bb_id--) \
0426 if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
0427
0428
0429
0430
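/*
 * Set the state of a memory block, taking care of the state counter.
 */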
0431 static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
0432 unsigned long mb_id, uint8_t state)
0433 {
0434 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
0435 uint8_t old_state;
0436
0437 old_state = vm->sbm.mb_states[idx];
0438 vm->sbm.mb_states[idx] = state;
0439
0440 BUG_ON(vm->sbm.mb_count[old_state] == 0);
0441 vm->sbm.mb_count[old_state]--;
0442 vm->sbm.mb_count[state]++;
0443 }
0444
0445
0446
0447
0448 static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
0449 unsigned long mb_id)
0450 {
0451 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
0452
0453 return vm->sbm.mb_states[idx];
0454 }
0455
0456
0457
0458
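/*
 * Prepare the state array for the next memory block.
 */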
0459 static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
0460 {
0461 int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
0462 int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
0463 uint8_t *new_array;
0464
0465 if (vm->sbm.mb_states && old_pages == new_pages)
0466 return 0;
0467
0468 new_array = vzalloc(new_pages * PAGE_SIZE);
0469 if (!new_array)
0470 return -ENOMEM;
0471
0472 mutex_lock(&vm->hotplug_mutex);
0473 if (vm->sbm.mb_states)
0474 memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
0475 vfree(vm->sbm.mb_states);
0476 vm->sbm.mb_states = new_array;
0477 mutex_unlock(&vm->hotplug_mutex);
0478
0479 return 0;
0480 }
0481
0482 #define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
0483 for (_mb_id = _vm->sbm.first_mb_id; \
0484 _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
0485 _mb_id++) \
0486 if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
0487
0488 #define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
0489 for (_mb_id = _vm->sbm.next_mb_id - 1; \
0490 _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
0491 _mb_id--) \
0492 if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
0493
0494
0495
0496
0497
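/*
 * Calculate the bit number in the subblock bitmap for the given subblock
 * inside the given memory block.
 */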
0498 static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
0499 unsigned long mb_id, int sb_id)
0500 {
0501 return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
0502 }
0503
0504
0505
0506
0507
0508
0509 static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
0510 unsigned long mb_id, int sb_id,
0511 int count)
0512 {
0513 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
0514
0515 __bitmap_set(vm->sbm.sb_states, bit, count);
0516 }
0517
0518
0519
0520
0521
0522
0523 static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
0524 unsigned long mb_id, int sb_id,
0525 int count)
0526 {
0527 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
0528
0529 __bitmap_clear(vm->sbm.sb_states, bit, count);
0530 }
0531
0532
0533
0534
0535 static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
0536 unsigned long mb_id, int sb_id,
0537 int count)
0538 {
0539 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
0540
0541 if (count == 1)
0542 return test_bit(bit, vm->sbm.sb_states);
0543
0544
0545 return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
0546 bit + count;
0547 }
0548
0549
0550
0551
0552 static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
0553 unsigned long mb_id, int sb_id,
0554 int count)
0555 {
0556 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
0557
0558
0559 return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
0560 bit + count;
0561 }
0562
0563
0564
0565
0566
0567 static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
0568 unsigned long mb_id)
0569 {
0570 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
0571
0572 return find_next_zero_bit(vm->sbm.sb_states,
0573 bit + vm->sbm.sbs_per_mb, bit) - bit;
0574 }
0575
0576
0577
0578
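/*
 * Prepare the subblock bitmap for the next memory block.
 */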
0579 static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
0580 {
0581 const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
0582 const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
0583 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
0584 int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
0585 int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
0586 unsigned long *new_bitmap, *old_bitmap;
0587
0588 if (vm->sbm.sb_states && old_pages == new_pages)
0589 return 0;
0590
0591 new_bitmap = vzalloc(new_pages * PAGE_SIZE);
0592 if (!new_bitmap)
0593 return -ENOMEM;
0594
0595 mutex_lock(&vm->hotplug_mutex);
0596 if (vm->sbm.sb_states)
0597 memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
0598
0599 old_bitmap = vm->sbm.sb_states;
0600 vm->sbm.sb_states = new_bitmap;
0601 mutex_unlock(&vm->hotplug_mutex);
0602
0603 vfree(old_bitmap);
0604 return 0;
0605 }
0606
0607
0608
0609
0610
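/*
 * Test if we could add memory without creating too much offline memory -
 * to avoid running OOM if memory is getting onlined deferred.
 */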
0611 static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
0612 {
0613 if (WARN_ON_ONCE(size > vm->offline_threshold))
0614 return false;
0615
0616 return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
0617 }
0618
0619
0620
0621
0622
0623
0624
0625
0626
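/*
 * Try adding memory to Linux. Will usually only fail if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */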
0627 static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
0628 uint64_t size)
0629 {
0630 int rc;
0631
0632
0633
0634
0635
0636 if (!vm->resource_name) {
0637 vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
0638 GFP_KERNEL);
0639 if (!vm->resource_name)
0640 return -ENOMEM;
0641 }
0642
0643 dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
0644 addr + size - 1);
0645
0646 atomic64_add(size, &vm->offline_size);
0647 rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
0648 MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
0649 if (rc) {
0650 atomic64_sub(size, &vm->offline_size);
0651 dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
0652
0653
0654
0655
0656 }
0657 return rc;
0658 }
0659
0660
0661
0662
0663 static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
0664 {
0665 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
0666 const uint64_t size = memory_block_size_bytes();
0667
0668 return virtio_mem_add_memory(vm, addr, size);
0669 }
0670
0671
0672
0673
0674 static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
0675 {
0676 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
0677 const uint64_t size = vm->bbm.bb_size;
0678
0679 return virtio_mem_add_memory(vm, addr, size);
0680 }
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
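/*
 * Try removing memory from Linux. Will only fail if memory blocks aren't
 * offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */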
0691 static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
0692 uint64_t size)
0693 {
0694 int rc;
0695
0696 dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
0697 addr + size - 1);
0698 rc = remove_memory(addr, size);
0699 if (!rc) {
0700 atomic64_sub(size, &vm->offline_size);
0701
0702
0703
0704
0705 virtio_mem_retry(vm);
0706 } else {
0707 dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
0708 }
0709 return rc;
0710 }
0711
0712
0713
0714
0715 static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
0716 {
0717 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
0718 const uint64_t size = memory_block_size_bytes();
0719
0720 return virtio_mem_remove_memory(vm, addr, size);
0721 }
0722
0723
0724
0725
0726
0727
0728
0729
0730
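/*
 * Try offlining and removing memory from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */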
0731 static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
0732 uint64_t addr,
0733 uint64_t size)
0734 {
0735 int rc;
0736
0737 dev_dbg(&vm->vdev->dev,
0738 "offlining and removing memory: 0x%llx - 0x%llx\n", addr,
0739 addr + size - 1);
0740
0741 rc = offline_and_remove_memory(addr, size);
0742 if (!rc) {
0743 atomic64_sub(size, &vm->offline_size);
0744
0745
0746
0747
0748 virtio_mem_retry(vm);
0749 } else {
0750 dev_dbg(&vm->vdev->dev,
0751 "offlining and removing memory failed: %d\n", rc);
0752 }
0753 return rc;
0754 }
0755
0756
0757
0758
0759
0760 static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
0761 unsigned long mb_id)
0762 {
0763 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
0764 const uint64_t size = memory_block_size_bytes();
0765
0766 return virtio_mem_offline_and_remove_memory(vm, addr, size);
0767 }
0768
0769
0770
0771
0772
0773 static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
0774 unsigned long bb_id)
0775 {
0776 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
0777 const uint64_t size = vm->bbm.bb_size;
0778
0779 return virtio_mem_offline_and_remove_memory(vm, addr, size);
0780 }
0781
0782
0783
0784
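/*
 * Trigger the workqueue so the device can perform its magic, if we are
 * allowed to do so.
 */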
0785 static void virtio_mem_retry(struct virtio_mem *vm)
0786 {
0787 unsigned long flags;
0788
0789 spin_lock_irqsave(&vm->removal_lock, flags);
0790 if (!vm->removing)
0791 queue_work(system_freezable_wq, &vm->wq);
0792 spin_unlock_irqrestore(&vm->removal_lock, flags);
0793 }
0794
0795 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
0796 {
0797 int node = NUMA_NO_NODE;
0798
0799 #if defined(CONFIG_ACPI_NUMA)
0800 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
0801 node = pxm_to_node(node_id);
0802 #endif
0803 return node;
0804 }
0805
0806
0807
0808
0809
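/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */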
0810 static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
0811 uint64_t size)
0812 {
0813 return start < vm->addr + vm->region_size && vm->addr < start + size;
0814 }
0815
0816
0817
0818
0819
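/*
 * Test if a virtio-mem device contains a given range. Can be called from
 * (notifier) callbacks lockless.
 */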
0820 static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
0821 uint64_t size)
0822 {
0823 return start >= vm->addr && start + size <= vm->addr + vm->region_size;
0824 }
0825
0826 static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
0827 unsigned long mb_id)
0828 {
0829 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
0830 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
0831 case VIRTIO_MEM_SBM_MB_OFFLINE:
0832 return NOTIFY_OK;
0833 default:
0834 break;
0835 }
0836 dev_warn_ratelimited(&vm->vdev->dev,
0837 "memory block onlining denied\n");
0838 return NOTIFY_BAD;
0839 }
0840
0841 static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
0842 unsigned long mb_id)
0843 {
0844 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
0845 case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
0846 case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
0847 virtio_mem_sbm_set_mb_state(vm, mb_id,
0848 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
0849 break;
0850 case VIRTIO_MEM_SBM_MB_KERNEL:
0851 case VIRTIO_MEM_SBM_MB_MOVABLE:
0852 virtio_mem_sbm_set_mb_state(vm, mb_id,
0853 VIRTIO_MEM_SBM_MB_OFFLINE);
0854 break;
0855 default:
0856 BUG();
0857 break;
0858 }
0859 }
0860
0861 static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
0862 unsigned long mb_id,
0863 unsigned long start_pfn)
0864 {
0865 const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
0866 int new_state;
0867
0868 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
0869 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
0870 new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
0871 if (is_movable)
0872 new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
0873 break;
0874 case VIRTIO_MEM_SBM_MB_OFFLINE:
0875 new_state = VIRTIO_MEM_SBM_MB_KERNEL;
0876 if (is_movable)
0877 new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
0878 break;
0879 default:
0880 BUG();
0881 break;
0882 }
0883 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
0884 }
0885
0886 static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
0887 unsigned long mb_id)
0888 {
0889 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
0890 unsigned long pfn;
0891 int sb_id;
0892
0893 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
0894 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
0895 continue;
0896 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
0897 sb_id * vm->sbm.sb_size);
0898 virtio_mem_fake_offline_going_offline(pfn, nr_pages);
0899 }
0900 }
0901
0902 static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
0903 unsigned long mb_id)
0904 {
0905 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
0906 unsigned long pfn;
0907 int sb_id;
0908
0909 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
0910 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
0911 continue;
0912 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
0913 sb_id * vm->sbm.sb_size);
0914 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
0915 }
0916 }
0917
0918 static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
0919 unsigned long bb_id,
0920 unsigned long pfn,
0921 unsigned long nr_pages)
0922 {
0923
0924
0925
0926
0927 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
0928 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
0929 return;
0930 virtio_mem_fake_offline_going_offline(pfn, nr_pages);
0931 }
0932
0933 static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
0934 unsigned long bb_id,
0935 unsigned long pfn,
0936 unsigned long nr_pages)
0937 {
0938 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
0939 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
0940 return;
0941 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
0942 }
0943
0944
0945
0946
0947
0948
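/*
 * Memory hotplug notifier callback: track state transitions of our memory
 * blocks / big blocks and fake-offline any unplugged parts while a block is
 * going offline.
 */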
0949 static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
0950 unsigned long action, void *arg)
0951 {
0952 struct virtio_mem *vm = container_of(nb, struct virtio_mem,
0953 memory_notifier);
0954 struct memory_notify *mhp = arg;
0955 const unsigned long start = PFN_PHYS(mhp->start_pfn);
0956 const unsigned long size = PFN_PHYS(mhp->nr_pages);
0957 int rc = NOTIFY_OK;
0958 unsigned long id;
0959
0960 if (!virtio_mem_overlaps_range(vm, start, size))
0961 return NOTIFY_DONE;
0962
0963 if (vm->in_sbm) {
0964 id = virtio_mem_phys_to_mb_id(start);
0965
0966
0967
0968
0969
0970 if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
0971 !IS_ALIGNED(start, memory_block_size_bytes())))
0972 return NOTIFY_BAD;
0973 } else {
0974 id = virtio_mem_phys_to_bb_id(vm, start);
0975
0976
0977
0978
0979
0980
0981 if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
0982 return NOTIFY_BAD;
0983 }
0984
0985
0986
0987
0988
0989
0990
0991 lockdep_off();
0992
0993 switch (action) {
0994 case MEM_GOING_OFFLINE:
0995 mutex_lock(&vm->hotplug_mutex);
0996 if (vm->removing) {
0997 rc = notifier_from_errno(-EBUSY);
0998 mutex_unlock(&vm->hotplug_mutex);
0999 break;
1000 }
1001 vm->hotplug_active = true;
1002 if (vm->in_sbm)
1003 virtio_mem_sbm_notify_going_offline(vm, id);
1004 else
1005 virtio_mem_bbm_notify_going_offline(vm, id,
1006 mhp->start_pfn,
1007 mhp->nr_pages);
1008 break;
1009 case MEM_GOING_ONLINE:
1010 mutex_lock(&vm->hotplug_mutex);
1011 if (vm->removing) {
1012 rc = notifier_from_errno(-EBUSY);
1013 mutex_unlock(&vm->hotplug_mutex);
1014 break;
1015 }
1016 vm->hotplug_active = true;
1017 if (vm->in_sbm)
1018 rc = virtio_mem_sbm_notify_going_online(vm, id);
1019 break;
1020 case MEM_OFFLINE:
1021 if (vm->in_sbm)
1022 virtio_mem_sbm_notify_offline(vm, id);
1023
1024 atomic64_add(size, &vm->offline_size);
1025
1026
1027
1028
1029 if (!unplug_online)
1030 virtio_mem_retry(vm);
1031
1032 vm->hotplug_active = false;
1033 mutex_unlock(&vm->hotplug_mutex);
1034 break;
1035 case MEM_ONLINE:
1036 if (vm->in_sbm)
1037 virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
1038
1039 atomic64_sub(size, &vm->offline_size);
1040
1041
1042
1043
1044
1045
1046 if (!atomic_read(&vm->wq_active) &&
1047 virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
1048 virtio_mem_retry(vm);
1049
1050 vm->hotplug_active = false;
1051 mutex_unlock(&vm->hotplug_mutex);
1052 break;
1053 case MEM_CANCEL_OFFLINE:
1054 if (!vm->hotplug_active)
1055 break;
1056 if (vm->in_sbm)
1057 virtio_mem_sbm_notify_cancel_offline(vm, id);
1058 else
1059 virtio_mem_bbm_notify_cancel_offline(vm, id,
1060 mhp->start_pfn,
1061 mhp->nr_pages);
1062 vm->hotplug_active = false;
1063 mutex_unlock(&vm->hotplug_mutex);
1064 break;
1065 case MEM_CANCEL_ONLINE:
1066 if (!vm->hotplug_active)
1067 break;
1068 vm->hotplug_active = false;
1069 mutex_unlock(&vm->hotplug_mutex);
1070 break;
1071 default:
1072 break;
1073 }
1074
1075 lockdep_on();
1076
1077 return rc;
1078 }
1079
1080
1081
1082
1083
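/*
 * Mark a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PG_dirty.
 */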
1084 static void virtio_mem_set_fake_offline(unsigned long pfn,
1085 unsigned long nr_pages, bool onlined)
1086 {
1087 page_offline_begin();
1088 for (; nr_pages--; pfn++) {
1089 struct page *page = pfn_to_page(pfn);
1090
1091 __SetPageOffline(page);
1092 if (!onlined) {
1093 SetPageDirty(page);
1094
1095 ClearPageReserved(page);
1096 }
1097 }
1098 page_offline_end();
1099 }
1100
1101
1102
1103
1104
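/*
 * Clear PG_offline from a range of pages. If the pages were never onlined,
 * (via generic_online_page()), clear PG_dirty.
 */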
1105 static void virtio_mem_clear_fake_offline(unsigned long pfn,
1106 unsigned long nr_pages, bool onlined)
1107 {
1108 for (; nr_pages--; pfn++) {
1109 struct page *page = pfn_to_page(pfn);
1110
1111 __ClearPageOffline(page);
1112 if (!onlined)
1113 ClearPageDirty(page);
1114 }
1115 }
1116
1117
1118
1119
1120
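/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */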
1121 static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
1122 {
1123 unsigned long order = MAX_ORDER - 1;
1124 unsigned long i;
1125
1126
1127
1128
1129
1130
1131 while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
1132 order--;
1133
1134 for (i = 0; i < nr_pages; i += 1 << order) {
1135 struct page *page = pfn_to_page(pfn + i);
1136
1137
1138
1139
1140
1141
1142
1143 if (PageDirty(page)) {
1144 virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
1145 generic_online_page(page, order);
1146 } else {
1147 virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
1148 free_contig_range(pfn + i, 1 << order);
1149 adjust_managed_page_count(page, 1 << order);
1150 }
1151 }
1152 }
1153
1154
1155
1156
1157
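/*
 * Try to allocate a range, marking pages fake-offline, effectively
 * fake-offlining them.
 */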
1158 static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
1159 {
1160 const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
1161 int rc, retry_count;
1162
1163
1164
1165
1166
1167
1168
1169
1170 for (retry_count = 0; retry_count < 5; retry_count++) {
1171 rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
1172 GFP_KERNEL);
1173 if (rc == -ENOMEM)
1174
1175 return rc;
1176 else if (rc && !is_movable)
1177 break;
1178 else if (rc)
1179 continue;
1180
1181 virtio_mem_set_fake_offline(pfn, nr_pages, true);
1182 adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1183 return 0;
1184 }
1185
1186 return -EBUSY;
1187 }
1188
1189
1190
1191
1192
1193 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
1194 unsigned long nr_pages)
1195 {
1196 struct page *page;
1197 unsigned long i;
1198
1199
1200
1201
1202
1203
1204 adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
1205
1206 for (i = 0; i < nr_pages; i++) {
1207 page = pfn_to_page(pfn + i);
1208 if (WARN_ON(!page_ref_dec_and_test(page)))
1209 dump_page(page, "fake-offline page referenced");
1210 }
1211 }
1212
1213
1214
1215
1216
1217 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
1218 unsigned long nr_pages)
1219 {
1220 unsigned long i;
1221
1222
1223
1224
1225
1226 adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1227 for (i = 0; i < nr_pages; i++)
1228 page_ref_inc(pfn_to_page(pfn + i));
1229 }
1230
1231 static void virtio_mem_online_page(struct virtio_mem *vm,
1232 struct page *page, unsigned int order)
1233 {
1234 const unsigned long start = page_to_phys(page);
1235 const unsigned long end = start + PFN_PHYS(1 << order);
1236 unsigned long addr, next, id, sb_id, count;
1237 bool do_online;
1238
1239
1240
1241
1242
1243
1244
1245
1246 for (addr = start; addr < end; ) {
1247 next = addr + PFN_PHYS(1 << order);
1248
1249 if (vm->in_sbm) {
1250 id = virtio_mem_phys_to_mb_id(addr);
1251 sb_id = virtio_mem_phys_to_sb_id(vm, addr);
1252 count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
1253
1254 if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
1255
1256 do_online = true;
1257 } else if (count == 1 ||
1258 virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
1259
1260 do_online = false;
1261 } else {
1262
1263
1264
1265
1266
1267 order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
1268 do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
1269 continue;
1270 }
1271 } else {
1272
1273
1274
1275
1276 id = virtio_mem_phys_to_bb_id(vm, addr);
1277 do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
1278 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
1279 }
1280
1281 if (do_online)
1282 generic_online_page(pfn_to_page(PFN_DOWN(addr)), order);
1283 else
1284 virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
1285 false);
1286 addr = next;
1287 }
1288 }
1289
1290 static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
1291 {
1292 const unsigned long addr = page_to_phys(page);
1293 struct virtio_mem *vm;
1294
1295 rcu_read_lock();
1296 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
1297
1298
1299
1300
1301 if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
1302 continue;
1303
1304
1305
1306
1307
1308
1309
1310 rcu_read_unlock();
1311
1312 virtio_mem_online_page(vm, page, order);
1313 return;
1314 }
1315 rcu_read_unlock();
1316
1317
1318 generic_online_page(page, order);
1319 }
1320
1321 static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
1322 const struct virtio_mem_req *req)
1323 {
1324 struct scatterlist *sgs[2], sg_req, sg_resp;
1325 unsigned int len;
1326 int rc;
1327
1328
1329 vm->req = *req;
1330
1331
1332 sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
1333 sgs[0] = &sg_req;
1334
1335
1336 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
1337 sgs[1] = &sg_resp;
1338
1339 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
1340 if (rc < 0)
1341 return rc;
1342
1343 virtqueue_kick(vm->vq);
1344
1345
1346 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
1347
1348 return virtio16_to_cpu(vm->vdev, vm->resp.type);
1349 }
1350
1351 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
1352 uint64_t size)
1353 {
1354 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1355 const struct virtio_mem_req req = {
1356 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
1357 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
1358 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1359 };
1360 int rc = -ENOMEM;
1361
1362 if (atomic_read(&vm->config_changed))
1363 return -EAGAIN;
1364
1365 dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
1366 addr + size - 1);
1367
1368 switch (virtio_mem_send_request(vm, &req)) {
1369 case VIRTIO_MEM_RESP_ACK:
1370 vm->plugged_size += size;
1371 return 0;
1372 case VIRTIO_MEM_RESP_NACK:
1373 rc = -EAGAIN;
1374 break;
1375 case VIRTIO_MEM_RESP_BUSY:
1376 rc = -ETXTBSY;
1377 break;
1378 case VIRTIO_MEM_RESP_ERROR:
1379 rc = -EINVAL;
1380 break;
1381 default:
1382 break;
1383 }
1384
1385 dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
1386 return rc;
1387 }
1388
1389 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
1390 uint64_t size)
1391 {
1392 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1393 const struct virtio_mem_req req = {
1394 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
1395 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
1396 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1397 };
1398 int rc = -ENOMEM;
1399
1400 if (atomic_read(&vm->config_changed))
1401 return -EAGAIN;
1402
1403 dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
1404 addr + size - 1);
1405
1406 switch (virtio_mem_send_request(vm, &req)) {
1407 case VIRTIO_MEM_RESP_ACK:
1408 vm->plugged_size -= size;
1409 return 0;
1410 case VIRTIO_MEM_RESP_BUSY:
1411 rc = -ETXTBSY;
1412 break;
1413 case VIRTIO_MEM_RESP_ERROR:
1414 rc = -EINVAL;
1415 break;
1416 default:
1417 break;
1418 }
1419
1420 dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
1421 return rc;
1422 }
1423
1424 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
1425 {
1426 const struct virtio_mem_req req = {
1427 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
1428 };
1429 int rc = -ENOMEM;
1430
1431 dev_dbg(&vm->vdev->dev, "unplugging all memory");
1432
1433 switch (virtio_mem_send_request(vm, &req)) {
1434 case VIRTIO_MEM_RESP_ACK:
1435 vm->unplug_all_required = false;
1436 vm->plugged_size = 0;
1437
1438 atomic_set(&vm->config_changed, 1);
1439 return 0;
1440 case VIRTIO_MEM_RESP_BUSY:
1441 rc = -ETXTBSY;
1442 break;
1443 default:
1444 break;
1445 }
1446
1447 dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
1448 return rc;
1449 }
1450
1451
1452
1453
1454
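/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */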
1455 static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
1456 int sb_id, int count)
1457 {
1458 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1459 sb_id * vm->sbm.sb_size;
1460 const uint64_t size = count * vm->sbm.sb_size;
1461 int rc;
1462
1463 rc = virtio_mem_send_plug_request(vm, addr, size);
1464 if (!rc)
1465 virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
1466 return rc;
1467 }
1468
1469
1470
1471
1472
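/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */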
1473 static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
1474 int sb_id, int count)
1475 {
1476 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1477 sb_id * vm->sbm.sb_size;
1478 const uint64_t size = count * vm->sbm.sb_size;
1479 int rc;
1480
1481 rc = virtio_mem_send_unplug_request(vm, addr, size);
1482 if (!rc)
1483 virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
1484 return rc;
1485 }
1486
1487
1488
1489
1490
1491
1492 static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
1493 {
1494 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1495 const uint64_t size = vm->bbm.bb_size;
1496
1497 return virtio_mem_send_unplug_request(vm, addr, size);
1498 }
1499
1500
1501
1502
1503
1504
1505 static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
1506 {
1507 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1508 const uint64_t size = vm->bbm.bb_size;
1509
1510 return virtio_mem_send_plug_request(vm, addr, size);
1511 }
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522 static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
1523 unsigned long mb_id, uint64_t *nb_sb)
1524 {
1525 int sb_id, count;
1526 int rc;
1527
1528 sb_id = vm->sbm.sbs_per_mb - 1;
1529 while (*nb_sb) {
1530
1531 while (sb_id >= 0 &&
1532 virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
1533 sb_id--;
1534 if (sb_id < 0)
1535 break;
1536
1537 count = 1;
1538 while (count < *nb_sb && sb_id > 0 &&
1539 virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
1540 count++;
1541 sb_id--;
1542 }
1543
1544 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1545 if (rc)
1546 return rc;
1547 *nb_sb -= count;
1548 sb_id--;
1549 }
1550
1551 return 0;
1552 }
1553
1554
1555
1556
1557
1558
1559
1560
1561 static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
1562 {
1563 uint64_t nb_sb = vm->sbm.sbs_per_mb;
1564
1565 return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
1566 }
1567
1568
1569
1570
1571 static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
1572 unsigned long *mb_id)
1573 {
1574 int rc;
1575
1576 if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
1577 return -ENOSPC;
1578
1579
1580 rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
1581 if (rc)
1582 return rc;
1583
1584
1585 rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
1586 if (rc)
1587 return rc;
1588
1589 vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
1590 *mb_id = vm->sbm.next_mb_id++;
1591 return 0;
1592 }
1593
1594
1595
1596
1597
1598
1599
1600 static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
1601 unsigned long mb_id, uint64_t *nb_sb)
1602 {
1603 const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
1604 int rc;
1605
1606 if (WARN_ON_ONCE(!count))
1607 return -EINVAL;
1608
1609
1610
1611
1612
1613 rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
1614 if (rc)
1615 return rc;
1616
1617
1618
1619
1620
1621 if (count == vm->sbm.sbs_per_mb)
1622 virtio_mem_sbm_set_mb_state(vm, mb_id,
1623 VIRTIO_MEM_SBM_MB_OFFLINE);
1624 else
1625 virtio_mem_sbm_set_mb_state(vm, mb_id,
1626 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1627
1628
1629 rc = virtio_mem_sbm_add_mb(vm, mb_id);
1630 if (rc) {
1631 int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
1632
1633 if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
1634 new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
1635 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
1636 return rc;
1637 }
1638
1639 *nb_sb -= count;
1640 return 0;
1641 }
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651 static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
1652 unsigned long mb_id, uint64_t *nb_sb)
1653 {
1654 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1655 unsigned long pfn, nr_pages;
1656 int sb_id, count;
1657 int rc;
1658
1659 if (WARN_ON_ONCE(!*nb_sb))
1660 return -EINVAL;
1661
1662 while (*nb_sb) {
1663 sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
1664 if (sb_id >= vm->sbm.sbs_per_mb)
1665 break;
1666 count = 1;
1667 while (count < *nb_sb &&
1668 sb_id + count < vm->sbm.sbs_per_mb &&
1669 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
1670 count++;
1671
1672 rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
1673 if (rc)
1674 return rc;
1675 *nb_sb -= count;
1676 if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
1677 continue;
1678
1679
1680 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1681 sb_id * vm->sbm.sb_size);
1682 nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
1683 virtio_mem_fake_online(pfn, nr_pages);
1684 }
1685
1686 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1687 virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
1688
1689 return 0;
1690 }
1691
1692 static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1693 {
1694 const int mb_states[] = {
1695 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
1696 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
1697 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
1698 };
1699 uint64_t nb_sb = diff / vm->sbm.sb_size;
1700 unsigned long mb_id;
1701 int rc, i;
1702
1703 if (!nb_sb)
1704 return 0;
1705
1706
1707 mutex_lock(&vm->hotplug_mutex);
1708
1709 for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
1710 virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
1711 rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
1712 if (rc || !nb_sb)
1713 goto out_unlock;
1714 cond_resched();
1715 }
1716 }
1717
1718
1719
1720
1721
1722 mutex_unlock(&vm->hotplug_mutex);
1723
1724
1725 virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
1726 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1727 return -ENOSPC;
1728
1729 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1730 if (rc || !nb_sb)
1731 return rc;
1732 cond_resched();
1733 }
1734
1735
1736 while (nb_sb) {
1737 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1738 return -ENOSPC;
1739
1740 rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
1741 if (rc)
1742 return rc;
1743 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1744 if (rc)
1745 return rc;
1746 cond_resched();
1747 }
1748
1749 return 0;
1750 out_unlock:
1751 mutex_unlock(&vm->hotplug_mutex);
1752 return rc;
1753 }
1754
1755
1756
1757
1758
1759
1760 static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
1761 unsigned long bb_id)
1762 {
1763 int rc;
1764
1765 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
1766 VIRTIO_MEM_BBM_BB_UNUSED))
1767 return -EINVAL;
1768
1769 rc = virtio_mem_bbm_plug_bb(vm, bb_id);
1770 if (rc)
1771 return rc;
1772 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
1773
1774 rc = virtio_mem_bbm_add_bb(vm, bb_id);
1775 if (rc) {
1776 if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
1777 virtio_mem_bbm_set_bb_state(vm, bb_id,
1778 VIRTIO_MEM_BBM_BB_UNUSED);
1779 else
1780
1781 virtio_mem_bbm_set_bb_state(vm, bb_id,
1782 VIRTIO_MEM_BBM_BB_PLUGGED);
1783 return rc;
1784 }
1785 return 0;
1786 }
1787
1788
1789
1790
1791 static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
1792 unsigned long *bb_id)
1793 {
1794 int rc;
1795
1796 if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
1797 return -ENOSPC;
1798
1799
1800 rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
1801 if (rc)
1802 return rc;
1803
1804 vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
1805 *bb_id = vm->bbm.next_bb_id;
1806 vm->bbm.next_bb_id++;
1807 return 0;
1808 }
1809
1810 static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1811 {
1812 uint64_t nb_bb = diff / vm->bbm.bb_size;
1813 unsigned long bb_id;
1814 int rc;
1815
1816 if (!nb_bb)
1817 return 0;
1818
1819
1820 virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
1821 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1822 return -ENOSPC;
1823
1824 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1825 if (!rc)
1826 nb_bb--;
1827 if (rc || !nb_bb)
1828 return rc;
1829 cond_resched();
1830 }
1831
1832
1833 while (nb_bb) {
1834 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1835 return -ENOSPC;
1836
1837 rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
1838 if (rc)
1839 return rc;
1840 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1841 if (!rc)
1842 nb_bb--;
1843 if (rc)
1844 return rc;
1845 cond_resched();
1846 }
1847
1848 return 0;
1849 }
1850
1851
1852
1853
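/*
 * Try to plug the requested amount of memory.
 */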
1854 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1855 {
1856 if (vm->in_sbm)
1857 return virtio_mem_sbm_plug_request(vm, diff);
1858 return virtio_mem_bbm_plug_request(vm, diff);
1859 }
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870 static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
1871 unsigned long mb_id,
1872 uint64_t *nb_sb)
1873 {
1874 int rc;
1875
1876 rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
1877
1878
1879 if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1880 virtio_mem_sbm_set_mb_state(vm, mb_id,
1881 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1882 if (rc)
1883 return rc;
1884
1885 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1886
1887
1888
1889
1890
1891
1892 virtio_mem_sbm_set_mb_state(vm, mb_id,
1893 VIRTIO_MEM_SBM_MB_UNUSED);
1894
1895 mutex_unlock(&vm->hotplug_mutex);
1896 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
1897 BUG_ON(rc);
1898 mutex_lock(&vm->hotplug_mutex);
1899 }
1900 return 0;
1901 }
1902
1903
1904
1905
1906
1907
1908 static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
1909 unsigned long mb_id, int sb_id,
1910 int count)
1911 {
1912 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
1913 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1914 unsigned long start_pfn;
1915 int rc;
1916
1917 start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1918 sb_id * vm->sbm.sb_size);
1919
1920 rc = virtio_mem_fake_offline(start_pfn, nr_pages);
1921 if (rc)
1922 return rc;
1923
1924
1925 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1926 if (rc) {
1927
1928 virtio_mem_fake_online(start_pfn, nr_pages);
1929 return rc;
1930 }
1931
1932 switch (old_state) {
1933 case VIRTIO_MEM_SBM_MB_KERNEL:
1934 virtio_mem_sbm_set_mb_state(vm, mb_id,
1935 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
1936 break;
1937 case VIRTIO_MEM_SBM_MB_MOVABLE:
1938 virtio_mem_sbm_set_mb_state(vm, mb_id,
1939 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
1940 break;
1941 }
1942
1943 return 0;
1944 }
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956 static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
1957 unsigned long mb_id,
1958 uint64_t *nb_sb)
1959 {
1960 int rc, sb_id;
1961
1962
1963 if (*nb_sb >= vm->sbm.sbs_per_mb &&
1964 virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1965 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
1966 vm->sbm.sbs_per_mb);
1967 if (!rc) {
1968 *nb_sb -= vm->sbm.sbs_per_mb;
1969 goto unplugged;
1970 } else if (rc != -EBUSY)
1971 return rc;
1972 }
1973
1974
1975 for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
1976
1977 while (sb_id >= 0 &&
1978 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
1979 sb_id--;
1980 if (sb_id < 0)
1981 break;
1982
1983 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
1984 if (rc == -EBUSY)
1985 continue;
1986 else if (rc)
1987 return rc;
1988 *nb_sb -= 1;
1989 }
1990
1991 unplugged:
1992
1993
1994
1995
1996
1997 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1998 mutex_unlock(&vm->hotplug_mutex);
1999 rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
2000 mutex_lock(&vm->hotplug_mutex);
2001 if (!rc)
2002 virtio_mem_sbm_set_mb_state(vm, mb_id,
2003 VIRTIO_MEM_SBM_MB_UNUSED);
2004 }
2005
2006 return 0;
2007 }
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021 static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
2022 unsigned long mb_id,
2023 uint64_t *nb_sb)
2024 {
2025 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
2026
2027 switch (old_state) {
2028 case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
2029 case VIRTIO_MEM_SBM_MB_KERNEL:
2030 case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
2031 case VIRTIO_MEM_SBM_MB_MOVABLE:
2032 return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
2033 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
2034 case VIRTIO_MEM_SBM_MB_OFFLINE:
2035 return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
2036 }
2037 return -EINVAL;
2038 }
2039
2040 static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2041 {
2042 const int mb_states[] = {
2043 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
2044 VIRTIO_MEM_SBM_MB_OFFLINE,
2045 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
2046 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
2047 VIRTIO_MEM_SBM_MB_MOVABLE,
2048 VIRTIO_MEM_SBM_MB_KERNEL,
2049 };
2050 uint64_t nb_sb = diff / vm->sbm.sb_size;
2051 unsigned long mb_id;
2052 int rc, i;
2053
2054 if (!nb_sb)
2055 return 0;
2056
2057
2058
2059
2060
2061
2062 mutex_lock(&vm->hotplug_mutex);
2063
2064
2065
2066
2067
2068
2069
2070
2071 for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
2072 virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
2073 rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
2074 if (rc || !nb_sb)
2075 goto out_unlock;
2076 mutex_unlock(&vm->hotplug_mutex);
2077 cond_resched();
2078 mutex_lock(&vm->hotplug_mutex);
2079 }
2080 if (!unplug_online && i == 1) {
2081 mutex_unlock(&vm->hotplug_mutex);
2082 return 0;
2083 }
2084 }
2085
2086 mutex_unlock(&vm->hotplug_mutex);
2087 return nb_sb ? -EBUSY : 0;
2088 out_unlock:
2089 mutex_unlock(&vm->hotplug_mutex);
2090 return rc;
2091 }
2092
2093
2094
2095
2096
2097
2098
2099
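/*
 * Try to offline and remove a big block from Linux and unplug it. Will fail
 * with -EBUSY if some memory is busy.
 */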
2100 static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
2101 unsigned long bb_id)
2102 {
2103 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2104 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2105 unsigned long end_pfn = start_pfn + nr_pages;
2106 unsigned long pfn;
2107 struct page *page;
2108 int rc;
2109
2110 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
2111 VIRTIO_MEM_BBM_BB_ADDED))
2112 return -EINVAL;
2113
2114 if (bbm_safe_unplug) {
2115
2116
2117
2118
2119
2120
2121 mutex_lock(&vm->hotplug_mutex);
2122 virtio_mem_bbm_set_bb_state(vm, bb_id,
2123 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
2124
2125 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2126 page = pfn_to_online_page(pfn);
2127 if (!page)
2128 continue;
2129
2130 rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
2131 if (rc) {
2132 end_pfn = pfn;
2133 goto rollback_safe_unplug;
2134 }
2135 }
2136 mutex_unlock(&vm->hotplug_mutex);
2137 }
2138
2139 rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
2140 if (rc) {
2141 if (bbm_safe_unplug) {
2142 mutex_lock(&vm->hotplug_mutex);
2143 goto rollback_safe_unplug;
2144 }
2145 return rc;
2146 }
2147
2148 rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
2149 if (rc)
2150 virtio_mem_bbm_set_bb_state(vm, bb_id,
2151 VIRTIO_MEM_BBM_BB_PLUGGED);
2152 else
2153 virtio_mem_bbm_set_bb_state(vm, bb_id,
2154 VIRTIO_MEM_BBM_BB_UNUSED);
2155 return rc;
2156
2157 rollback_safe_unplug:
2158 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2159 page = pfn_to_online_page(pfn);
2160 if (!page)
2161 continue;
2162 virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
2163 }
2164 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
2165 mutex_unlock(&vm->hotplug_mutex);
2166 return rc;
2167 }
2168
2169
2170
2171
2172 static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
2173 unsigned long bb_id)
2174 {
2175 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2176 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2177 unsigned long pfn;
2178
2179 for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2180 pfn += PAGES_PER_SECTION) {
2181 if (pfn_to_online_page(pfn))
2182 return false;
2183 }
2184
2185 return true;
2186 }
2187
2188
2189
2190
2191 static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
2192 unsigned long bb_id)
2193 {
2194 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2195 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2196 struct page *page;
2197 unsigned long pfn;
2198
2199 for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2200 pfn += PAGES_PER_SECTION) {
2201 page = pfn_to_online_page(pfn);
2202 if (!page)
2203 continue;
2204 if (page_zonenum(page) != ZONE_MOVABLE)
2205 return false;
2206 }
2207
2208 return true;
2209 }
2210
2211 static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2212 {
2213 uint64_t nb_bb = diff / vm->bbm.bb_size;
2214 uint64_t bb_id;
2215 int rc, i;
2216
2217 if (!nb_bb)
2218 return 0;
2219
2220
2221
2222
2223
2224 for (i = 0; i < 3; i++) {
2225 virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
2226 cond_resched();
2227
2228
2229
2230
2231
2232 if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
2233 continue;
2234 if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
2235 continue;
2236 rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
2237 if (rc == -EBUSY)
2238 continue;
2239 if (!rc)
2240 nb_bb--;
2241 if (rc || !nb_bb)
2242 return rc;
2243 }
2244 if (i == 0 && !unplug_online)
2245 return 0;
2246 }
2247
2248 return nb_bb ? -EBUSY : 0;
2249 }
2250
2251
2252
2253
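/*
 * Try to unplug the requested amount of memory.
 */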
2254 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
2255 {
2256 if (vm->in_sbm)
2257 return virtio_mem_sbm_unplug_request(vm, diff);
2258 return virtio_mem_bbm_unplug_request(vm, diff);
2259 }
2260
2261
2262
2263
2264
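/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */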
2265 static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
2266 {
2267 unsigned long id;
2268 int rc;
2269
2270 if (!vm->in_sbm) {
2271 virtio_mem_bbm_for_each_bb(vm, id,
2272 VIRTIO_MEM_BBM_BB_PLUGGED) {
2273 rc = virtio_mem_bbm_unplug_bb(vm, id);
2274 if (rc)
2275 return rc;
2276 virtio_mem_bbm_set_bb_state(vm, id,
2277 VIRTIO_MEM_BBM_BB_UNUSED);
2278 }
2279 return 0;
2280 }
2281
2282 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
2283 rc = virtio_mem_sbm_unplug_mb(vm, id);
2284 if (rc)
2285 return rc;
2286 virtio_mem_sbm_set_mb_state(vm, id,
2287 VIRTIO_MEM_SBM_MB_UNUSED);
2288 }
2289
2290 return 0;
2291 }
2292
2293
2294
2295
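/*
 * Update all parts of the config that could have changed.
 */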
2296 static void virtio_mem_refresh_config(struct virtio_mem *vm)
2297 {
2298 const struct range pluggable_range = mhp_get_pluggable_range(true);
2299 uint64_t new_plugged_size, usable_region_size, end_addr;
2300
2301
2302 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2303 &new_plugged_size);
2304 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
2305 vm->plugged_size = new_plugged_size;
2306
2307
2308 virtio_cread_le(vm->vdev, struct virtio_mem_config,
2309 usable_region_size, &usable_region_size);
2310 end_addr = min(vm->addr + usable_region_size - 1,
2311 pluggable_range.end);
2312
2313 if (vm->in_sbm) {
2314 vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
2315 if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
2316 vm->sbm.last_usable_mb_id--;
2317 } else {
2318 vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
2319 end_addr);
2320 if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
2321 vm->bbm.last_usable_bb_id--;
2322 }
2323
2324
2325
2326
2327
2328
2329
2330
2331 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
2332 &vm->requested_size);
2333
2334 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
2335 dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
2336 }
2337
2338
2339
2340
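/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */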
2341 static void virtio_mem_run_wq(struct work_struct *work)
2342 {
2343 struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
2344 uint64_t diff;
2345 int rc;
2346
2347 if (unlikely(vm->in_kdump)) {
2348 dev_warn_once(&vm->vdev->dev,
2349 "unexpected workqueue run in kdump kernel\n");
2350 return;
2351 }
2352
2353 hrtimer_cancel(&vm->retry_timer);
2354
2355 if (vm->broken)
2356 return;
2357
2358 atomic_set(&vm->wq_active, 1);
2359 retry:
2360 rc = 0;
2361
2362
2363 if (unlikely(vm->unplug_all_required))
2364 rc = virtio_mem_send_unplug_all_request(vm);
2365
2366 if (atomic_read(&vm->config_changed)) {
2367 atomic_set(&vm->config_changed, 0);
2368 virtio_mem_refresh_config(vm);
2369 }
2370
2371
2372 if (!rc)
2373 rc = virtio_mem_unplug_pending_mb(vm);
2374
2375 if (!rc && vm->requested_size != vm->plugged_size) {
2376 if (vm->requested_size > vm->plugged_size) {
2377 diff = vm->requested_size - vm->plugged_size;
2378 rc = virtio_mem_plug_request(vm, diff);
2379 } else {
2380 diff = vm->plugged_size - vm->requested_size;
2381 rc = virtio_mem_unplug_request(vm, diff);
2382 }
2383 }
2384
2385 switch (rc) {
2386 case 0:
2387 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
2388 break;
2389 case -ENOSPC:
2390
2391
2392
2393
2394 break;
2395 case -ETXTBSY:
2396
2397
2398
2399
2400 case -EBUSY:
2401
2402
2403
2404
2405 case -ENOMEM:
2406
2407 hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
2408 HRTIMER_MODE_REL);
2409 break;
2410 case -EAGAIN:
2411
2412 goto retry;
2413 default:
2414
2415 dev_err(&vm->vdev->dev,
2416 "unknown error, marking device broken: %d\n", rc);
2417 vm->broken = true;
2418 }
2419
2420 atomic_set(&vm->wq_active, 0);
2421 }
2422
2423 static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
2424 {
2425 struct virtio_mem *vm = container_of(timer, struct virtio_mem,
2426 retry_timer);
2427
2428 virtio_mem_retry(vm);
2429 vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
2430 VIRTIO_MEM_RETRY_TIMER_MAX_MS);
2431 return HRTIMER_NORESTART;
2432 }
2433
2434 static void virtio_mem_handle_response(struct virtqueue *vq)
2435 {
2436 struct virtio_mem *vm = vq->vdev->priv;
2437
2438 wake_up(&vm->host_resp);
2439 }
2440
2441 static int virtio_mem_init_vq(struct virtio_mem *vm)
2442 {
2443 struct virtqueue *vq;
2444
2445 vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
2446 "guest-request");
2447 if (IS_ERR(vq))
2448 return PTR_ERR(vq);
2449 vm->vq = vq;
2450
2451 return 0;
2452 }
2453
2454 static int virtio_mem_init_hotplug(struct virtio_mem *vm)
2455 {
2456 const struct range pluggable_range = mhp_get_pluggable_range(true);
2457 uint64_t unit_pages, sb_size, addr;
2458 int rc;
2459
2460
2461 if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
2462 dev_warn(&vm->vdev->dev,
2463 "The alignment of the physical start address can make some memory unusable.\n");
2464 if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
2465 dev_warn(&vm->vdev->dev,
2466 "The alignment of the physical end address can make some memory unusable.\n");
2467 if (vm->addr < pluggable_range.start ||
2468 vm->addr + vm->region_size - 1 > pluggable_range.end)
2469 dev_warn(&vm->vdev->dev,
2470 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
2471
2472
2473 vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
2474 VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
2475
2476
2477
2478
2479
2480 sb_size = PAGE_SIZE * pageblock_nr_pages;
2481 sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
2482
2483 if (sb_size < memory_block_size_bytes() && !force_bbm) {
2484
2485 vm->in_sbm = true;
2486 vm->sbm.sb_size = sb_size;
2487 vm->sbm.sbs_per_mb = memory_block_size_bytes() /
2488 vm->sbm.sb_size;
2489
2490
2491 addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2492 memory_block_size_bytes() - 1;
2493 vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
2494 vm->sbm.next_mb_id = vm->sbm.first_mb_id;
2495 } else {
2496
2497 vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
2498 memory_block_size_bytes());
2499
2500 if (bbm_block_size) {
2501 if (!is_power_of_2(bbm_block_size)) {
2502 dev_warn(&vm->vdev->dev,
2503 "bbm_block_size is not a power of 2");
2504 } else if (bbm_block_size < vm->bbm.bb_size) {
2505 dev_warn(&vm->vdev->dev,
2506 "bbm_block_size is too small");
2507 } else {
2508 vm->bbm.bb_size = bbm_block_size;
2509 }
2510 }
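/*
 * Hypothetical example: loading the module with bbm_block_size=0x80000000
 * would select 2 GiB big blocks, provided that value is a power of two and
 * not smaller than the minimum bb_size computed above.
 */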
2511
/* Round up to the first aligned big block at or after the pluggable start. */
2513 addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2514 vm->bbm.bb_size - 1;
2515 vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
2516 vm->bbm.next_bb_id = vm->bbm.first_bb_id;
2517
/* The offline threshold must cover at least two big blocks. */
2519 vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
2520 vm->offline_threshold);
2521 }
2522
2523 dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
2524 memory_block_size_bytes());
2525 if (vm->in_sbm)
2526 dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
2527 (unsigned long long)vm->sbm.sb_size);
2528 else
2529 dev_info(&vm->vdev->dev, "big block size: 0x%llx",
2530 (unsigned long long)vm->bbm.bb_size);
2531
/* Create the parent resource spanning the whole device-managed region. */
2533 rc = virtio_mem_create_resource(vm);
2534 if (rc)
2535 return rc;
2536
/* Use a single dynamic memory group to cover the whole device. */
2538 if (vm->in_sbm)
2539 unit_pages = PHYS_PFN(memory_block_size_bytes());
2540 else
2541 unit_pages = PHYS_PFN(vm->bbm.bb_size);
2542 rc = memory_group_register_dynamic(vm->nid, unit_pages);
2543 if (rc < 0)
2544 goto out_del_resource;
2545 vm->mgid = rc;
2546
/*
 * The device might have memory plugged already (e.g., after a driver
 * reload): all of it has to be unplugged before new plug/unplug requests
 * can be processed.
 */
2552 if (vm->plugged_size) {
2553 vm->unplug_all_required = true;
2554 dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
2555 }
2556
2557
2558 vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
2559 rc = register_memory_notifier(&vm->memory_notifier);
2560 if (rc)
2561 goto out_unreg_group;
2562 rc = register_virtio_mem_device(vm);
2563 if (rc)
2564 goto out_unreg_mem;
2565
2566 return 0;
2567 out_unreg_mem:
2568 unregister_memory_notifier(&vm->memory_notifier);
2569 out_unreg_group:
2570 memory_group_unregister(vm->mgid);
2571 out_del_resource:
2572 virtio_mem_delete_resource(vm);
2573 return rc;
2574 }
2575
2576 #ifdef CONFIG_PROC_VMCORE
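/*
 * kdump support: in a kdump kernel this driver does not plug or unplug any
 * memory. It only answers, per PFN, whether the crashed kernel had the
 * corresponding device block plugged, by sending VIRTIO_MEM_REQ_STATE
 * requests and caching the result for the device block queried last. This
 * lets /proc/vmcore avoid reading unplugged (inaccessible) ranges.
 */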
2577 static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
2578 uint64_t size)
2579 {
2580 const uint64_t nb_vm_blocks = size / vm->device_block_size;
2581 const struct virtio_mem_req req = {
2582 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
2583 .u.state.addr = cpu_to_virtio64(vm->vdev, addr),
2584 .u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
2585 };
2586 int rc = -ENOMEM;
2587
2588 dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
2589 addr + size - 1);
2590
2591 switch (virtio_mem_send_request(vm, &req)) {
2592 case VIRTIO_MEM_RESP_ACK:
2593 return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
2594 case VIRTIO_MEM_RESP_ERROR:
2595 rc = -EINVAL;
2596 break;
2597 default:
2598 break;
2599 }
2600
2601 dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
2602 return rc;
2603 }
2604
2605 static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
2606 unsigned long pfn)
2607 {
2608 struct virtio_mem *vm = container_of(cb, struct virtio_mem,
2609 vmcore_cb);
2610 uint64_t addr = PFN_PHYS(pfn);
2611 bool is_ram;
2612 int rc;
2613
2614 if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
2615 return true;
2616 if (!vm->plugged_size)
2617 return false;
2618
/*
 * Serialize device requests and protect the cached state of the device
 * block queried last.
 */
2623 mutex_lock(&vm->hotplug_mutex);
2624
2625 addr = ALIGN_DOWN(addr, vm->device_block_size);
2626 if (addr != vm->last_block_addr) {
2627 rc = virtio_mem_send_state_request(vm, addr,
2628 vm->device_block_size);
/* On any error (or a not-plugged state), treat the block as not RAM. */
2630 if (rc == VIRTIO_MEM_STATE_PLUGGED)
2631 vm->last_block_plugged = true;
2632 else
2633 vm->last_block_plugged = false;
2634 vm->last_block_addr = addr;
2635 }
2636
2637 is_ram = vm->last_block_plugged;
2638 mutex_unlock(&vm->hotplug_mutex);
2639 return is_ram;
2640 }
2641 #endif
2642
2643 static int virtio_mem_init_kdump(struct virtio_mem *vm)
2644 {
2645 #ifdef CONFIG_PROC_VMCORE
2646 dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
2647 vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
2648 register_vmcore_cb(&vm->vmcore_cb);
2649 return 0;
2650 #else
2651 dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
2652 return -EBUSY;
2653 #endif
2654 }
2655
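/*
 * Read the (fixed) device configuration and initialize the device either
 * for kdump mode (vmcore callbacks only) or for ordinary memory hotplug.
 */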
2656 static int virtio_mem_init(struct virtio_mem *vm)
2657 {
2658 uint16_t node_id;
2659
2660 if (!vm->vdev->config->get) {
2661 dev_err(&vm->vdev->dev, "config access disabled\n");
2662 return -EINVAL;
2663 }
2664
/* Fetch all properties that cannot change across the device lifetime. */
2666 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2667 &vm->plugged_size);
2668 virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
2669 &vm->device_block_size);
2670 virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
2671 &node_id);
2672 vm->nid = virtio_mem_translate_node_id(vm, node_id);
2673 virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
2674 virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
2675 &vm->region_size);
2676
/* Fall back to deriving the node from the physical start address. */
2678 if (vm->nid == NUMA_NO_NODE)
2679 vm->nid = memory_add_physaddr_to_nid(vm->addr);
2680
2681 dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
2682 dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
2683 dev_info(&vm->vdev->dev, "device block size: 0x%llx",
2684 (unsigned long long)vm->device_block_size);
2685 if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
2686 dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
2687
/*
 * In a kdump kernel, never plug/unplug memory; only provide the callbacks
 * required to query plugged memory ranges for vmcore.
 */
2692 if (vm->in_kdump)
2693 return virtio_mem_init_kdump(vm);
2694 return virtio_mem_init_hotplug(vm);
2695 }
2696
2697 static int virtio_mem_create_resource(struct virtio_mem *vm)
2698 {
/*
 * Duplicate the device name: if memory remains added to Linux after a
 * forced driver unload, the resource (and its name) has to stay around.
 */
2703 const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
2704
2705 if (!name)
2706 return -ENOMEM;
2707
/* Reserve the region exclusively, disallowing mappings via /dev/mem. */
2709 vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
2710 name, IORESOURCE_SYSTEM_RAM |
2711 IORESOURCE_EXCLUSIVE);
2712 if (!vm->parent_resource) {
2713 kfree(name);
2714 dev_warn(&vm->vdev->dev, "could not reserve device region\n");
2715 dev_info(&vm->vdev->dev,
2716 "reloading the driver is not supported\n");
2717 return -EBUSY;
2718 }
2719
/* The region itself is not busy; resources for actually added memory will be. */
2721 vm->parent_resource->flags &= ~IORESOURCE_BUSY;
2722 return 0;
2723 }
2724
2725 static void virtio_mem_delete_resource(struct virtio_mem *vm)
2726 {
2727 const char *name;
2728
2729 if (!vm->parent_resource)
2730 return;
2731
2732 name = vm->parent_resource->name;
2733 release_resource(vm->parent_resource);
2734 kfree(vm->parent_resource);
2735 kfree(name);
2736 vm->parent_resource = NULL;
2737 }
2738
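/*
 * Returning 1 from the walk_iomem_res_desc() callback stops the walk, so
 * virtio_mem_has_memory_added() reports whether any busy System RAM
 * resource still exists within the device-managed region - i.e., whether
 * memory we added is still present in Linux.
 */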
2739 static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
2740 {
2741 return 1;
2742 }
2743
2744 static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
2745 {
2746 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
2747
2748 return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
2749 vm->addr + vm->region_size, NULL,
2750 virtio_mem_range_has_system_ram) == 1;
2751 }
2752
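/*
 * Device probe: allocate and initialize the virtio_mem instance, set up the
 * virtqueue, read the config (hotplug or kdump mode) and, outside of kdump,
 * kick the workqueue once to process the initial requested_size. The
 * BUILD_BUG_ONs pin the on-the-wire sizes of the request (24 bytes) and
 * response (10 bytes) structures.
 */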
2753 static int virtio_mem_probe(struct virtio_device *vdev)
2754 {
2755 struct virtio_mem *vm;
2756 int rc;
2757
2758 BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
2759 BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
2760
2761 vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
2762 if (!vm)
2763 return -ENOMEM;
2764
2765 init_waitqueue_head(&vm->host_resp);
2766 vm->vdev = vdev;
2767 INIT_WORK(&vm->wq, virtio_mem_run_wq);
2768 mutex_init(&vm->hotplug_mutex);
2769 INIT_LIST_HEAD(&vm->next);
2770 spin_lock_init(&vm->removal_lock);
2771 hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2772 vm->retry_timer.function = virtio_mem_timer_expired;
2773 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
2774 vm->in_kdump = is_kdump_kernel();
2775
/* Set up the request/response virtqueue. */
2777 rc = virtio_mem_init_vq(vm);
2778 if (rc)
2779 goto out_free_vm;
2780
/* Read the device config and initialize hotplug or kdump mode. */
2782 rc = virtio_mem_init(vm);
2783 if (rc)
2784 goto out_del_vq;
2785
2786 virtio_device_ready(vdev);
2787
/* Force an initial config read so the current requested_size gets processed. */
2789 if (!vm->in_kdump) {
2790 atomic_set(&vm->config_changed, 1);
2791 queue_work(system_freezable_wq, &vm->wq);
2792 }
2793
2794 return 0;
2795 out_del_vq:
2796 vdev->config->del_vqs(vdev);
2797 out_free_vm:
2798 kfree(vm);
2799 vdev->priv = NULL;
2800
2801 return rc;
2802 }
2803
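/*
 * Teardown for hotplug mode: stop all asynchronous work, remove memory
 * blocks we can safely drop, unregister callbacks, and only release the
 * parent resource and memory group if no device memory is still added to
 * Linux.
 */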
2804 static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
2805 {
2806 unsigned long mb_id;
2807 int rc;
2808
/*
 * Make sure the workqueue won't be triggered anymore and that no more
 * memory gets added to Linux in the meantime (e.g., via online_pages()).
 */
2813 mutex_lock(&vm->hotplug_mutex);
2814 spin_lock_irq(&vm->removal_lock);
2815 vm->removing = true;
2816 spin_unlock_irq(&vm->removal_lock);
2817 mutex_unlock(&vm->hotplug_mutex);
2818
/* Wait until the workqueue and the retry timer stopped. */
2820 cancel_work_sync(&vm->wq);
2821 hrtimer_cancel(&vm->retry_timer);
2822
2823 if (vm->in_sbm) {
/*
 * Remove any offline, partially plugged memory blocks from Linux and mark
 * them unused; they must not get onlined once we are gone.
 */
2828 virtio_mem_sbm_for_each_mb(vm, mb_id,
2829 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
2830 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
2831 BUG_ON(rc);
2832 virtio_mem_sbm_set_mb_state(vm, mb_id,
2833 VIRTIO_MEM_SBM_MB_UNUSED);
2834 }
2835
2836
2837
2838
2839
2840 }
2841
/* Unregister our callbacks. */
2843 unregister_virtio_mem_device(vm);
2844 unregister_memory_notifier(&vm->memory_notifier);
2845
/*
 * We cannot reliably remove all memory we added to Linux. If some of it is
 * still added, the parent resource and memory group have to stay; warn
 * about it.
 */
2851 if (virtio_mem_has_memory_added(vm)) {
2852 dev_warn(&vm->vdev->dev,
2853 "device still has system memory added\n");
2854 } else {
2855 virtio_mem_delete_resource(vm);
2856 kfree_const(vm->resource_name);
2857 memory_group_unregister(vm->mgid);
2858 }
2859
/* Free the tracking data; no locking needed anymore. */
2861 if (vm->in_sbm) {
2862 vfree(vm->sbm.mb_states);
2863 vfree(vm->sbm.sb_states);
2864 } else {
2865 vfree(vm->bbm.bb_states);
2866 }
2867 }
2868
2869 static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
2870 {
2871 #ifdef CONFIG_PROC_VMCORE
2872 unregister_vmcore_cb(&vm->vmcore_cb);
2873 #endif
2874 }
2875
2876 static void virtio_mem_remove(struct virtio_device *vdev)
2877 {
2878 struct virtio_mem *vm = vdev->priv;
2879
2880 if (vm->in_kdump)
2881 virtio_mem_deinit_kdump(vm);
2882 else
2883 virtio_mem_deinit_hotplug(vm);
2884
2885
2886 virtio_reset_device(vdev);
2887 vdev->config->del_vqs(vdev);
2888
2889 kfree(vm);
2890 vdev->priv = NULL;
2891 }
2892
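/*
 * Config change (e.g., a new requested_size): note it and re-trigger the
 * workqueue, which will re-read the config and plug/unplug accordingly.
 */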
2893 static void virtio_mem_config_changed(struct virtio_device *vdev)
2894 {
2895 struct virtio_mem *vm = vdev->priv;
2896
2897 if (unlikely(vm->in_kdump))
2898 return;
2899
2900 atomic_set(&vm->config_changed, 1);
2901 virtio_mem_retry(vm);
2902 }
2903
2904 #ifdef CONFIG_PM_SLEEP
2905 static int virtio_mem_freeze(struct virtio_device *vdev)
2906 {
/*
 * There is no way to re-plug the previously plugged memory on restore, so
 * save/restore (hibernation) is not supported.
 */
2911 dev_err(&vdev->dev, "save/restore not supported.\n");
2912 return -EPERM;
2913 }
2914
2915 static int virtio_mem_restore(struct virtio_device *vdev)
2916 {
2917 return -EPERM;
2918 }
2919 #endif
2920
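/*
 * Feature bits we accept: VIRTIO_MEM_F_ACPI_PXM only when ACPI NUMA support
 * is available (the proximity domain gets translated to a Linux node id),
 * and VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, indicating that unplugged memory
 * must not be accessed by the guest.
 */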
2921 static unsigned int virtio_mem_features[] = {
2922 #if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
2923 VIRTIO_MEM_F_ACPI_PXM,
2924 #endif
2925 VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
2926 };
2927
2928 static const struct virtio_device_id virtio_mem_id_table[] = {
2929 { VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
2930 { 0 },
2931 };
2932
2933 static struct virtio_driver virtio_mem_driver = {
2934 .feature_table = virtio_mem_features,
2935 .feature_table_size = ARRAY_SIZE(virtio_mem_features),
2936 .driver.name = KBUILD_MODNAME,
2937 .driver.owner = THIS_MODULE,
2938 .id_table = virtio_mem_id_table,
2939 .probe = virtio_mem_probe,
2940 .remove = virtio_mem_remove,
2941 .config_changed = virtio_mem_config_changed,
2942 #ifdef CONFIG_PM_SLEEP
2943 .freeze = virtio_mem_freeze,
2944 .restore = virtio_mem_restore,
2945 #endif
2946 };
2947
2948 module_virtio_driver(virtio_mem_driver);
2949 MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
2950 MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
2951 MODULE_DESCRIPTION("Virtio-mem driver");
2952 MODULE_LICENSE("GPL");